diff --git a/hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/HCatSemanticAnalyzer.java b/hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/HCatSemanticAnalyzer.java index 8105e8ba54..919ad7c3d2 100644 --- a/hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/HCatSemanticAnalyzer.java +++ b/hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/HCatSemanticAnalyzer.java @@ -348,8 +348,8 @@ protected void authorizeDDLWork(HiveSemanticAnalyzerHookContext cntxt, Hive hive AlterTableDesc alterTable = work.getAlterTblDesc(); if (alterTable != null) { - Table table = hive.getTable(SessionState.get().getCurrentDatabase(), - Utilities.getDbTableName(alterTable.getOldName())[1], false); + final String tableName = Utilities.getTableName(alterTable.getOldName()).getTable(); + Table table = hive.getTable(SessionState.get().getCurrentDatabase(), tableName, false); Partition part = null; if (alterTable.getPartSpec() != null) { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/Driver.java b/ql/src/java/org/apache/hadoop/hive/ql/Driver.java index cac14a6ab8..00ef210170 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/Driver.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/Driver.java @@ -46,6 +46,7 @@ import org.apache.hadoop.conf.Configurable; import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.hive.common.JavaUtils; +import org.apache.hadoop.hive.common.TableName; import org.apache.hadoop.hive.common.ValidTxnList; import org.apache.hadoop.hive.common.ValidTxnWriteIdList; import org.apache.hadoop.hive.common.ValidWriteIdList; @@ -1641,8 +1642,8 @@ private void acquireLocks() throws CommandProcessorResponse { fsd1.getDirName().compareTo(fsd2.getDirName())); for (FileSinkDesc desc : acidSinks) { TableDesc tableInfo = desc.getTableInfo(); - long writeId = queryTxnMgr.getTableWriteId(Utilities.getDatabaseName(tableInfo.getTableName()), - Utilities.getTableName(tableInfo.getTableName())); + final TableName tn = Utilities.getTableName(tableInfo.getTableName()); + long writeId = queryTxnMgr.getTableWriteId(tn.getDb(), tn.getTable()); desc.setTableWriteId(writeId); /** @@ -1671,9 +1672,8 @@ private void acquireLocks() throws CommandProcessorResponse { DDLDescWithWriteId acidDdlDesc = plan.getAcidDdlDesc(); boolean hasAcidDdl = acidDdlDesc != null && acidDdlDesc.mayNeedWriteId(); if (hasAcidDdl) { - String fqTableName = acidDdlDesc.getFullTableName(); - long writeId = queryTxnMgr.getTableWriteId( - Utilities.getDatabaseName(fqTableName), Utilities.getTableName(fqTableName)); + final TableName tn = Utilities.getTableName(acidDdlDesc.getFullTableName()); + long writeId = queryTxnMgr.getTableWriteId(tn.getDb(), tn.getTable()); acidDdlDesc.setWriteId(writeId); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/cache/results/QueryResultsCache.java b/ql/src/java/org/apache/hadoop/hive/ql/cache/results/QueryResultsCache.java index a51b7e750b..a1280e5824 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/cache/results/QueryResultsCache.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/cache/results/QueryResultsCache.java @@ -51,6 +51,7 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.hadoop.hive.common.TableName; import org.apache.hadoop.hive.common.metrics.common.Metrics; import org.apache.hadoop.hive.common.metrics.common.MetricsConstant; import org.apache.hadoop.hive.common.metrics.common.MetricsFactory; @@ 
-651,7 +652,7 @@ public void notifyTableChanged(String dbName, String tableName, long updateTime) List entriesToInvalidate = null; rwLock.writeLock().lock(); try { - String key = (dbName.toLowerCase() + "." + tableName.toLowerCase()); + String key = TableName.getDbTable(dbName,tableName).toLowerCase(); Set entriesForTable = tableToEntryMap.get(key); if (entriesForTable != null) { // Possible concurrent modification issues if we try to remove cache entries while diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/ColumnStatsUpdateTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/ColumnStatsUpdateTask.java index cf00d7b820..3f89aa0df7 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/ColumnStatsUpdateTask.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/ColumnStatsUpdateTask.java @@ -86,8 +86,7 @@ private ColumnStatistics constructColumnStatsFromInput() colStats.getStatsDesc().getTableName()); return colStats; } - String dbName = work.dbName(); - String tableName = work.getTableName(); + TableName tableName = work.getTableName(); String partName = work.getPartName(); String colName = work.getColName(); String columnType = work.getColType(); @@ -273,7 +272,7 @@ private ColumnStatistics constructColumnStatsFromInput() } else { throw new SemanticException("Unsupported type"); } - ColumnStatisticsDesc statsDesc = getColumnStatsDesc(dbName, tableName, + ColumnStatisticsDesc statsDesc = getColumnStatsDesc(tableName.getDb(), tableName.getTable(), partName, partName == null); ColumnStatistics colStat = new ColumnStatistics(); colStat.setStatsDesc(statsDesc); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java index 1ab4d62da2..d7863f5aa8 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java @@ -62,6 +62,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hive.common.FileUtils; import org.apache.hadoop.hive.common.StatsSetupConst; +import org.apache.hadoop.hive.common.TableName; import org.apache.hadoop.hive.common.ValidTxnList; import org.apache.hadoop.hive.common.ValidTxnWriteIdList; import org.apache.hadoop.hive.common.type.HiveDecimal; @@ -1328,7 +1329,7 @@ private int addPartitions(Hive db, AddPartitionDesc addPartitionDesc) throws Hiv * @throws HiveException */ private int renamePartition(Hive db, RenamePartitionDesc renamePartitionDesc) throws HiveException { - String tableName = renamePartitionDesc.getTableName(); + String tableName = renamePartitionDesc.getTableName().getTable(); LinkedHashMap oldPartSpec = renamePartitionDesc.getOldPartSpec(); if (!allowOperationInReplicationScope(db, tableName, oldPartSpec, renamePartitionDesc.getReplicationSpec())) { @@ -1342,8 +1343,7 @@ private int renamePartition(Hive db, RenamePartitionDesc renamePartitionDesc) th return 0; } - String names[] = Utilities.getDbTableName(tableName); - if (Utils.isBootstrapDumpInProgress(db, names[0])) { + if (Utils.isBootstrapDumpInProgress(db, Utilities.getTableName(tableName).getDb())) { LOG.error("DDLTask: Rename Partition not allowed as bootstrap dump in progress"); throw new HiveException("Rename Partition: Not allowed as bootstrap dump in progress"); } @@ -2159,10 +2159,10 @@ private int msck(Hive db, MsckDesc msckDesc) { try { msck = new Msck( false, false); msck.init(db.getConf()); - String[] names = Utilities.getDbTableName(msckDesc.getTableName()); - MsckInfo msckInfo = new MsckInfo(SessionState.get().getCurrentCatalog(), 
names[0], - names[1], msckDesc.getPartSpecs(), msckDesc.getResFile(), - msckDesc.isRepairPartitions(), msckDesc.isAddPartitions(), msckDesc.isDropPartitions(), -1); + final TableName tableName = Utilities.getTableName(msckDesc.getTableName()); + MsckInfo msckInfo = new MsckInfo(SessionState.get().getCurrentCatalog(), tableName.getDb(), tableName.getTable(), + msckDesc.getPartSpecs(), msckDesc.getResFile(), msckDesc.isRepairPartitions(), msckDesc.isAddPartitions(), + msckDesc.isDropPartitions(), -1); return msck.repair(msckInfo); } catch (MetaException e) { LOG.error("Unable to create msck instance.", e); @@ -3476,7 +3476,8 @@ private int describeTable(Hive db, DescTableDesc descTbl) throws HiveException, // when column name is specified in describe table DDL, colPath will // will be table_name.column_name String colName = colPath.split("\\.")[1]; - String[] dbTab = Utilities.getDbTableName(tableName); + final TableName tNameObj = Utilities.getTableName(tableName); + String[] dbTabLower = new String[] {tNameObj.getDb().toLowerCase(), tNameObj.getTable().toLowerCase()}; List colNames = new ArrayList(); colNames.add(colName.toLowerCase()); if (null == part) { @@ -3498,9 +3499,8 @@ private int describeTable(Hive db, DescTableDesc descTbl) throws HiveException, StatsSetupConst.setColumnStatsState(tblProps, colNames); } else { cols = Hive.getFieldsFromDeserializer(colPath, deserializer); - List parts = db.getPartitionNames(dbTab[0].toLowerCase(), dbTab[1].toLowerCase(), (short) -1); - AggrStats aggrStats = db.getAggrColStatsFor( - dbTab[0].toLowerCase(), dbTab[1].toLowerCase(), colNames, parts, false); + List parts = db.getPartitionNames(dbTabLower[0], dbTabLower[1], (short) -1); + AggrStats aggrStats = db.getAggrColStatsFor(dbTabLower[0], dbTabLower[1], colNames, parts, false); colStats = aggrStats.getColStats(); if (parts.size() == aggrStats.getPartsFound()) { StatsSetupConst.setColumnStatsState(tblProps, colNames); @@ -3511,15 +3511,14 @@ private int describeTable(Hive db, DescTableDesc descTbl) throws HiveException, tbl.setParameters(tblProps); } else { cols = Hive.getFieldsFromDeserializer(colPath, deserializer); - colStats = db.getTableColumnStatistics( - dbTab[0].toLowerCase(), dbTab[1].toLowerCase(), colNames, false); + colStats = db.getTableColumnStatistics(dbTabLower[0], dbTabLower[1], colNames, false); } } else { List partitions = new ArrayList(); partitions.add(part.getName()); cols = Hive.getFieldsFromDeserializer(colPath, deserializer); - colStats = db.getPartitionColumnStatistics(dbTab[0].toLowerCase(), - dbTab[1].toLowerCase(), partitions, colNames, false).get(part.getName()); + colStats = db.getPartitionColumnStatistics(dbTabLower[0], dbTabLower[1], partitions, colNames, false) + .get(part.getName()); } } else { cols = Hive.getFieldsFromDeserializer(colPath, deserializer); @@ -3692,8 +3691,7 @@ static StringBuilder appendNonNull(StringBuilder builder, Object value, boolean */ private int alterTable(Hive db, AlterTableDesc alterTbl) throws HiveException { if (alterTbl.getOp() == AlterTableDesc.AlterTableTypes.RENAME) { - String names[] = Utilities.getDbTableName(alterTbl.getOldName()); - if (Utils.isBootstrapDumpInProgress(db, names[0])) { + if (Utils.isBootstrapDumpInProgress(db, Utilities.getTableName(alterTbl.getOldName()).getDb())) { LOG.error("DDLTask: Rename Table not allowed as bootstrap dump in progress"); throw new HiveException("Rename Table: Not allowed as bootstrap dump in progress"); } @@ -3853,8 +3851,9 @@ private static StorageDescriptor 
retrieveStorageDescriptor(Table tbl, Partition } if (alterTbl.getOp() == AlterTableDesc.AlterTableTypes.RENAME) { - tbl.setDbName(Utilities.getDatabaseName(alterTbl.getNewName())); - tbl.setTableName(Utilities.getTableName(alterTbl.getNewName())); + final TableName tn = Utilities.getTableName(alterTbl.getNewName()); + tbl.setDbName(tn.getDb()); + tbl.setTableName(tn.getTable()); } else if (alterTbl.getOp() == AlterTableDesc.AlterTableTypes.ADDCOLS) { StorageDescriptor sd = retrieveStorageDescriptor(tbl, part); String serializationLib = sd.getSerdeInfo().getSerializationLib(); @@ -4311,9 +4310,8 @@ private void checkMmLb(Partition part) throws HiveException { private int dropConstraint(Hive db, AlterTableDesc alterTbl) throws SemanticException, HiveException { try { - db.dropConstraint(Utilities.getDatabaseName(alterTbl.getOldName()), - Utilities.getTableName(alterTbl.getOldName()), - alterTbl.getConstraintName()); + final TableName tn = Utilities.getTableName(alterTbl.getOldName()); + db.dropConstraint(tn.getDb(), tn.getTable(), alterTbl.getConstraintName()); } catch (NoSuchObjectException e) { throw new HiveException(e); } @@ -4855,10 +4853,10 @@ private int createTableLike(Hive db, CreateTableLikeDesc crtTbl) throws Exceptio // find out database name and table name of target table String targetTableName = crtTbl.getTableName(); - String[] names = Utilities.getDbTableName(targetTableName); - tbl.setDbName(names[0]); - tbl.setTableName(names[1]); + final TableName tableName = Utilities.getTableName(targetTableName); + tbl.setDbName(tableName.getDb()); + tbl.setTableName(tableName.getTable()); // using old table object, hence reset the owner to current user for new table. tbl.setOwner(SessionState.getUserFromAuthenticator()); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java index 61e34308bc..abe0d16857 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java @@ -98,6 +98,7 @@ import org.apache.hadoop.hive.common.JavaUtils; import org.apache.hadoop.hive.common.StatsSetupConst; import org.apache.hadoop.hive.common.StringInternUtils; +import org.apache.hadoop.hive.common.TableName; import org.apache.hadoop.hive.common.ValidWriteIdList; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.conf.HiveConf.ConfVars; @@ -2211,59 +2212,20 @@ public static String formatBinaryString(byte[] array, int start, int length) { } /** - * Extract db and table name from dbtable string, where db and table are separated by "." 
- * If there is no db name part, set the current sessions default db - * @param dbtable - * @return String array with two elements, first is db name, second is table name - * @throws HiveException - */ - public static String[] getDbTableName(String dbtable) throws SemanticException { - return getDbTableName(SessionState.get().getCurrentDatabase(), dbtable); - } - - public static String[] getDbTableName(String defaultDb, String dbtable) throws SemanticException { - if (dbtable == null) { - return new String[2]; - } - String[] names = dbtable.split("\\."); - switch (names.length) { - case 2: - return names; - case 1: - return new String [] {defaultDb, dbtable}; - default: - throw new SemanticException(ErrorMsg.INVALID_TABLE_NAME, dbtable); - } - } - - /** - * Accepts qualified name which is in the form of dbname.tablename and returns dbname from it - * - * @param dbTableName - * @return dbname - * @throws SemanticException input string is not qualified name - */ - public static String getDatabaseName(String dbTableName) throws SemanticException { - String[] split = dbTableName.split("\\."); - if (split.length != 2) { - throw new SemanticException(ErrorMsg.INVALID_TABLE_NAME, dbTableName); - } - return split[0]; - } - - /** - * Accepts qualified name which is in the form of dbname.tablename and returns tablename from it + * Accepts qualified name which is in the form of table, dbname.tablename or catalog.dbname.tablename and returns a + * {@link TableName} * - * @param dbTableName - * @return tablename - * @throws SemanticException input string is not qualified name + * @param dbTableName, not null + * @return a {@link TableName} + * @throws SemanticException if dbTableName is null */ - public static String getTableName(String dbTableName) throws SemanticException { - String[] split = dbTableName.split("\\."); - if (split.length != 2) { - throw new SemanticException(ErrorMsg.INVALID_TABLE_NAME, dbTableName); + public static TableName getTableName(String dbTableName) throws SemanticException { + try { + return TableName + .fromString(dbTableName, SessionState.get().getCurrentCatalog(), SessionState.get().getCurrentDatabase()); + } catch (IllegalArgumentException e) { + throw new SemanticException(e.getCause()); } - return split[1]; } public static void validateColumnNames(List colNames, List checkCols) diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplLoadWork.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplLoadWork.java index 7539281f1f..aa00754af7 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplLoadWork.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplLoadWork.java @@ -26,6 +26,7 @@ import org.apache.hadoop.hive.ql.exec.repl.incremental.IncrementalLoadEventsIterator; import org.apache.hadoop.hive.ql.exec.repl.incremental.IncrementalLoadTasksBuilder; import org.apache.hadoop.hive.ql.exec.repl.util.ReplUtils; +import org.apache.hadoop.hive.ql.parse.SemanticException; import org.apache.hadoop.hive.ql.plan.Explain; import org.apache.hadoop.hive.ql.session.LineageState; import org.apache.hadoop.hive.ql.exec.Task; @@ -60,7 +61,7 @@ public ReplLoadWork(HiveConf hiveConf, String dumpDirectory, String dbNameToLoadIn, String tableNameToLoadIn, LineageState lineageState, boolean isIncrementalDump, Long eventTo, - List pathsToCopyIterator) throws IOException { + List pathsToCopyIterator) throws IOException, SemanticException { this.tableNameToLoadIn = tableNameToLoadIn; sessionStateLineageState = lineageState; this.dumpDirectory = dumpDirectory; 
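Reviewer's aside (not part of the patch): the Utilities.java hunk above replaces the old getDbTableName()/getDatabaseName()/getTableName(String) string splitting with TableName.fromString(name, currentCatalog, currentDatabase), and the call sites throughout the rest of this diff read the parts back via getCat()/getDb()/getTable()/getDbTable(). The snippet below is a minimal, self-contained sketch of the resolution rules those call sites assume ("table", "db.table", or "catalog.db.table", with session defaults filling in the missing parts). The class and method names are illustrative only and do not reproduce the real org.apache.hadoop.hive.common.TableName implementation.

// Illustrative approximation only; the actual parsing lives in
// org.apache.hadoop.hive.common.TableName and is not reproduced here.
final class DottedNameSketch {

  // Resolves "tbl", "db.tbl" or "cat.db.tbl" into {catalog, database, table},
  // mirroring how callers in this patch replace Utilities.getDbTableName(...)[0]/[1].
  static String[] resolve(String name, String defaultCat, String defaultDb) {
    if (name == null || name.trim().isEmpty()) {
      // The patch relies on an IllegalArgumentException here, which Utilities.getTableName
      // rewraps as a SemanticException.
      throw new IllegalArgumentException("Table name is null or empty");
    }
    String[] parts = name.split("\\.");
    switch (parts.length) {
      case 1: return new String[] {defaultCat, defaultDb, parts[0]};   // "tbl"
      case 2: return new String[] {defaultCat, parts[0], parts[1]};    // "db.tbl"
      case 3: return new String[] {parts[0], parts[1], parts[2]};      // "cat.db.tbl"
      default: throw new IllegalArgumentException("Invalid table name " + name);
    }
  }

  public static void main(String[] args) {
    // Hypothetical defaults standing in for SessionState.get().getCurrentCatalog()/getCurrentDatabase().
    String[] resolved = resolve("sales.orders", "hive", "default");
    System.out.println(String.join(".", resolved)); // prints: hive.sales.orders
  }
}

Because malformed input now surfaces as an IllegalArgumentException that is rewrapped as a SemanticException at the Utilities boundary, constructors that resolve names eagerly, such as ReplLoadWork below, additionally declare throws SemanticException.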
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/incremental/IncrementalLoadTasksBuilder.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/incremental/IncrementalLoadTasksBuilder.java index 3e0c969d4d..7c98c49ff0 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/incremental/IncrementalLoadTasksBuilder.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/incremental/IncrementalLoadTasksBuilder.java @@ -20,6 +20,7 @@ import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hive.common.TableName; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.metastore.Warehouse; import org.apache.hadoop.hive.metastore.api.Database; @@ -28,6 +29,7 @@ import org.apache.hadoop.hive.ql.DriverContext; import org.apache.hadoop.hive.ql.exec.Task; import org.apache.hadoop.hive.ql.exec.TaskFactory; +import org.apache.hadoop.hive.ql.exec.Utilities; import org.apache.hadoop.hive.ql.exec.repl.ReplStateLogWork; import org.apache.hadoop.hive.ql.exec.repl.util.AddDependencyToLeaves; import org.apache.hadoop.hive.ql.exec.repl.util.TaskTracker; @@ -50,6 +52,7 @@ import org.apache.hadoop.hive.ql.plan.DDLWork; import org.apache.hadoop.hive.ql.plan.DependencyCollectionWork; import org.apache.hadoop.hive.ql.plan.ReplTxnWork; +import org.apache.hadoop.hive.ql.session.SessionState; import org.apache.hadoop.hive.ql.stats.StatsUtils; import org.slf4j.Logger; @@ -65,7 +68,7 @@ * Iterate through the dump directory and create tasks to load the events. */ public class IncrementalLoadTasksBuilder { - private final String dbName, tableName; + private final TableName tableName; private final IncrementalLoadEventsIterator iterator; private final HashSet inputs; private final HashSet outputs; @@ -76,9 +79,13 @@ private final Long eventTo; public IncrementalLoadTasksBuilder(String dbName, String tableName, String loadPath, - IncrementalLoadEventsIterator iterator, HiveConf conf, Long eventTo) { - this.dbName = dbName; - this.tableName = tableName; + IncrementalLoadEventsIterator iterator, HiveConf conf, Long eventTo) throws SemanticException { + try { + this.tableName = TableName.fromString(tableName, SessionState.get().getCurrentCatalog(), dbName); + } catch (IllegalArgumentException e) { + throw new SemanticException(e.getCause()); + } + this.iterator = iterator; inputs = new HashSet<>(); outputs = new HashSet<>(); @@ -104,14 +111,14 @@ public IncrementalLoadTasksBuilder(String dbName, String tableName, String loadP String location = dir.getPath().toUri().toString(); DumpMetaData eventDmd = new DumpMetaData(new Path(location), conf); - if (!shouldReplayEvent(dir, eventDmd.getDumpType(), dbName, tableName)) { - this.log.debug("Skipping event {} from {} for table {}.{} maxTasks: {}", - eventDmd.getDumpType(), dir.getPath().toUri(), dbName, tableName, tracker.numberOfTasks()); + if (!shouldReplayEvent(dir, eventDmd.getDumpType(), tableName.getDb(), tableName.getTable())) { + this.log.debug("Skipping event {} from {} for table {} maxTasks: {}", + eventDmd.getDumpType(), dir.getPath().toUri(), tableName.getDbTable(), tracker.numberOfTasks()); continue; } - this.log.debug("Loading event {} from {} for table {}.{} maxTasks: {}", - eventDmd.getDumpType(), dir.getPath().toUri(), dbName, tableName, tracker.numberOfTasks()); + this.log.debug("Loading event {} from {} for table {} maxTasks: {}", + eventDmd.getDumpType(), dir.getPath().toUri(), tableName.getDbTable(), tracker.numberOfTasks()); // event 
loads will behave similar to table loads, with one crucial difference // precursor order is strict, and each event must be processed after the previous one. @@ -132,7 +139,7 @@ public IncrementalLoadTasksBuilder(String dbName, String tableName, String loadP // Once this entire chain is generated, we add evTaskRoot to rootTasks, so as to execute the // entire chain - MessageHandler.Context context = new MessageHandler.Context(dbName, tableName, location, + MessageHandler.Context context = new MessageHandler.Context(tableName.getDb(), tableName.getTable(), location, taskChainTail, eventDmd, conf, hive, driverContext.getCtx(), this.log); List> evTasks = analyzeEventLoad(context); @@ -155,12 +162,12 @@ public IncrementalLoadTasksBuilder(String dbName, String tableName, String loadP // if no events were replayed, then add a task to update the last repl id of the database/table to last event id. if (taskChainTail == evTaskRoot) { String lastEventid = eventTo.toString(); - if (StringUtils.isEmpty(tableName)) { - taskChainTail = dbUpdateReplStateTask(dbName, lastEventid, taskChainTail); - this.log.debug("no events to replay, set last repl id of db " + dbName + " to " + lastEventid); + if (StringUtils.isEmpty(tableName.getTable())) { + taskChainTail = dbUpdateReplStateTask(tableName.getDb(), lastEventid, taskChainTail); + this.log.debug("no events to replay, set last repl id of db " + tableName.getDb() + " to " + lastEventid); } else { - taskChainTail = tableUpdateReplStateTask(dbName, tableName, null, lastEventid, taskChainTail); - this.log.debug("no events to replay, set last repl id of table " + dbName + "." + tableName + " to " + + taskChainTail = tableUpdateReplStateTask(tableName, null, lastEventid, taskChainTail); + this.log.debug("no events to replay, set last repl id of table " + tableName.getDb() + "." 
+ tableName + " to " + lastEventid); } } @@ -266,16 +273,15 @@ private boolean shouldReplayEvent(FileStatus dir, DumpType dumpType, String dbNa return updateReplIdTxnTask; } - private Task tableUpdateReplStateTask(String dbName, String tableName, - Map partSpec, String replState, - Task preCursor) throws SemanticException { + private Task tableUpdateReplStateTask(TableName tableName, Map partSpec, + String replState, Task preCursor) throws SemanticException { HashMap mapProp = new HashMap<>(); mapProp.put(ReplicationSpec.KEY.CURR_STATE_ID.toString(), replState); AlterTableDesc alterTblDesc = new AlterTableDesc( AlterTableDesc.AlterTableTypes.ADDPROPS, new ReplicationSpec(replState, replState)); alterTblDesc.setProps(mapProp); - alterTblDesc.setOldName(StatsUtils.getFullyQualifiedTableName(dbName, tableName)); + alterTblDesc.setOldName(tableName.getDbTable()); alterTblDesc.setPartSpec((HashMap) partSpec); Task updateReplIdTask = TaskFactory.get(new DDLWork(inputs, outputs, alterTblDesc), conf); @@ -332,13 +338,12 @@ private boolean shouldReplayEvent(FileStatus dir, DumpType dumpType, String dbNa for (UpdatedMetaDataTracker.UpdateMetaData updateMetaData : updatedMetaDataTracker.getUpdateMetaDataList()) { String replState = updateMetaData.getReplState(); - String dbName = updateMetaData.getDbName(); - String tableName = updateMetaData.getTableName(); + final TableName tableName = TableName.fromString(updateMetaData.getTableName(),null, updateMetaData.getDbName()); // If any partition is updated, then update repl state in partition object if (needCommitTx) { if (updateMetaData.getPartitionsList().size() > 0) { - updateReplIdTask = getMigrationCommitTxnTask(dbName, tableName, + updateReplIdTask = getMigrationCommitTxnTask(tableName.getDb(), tableName.getTable(), updateMetaData.getPartitionsList(), replState, isDatabaseLoad, barrierTask); tasks.add(updateReplIdTask); // commit txn task will update repl id for table and database also. @@ -346,7 +351,7 @@ private boolean shouldReplayEvent(FileStatus dir, DumpType dumpType, String dbNa } } else { for (final Map partSpec : updateMetaData.getPartitionsList()) { - updateReplIdTask = tableUpdateReplStateTask(dbName, tableName, partSpec, replState, barrierTask); + updateReplIdTask = tableUpdateReplStateTask(tableName, partSpec, replState, barrierTask); tasks.add(updateReplIdTask); } } @@ -354,24 +359,24 @@ private boolean shouldReplayEvent(FileStatus dir, DumpType dumpType, String dbNa // If any table/partition is updated, then update repl state in table object if (tableName != null) { if (needCommitTx) { - updateReplIdTask = getMigrationCommitTxnTask(dbName, tableName, null, + updateReplIdTask = getMigrationCommitTxnTask(tableName.getDb(), tableName.getTable(), null, replState, isDatabaseLoad, barrierTask); tasks.add(updateReplIdTask); // commit txn task will update repl id for database also. 
break; } - updateReplIdTask = tableUpdateReplStateTask(dbName, tableName, null, replState, barrierTask); + updateReplIdTask = tableUpdateReplStateTask(tableName, null, replState, barrierTask); tasks.add(updateReplIdTask); } // If any table/partition is updated, then update repl state in db object if (needCommitTx) { - updateReplIdTask = getMigrationCommitTxnTask(dbName, null, null, + updateReplIdTask = getMigrationCommitTxnTask(tableName.getDb(), null, null, replState, isDatabaseLoad, barrierTask); tasks.add(updateReplIdTask); } else if (isDatabaseLoad) { // For table level load, need not update replication state for the database - updateReplIdTask = dbUpdateReplStateTask(dbName, replState, barrierTask); + updateReplIdTask = dbUpdateReplStateTask(tableName.getDb(), replState, barrierTask); tasks.add(updateReplIdTask); } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/hooks/HiveProtoLoggingHook.java b/ql/src/java/org/apache/hadoop/hive/ql/hooks/HiveProtoLoggingHook.java index ec00ab6d6e..3310e1e00f 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/hooks/HiveProtoLoggingHook.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/hooks/HiveProtoLoggingHook.java @@ -103,6 +103,7 @@ import org.apache.commons.compress.utils.IOUtils; import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hive.common.TableName; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.conf.HiveConf.ConfVars; import org.apache.hadoop.hive.llap.registry.impl.LlapRegistryService; @@ -474,7 +475,7 @@ private String getQueueName(ExecutionMode mode, HiveConf conf) { List tableNames = new ArrayList<>(); for (Entity entity : entities) { if (entity.getType() == Entity.Type.TABLE) { - tableNames.add(entity.getTable().getDbName() + "." + entity.getTable().getTableName()); + tableNames.add(TableName.getDbTable(entity.getTable().getDbName(),entity.getTable().getTableName())); } } return tableNames; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/hooks/UpdateInputAccessTimeHook.java b/ql/src/java/org/apache/hadoop/hive/ql/hooks/UpdateInputAccessTimeHook.java index 36a92aab57..cd52f2e3d0 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/hooks/UpdateInputAccessTimeHook.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/hooks/UpdateInputAccessTimeHook.java @@ -19,6 +19,7 @@ import java.util.Set; +import org.apache.hadoop.hive.common.TableName; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.ql.metadata.Hive; import org.apache.hadoop.hive.ql.metadata.HiveException; @@ -59,23 +60,21 @@ public void run(HookContext hookContext) throws Exception { // of the object, before it was modified by StatsTask. // Get the latest versions of the object case TABLE: { - String dbName = re.getTable().getDbName(); - String tblName = re.getTable().getTableName(); - Table t = db.getTable(dbName, tblName); + final TableName tableName = TableName.fromString(re.getTable().getTableName(), null, re.getTable().getDbName()); + Table t = db.getTable(tableName.getDb(), tableName.getTable()); t.setLastAccessTime(lastAccessTime); - db.alterTable(dbName + "." 
+ tblName, t, false, null, false); + db.alterTable(tableName.getDbTable(), t, false, null, false); break; } case PARTITION: { - String dbName = re.getTable().getDbName(); - String tblName = re.getTable().getTableName(); + final TableName tableName = TableName.fromString(re.getTable().getTableName(), null, re.getTable().getDbName()); Partition p = re.getPartition(); - Table t = db.getTable(dbName, tblName); + Table t = db.getTable(tableName.getDb(), tableName.getTable()); p = db.getPartition(t, p.getSpec(), false); p.setLastAccessTime(lastAccessTime); - db.alterPartition(null, dbName, tblName, p, null, false); + db.alterPartition(null, tableName.getDb(), tableName.getTable(), p, null, false); t.setLastAccessTime(lastAccessTime); - db.alterTable(dbName + "." + tblName, t, false, null, false); + db.alterTable(tableName.getDbTable(), t, false, null, false); break; } default: diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/CheckConstraint.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/CheckConstraint.java index af457883de..79a6ec280e 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/CheckConstraint.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/CheckConstraint.java @@ -25,6 +25,7 @@ import java.util.TreeMap; import org.apache.avro.generic.GenericData; +import org.apache.hadoop.hive.common.TableName; import org.apache.hadoop.hive.metastore.api.SQLCheckConstraint; /** @@ -99,7 +100,7 @@ public String getDatabaseName() { @Override public String toString() { StringBuilder sb = new StringBuilder(); - sb.append("Check Constraints for " + databaseName + "." + tableName + ":"); + sb.append("Check Constraints for " + TableName.getDbTable(databaseName, tableName) + ":"); sb.append("["); if (checkConstraints != null && checkConstraints.size() > 0) { for (Map.Entry> me : checkConstraints.entrySet()) { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/DefaultConstraint.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/DefaultConstraint.java index 59df3daf6e..a336a70221 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/DefaultConstraint.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/DefaultConstraint.java @@ -24,6 +24,7 @@ import java.util.Map; import java.util.TreeMap; +import org.apache.hadoop.hive.common.TableName; import org.apache.hadoop.hive.metastore.api.SQLDefaultConstraint; /** @@ -99,7 +100,7 @@ public String getDatabaseName() { @Override public String toString() { StringBuilder sb = new StringBuilder(); - sb.append("Default Constraints for " + databaseName + "." 
+ tableName + ":"); + sb.append("Default Constraints for " + TableName.getDbTable(databaseName, tableName) + ":"); sb.append("["); if (defaultConstraints != null && defaultConstraints.size() > 0) { for (Map.Entry> me : defaultConstraints.entrySet()) { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/ForeignKeyInfo.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/ForeignKeyInfo.java index 9ae14cd848..19a4af758a 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/ForeignKeyInfo.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/ForeignKeyInfo.java @@ -24,6 +24,7 @@ import java.util.List; import java.util.TreeMap; +import org.apache.hadoop.hive.common.TableName; import org.apache.hadoop.hive.metastore.api.SQLForeignKey; /** @@ -112,7 +113,7 @@ public void setForeignKeys(Map> foreignKeys) { @Override public String toString() { StringBuilder sb = new StringBuilder(); - sb.append("Foreign Keys for " + childDatabaseName+"."+childTableName+":"); + sb.append("Foreign Keys for " + TableName.getDbTable(childDatabaseName, childTableName) +":"); sb.append("["); if (foreignKeys != null && foreignKeys.size() > 0) { for (Map.Entry> me : foreignKeys.entrySet()) { @@ -120,9 +121,10 @@ public String toString() { List currCol = me.getValue(); if (currCol != null && currCol.size() > 0) { for (ForeignKeyCol fkc : currCol) { - sb.append (" (Parent Column Name: " + fkc.parentDatabaseName + - "."+ fkc.parentTableName + "." + fkc.parentColName + - ", Column Name: " + fkc.childColName + ", Key Sequence: " + fkc.position+ "),"); + sb.append(String + .join("", " (Parent Column Name: ", TableName.getDbTable(fkc.parentDatabaseName, fkc.parentTableName), + ".", fkc.parentColName, ", Column Name: ", fkc.childColName, ", Key Sequence: ", + fkc.position.toString(), "),")); } sb.setLength(sb.length()-1); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java index 3a313b0024..50e9660423 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java @@ -126,6 +126,7 @@ import org.apache.hadoop.hive.ql.optimizer.calcite.RelOptHiveTable; import org.apache.hadoop.hive.ql.optimizer.calcite.rules.views.HiveAugmentMaterializationRule; import org.apache.hadoop.hive.ql.optimizer.listbucketingpruner.ListBucketingPrunerUtils; +import org.apache.hadoop.hive.ql.parse.SemanticException; import org.apache.hadoop.hive.ql.plan.AddPartitionDesc; import org.apache.hadoop.hive.ql.plan.DropTableDesc; import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc; @@ -656,15 +657,15 @@ public void alterTable(Table newTbl, boolean cascade, EnvironmentContext environ public void alterTable(String fullyQlfdTblName, Table newTbl, EnvironmentContext environmentContext, boolean transactional) throws HiveException { - String[] names = Utilities.getDbTableName(fullyQlfdTblName); - alterTable(null, names[0], names[1], newTbl, false, environmentContext, transactional); + final TableName tName = Utilities.getTableName(fullyQlfdTblName); + alterTable(tName.getCat(), tName.getDb(), tName.getTable(), newTbl, false, environmentContext, transactional); } public void alterTable(String fullyQlfdTblName, Table newTbl, boolean cascade, EnvironmentContext environmentContext, boolean transactional) throws HiveException { - String[] names = Utilities.getDbTableName(fullyQlfdTblName); - alterTable(null, names[0], names[1], newTbl, cascade, environmentContext, transactional); + final TableName tName = 
Utilities.getTableName(fullyQlfdTblName); + alterTable(tName.getCat(), tName.getDb(), tName.getTable(), newTbl, cascade, environmentContext, transactional); } public void alterTable(String catName, String dbName, String tblName, Table newTbl, boolean cascade, @@ -744,8 +745,8 @@ public void updateCreationMetadata(String dbName, String tableName, CreationMeta public void alterPartition(String tblName, Partition newPart, EnvironmentContext environmentContext, boolean transactional) throws InvalidOperationException, HiveException { - String[] names = Utilities.getDbTableName(tblName); - alterPartition(null, names[0], names[1], newPart, environmentContext, transactional); + final TableName tn = Utilities.getTableName(tblName); + alterPartition(tn.getCat(), tn.getDb(), tn.getTable(), newPart, environmentContext, transactional); } /** @@ -825,7 +826,7 @@ private void validatePartition(Partition newPart) throws HiveException { public void alterPartitions(String tblName, List newParts, EnvironmentContext environmentContext, boolean transactional) throws InvalidOperationException, HiveException { - String[] names = Utilities.getDbTableName(tblName); + final TableName tn = Utilities.getTableName(tblName); List newTParts = new ArrayList(); try { @@ -845,7 +846,7 @@ public void alterPartitions(String tblName, List newParts, } newTParts.add(tmpPart.getTPartition()); } - getMSC().alter_partitions(names[0], names[1], newTParts, environmentContext, + getMSC().alter_partitions(tn.getDb(), tn.getTable(), newTParts, environmentContext, tableSnapshot != null ? tableSnapshot.getValidWriteIdList() : null, tableSnapshot != null ? tableSnapshot.getWriteId() : -1); } catch (MetaException e) { @@ -1043,8 +1044,8 @@ public void createTable(Table tbl, boolean ifNotExists) throws HiveException { * thrown if the drop fails */ public void dropTable(String tableName, boolean ifPurge) throws HiveException { - String[] names = Utilities.getDbTableName(tableName); - dropTable(names[0], names[1], true, true, ifPurge); + final TableName tn = Utilities.getTableName(tableName); + dropTable(tn.getDb(), tn.getTable(), true, true, ifPurge); } /** @@ -1185,8 +1186,8 @@ public Table getTable(final String tableName) throws HiveException { * table doesn't exist */ public Table getTable(final String tableName, boolean throwException) throws HiveException { - String[] names = Utilities.getDbTableName(tableName); - return this.getTable(names[0], names[1], throwException); + final TableName tn = Utilities.getTableName(tableName); + return this.getTable(tn.getDb(), tn.getTable(), throwException); } /** @@ -1201,12 +1202,12 @@ public Table getTable(final String tableName, boolean throwException) throws Hiv * if there's an internal error or if the table doesn't exist */ public Table getTable(final String dbName, final String tableName) throws HiveException { - // TODO: catalog... etc everywhere - if (tableName.contains(".")) { - String[] names = Utilities.getDbTableName(tableName); - return this.getTable(names[0], names[1], true); - } else { - return this.getTable(dbName, tableName, true); + // TODO: catalog... 
etc everywhere + try { + final TableName tn = TableName.fromString(tableName, SessionState.get().getCurrentCatalog(), dbName); + return this.getTable(tn.getDb(), tn.getTable(), true); + } catch (IllegalArgumentException e) { + throw new SemanticException(e.getCause()); } } @@ -1538,10 +1539,10 @@ public Table apply(org.apache.hadoop.hive.metastore.api.Table table) { } } - public List getValidMaterializedView(String dbName, String materializedViewName, - List tablesUsed, boolean forceMVContentsUpToDate, HiveTxnManager txnMgr) throws HiveException { - return getValidMaterializedViews(dbName, ImmutableList.of(materializedViewName), - tablesUsed, forceMVContentsUpToDate, txnMgr); + public List getValidMaterializedView(TableName tableName, List tablesUsed, + boolean forceMVContentsUpToDate, HiveTxnManager txnMgr) throws HiveException { + return getValidMaterializedViews(tableName.getDb(), ImmutableList.of(tableName.getTable()), tablesUsed, + forceMVContentsUpToDate, txnMgr); } private List getValidMaterializedViews(String dbName, List materializedViewNames, @@ -3163,10 +3164,6 @@ private void alterPartitionSpec(Table tbl, String partPath) throws HiveException, InvalidOperationException { alterPartitionSpecInMemory(tbl, partSpec, tpart, inheritTableSpecs, partPath); - String fullName = tbl.getTableName(); - if (!org.apache.commons.lang.StringUtils.isEmpty(tbl.getDbName())) { - fullName = tbl.getFullyQualifiedName(); - } alterPartition(tbl.getCatalogName(), tbl.getDbName(), tbl.getTableName(), new Partition(tbl, tpart), null, true); } @@ -3331,8 +3328,8 @@ private static void addInsertNonDirectoryInformation(Path p, FileSystem fileSyst public boolean dropPartition(String tblName, List part_vals, boolean deleteData) throws HiveException { - String[] names = Utilities.getDbTableName(tblName); - return dropPartition(names[0], names[1], part_vals, deleteData); + final TableName tn = Utilities.getTableName(tblName); + return dropPartition(tn.getDb(), tn.getTable(), part_vals, deleteData); } public boolean dropPartition(String db_name, String tbl_name, @@ -3423,20 +3420,17 @@ public boolean dropPartition(String dbName, String tableName, List partV ++partSpecKey; } - String[] names = Utilities.getDbTableName(table.getFullyQualifiedName()); - return dropPartitions(names[0], names[1], partSpecs, deleteData, ifExists); + return dropPartitions(Utilities.getTableName(table.getFullyQualifiedName()), partSpecs, deleteData, ifExists); } public List dropPartitions(String tblName, List partSpecs, boolean deleteData, boolean ifExists) throws HiveException { - String[] names = Utilities.getDbTableName(tblName); - return dropPartitions(names[0], names[1], partSpecs, deleteData, ifExists); + return dropPartitions(Utilities.getTableName(tblName), partSpecs, deleteData, ifExists); } - public List dropPartitions(String dbName, String tblName, - List partSpecs, boolean deleteData, + public List dropPartitions(TableName tableName, List partSpecs, boolean deleteData, boolean ifExists) throws HiveException { - return dropPartitions(dbName, tblName, partSpecs, + return dropPartitions(tableName, partSpecs, PartitionDropOptions.instance() .deleteData(deleteData) .ifExists(ifExists)); @@ -3444,22 +3438,21 @@ public boolean dropPartition(String dbName, String tableName, List partV public List dropPartitions(String tblName, List partSpecs, PartitionDropOptions dropOptions) throws HiveException { - String[] names = Utilities.getDbTableName(tblName); - return dropPartitions(names[0], names[1], partSpecs, dropOptions); + return 
dropPartitions(Utilities.getTableName(tblName), partSpecs, dropOptions); } - public List dropPartitions(String dbName, String tblName, - List partSpecs, PartitionDropOptions dropOptions) throws HiveException { + public List dropPartitions(TableName tableName, List partSpecs, + PartitionDropOptions dropOptions) throws HiveException { try { - Table tbl = getTable(dbName, tblName); + Table tbl = getTable(tableName.getDb(), tableName.getTable()); List> partExprs = new ArrayList<>(partSpecs.size()); for (DropTableDesc.PartSpec partSpec : partSpecs) { partExprs.add(new org.apache.hadoop.hive.metastore.utils.ObjectPair<>(partSpec.getPrefixLength(), SerializationUtilities.serializeExpressionToKryo(partSpec.getPartSpec()))); } - List tParts = getMSC().dropPartitions( - dbName, tblName, partExprs, dropOptions); + List tParts = + getMSC().dropPartitions(tableName.getDb(), tableName.getTable(), partExprs, dropOptions); return convertFromMetastore(tbl, tParts); } catch (NoSuchObjectException e) { throw new HiveException("Partition or table doesn't exist.", e); @@ -3468,13 +3461,7 @@ public boolean dropPartition(String dbName, String tableName, List partV } } - public List getPartitionNames(String tblName, short max) throws HiveException { - String[] names = Utilities.getDbTableName(tblName); - return getPartitionNames(names[0], names[1], max); - } - - public List getPartitionNames(String dbName, String tblName, short max) - throws HiveException { + public List getPartitionNames(String dbName, String tblName, short max) throws HiveException { List names = null; try { names = getMSC().listPartitionNames(dbName, tblName, max); @@ -5152,8 +5139,8 @@ public boolean deletePartitionColumnStatistics(String dbName, String tableName, } public Table newTable(String tableName) throws HiveException { - String[] names = Utilities.getDbTableName(tableName); - return new Table(names[0], names[1]); + final TableName tn = Utilities.getTableName(tableName); + return new Table(tn.getDb(), tn.getTable()); } public String getDelegationToken(String owner, String renewer) diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveUtils.java index e04a0f3dce..1239c72ff2 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveUtils.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveUtils.java @@ -22,6 +22,7 @@ import java.util.List; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hive.common.TableName; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; @@ -436,14 +437,14 @@ public static String getReplPolicy(String dbName, String tableName) { } else if ((tableName == null) || (tableName.isEmpty())) { return dbName.toLowerCase() + ".*"; } else { - return dbName.toLowerCase() + "." + tableName.toLowerCase(); + return TableName.getDbTable(dbName, tableName).toLowerCase(); } } public static Path getDumpPath(Path root, String dbName, String tableName) { assert (dbName != null); if ((tableName != null) && (!tableName.isEmpty())) { - return new Path(root, dbName + "." 
+ tableName); + return new Path(root, TableName.getDbTable(dbName, tableName).toLowerCase()); } return new Path(root, dbName); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/NotNullConstraint.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/NotNullConstraint.java index ffd42f2127..9d248b06cb 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/NotNullConstraint.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/NotNullConstraint.java @@ -23,6 +23,7 @@ import java.util.Map; import java.util.TreeMap; +import org.apache.hadoop.hive.common.TableName; import org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint; /** @@ -69,7 +70,7 @@ public String getDatabaseName() { @Override public String toString() { StringBuilder sb = new StringBuilder(); - sb.append("Not Null Constraints for " + databaseName + "." + tableName + ":"); + sb.append("Not Null Constraints for " + TableName.getDbTable(databaseName, tableName) + ":"); sb.append("["); if (notNullConstraints != null && notNullConstraints.size() > 0) { for (Map.Entry me : notNullConstraints.entrySet()) { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/PrimaryKeyInfo.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/PrimaryKeyInfo.java index c50bd7dca7..70c360a1cc 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/PrimaryKeyInfo.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/PrimaryKeyInfo.java @@ -23,6 +23,7 @@ import java.util.List; import java.util.TreeMap; +import org.apache.hadoop.hive.common.TableName; import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey; /** @@ -92,7 +93,7 @@ public void setColNames(Map colNames) { @Override public String toString() { StringBuilder sb = new StringBuilder(); - sb.append("Primary Key for " + databaseName+"."+tableName+":"); + sb.append("Primary Key for " + TableName.getDbTable(databaseName, tableName) +":"); sb.append("["); if (colNames != null && colNames.size() > 0) { for (Map.Entry me : colNames.entrySet()) { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/UniqueConstraint.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/UniqueConstraint.java index 24817a2dc4..96f9d85212 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/UniqueConstraint.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/UniqueConstraint.java @@ -24,6 +24,7 @@ import java.util.Map; import java.util.TreeMap; +import org.apache.hadoop.hive.common.TableName; import org.apache.hadoop.hive.metastore.api.SQLUniqueConstraint; /** @@ -89,7 +90,7 @@ public String getDatabaseName() { @Override public String toString() { StringBuilder sb = new StringBuilder(); - sb.append("Unique Constraints for " + databaseName + "." 
+ tableName + ":"); + sb.append("Unique Constraints for " + TableName.getDbTable(databaseName, tableName) + ":"); sb.append("["); if (uniqueConstraints != null && uniqueConstraints.size() > 0) { for (Map.Entry> me : uniqueConstraints.entrySet()) { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/MetaDataFormatUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/MetaDataFormatUtils.java index 4180dc471d..3fcb4261c5 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/MetaDataFormatUtils.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/MetaDataFormatUtils.java @@ -20,6 +20,7 @@ import org.apache.commons.lang.StringEscapeUtils; import org.apache.hadoop.hive.common.StatsSetupConst; +import org.apache.hadoop.hive.common.TableName; import org.apache.hadoop.hive.common.type.HiveDecimal; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.conf.HiveConf.ConfVars; @@ -169,7 +170,7 @@ public static String getConstraintsInformation(PrimaryKeyInfo pkInfo, ForeignKey private static void getPrimaryKeyInformation(StringBuilder constraintsInfo, PrimaryKeyInfo pkInfo) { - formatOutput("Table:", pkInfo.getDatabaseName()+"."+pkInfo.getTableName(), constraintsInfo); + formatOutput("Table:", TableName.getDbTable(pkInfo.getDatabaseName(), pkInfo.getTableName()), constraintsInfo); formatOutput("Constraint Name:", pkInfo.getConstraintName(), constraintsInfo); Map colNames = pkInfo.getColNames(); final String columnNames = "Column Names:"; @@ -182,8 +183,8 @@ private static void getPrimaryKeyInformation(StringBuilder constraintsInfo, private static void getForeignKeyColInformation(StringBuilder constraintsInfo, ForeignKeyCol fkCol) { String[] fkcFields = new String[3]; - fkcFields[0] = "Parent Column Name:" + fkCol.parentDatabaseName + - "."+ fkCol.parentTableName + "." + fkCol.parentColName; + fkcFields[0] = String.join("", "Parent Column Name:", + TableName.getDbTable(fkCol.parentDatabaseName, fkCol.parentTableName), ".", fkCol.parentColName); fkcFields[1] = "Column Name:" + fkCol.childColName; fkcFields[2] = "Key Sequence:" + fkCol.position; formatOutput(fkcFields, constraintsInfo); @@ -204,8 +205,7 @@ private static void getForeignKeyRelInformation( private static void getForeignKeysInformation(StringBuilder constraintsInfo, ForeignKeyInfo fkInfo) { - formatOutput("Table:", - fkInfo.getChildDatabaseName()+"."+fkInfo.getChildTableName(), + formatOutput("Table:", TableName.getDbTable(fkInfo.getChildDatabaseName(), fkInfo.getChildTableName()), constraintsInfo); Map> foreignKeys = fkInfo.getForeignKeys(); if (foreignKeys != null && foreignKeys.size() > 0) { @@ -238,9 +238,7 @@ private static void getUniqueConstraintRelInformation( private static void getUniqueConstraintsInformation(StringBuilder constraintsInfo, UniqueConstraint ukInfo) { - formatOutput("Table:", - ukInfo.getDatabaseName() + "." + ukInfo.getTableName(), - constraintsInfo); + formatOutput("Table:", TableName.getDbTable(ukInfo.getDatabaseName(), ukInfo.getTableName()), constraintsInfo); Map> uniqueConstraints = ukInfo.getUniqueConstraints(); if (uniqueConstraints != null && uniqueConstraints.size() > 0) { for (Map.Entry> me : uniqueConstraints.entrySet()) { @@ -251,9 +249,7 @@ private static void getUniqueConstraintsInformation(StringBuilder constraintsInf private static void getNotNullConstraintsInformation(StringBuilder constraintsInfo, NotNullConstraint nnInfo) { - formatOutput("Table:", - nnInfo.getDatabaseName() + "." 
+ nnInfo.getTableName(), - constraintsInfo); + formatOutput("Table:", TableName.getDbTable(nnInfo.getDatabaseName(),nnInfo.getTableName()), constraintsInfo); Map notNullConstraints = nnInfo.getNotNullConstraints(); if (notNullConstraints != null && notNullConstraints.size() > 0) { for (Map.Entry me : notNullConstraints.entrySet()) { @@ -308,9 +304,7 @@ private static void getCheckConstraintRelInformation( private static void getDefaultConstraintsInformation(StringBuilder constraintsInfo, DefaultConstraint dInfo) { - formatOutput("Table:", - dInfo.getDatabaseName() + "." + dInfo.getTableName(), - constraintsInfo); + formatOutput("Table:", TableName.getDbTable(dInfo.getDatabaseName(), dInfo.getTableName()), constraintsInfo); Map> defaultConstraints = dInfo.getDefaultConstraints(); if (defaultConstraints != null && defaultConstraints.size() > 0) { for (Map.Entry> me : defaultConstraints.entrySet()) { @@ -321,9 +315,7 @@ private static void getDefaultConstraintsInformation(StringBuilder constraintsIn private static void getCheckConstraintsInformation(StringBuilder constraintsInfo, CheckConstraint dInfo) { - formatOutput("Table:", - dInfo.getDatabaseName() + "." + dInfo.getTableName(), - constraintsInfo); + formatOutput("Table:", TableName.getDbTable(dInfo.getDatabaseName(), dInfo.getTableName()), constraintsInfo); Map> checkConstraints = dInfo.getCheckConstraints(); if (checkConstraints != null && checkConstraints.size() > 0) { for (Map.Entry> me : checkConstraints.entrySet()) { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SharedWorkOptimizer.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SharedWorkOptimizer.java index c953e036f6..690a8639a1 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SharedWorkOptimizer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SharedWorkOptimizer.java @@ -36,6 +36,7 @@ import java.util.TreeMap; import org.apache.commons.lang.StringUtils; +import org.apache.hadoop.hive.common.TableName; import org.apache.hadoop.hive.conf.HiveConf.ConfVars; import org.apache.hadoop.hive.ql.exec.AppMasterEventOperator; import org.apache.hadoop.hive.ql.exec.DummyStoreOperator; @@ -699,9 +700,8 @@ private static void gatherDPPTableScanOps( Map sortedTopOps = new TreeMap<>(pctx.getTopOps()); for (Entry e : sortedTopOps.entrySet()) { TableScanOperator tsOp = e.getValue(); - tableNameToOps.put( - tsOp.getConf().getTableMetadata().getDbName() + "." - + tsOp.getConf().getTableMetadata().getTableName(), tsOp); + tableNameToOps.put(TableName.getDbTable(tsOp.getConf().getTableMetadata().getDbName(), + tsOp.getConf().getTableMetadata().getTableName()), tsOp); } return tableNameToOps; } @@ -710,8 +710,7 @@ private static void gatherDPPTableScanOps( Map tableToTotalSize = new HashMap<>(); for (Entry e : pctx.getTopOps().entrySet()) { TableScanOperator tsOp = e.getValue(); - String tableName = tsOp.getConf().getTableMetadata().getDbName() + "." - + tsOp.getConf().getTableMetadata().getTableName(); + final String tableName = TableName.getDbTable(tsOp.getConf().getTableMetadata().getDbName(), tsOp.getConf().getTableMetadata().getTableName()); long tableSize = tsOp.getStatistics() != null ? 
tsOp.getStatistics().getDataSize() : 0L; Long totalSize = tableToTotalSize.get(tableName); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/AnalyzeCommandUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/AnalyzeCommandUtils.java index 1207be3028..58a657ad9a 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/AnalyzeCommandUtils.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/AnalyzeCommandUtils.java @@ -20,10 +20,10 @@ import java.util.HashMap; import java.util.Map; +import org.apache.hadoop.hive.common.TableName; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.ql.exec.Utilities; import org.apache.hadoop.hive.ql.metadata.Table; -import org.apache.hadoop.hive.ql.session.SessionState; public class AnalyzeCommandUtils { public static boolean isPartitionLevelStats(ASTNode tree) { @@ -40,9 +40,8 @@ public static boolean isPartitionLevelStats(ASTNode tree) { public static Table getTable(ASTNode tree, BaseSemanticAnalyzer sa) throws SemanticException { String tableName = ColumnStatsSemanticAnalyzer.getUnescapedName((ASTNode) tree.getChild(0).getChild(0)); - String currentDb = SessionState.get().getCurrentDatabase(); - String [] names = Utilities.getDbTableName(currentDb, tableName); - return sa.getTable(names[0], names[1], true); + TableName tn = Utilities.getTableName(tableName); + return sa.getTable(tn.getDb(), tn.getTable(), true); } public static Map getPartKeyValuePairsFromAST(Table tbl, ASTNode tree, diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java index e6779b24a5..f5737a237a 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java @@ -38,11 +38,11 @@ import org.antlr.runtime.TokenRewriteStream; import org.antlr.runtime.tree.Tree; -import org.apache.commons.lang3.StringUtils; import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hive.common.FileUtils; +import org.apache.hadoop.hive.common.TableName; import org.apache.hadoop.hive.common.type.Date; import org.apache.hadoop.hive.conf.Constants; import org.apache.hadoop.hive.conf.HiveConf; @@ -108,6 +108,7 @@ import com.google.common.annotations.VisibleForTesting; import com.google.common.collect.ImmutableList; +import org.spark_project.guava.base.Objects; /** * BaseSemanticAnalyzer. @@ -398,15 +399,8 @@ public static String getUnescapedName(ASTNode tableOrColumnNode, String currentD if (tokenType == HiveParser.TOK_TABNAME) { // table node Map.Entry dbTablePair = getDbTableNamePair(tableOrColumnNode); - String dbName = dbTablePair.getKey(); - String tableName = dbTablePair.getValue(); - if (dbName != null){ - return dbName + "." + tableName; - } - if (currentDatabase != null) { - return currentDatabase + "." 
+ tableName; - } - return tableName; + return TableName.fromString(dbTablePair.getValue(), null, + Objects.firstNonNull(dbTablePair.getKey(), currentDatabase)).getDbTable(); } else if (tokenType == HiveParser.StringLiteral) { return unescapeSQLString(tableOrColumnNode.getText()); } @@ -414,32 +408,42 @@ public static String getUnescapedName(ASTNode tableOrColumnNode, String currentD return unescapeIdentifier(tableOrColumnNode.getText()); } - public static String[] getQualifiedTableName(ASTNode tabNameNode) throws SemanticException { - if (tabNameNode.getType() != HiveParser.TOK_TABNAME || - (tabNameNode.getChildCount() != 1 && tabNameNode.getChildCount() != 2)) { + /** + * Get the name reference of a DB table node. + * @param tabNameNode + * @return a {@link TableName}, not null. The catalog will be missing from this. + * @throws SemanticException + */ + public static TableName getQualifiedTableName(ASTNode tabNameNode) throws SemanticException { + // Ideally this would be removed, once the catalog is accessible in all use cases + return getQualifiedTableName(tabNameNode, null); + } + + /** + * Get the name reference of a DB table node. + * @param tabNameNode + * @param catalogName the catalog of the DB/object + * @return a {@link TableName}, not null. The catalog will be missing from this. + * @throws SemanticException + */ + public static TableName getQualifiedTableName(ASTNode tabNameNode, String catalogName) throws SemanticException { + if (tabNameNode.getType() != HiveParser.TOK_TABNAME || (tabNameNode.getChildCount() != 1 + && tabNameNode.getChildCount() != 2)) { throw new SemanticException(ErrorMsg.INVALID_TABLE_NAME.getMsg(tabNameNode)); } if (tabNameNode.getChildCount() == 2) { - String dbName = unescapeIdentifier(tabNameNode.getChild(0).getText()); - String tableName = unescapeIdentifier(tabNameNode.getChild(1).getText()); + final String dbName = unescapeIdentifier(tabNameNode.getChild(0).getText()); + final String tableName = unescapeIdentifier(tabNameNode.getChild(1).getText()); if (dbName.contains(".") || tableName.contains(".")) { throw new SemanticException(ErrorMsg.OBJECTNAME_CONTAINS_DOT.getMsg(tabNameNode)); } - return new String[] {dbName, tableName}; + return TableName.fromString(tableName, catalogName, dbName); } - String tableName = unescapeIdentifier(tabNameNode.getChild(0).getText()); + final String tableName = unescapeIdentifier(tabNameNode.getChild(0).getText()); if (tableName.contains(".")) { throw new SemanticException(ErrorMsg.OBJECTNAME_CONTAINS_DOT.getMsg(tabNameNode)); } - return Utilities.getDbTableName(tableName); - } - - public static String getDotName(String[] qname) throws SemanticException { - String genericName = StringUtils.join(qname, "."); - if (qname.length != 2) { - throw new SemanticException(ErrorMsg.INVALID_TABLE_NAME, genericName); - } - return genericName; + return TableName.fromString(tableName, catalogName, SessionState.get().getCurrentDatabase()); } /** @@ -704,112 +708,105 @@ private static String spliceString(String str, int i, int length, String replace /** * Process the primary keys from the ast node and populate the SQLPrimaryKey list. 
*/ - protected static void processPrimaryKeys(String databaseName, String tableName, - ASTNode child, List primaryKeys) throws SemanticException { + protected static void processPrimaryKeys(TableName tName, ASTNode child, List primaryKeys) + throws SemanticException { List primaryKeyInfos = new ArrayList(); generateConstraintInfos(child, primaryKeyInfos); - constraintInfosToPrimaryKeys(databaseName, tableName, primaryKeyInfos, primaryKeys); + constraintInfosToPrimaryKeys(tName, primaryKeyInfos, primaryKeys); } - protected static void processPrimaryKeys(String databaseName, String tableName, - ASTNode child, List columnNames, List primaryKeys) - throws SemanticException { + protected static void processPrimaryKeys(TableName tName, ASTNode child, List columnNames, + List primaryKeys) throws SemanticException { List primaryKeyInfos = new ArrayList(); generateConstraintInfos(child, columnNames, primaryKeyInfos, null, null); - constraintInfosToPrimaryKeys(databaseName, tableName, primaryKeyInfos, primaryKeys); + constraintInfosToPrimaryKeys(tName, primaryKeyInfos, primaryKeys); } - private static void constraintInfosToPrimaryKeys(String databaseName, String tableName, - List primaryKeyInfos, List primaryKeys) { + private static void constraintInfosToPrimaryKeys(TableName tName, List primaryKeyInfos, + List primaryKeys) { int i = 1; for (ConstraintInfo primaryKeyInfo : primaryKeyInfos) { - primaryKeys.add(new SQLPrimaryKey(databaseName, tableName, primaryKeyInfo.colName, - i++, primaryKeyInfo.constraintName, primaryKeyInfo.enable, - primaryKeyInfo.validate, primaryKeyInfo.rely)); + primaryKeys.add( + new SQLPrimaryKey(tName.getDb(), tName.getTable(), primaryKeyInfo.colName, i++, primaryKeyInfo.constraintName, + primaryKeyInfo.enable, primaryKeyInfo.validate, primaryKeyInfo.rely)); } } /** * Process the unique constraints from the ast node and populate the SQLUniqueConstraint list. 
*/ - protected static void processUniqueConstraints(String catName, String databaseName, String tableName, - ASTNode child, List uniqueConstraints) throws SemanticException { + protected static void processUniqueConstraints(TableName tName, ASTNode child, + List uniqueConstraints) throws SemanticException { List uniqueInfos = new ArrayList(); generateConstraintInfos(child, uniqueInfos); - constraintInfosToUniqueConstraints(catName, databaseName, tableName, uniqueInfos, uniqueConstraints); + constraintInfosToUniqueConstraints(tName, uniqueInfos, uniqueConstraints); } - protected static void processUniqueConstraints(String catName, String databaseName, String tableName, - ASTNode child, List columnNames, List uniqueConstraints) - throws SemanticException { + protected static void processUniqueConstraints(TableName tName, ASTNode child, List columnNames, + List uniqueConstraints) throws SemanticException { List uniqueInfos = new ArrayList(); generateConstraintInfos(child, columnNames, uniqueInfos, null, null); - constraintInfosToUniqueConstraints(catName, databaseName, tableName, uniqueInfos, uniqueConstraints); + constraintInfosToUniqueConstraints(tName, uniqueInfos, uniqueConstraints); } - private static void constraintInfosToUniqueConstraints(String catName, String databaseName, String tableName, - List uniqueInfos, List uniqueConstraints) { + private static void constraintInfosToUniqueConstraints(TableName tName, List uniqueInfos, + List uniqueConstraints) { int i = 1; for (ConstraintInfo uniqueInfo : uniqueInfos) { - uniqueConstraints.add(new SQLUniqueConstraint(catName, databaseName, tableName, uniqueInfo.colName, - i++, uniqueInfo.constraintName, uniqueInfo.enable, uniqueInfo.validate, uniqueInfo.rely)); + uniqueConstraints.add( + new SQLUniqueConstraint(tName.getCat(), tName.getDb(), tName.getTable(), uniqueInfo.colName, i++, + uniqueInfo.constraintName, uniqueInfo.enable, uniqueInfo.validate, uniqueInfo.rely)); } } - protected static void processCheckConstraints(String catName, String databaseName, String tableName, - ASTNode child, List columnNames, - List checkConstraints, final ASTNode typeChild, - final TokenRewriteStream tokenRewriteStream) + protected static void processCheckConstraints(TableName tName, ASTNode child, List columnNames, + List checkConstraints, final ASTNode typeChild, final TokenRewriteStream tokenRewriteStream) throws SemanticException { List checkInfos = new ArrayList(); generateConstraintInfos(child, columnNames, checkInfos, typeChild, tokenRewriteStream); - constraintInfosToCheckConstraints(catName, databaseName, tableName, checkInfos, checkConstraints); + constraintInfosToCheckConstraints(tName, checkInfos, checkConstraints); } - private static void constraintInfosToCheckConstraints(String catName, String databaseName, String tableName, - List checkInfos, - List checkConstraints) { + private static void constraintInfosToCheckConstraints(TableName tName, List checkInfos, + List checkConstraints) { for (ConstraintInfo checkInfo : checkInfos) { - checkConstraints.add(new SQLCheckConstraint(catName, databaseName, tableName, checkInfo.colName, - checkInfo.defaultValue, checkInfo.constraintName, checkInfo.enable, - checkInfo.validate, checkInfo.rely)); + checkConstraints.add(new SQLCheckConstraint(tName.getCat(), tName.getDb(), tName.getTable(), checkInfo.colName, + checkInfo.defaultValue, checkInfo.constraintName, checkInfo.enable, checkInfo.validate, checkInfo.rely)); } } - protected static void processDefaultConstraints(String catName, String databaseName, 
String tableName, - ASTNode child, List columnNames, List defaultConstraints, final ASTNode typeChild, - final TokenRewriteStream tokenRewriteStream) - throws SemanticException { + protected static void processDefaultConstraints(TableName tName, ASTNode child, List columnNames, + List defaultConstraints, final ASTNode typeChild, + final TokenRewriteStream tokenRewriteStream) throws SemanticException { List defaultInfos = new ArrayList(); generateConstraintInfos(child, columnNames, defaultInfos, typeChild, tokenRewriteStream); - constraintInfosToDefaultConstraints(catName, databaseName, tableName, defaultInfos, defaultConstraints); + constraintInfosToDefaultConstraints(tName, defaultInfos, defaultConstraints); } - private static void constraintInfosToDefaultConstraints( - String catName, String databaseName, String tableName, - List defaultInfos, List defaultConstraints) { + private static void constraintInfosToDefaultConstraints(TableName tName, List defaultInfos, + List defaultConstraints) { for (ConstraintInfo defaultInfo : defaultInfos) { - defaultConstraints.add(new SQLDefaultConstraint(catName, databaseName, tableName, - defaultInfo.colName, defaultInfo.defaultValue, defaultInfo.constraintName, - defaultInfo.enable, defaultInfo.validate, defaultInfo.rely)); + defaultConstraints.add( + new SQLDefaultConstraint(tName.getCat(), tName.getDb(), tName.getTable(), defaultInfo.colName, + defaultInfo.defaultValue, defaultInfo.constraintName, defaultInfo.enable, defaultInfo.validate, + defaultInfo.rely)); } } - protected static void processNotNullConstraints(String catName, String databaseName, String tableName, - ASTNode child, List columnNames, List notNullConstraints) - throws SemanticException { + protected static void processNotNullConstraints(TableName tName, ASTNode child, List columnNames, + List notNullConstraints) throws SemanticException { List notNullInfos = new ArrayList(); generateConstraintInfos(child, columnNames, notNullInfos, null, null); - constraintInfosToNotNullConstraints(catName, databaseName, tableName, notNullInfos, notNullConstraints); + constraintInfosToNotNullConstraints(tName, notNullInfos, notNullConstraints); } - private static void constraintInfosToNotNullConstraints( - String catName, String databaseName, String tableName, List notNullInfos, + private static void constraintInfosToNotNullConstraints(TableName tName, List notNullInfos, List notNullConstraints) { + // TODO: null check and throw for (ConstraintInfo notNullInfo : notNullInfos) { - notNullConstraints.add(new SQLNotNullConstraint(catName, databaseName, tableName, - notNullInfo.colName, notNullInfo.constraintName, notNullInfo.enable, notNullInfo.validate, - notNullInfo.rely)); + notNullConstraints.add( + new SQLNotNullConstraint(tName.getCat(), tName.getDb(), tName.getTable(), notNullInfo.colName, + notNullInfo.constraintName, notNullInfo.enable, notNullInfo.validate, notNullInfo.rely)); } } @@ -1094,12 +1091,13 @@ else if(child.getToken().getType() == HiveParser.TOK_CHECK_CONSTRAINT) { /** * Process the foreign keys from the AST and populate the foreign keys in the SQLForeignKey list + * @param tName catalog/db/table name reference * @param child Foreign Key token node * @param foreignKeys SQLForeignKey list * @throws SemanticException */ - protected static void processForeignKeys(String databaseName, String tableName, - ASTNode child, List foreignKeys) throws SemanticException { + protected static void processForeignKeys(TableName tName, ASTNode child, List foreignKeys) + throws SemanticException { // 
The ANTLR grammar looks like : // 1. KW_CONSTRAINT idfr=identifier KW_FOREIGN KW_KEY fkCols=columnParenthesesList // KW_REFERENCES tabName=tableName parCols=columnParenthesesList @@ -1157,16 +1155,16 @@ protected static void processForeignKeys(String databaseName, String tableName, " The number of foreign key columns should be same as number of parent key columns ")); } - String[] parentDBTbl = getQualifiedTableName((ASTNode) child.getChild(ptIndex)); + final TableName parentTblName = getQualifiedTableName((ASTNode) child.getChild(ptIndex)); for (int j = 0; j < child.getChild(fkIndex).getChildCount(); j++) { SQLForeignKey sqlForeignKey = new SQLForeignKey(); - sqlForeignKey.setFktable_db(databaseName); - sqlForeignKey.setFktable_name(tableName); + sqlForeignKey.setFktable_db(tName.getDb()); + sqlForeignKey.setFktable_name(tName.getTable()); Tree fkgrandChild = child.getChild(fkIndex).getChild(j); checkColumnName(fkgrandChild.getText()); sqlForeignKey.setFkcolumn_name(unescapeIdentifier(fkgrandChild.getText().toLowerCase())); - sqlForeignKey.setPktable_db(parentDBTbl[0]); - sqlForeignKey.setPktable_name(parentDBTbl[1]); + sqlForeignKey.setPktable_db(parentTblName.getDb()); + sqlForeignKey.setPktable_name(parentTblName.getTable()); Tree pkgrandChild = child.getChild(pkIndex).getChild(j); sqlForeignKey.setPkcolumn_name(unescapeIdentifier(pkgrandChild.getText().toLowerCase())); sqlForeignKey.setKey_seq(j+1); @@ -1221,34 +1219,33 @@ private static void checkColumnName(String columnName) throws SemanticException ASTNode child = (ASTNode) ast.getChild(i); switch (child.getToken().getType()) { case HiveParser.TOK_UNIQUE: { - String[] qualifiedTabName = getQualifiedTableName((ASTNode) parent.getChild(0)); + final TableName tName = + getQualifiedTableName((ASTNode) parent.getChild(0), MetaStoreUtils.getDefaultCatalog(conf)); // TODO CAT - for now always use the default catalog. Eventually will want to see if // the user specified a catalog - String catName = MetaStoreUtils.getDefaultCatalog(conf); - processUniqueConstraints(catName, qualifiedTabName[0], qualifiedTabName[1], child, - uniqueConstraints); + processUniqueConstraints(tName, child, uniqueConstraints); } break; case HiveParser.TOK_PRIMARY_KEY: { if (!primaryKeys.isEmpty()) { - throw new SemanticException(ErrorMsg.INVALID_CONSTRAINT.getMsg( - "Cannot exist more than one primary key definition for the same table")); + throw new SemanticException(ErrorMsg.INVALID_CONSTRAINT + .getMsg("Cannot exist more than one primary key definition for the same table")); } - String[] qualifiedTabName = getQualifiedTableName((ASTNode) parent.getChild(0)); - processPrimaryKeys(qualifiedTabName[0], qualifiedTabName[1], child, primaryKeys); + final TableName tName = getQualifiedTableName((ASTNode) parent.getChild(0)); + processPrimaryKeys(tName, child, primaryKeys); } break; case HiveParser.TOK_FOREIGN_KEY: { - String[] qualifiedTabName = getQualifiedTableName((ASTNode) parent.getChild(0)); - processForeignKeys(qualifiedTabName[0], qualifiedTabName[1], child, foreignKeys); + final TableName tName = getQualifiedTableName((ASTNode) parent.getChild(0)); + processForeignKeys(tName, child, foreignKeys); } break; case HiveParser.TOK_CHECK_CONSTRAINT: { + final TableName tName = + getQualifiedTableName((ASTNode) parent.getChild(0), MetaStoreUtils.getDefaultCatalog(conf)); // TODO CAT - for now always use the default catalog. 
Eventually will want to see if // the user specified a catalog - String catName = MetaStoreUtils.getDefaultCatalog(conf); - String[] qualifiedTabName = getQualifiedTableName((ASTNode) parent.getChild(0)); - processCheckConstraints(catName, qualifiedTabName[0], qualifiedTabName[1], child, null, + processCheckConstraints(tName, child, null, checkConstraints, null, tokenRewriteStream); } break; @@ -1279,39 +1276,35 @@ private static void checkColumnName(String columnName) throws SemanticException constraintChild = (ASTNode) child.getChild(2); } if (constraintChild != null) { - String[] qualifiedTabName = getQualifiedTableName((ASTNode) parent.getChild(0)); + final TableName tName = + getQualifiedTableName((ASTNode) parent.getChild(0), MetaStoreUtils.getDefaultCatalog(conf)); // TODO CAT - for now always use the default catalog. Eventually will want to see if // the user specified a catalog - String catName = MetaStoreUtils.getDefaultCatalog(conf); // Process column constraint switch (constraintChild.getToken().getType()) { case HiveParser.TOK_CHECK_CONSTRAINT: - processCheckConstraints(catName, qualifiedTabName[0], qualifiedTabName[1], constraintChild, - ImmutableList.of(col.getName()), checkConstraints, typeChild, - tokenRewriteStream); + processCheckConstraints(tName, constraintChild, ImmutableList.of(col.getName()), checkConstraints, + typeChild, tokenRewriteStream); break; case HiveParser.TOK_DEFAULT_VALUE: - processDefaultConstraints(catName, qualifiedTabName[0], qualifiedTabName[1], constraintChild, - ImmutableList.of(col.getName()), defaultConstraints, typeChild, tokenRewriteStream); + processDefaultConstraints(tName, constraintChild, ImmutableList.of(col.getName()), defaultConstraints, + typeChild, tokenRewriteStream); break; case HiveParser.TOK_NOT_NULL: - processNotNullConstraints(catName, qualifiedTabName[0], qualifiedTabName[1], constraintChild, - ImmutableList.of(col.getName()), notNullConstraints); + processNotNullConstraints(tName, constraintChild, ImmutableList.of(col.getName()), notNullConstraints); break; case HiveParser.TOK_UNIQUE: - processUniqueConstraints(catName, qualifiedTabName[0], qualifiedTabName[1], constraintChild, - ImmutableList.of(col.getName()), uniqueConstraints); + processUniqueConstraints(tName, constraintChild, ImmutableList.of(col.getName()), uniqueConstraints); break; case HiveParser.TOK_PRIMARY_KEY: if (!primaryKeys.isEmpty()) { - throw new SemanticException(ErrorMsg.INVALID_CONSTRAINT.getMsg( - "Cannot exist more than one primary key definition for the same table")); + throw new SemanticException(ErrorMsg.INVALID_CONSTRAINT + .getMsg("Cannot exist more than one primary key definition for the same table")); } - processPrimaryKeys(qualifiedTabName[0], qualifiedTabName[1], constraintChild, - ImmutableList.of(col.getName()), primaryKeys); + processPrimaryKeys(tName, constraintChild, ImmutableList.of(col.getName()), primaryKeys); break; case HiveParser.TOK_FOREIGN_KEY: - processForeignKeys(qualifiedTabName[0], qualifiedTabName[1], constraintChild, + processForeignKeys(tName, constraintChild, foreignKeys); break; default: @@ -1426,7 +1419,7 @@ private static String getUnionTypeStringFromAST(ASTNode typeNode) * */ public static class TableSpec { - public String tableName; + public TableName tableName; public Table tableHandle; public Map partSpec; // has to use LinkedHashMap to enforce order public Partition partHandle; @@ -1442,7 +1435,7 @@ public TableSpec(Hive db, HiveConf conf, ASTNode ast) public TableSpec(Table table) { tableHandle = table; - tableName 
= table.getDbName() + "." + table.getTableName(); + tableName = TableName.fromString(table.getTableName(), SessionState.get().getCurrentCatalog(), table.getDbName()); specType = SpecType.TABLE_ONLY; } @@ -1454,7 +1447,7 @@ public TableSpec(Hive db, String tableName, Map partSpec, boolea throws HiveException { Table table = db.getTable(tableName); tableHandle = table; - this.tableName = table.getDbName() + "." + table.getTableName(); + this.tableName = TableName.fromString(table.getTableName(), SessionState.get().getCurrentCatalog(), table.getDbName()); if (partSpec == null) { specType = SpecType.TABLE_ONLY; } else if(allowPartialPartitionsSpec) { @@ -1474,7 +1467,8 @@ public TableSpec(Hive db, String tableName, Map partSpec, boolea public TableSpec(Table tableHandle, List partitions) throws HiveException { this.tableHandle = tableHandle; - this.tableName = tableHandle.getTableName(); + this.tableName = TableName.fromString(tableHandle.getTableName(), + SessionState.get().getCurrentCatalog(), SessionState.get().getCurrentDatabase()); if (partitions != null && !partitions.isEmpty()) { this.specType = SpecType.STATIC_PARTITION; this.partitions = partitions; @@ -1513,16 +1507,17 @@ public TableSpec(Hive db, HiveConf conf, ASTNode ast, boolean allowDynamicPartit try { // get table metadata - tableName = getUnescapedName((ASTNode)ast.getChild(0)); + tableName = TableName.fromString(getUnescapedName((ASTNode)ast.getChild(0)), + SessionState.get().getCurrentCatalog(), SessionState.get().getCurrentDatabase()); boolean testMode = conf.getBoolVar(HiveConf.ConfVars.HIVETESTMODE); if (testMode) { - tableName = conf.getVar(HiveConf.ConfVars.HIVETESTMODEPREFIX) - + tableName; + tableName = TableName.fromString(String.join("", conf.getVar(HiveConf.ConfVars.HIVETESTMODEPREFIX), + tableName.getTable()), tableName.getCat(), tableName.getDb()); // not that elegant, but hard to refactor } if (ast.getToken().getType() != HiveParser.TOK_CREATETABLE && ast.getToken().getType() != HiveParser.TOK_CREATE_MATERIALIZED_VIEW && ast.getToken().getType() != HiveParser.TOK_ALTER_MATERIALIZED_VIEW) { - tableHandle = db.getTable(tableName); + tableHandle = db.getTable(tableName.getTable()); } } catch (InvalidTableException ite) { throw new SemanticException(ErrorMsg.INVALID_TABLE.getMsg(ast @@ -2172,12 +2167,12 @@ protected Database getDatabase(String dbName, boolean throwException) throws Sem return database; } - protected Table getTable(String[] qualified) throws SemanticException { - return getTable(qualified[0], qualified[1], true); + protected Table getTable(TableName tn) throws SemanticException { + return getTable(tn, true); } - protected Table getTable(String[] qualified, boolean throwException) throws SemanticException { - return getTable(qualified[0], qualified[1], throwException); + protected Table getTable(TableName tn, boolean throwException) throws SemanticException { + return getTable(tn.getDb(), tn.getTable(), throwException); } protected Table getTable(String tblName) throws SemanticException { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java index f5a1c74671..088ac46e0f 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java @@ -2185,8 +2185,7 @@ private RelNode applyMaterializedViewRewriting(RelOptPlanner planner, RelNode ba // We only retrieve the materialization corresponding to the rebuild. 
In turn, // we pass 'true' for the forceMVContentsUpToDate parameter, as we cannot allow the // materialization contents to be stale for a rebuild if we want to use it. - materializations = db.getValidMaterializedView(mvRebuildDbName, mvRebuildName, - getTablesUsed(basePlan), true, getTxnMgr()); + materializations = db.getValidMaterializedView(mvRebuildName, getTablesUsed(basePlan), true, getTxnMgr()); } else { // This is not a rebuild, we retrieve all the materializations. In turn, we do not need // to force the materialization contents to be up-to-date, as this is not a rebuild, and diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java index d27a913c74..a2b671346c 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java @@ -43,6 +43,7 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hive.common.StatsSetupConst; +import org.apache.hadoop.hive.common.TableName; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.conf.HiveConf.ConfVars; import org.apache.hadoop.hive.metastore.TableType; @@ -281,11 +282,10 @@ public void analyzeInternal(ASTNode input) throws SemanticException { switch (ast.getType()) { case HiveParser.TOK_ALTERTABLE: { ast = (ASTNode) input.getChild(1); - String[] qualified = getQualifiedTableName((ASTNode) input.getChild(0)); + final TableName tName = + getQualifiedTableName((ASTNode) input.getChild(0), MetaStoreUtils.getDefaultCatalog(conf)); // TODO CAT - for now always use the default catalog. Eventually will want to see if // the user specified a catalog - String catName = MetaStoreUtils.getDefaultCatalog(conf); - String tableName = getDotName(qualified); HashMap partSpec = null; ASTNode partSpecNode = (ASTNode)input.getChild(2); if (partSpecNode != null) { @@ -295,70 +295,70 @@ public void analyzeInternal(ASTNode input) throws SemanticException { if (ast.getType() == HiveParser.TOK_ALTERTABLE_RENAMEPART) { partSpec = getPartSpec(partSpecNode); } else { - partSpec = getValidatedPartSpec(getTable(tableName), partSpecNode, conf, false); + partSpec = getValidatedPartSpec(getTable(tName.getTable()), partSpecNode, conf, false); } } if (ast.getType() == HiveParser.TOK_ALTERTABLE_RENAME) { - analyzeAlterTableRename(qualified, ast, false); + analyzeAlterTableRename(tName, ast, false); } else if (ast.getType() == HiveParser.TOK_ALTERTABLE_TOUCH) { - analyzeAlterTableTouch(qualified, ast); + analyzeAlterTableTouch(tName, ast); } else if (ast.getType() == HiveParser.TOK_ALTERTABLE_ARCHIVE) { - analyzeAlterTableArchive(qualified, ast, false); + analyzeAlterTableArchive(tName, ast, false); } else if (ast.getType() == HiveParser.TOK_ALTERTABLE_UNARCHIVE) { - analyzeAlterTableArchive(qualified, ast, true); + analyzeAlterTableArchive(tName, ast, true); } else if (ast.getType() == HiveParser.TOK_ALTERTABLE_ADDCOLS) { - analyzeAlterTableModifyCols(qualified, ast, partSpec, AlterTableTypes.ADDCOLS); + analyzeAlterTableModifyCols(tName, ast, partSpec, AlterTableTypes.ADDCOLS); } else if (ast.getType() == HiveParser.TOK_ALTERTABLE_REPLACECOLS) { - analyzeAlterTableModifyCols(qualified, ast, partSpec, AlterTableTypes.REPLACECOLS); + analyzeAlterTableModifyCols(tName, ast, partSpec, AlterTableTypes.REPLACECOLS); } else if (ast.getType() == HiveParser.TOK_ALTERTABLE_RENAMECOL) { - analyzeAlterTableRenameCol(catName, 
qualified, ast, partSpec); + analyzeAlterTableRenameCol(tName, ast, partSpec); } else if (ast.getType() == HiveParser.TOK_ALTERTABLE_ADDPARTS) { - analyzeAlterTableAddParts(qualified, ast, false); + analyzeAlterTableAddParts(tName, ast, false); } else if (ast.getType() == HiveParser.TOK_ALTERTABLE_DROPPARTS) { - analyzeAlterTableDropParts(qualified, ast, false); + analyzeAlterTableDropParts(tName, ast, false); } else if (ast.getType() == HiveParser.TOK_ALTERTABLE_PARTCOLTYPE) { - analyzeAlterTablePartColType(qualified, ast); + analyzeAlterTablePartColType(tName, ast); } else if (ast.getType() == HiveParser.TOK_ALTERTABLE_PROPERTIES) { - analyzeAlterTableProps(qualified, null, ast, false, false); + analyzeAlterTableProps(tName, null, ast, false, false); } else if (ast.getType() == HiveParser.TOK_ALTERTABLE_DROPPROPERTIES) { - analyzeAlterTableProps(qualified, null, ast, false, true); + analyzeAlterTableProps(tName, null, ast, false, true); } else if (ast.getType() == HiveParser.TOK_ALTERTABLE_UPDATESTATS) { - analyzeAlterTableProps(qualified, partSpec, ast, false, false); + analyzeAlterTableProps(tName, partSpec, ast, false, false); } else if (ast.getType() == HiveParser.TOK_ALTERTABLE_SKEWED) { - analyzeAltertableSkewedby(qualified, ast); + analyzeAltertableSkewedby(tName, ast); } else if (ast.getType() == HiveParser.TOK_ALTERTABLE_EXCHANGEPARTITION) { - analyzeExchangePartition(qualified, ast); + analyzeExchangePartition(tName, ast); } else if (ast.getToken().getType() == HiveParser.TOK_ALTERTABLE_FILEFORMAT) { - analyzeAlterTableFileFormat(ast, tableName, partSpec); + analyzeAlterTableFileFormat(ast, tName.getTable(), partSpec); } else if (ast.getToken().getType() == HiveParser.TOK_ALTERTABLE_LOCATION) { - analyzeAlterTableLocation(ast, tableName, partSpec); + analyzeAlterTableLocation(ast, tName.getTable(), partSpec); } else if (ast.getToken().getType() == HiveParser.TOK_ALTERTABLE_MERGEFILES) { - analyzeAlterTablePartMergeFiles(ast, tableName, partSpec); + analyzeAlterTablePartMergeFiles(ast, tName.getTable(), partSpec); } else if (ast.getToken().getType() == HiveParser.TOK_ALTERTABLE_SERIALIZER) { - analyzeAlterTableSerde(ast, tableName, partSpec); + analyzeAlterTableSerde(ast, tName.getTable(), partSpec); } else if (ast.getToken().getType() == HiveParser.TOK_ALTERTABLE_SERDEPROPERTIES) { - analyzeAlterTableSerdeProps(ast, tableName, partSpec); + analyzeAlterTableSerdeProps(ast, tName.getTable(), partSpec); } else if (ast.getToken().getType() == HiveParser.TOK_ALTERTABLE_RENAMEPART) { - analyzeAlterTableRenamePart(ast, tableName, partSpec); + analyzeAlterTableRenamePart(ast, tName.getTable(), partSpec); } else if (ast.getToken().getType() == HiveParser.TOK_ALTERTABLE_SKEWED_LOCATION) { - analyzeAlterTableSkewedLocation(ast, tableName, partSpec); + analyzeAlterTableSkewedLocation(ast, tName.getTable(), partSpec); } else if (ast.getToken().getType() == HiveParser.TOK_ALTERTABLE_BUCKETS) { - analyzeAlterTableBucketNum(ast, tableName, partSpec); + analyzeAlterTableBucketNum(ast, tName.getTable(), partSpec); } else if (ast.getToken().getType() == HiveParser.TOK_ALTERTABLE_CLUSTER_SORT) { - analyzeAlterTableClusterSort(ast, tableName, partSpec); + analyzeAlterTableClusterSort(ast, tName.getTable(), partSpec); } else if (ast.getToken().getType() == HiveParser.TOK_ALTERTABLE_COMPACT) { - analyzeAlterTableCompact(ast, tableName, partSpec); - } else if(ast.getToken().getType() == HiveParser.TOK_ALTERTABLE_UPDATECOLSTATS){ - analyzeAlterTableUpdateStats(ast, tableName, partSpec); - } else 
if(ast.getToken().getType() == HiveParser.TOK_ALTERTABLE_DROPCONSTRAINT) { - analyzeAlterTableDropConstraint(ast, tableName); - } else if(ast.getToken().getType() == HiveParser.TOK_ALTERTABLE_ADDCONSTRAINT) { - analyzeAlterTableAddConstraint(ast, tableName); - } else if(ast.getToken().getType() == HiveParser.TOK_ALTERTABLE_UPDATECOLUMNS) { - analyzeAlterTableUpdateColumns(ast, tableName, partSpec); + analyzeAlterTableCompact(ast, tName.getTable(), partSpec); + } else if (ast.getToken().getType() == HiveParser.TOK_ALTERTABLE_UPDATECOLSTATS) { + analyzeAlterTableUpdateStats(ast, tName.getTable(), partSpec); + } else if (ast.getToken().getType() == HiveParser.TOK_ALTERTABLE_DROPCONSTRAINT) { + analyzeAlterTableDropConstraint(ast, tName.getTable()); + } else if (ast.getToken().getType() == HiveParser.TOK_ALTERTABLE_ADDCONSTRAINT) { + analyzeAlterTableAddConstraint(ast, tName.getTable()); + } else if (ast.getToken().getType() == HiveParser.TOK_ALTERTABLE_UPDATECOLUMNS) { + analyzeAlterTableUpdateColumns(ast, tName.getTable(), partSpec); } else if (ast.getToken().getType() == HiveParser.TOK_ALTERTABLE_OWNER) { - analyzeAlterTableOwner(ast, tableName); + analyzeAlterTableOwner(ast, tName.getTable()); } break; } @@ -449,28 +449,27 @@ public void analyzeInternal(ASTNode input) throws SemanticException { analyzeDropTable(ast, TableType.MATERIALIZED_VIEW); break; case HiveParser.TOK_ALTERVIEW: { - String[] qualified = getQualifiedTableName((ASTNode) ast.getChild(0)); + final TableName tName = getQualifiedTableName((ASTNode) ast.getChild(0)); ast = (ASTNode) ast.getChild(1); if (ast.getType() == HiveParser.TOK_ALTERVIEW_PROPERTIES) { - analyzeAlterTableProps(qualified, null, ast, true, false); + analyzeAlterTableProps(tName, null, ast, true, false); } else if (ast.getType() == HiveParser.TOK_ALTERVIEW_DROPPROPERTIES) { - analyzeAlterTableProps(qualified, null, ast, true, true); + analyzeAlterTableProps(tName, null, ast, true, true); } else if (ast.getType() == HiveParser.TOK_ALTERVIEW_ADDPARTS) { - analyzeAlterTableAddParts(qualified, ast, true); + analyzeAlterTableAddParts(tName, ast, true); } else if (ast.getType() == HiveParser.TOK_ALTERVIEW_DROPPARTS) { - analyzeAlterTableDropParts(qualified, ast, true); + analyzeAlterTableDropParts(tName, ast, true); } else if (ast.getType() == HiveParser.TOK_ALTERVIEW_RENAME) { - analyzeAlterTableRename(qualified, ast, true); + analyzeAlterTableRename(tName, ast, true); } break; } case HiveParser.TOK_ALTER_MATERIALIZED_VIEW: { ast = (ASTNode) input.getChild(1); - String[] qualified = getQualifiedTableName((ASTNode) input.getChild(0)); - String tableName = getDotName(qualified); + final TableName tName = getQualifiedTableName((ASTNode) input.getChild(0)); if (ast.getType() == HiveParser.TOK_ALTER_MATERIALIZED_VIEW_REWRITE) { - analyzeAlterMaterializedViewRewrite(tableName, ast); + analyzeAlterMaterializedViewRewrite(tName.getDbTable(), ast); } break; } @@ -816,8 +815,8 @@ private void analyzeAlterDatabaseLocation(ASTNode ast) throws SemanticException addAlterDbDesc(alterDesc); } - private void analyzeExchangePartition(String[] qualified, ASTNode ast) throws SemanticException { - Table destTable = getTable(qualified); + private void analyzeExchangePartition(TableName tName, ASTNode ast) throws SemanticException { + Table destTable = getTable(tName); Table sourceTable = getTable(getUnescapedName((ASTNode)ast.getChild(1))); // Get the partition specs @@ -1708,10 +1707,9 @@ private boolean hasConstraintsEnabled(final String tblName) throws SemanticExcep return false; 
} - private void analyzeAlterTableProps(String[] qualified, HashMap partSpec, - ASTNode ast, boolean expectView, boolean isUnset) throws SemanticException { + private void analyzeAlterTableProps(TableName tableName, Map partSpec, ASTNode ast, + boolean expectView, boolean isUnset) throws SemanticException { - String tableName = getDotName(qualified); HashMap mapProp = getProps((ASTNode) (ast.getChild(0)) .getChild(0)); EnvironmentContext environmentContext = null; @@ -1735,10 +1733,10 @@ private void analyzeAlterTableProps(String[] qualified, HashMap } // if table is being modified to be external we need to make sure existing table // doesn't have enabled constraint since constraints are disallowed with such tables - else if(entry.getKey().equals("external") && entry.getValue().equals("true")){ - if(hasConstraintsEnabled(qualified[1])){ + else if (entry.getKey().equals("external") && entry.getValue().equals("true")) { + if (hasConstraintsEnabled(tableName.getTable())) { throw new SemanticException( - ErrorMsg.INVALID_CSTR_SYNTAX.getMsg("Table: " + tableName + " has constraints enabled." + ErrorMsg.INVALID_CSTR_SYNTAX.getMsg("Table: " + tableName.getDbTable() + " has constraints enabled." + "Please remove those constraints to change this property.")); } } @@ -1759,7 +1757,7 @@ else if(entry.getKey().equals("external") && entry.getValue().equals("true")){ } } AlterTableDesc alterTblDesc = null; - if (isUnset == true) { + if (isUnset) { alterTblDesc = new AlterTableDesc(AlterTableTypes.DROPPROPS, partSpec, expectView); if (ast.getChild(1) != null) { alterTblDesc.setDropIfExists(true); @@ -1770,18 +1768,16 @@ else if(entry.getKey().equals("external") && entry.getValue().equals("true")){ } alterTblDesc.setProps(mapProp); alterTblDesc.setEnvironmentContext(environmentContext); - alterTblDesc.setOldName(tableName); - - + alterTblDesc.setOldName(tableName.getDbTable()); - boolean isToTxn = AcidUtils.isTablePropertyTransactional(mapProp) - || mapProp.containsKey(hive_metastoreConstants.TABLE_TRANSACTIONAL_PROPERTIES); - addInputsOutputsAlterTable(tableName, partSpec, alterTblDesc, isToTxn); + boolean isToTxn = AcidUtils.isTablePropertyTransactional(mapProp) || mapProp + .containsKey(hive_metastoreConstants.TABLE_TRANSACTIONAL_PROPERTIES); + addInputsOutputsAlterTable(tableName.getDbTable(), partSpec, alterTblDesc, isToTxn); // This special handling is because we cannot generate write ID for full ACID conversion, // it will break the weird 10000001-write-ID logic that is currently in use. However, we do // want to generate a write ID for prop changes for existing txn tables, or MM conversion. - boolean isAcidConversion = isToTxn && AcidUtils.isFullAcidTable(mapProp) - && !AcidUtils.isFullAcidTable(getTable(qualified, true)); + boolean isAcidConversion = + isToTxn && AcidUtils.isFullAcidTable(mapProp) && !AcidUtils.isFullAcidTable(getTable(tableName, true)); DDLWork ddlWork = new DDLWork(getInputs(), getOutputs(), alterTblDesc); if (isToTxn) { @@ -1790,7 +1786,7 @@ else if(entry.getKey().equals("external") && entry.getValue().equals("true")){ ddlWork.setNeedLock(true); // Hmm... why don't many other operations here need locks? 
} if (changeStatsSucceeded) { - Table table = getTable(qualified, true); + Table table = getTable(tableName, true); if (AcidUtils.isTransactionalTable(table)) { alterTblDesc.setIsExplicitStatsUpdate(true); setAcidDdlDesc(alterTblDesc); @@ -2244,10 +2240,9 @@ private void analyzeAlterTableDropConstraint(ASTNode ast, String tableName) private void analyzeAlterTableAddConstraint(ASTNode ast, String tableName) throws SemanticException { ASTNode parent = (ASTNode) ast.getParent(); - String[] qualifiedTabName = getQualifiedTableName((ASTNode) parent.getChild(0)); + final TableName tName = getQualifiedTableName((ASTNode) parent.getChild(0), MetaStoreUtils.getDefaultCatalog(conf)); // TODO CAT - for now always use the default catalog. Eventually will want to see if // the user specified a catalog - String catName = MetaStoreUtils.getDefaultCatalog(conf); ASTNode child = (ASTNode) ast.getChild(0); List primaryKeys = new ArrayList<>(); List foreignKeys = new ArrayList<>(); @@ -2256,21 +2251,17 @@ private void analyzeAlterTableAddConstraint(ASTNode ast, String tableName) switch (child.getToken().getType()) { case HiveParser.TOK_UNIQUE: - BaseSemanticAnalyzer.processUniqueConstraints(catName, qualifiedTabName[0], qualifiedTabName[1], - child, uniqueConstraints); + BaseSemanticAnalyzer.processUniqueConstraints(tName, child, uniqueConstraints); break; case HiveParser.TOK_PRIMARY_KEY: - BaseSemanticAnalyzer.processPrimaryKeys(qualifiedTabName[0], qualifiedTabName[1], - child, primaryKeys); + BaseSemanticAnalyzer.processPrimaryKeys(tName, child, primaryKeys); break; case HiveParser.TOK_FOREIGN_KEY: - BaseSemanticAnalyzer.processForeignKeys(qualifiedTabName[0], qualifiedTabName[1], - child, foreignKeys); + BaseSemanticAnalyzer.processForeignKeys(tName, child, foreignKeys); break; case HiveParser.TOK_CHECK_CONSTRAINT: - BaseSemanticAnalyzer.processCheckConstraints(catName, qualifiedTabName[0], qualifiedTabName[1], - child, null, checkConstraints, child, - this.ctx.getTokenRewriteStream()); + BaseSemanticAnalyzer + .processCheckConstraints(tName, child, null, checkConstraints, child, this.ctx.getTokenRewriteStream()); break; default: throw new SemanticException(ErrorMsg.NOT_RECOGNIZED_CONSTRAINT.getMsg( @@ -2349,13 +2340,12 @@ static public String getFullyQualifiedName(ASTNode ast) { static public String getColPath( Hive db, ASTNode node, - String dbName, - String tableName, + TableName tableName, Map partSpec) throws SemanticException { // if this ast has only one child, then no column name specified. if (node.getChildCount() == 1) { - return tableName; + return tableName.getTable(); } ASTNode columnNode = null; @@ -2369,19 +2359,14 @@ static public String getColPath( } if (columnNode != null) { - if (dbName == null) { - return tableName + "." + QualifiedNameUtil.getFullyQualifiedName(columnNode); - } else { - return tableName.substring(dbName.length() + 1, tableName.length()) + "." + - QualifiedNameUtil.getFullyQualifiedName(columnNode); - } + return String.join(".", tableName.getTable(), QualifiedNameUtil.getFullyQualifiedName(columnNode)); } else { - return tableName; + return tableName.getNotEmptyDbTable(); } } // get partition metadata - static public Map getPartitionSpec(Hive db, ASTNode ast, String tableName) + static public Map getPartitionSpec(Hive db, ASTNode ast, TableName tableName) throws SemanticException { ASTNode partNode = null; // if this ast has only one child, then no partition spec specified. 
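// Illustrative sketch, not part of the patch: how the TableName accessors used in the surrounding
// hunks are expected to behave, inferred from their call sites here; the concrete names and the
// printed values noted below are hypothetical.
import org.apache.hadoop.hive.common.TableName;

public class TableNameUsageSketch {
  public static void main(String[] args) {
    // fromString(name, catalog, database); the name itself may already be qualified as "db.table".
    final TableName tn = TableName.fromString("src", "hive", "default");
    System.out.println(tn.getCat());             // the catalog, e.g. "hive"
    System.out.println(tn.getDb());              // the database, e.g. "default"
    System.out.println(tn.getTable());           // the unqualified table, e.g. "src"
    System.out.println(tn.getDbTable());         // "default.src", what the removed getDotName() used to produce
    System.out.println(tn.getNotEmptyDbTable()); // db-qualified name, falling back to the table alone when no db is set
    System.out.println(TableName.getDbTable("default", "src")); // static helper joining db and table with a dot
  }
}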
@@ -2403,10 +2388,10 @@ static public String getColPath( if (partNode != null) { Table tab = null; try { - tab = db.getTable(tableName); + tab = db.getTable(tableName.getNotEmptyDbTable()); } catch (InvalidTableException e) { - throw new SemanticException(ErrorMsg.INVALID_TABLE.getMsg(tableName), e); + throw new SemanticException(ErrorMsg.INVALID_TABLE.getMsg(tableName.getNotEmptyDbTable()), e); } catch (HiveException e) { throw new SemanticException(e.getMessage(), e); @@ -2488,8 +2473,7 @@ private void validateTable(String tableName, Map partSpec) private void analyzeDescribeTable(ASTNode ast) throws SemanticException { ASTNode tableTypeExpr = (ASTNode) ast.getChild(0); - String dbName = null; - String tableName = null; + final TableName tableName; String colPath = null; Map partSpec = null; @@ -2500,10 +2484,9 @@ private void analyzeDescribeTable(ASTNode ast) throws SemanticException { if (((ASTNode) tableTypeExpr.getChild(0)).getType() == HiveParser.TOK_TABNAME) { tableNode = (ASTNode) tableTypeExpr.getChild(0); if (tableNode.getChildCount() == 1) { - tableName = ((ASTNode) tableNode.getChild(0)).getText(); + tableName = Utilities.getTableName(((ASTNode) tableNode.getChild(0)).getText()); } else { - dbName = ((ASTNode) tableNode.getChild(0)).getText(); - tableName = dbName + "." + ((ASTNode) tableNode.getChild(1)).getText(); + tableName = TableName.fromString(((ASTNode) tableNode.getChild(1)).getText(), SessionState.get().getCurrentCatalog(), ((ASTNode) tableNode.getChild(0)).getText()); } } else { throw new SemanticException(((ASTNode) tableTypeExpr.getChild(0)).getText() + " is not an expected token type"); @@ -2513,19 +2496,19 @@ private void analyzeDescribeTable(ASTNode ast) throws SemanticException { partSpec = QualifiedNameUtil.getPartitionSpec(db, tableTypeExpr, tableName); // process the third child node,if exists, to get partition spec(s) - colPath = QualifiedNameUtil.getColPath(db, tableTypeExpr, dbName, tableName, partSpec); + colPath = QualifiedNameUtil.getColPath(db, tableTypeExpr, tableName, partSpec); // if database is not the one currently using // validate database - if (dbName != null) { - validateDatabase(dbName); + if (tableName.getDb() != null) { + validateDatabase(tableName.getDb()); } if (partSpec != null) { - validateTable(tableName, partSpec); + validateTable(tableName.getNotEmptyDbTable(), partSpec); } DescTableDesc descTblDesc = new DescTableDesc( - ctx.getResFile(), tableName, partSpec, colPath); + ctx.getResFile(), tableName.getNotEmptyDbTable(), partSpec, colPath); boolean showColStats = false; if (ast.getChildCount() == 2) { @@ -2536,7 +2519,7 @@ private void analyzeDescribeTable(ASTNode ast) throws SemanticException { // will contain tablename.column_name. If column_name is not specified // colPath will be equal to tableName. 
This is how we can differentiate // if we are describing a table or column - if (!colPath.equalsIgnoreCase(tableName) && descTblDesc.isFormatted()) { + if (!colPath.equalsIgnoreCase(tableName.getNotEmptyDbTable()) && descTblDesc.isFormatted()) { showColStats = true; } } @@ -2779,17 +2762,15 @@ private void analyzeShowTableStatus(ASTNode ast) throws SemanticException { private void analyzeShowTableProperties(ASTNode ast) throws SemanticException { ShowTblPropertiesDesc showTblPropertiesDesc; - String[] qualified = getQualifiedTableName((ASTNode) ast.getChild(0)); + final TableName tName = getQualifiedTableName((ASTNode) ast.getChild(0)); String propertyName = null; if (ast.getChildCount() > 1) { propertyName = unescapeSQLString(ast.getChild(1).getText()); } - String tableNames = getDotName(qualified); - validateTable(tableNames, null); + validateTable(tName.getDbTable(), null); - showTblPropertiesDesc = new ShowTblPropertiesDesc(ctx.getResFile().toString(), tableNames, - propertyName); + showTblPropertiesDesc = new ShowTblPropertiesDesc(ctx.getResFile().toString(), tName.getDbTable(), propertyName); rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), showTblPropertiesDesc))); setFetchTask(createFetchTask(showTblPropertiesDesc.getSchema())); @@ -3187,25 +3168,22 @@ private void analyzeDescFunction(ASTNode ast) throws SemanticException { } - private void analyzeAlterTableRename(String[] source, ASTNode ast, boolean expectView) + private void analyzeAlterTableRename(TableName source, ASTNode ast, boolean expectView) throws SemanticException { - String[] target = getQualifiedTableName((ASTNode) ast.getChild(0)); - - String sourceName = getDotName(source); - String targetName = getDotName(target); + final TableName target = getQualifiedTableName((ASTNode) ast.getChild(0)); - AlterTableDesc alterTblDesc = new AlterTableDesc(sourceName, targetName, expectView, null); - Table table = getTable(sourceName, true); + AlterTableDesc alterTblDesc = new AlterTableDesc(source.getDbTable(), target.getDbTable(), expectView, null); + Table table = getTable(source.getDbTable(), true); if (AcidUtils.isTransactionalTable(table)) { setAcidDdlDesc(alterTblDesc); } - addInputsOutputsAlterTable(sourceName, null, alterTblDesc); + addInputsOutputsAlterTable(source.getDbTable(), null, alterTblDesc); rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), alterTblDesc))); } - private void analyzeAlterTableRenameCol(String catName, String[] qualified, ASTNode ast, - HashMap partSpec) throws SemanticException { + private void analyzeAlterTableRenameCol(TableName tName, ASTNode ast, Map partSpec) + throws SemanticException { String newComment = null; boolean first = false; String flagCol = null; @@ -3248,35 +3226,29 @@ private void analyzeAlterTableRenameCol(String catName, String[] qualified, ASTN switch (constraintChild.getToken().getType()) { case HiveParser.TOK_CHECK_CONSTRAINT: checkConstraints = new ArrayList<>(); - processCheckConstraints(catName, qualified[0], qualified[1], constraintChild, - ImmutableList.of(newColName), checkConstraints, (ASTNode)ast.getChild(2), - this.ctx.getTokenRewriteStream()); + processCheckConstraints(tName, constraintChild, ImmutableList.of(newColName), checkConstraints, + (ASTNode) ast.getChild(2), this.ctx.getTokenRewriteStream()); break; case HiveParser.TOK_DEFAULT_VALUE: defaultConstraints = new ArrayList<>(); - processDefaultConstraints(catName, qualified[0], qualified[1], constraintChild, - ImmutableList.of(newColName), defaultConstraints, 
(ASTNode)ast.getChild(2), - this.ctx.getTokenRewriteStream()); + processDefaultConstraints(tName, constraintChild, ImmutableList.of(newColName), defaultConstraints, + (ASTNode) ast.getChild(2), this.ctx.getTokenRewriteStream()); break; case HiveParser.TOK_NOT_NULL: notNullConstraints = new ArrayList<>(); - processNotNullConstraints(catName, qualified[0], qualified[1], constraintChild, - ImmutableList.of(newColName), notNullConstraints); + processNotNullConstraints(tName, constraintChild, ImmutableList.of(newColName), notNullConstraints); break; case HiveParser.TOK_UNIQUE: uniqueConstraints = new ArrayList<>(); - processUniqueConstraints(catName, qualified[0], qualified[1], constraintChild, - ImmutableList.of(newColName), uniqueConstraints); + processUniqueConstraints(tName, constraintChild, ImmutableList.of(newColName), uniqueConstraints); break; case HiveParser.TOK_PRIMARY_KEY: primaryKeys = new ArrayList<>(); - processPrimaryKeys(qualified[0], qualified[1], constraintChild, - ImmutableList.of(newColName), primaryKeys); + processPrimaryKeys(tName, constraintChild, ImmutableList.of(newColName), primaryKeys); break; case HiveParser.TOK_FOREIGN_KEY: foreignKeys = new ArrayList<>(); - processForeignKeys(qualified[0], qualified[1], constraintChild, - foreignKeys); + processForeignKeys(tName, constraintChild, foreignKeys); break; default: throw new SemanticException(ErrorMsg.NOT_RECOGNIZED_CONSTRAINT.getMsg( @@ -3285,7 +3257,7 @@ private void analyzeAlterTableRenameCol(String catName, String[] qualified, ASTN } /* Validate the operation of renaming a column name. */ - Table tab = getTable(qualified); + Table tab = getTable(tName); if(checkConstraints != null && !checkConstraints.isEmpty()) { validateCheckConstraint(tab.getCols(), checkConstraints, ctx.getConf()); @@ -3306,21 +3278,18 @@ private void analyzeAlterTableRenameCol(String catName, String[] qualified, ASTN + ErrorMsg.ALTER_TABLE_NOT_ALLOWED_RENAME_SKEWED_COLUMN.getMsg()); } - String tblName = getDotName(qualified); AlterTableDesc alterTblDesc; if (primaryKeys == null && foreignKeys == null && uniqueConstraints == null && notNullConstraints == null && defaultConstraints == null && checkConstraints == null) { - alterTblDesc = new AlterTableDesc(tblName, partSpec, - unescapeIdentifier(oldColName), unescapeIdentifier(newColName), - newType, newComment, first, flagCol, isCascade); + alterTblDesc = new AlterTableDesc(tName.getDbTable(), partSpec, unescapeIdentifier(oldColName), + unescapeIdentifier(newColName), newType, newComment, first, flagCol, isCascade); } else { - alterTblDesc = new AlterTableDesc(tblName, partSpec, - unescapeIdentifier(oldColName), unescapeIdentifier(newColName), - newType, newComment, first, flagCol, isCascade, - primaryKeys, foreignKeys, uniqueConstraints, notNullConstraints, defaultConstraints, checkConstraints); + alterTblDesc = new AlterTableDesc(tName.getDbTable(), partSpec, unescapeIdentifier(oldColName), + unescapeIdentifier(newColName), newType, newComment, first, flagCol, isCascade, primaryKeys, foreignKeys, + uniqueConstraints, notNullConstraints, defaultConstraints, checkConstraints); } - addInputsOutputsAlterTable(tblName, partSpec, alterTblDesc); + addInputsOutputsAlterTable(tName.getDbTable(), partSpec, alterTblDesc); if (AcidUtils.isTransactionalTable(tab)) { // Note: we might actually need it only when certain changes (e.g. name or type?) are made. 
setAcidDdlDesc(alterTblDesc); @@ -3373,30 +3342,26 @@ private void analyzeAlterTableBucketNum(ASTNode ast, String tblName, alterBucketNum))); } - private void analyzeAlterTableModifyCols(String[] qualified, ASTNode ast, - HashMap partSpec, AlterTableTypes alterType) throws SemanticException { + private void analyzeAlterTableModifyCols(TableName tName, ASTNode ast, Map partSpec, + AlterTableTypes alterType) throws SemanticException { - String tblName = getDotName(qualified); List newCols = getColumns((ASTNode) ast.getChild(0)); boolean isCascade = false; if (null != ast.getFirstChildWithType(HiveParser.TOK_CASCADE)) { isCascade = true; } - AlterTableDesc alterTblDesc = new AlterTableDesc(tblName, partSpec, newCols, - alterType, isCascade); - Table table = getTable(tblName, true); + AlterTableDesc alterTblDesc = new AlterTableDesc(tName.getDbTable(), partSpec, newCols, alterType, isCascade); + Table table = getTable(tName.getDbTable(), true); if (AcidUtils.isTransactionalTable(table)) { setAcidDdlDesc(alterTblDesc); } - addInputsOutputsAlterTable(tblName, partSpec, alterTblDesc); - rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), - alterTblDesc))); + addInputsOutputsAlterTable(tName.getDbTable(), partSpec, alterTblDesc); + rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), alterTblDesc))); } - private void analyzeAlterTableDropParts(String[] qualified, ASTNode ast, boolean expectView) - throws SemanticException { + private void analyzeAlterTableDropParts(TableName tName, ASTNode ast, boolean expectView) throws SemanticException { boolean ifExists = (ast.getFirstChildWithType(HiveParser.TOK_IFEXISTS) != null) || HiveConf.getBoolVar(conf, ConfVars.DROPIGNORESNONEXISTENT); @@ -3413,7 +3378,7 @@ private void analyzeAlterTableDropParts(String[] qualified, ASTNode ast, boolean Table tab = null; try { - tab = getTable(qualified); + tab = getTable(tName); } catch (SemanticException se){ if (replicationSpec.isInReplicationScope() && ( @@ -3447,17 +3412,15 @@ private void analyzeAlterTableDropParts(String[] qualified, ASTNode ast, boolean addTableDropPartsOutputs(tab, partSpecs.values(), !ifExists); DropTableDesc dropTblDesc = - new DropTableDesc(getDotName(qualified), partSpecs, expectView ? TableType.VIRTUAL_VIEW : null, - mustPurge, replicationSpec); + new DropTableDesc(tName.getDbTable(), partSpecs, expectView ? TableType.VIRTUAL_VIEW : null, mustPurge, + replicationSpec); rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), dropTblDesc))); } - private void analyzeAlterTablePartColType(String[] qualified, ASTNode ast) - throws SemanticException { - + private void analyzeAlterTablePartColType(TableName tName, ASTNode ast) throws SemanticException { // check if table exists. - Table tab = getTable(qualified); + Table tab = getTable(tName); inputs.add(new ReadEntity(tab)); // validate the DDL is a valid operation on the table. 
@@ -3494,8 +3457,7 @@ private void analyzeAlterTablePartColType(String[] qualified, ASTNode ast) throw new SemanticException(ErrorMsg.INVALID_COLUMN.getMsg(newCol.getName())); } - AlterTableAlterPartDesc alterTblAlterPartDesc = - new AlterTableAlterPartDesc(getDotName(qualified), newCol); + AlterTableAlterPartDesc alterTblAlterPartDesc = new AlterTableAlterPartDesc(tName.getDbTable(), newCol); if (AcidUtils.isTransactionalTable(tab)) { setAcidDdlDesc(alterTblAlterPartDesc); } @@ -3504,7 +3466,7 @@ private void analyzeAlterTablePartColType(String[] qualified, ASTNode ast) alterTblAlterPartDesc))); } - /** + /** * Add one or more partitions to a table. Useful when the data has been copied * to the right location by some other process. * @@ -3517,13 +3479,12 @@ private void analyzeAlterTablePartColType(String[] qualified, ASTNode ast) * @throws SemanticException * Parsing failed */ - private void analyzeAlterTableAddParts(String[] qualified, CommonTree ast, boolean expectView) - throws SemanticException { + private void analyzeAlterTableAddParts(TableName tName, CommonTree ast, boolean expectView) throws SemanticException { // ^(TOK_ALTERTABLE_ADDPARTS identifier ifNotExists? alterStatementSuffixAddPartitionsElement+) boolean ifNotExists = ast.getChild(0).getType() == HiveParser.TOK_IFNOTEXISTS; - Table tab = getTable(qualified); + Table tab = getTable(tName); boolean isView = tab.isView(); validateAlterTableType(tab, AlterTableTypes.ADDPARTITION, expectView); outputs.add(new WriteEntity(tab, @@ -3598,9 +3559,9 @@ private void analyzeAlterTableAddParts(String[] qualified, CommonTree ast, boole // Compile internal query to capture underlying table partition dependencies StringBuilder cmd = new StringBuilder(); cmd.append("SELECT * FROM "); - cmd.append(HiveUtils.unparseIdentifier(qualified[0])); + cmd.append(HiveUtils.unparseIdentifier(tName.getDb())); cmd.append("."); - cmd.append(HiveUtils.unparseIdentifier(qualified[1])); + cmd.append(HiveUtils.unparseIdentifier(tName.getTable())); cmd.append(" WHERE "); boolean firstOr = true; for (int i = 0; i < addPartitionDesc.getPartitionCount(); ++i) { @@ -3703,10 +3664,9 @@ private void handleTransactionalTable(Table tab, AddPartitionDesc addPartitionDe * @throws SemanticException * Parsing failed */ - private void analyzeAlterTableTouch(String[] qualified, CommonTree ast) - throws SemanticException { + private void analyzeAlterTableTouch(TableName tName, CommonTree ast) throws SemanticException { - Table tab = getTable(qualified); + Table tab = getTable(tName); validateAlterTableType(tab, AlterTableTypes.TOUCH); inputs.add(new ReadEntity(tab)); @@ -3715,7 +3675,7 @@ private void analyzeAlterTableTouch(String[] qualified, CommonTree ast) if (partSpecs.size() == 0) { AlterTableSimpleDesc touchDesc = new AlterTableSimpleDesc( - getDotName(qualified), null, + tName.getDbTable(), null, AlterTableDesc.AlterTableTypes.TOUCH); outputs.add(new WriteEntity(tab, WriteEntity.WriteType.DDL_NO_LOCK)); rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), @@ -3723,23 +3683,21 @@ private void analyzeAlterTableTouch(String[] qualified, CommonTree ast) } else { addTablePartsOutputs(tab, partSpecs, WriteEntity.WriteType.DDL_NO_LOCK); for (Map partSpec : partSpecs) { - AlterTableSimpleDesc touchDesc = new AlterTableSimpleDesc( - getDotName(qualified), partSpec, - AlterTableDesc.AlterTableTypes.TOUCH); + AlterTableSimpleDesc touchDesc = + new AlterTableSimpleDesc(tName.getDbTable(), partSpec, AlterTableDesc.AlterTableTypes.TOUCH); 
rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), touchDesc))); } } } - private void analyzeAlterTableArchive(String[] qualified, CommonTree ast, boolean isUnArchive) - throws SemanticException { + private void analyzeAlterTableArchive(TableName tName, CommonTree ast, boolean isUnArchive) throws SemanticException { if (!conf.getBoolVar(HiveConf.ConfVars.HIVEARCHIVEENABLED)) { throw new SemanticException(ErrorMsg.ARCHIVE_METHODS_DISABLED.getMsg()); } - Table tab = getTable(qualified); + Table tab = getTable(tName); // partition name to value List> partSpecs = getPartitionSpecs(tab, ast); @@ -3762,8 +3720,7 @@ private void analyzeAlterTableArchive(String[] qualified, CommonTree ast, boolea } catch (HiveException e) { throw new SemanticException(e.getMessage(), e); } - AlterTableSimpleDesc archiveDesc = new AlterTableSimpleDesc( - getDotName(qualified), partSpec, + AlterTableSimpleDesc archiveDesc = new AlterTableSimpleDesc(tName.getDbTable(), partSpec, (isUnArchive ? AlterTableTypes.UNARCHIVE : AlterTableTypes.ARCHIVE)); rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), archiveDesc))); @@ -4136,35 +4093,32 @@ private void addTableDropPartsOutputs(Table tab, * node * @throws SemanticException */ - private void analyzeAltertableSkewedby(String[] qualified, ASTNode ast) throws SemanticException { + private void analyzeAltertableSkewedby(TableName tName, ASTNode ast) throws SemanticException { /** * Throw an error if the user tries to use the DDL with * hive.internal.ddl.list.bucketing.enable set to false. */ - HiveConf hiveConf = SessionState.get().getConf(); - - Table tab = getTable(qualified); + Table tab = getTable(tName); inputs.add(new ReadEntity(tab)); outputs.add(new WriteEntity(tab, WriteEntity.WriteType.DDL_EXCLUSIVE)); validateAlterTableType(tab, AlterTableTypes.ADDSKEWEDBY); - String tableName = getDotName(qualified); if (ast.getChildCount() == 0) { /* Convert a skewed table to non-skewed table. 
*/ - AlterTableDesc alterTblDesc = new AlterTableDesc(tableName, true, - new ArrayList(), new ArrayList>()); + AlterTableDesc alterTblDesc = + new AlterTableDesc(tName.getDbTable(), true, new ArrayList<>(), new ArrayList<>()); alterTblDesc.setStoredAsSubDirectories(false); rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), alterTblDesc))); } else { switch (((ASTNode) ast.getChild(0)).getToken().getType()) { case HiveParser.TOK_TABLESKEWED: - handleAlterTableSkewedBy(ast, tableName, tab); + handleAlterTableSkewedBy(ast, tName.getDbTable(), tab); break; case HiveParser.TOK_STOREDASDIRS: - handleAlterTableDisableStoredAsDirs(tableName, tab); + handleAlterTableDisableStoredAsDirs(tName.getDbTable(), tab); break; default: assert false; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/MaterializedViewRebuildSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/MaterializedViewRebuildSemanticAnalyzer.java index 7a3c16390c..ec45ec28f5 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/MaterializedViewRebuildSemanticAnalyzer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/MaterializedViewRebuildSemanticAnalyzer.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hive.ql.parse; +import org.apache.hadoop.hive.common.TableName; import org.apache.hadoop.hive.metastore.api.LockState; import org.apache.hadoop.hive.ql.Context; import org.apache.hadoop.hive.ql.ErrorMsg; @@ -27,7 +28,6 @@ import org.apache.hadoop.hive.ql.lockmgr.LockException; import org.apache.hadoop.hive.ql.metadata.Table; import org.apache.hadoop.hive.ql.session.SessionState; -import org.apache.hadoop.hive.ql.session.SessionState.LogHelper; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -54,12 +54,12 @@ public void analyzeInternal(ASTNode ast) throws SemanticException { return; } - String[] qualifiedTableName = getQualifiedTableName((ASTNode) ast.getChild(0)); - String dbDotTable = getDotName(qualifiedTableName); + final TableName tableName = + getQualifiedTableName((ASTNode) ast.getChild(0), SessionState.get().getCurrentCatalog()); ASTNode rewrittenAST; // We need to go lookup the table and get the select statement and then parse it. 
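// For instance (names here are purely illustrative), rebuilding a materialized view default.mv1
// whose stored definition is "SELECT key, count(*) FROM src GROUP BY key" is rewritten into
// "insert overwrite table `default`.`mv1` SELECT key, count(*) FROM src GROUP BY key",
// which is the statement parsed and analyzed below.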
try { - Table tab = getTableObjectByName(dbDotTable, true); + Table tab = getTableObjectByName(tableName.getDbTable(), true); if (!tab.isMaterializedView()) { // Cannot rebuild not materialized view throw new SemanticException(ErrorMsg.REBUILD_NO_MATERIALIZED_VIEW); @@ -71,9 +71,8 @@ public void analyzeInternal(ASTNode ast) throws SemanticException { throw new SemanticException(ErrorMsg.MATERIALIZED_VIEW_DEF_EMPTY); } Context ctx = new Context(queryState.getConf()); - rewrittenAST = ParseUtils.parse("insert overwrite table " + - "`" + qualifiedTableName[0] + "`.`" + qualifiedTableName[1] + "` " + - viewText, ctx); + rewrittenAST = ParseUtils.parse(String.join("", "insert overwrite table ", + "`", tableName.getDb(), "`.`", tableName.getTable(), "` ", viewText), ctx); this.ctx.addRewrittenStatementContext(ctx); if (!this.ctx.isExplainPlan() && AcidUtils.isTransactionalTable(tab)) { @@ -84,22 +83,21 @@ public void analyzeInternal(ASTNode ast) throws SemanticException { LockState state; try { state = txnManager.acquireMaterializationRebuildLock( - qualifiedTableName[0], qualifiedTableName[1], txnManager.getCurrentTxnId()).getState(); + tableName.getDb(), tableName.getTable(), txnManager.getCurrentTxnId()).getState(); } catch (LockException e) { throw new SemanticException("Exception acquiring lock for rebuilding the materialized view", e); } if (state != LockState.ACQUIRED) { - throw new SemanticException("Another process is rebuilding the materialized view " + dbDotTable); + throw new SemanticException("Another process is rebuilding the materialized view " + tableName.getDbTable()); } } } catch (Exception e) { throw new SemanticException(e); } mvRebuildMode = MaterializationRebuildMode.INSERT_OVERWRITE_REBUILD; - mvRebuildDbName = qualifiedTableName[0]; - mvRebuildName = qualifiedTableName[1]; + mvRebuildName = tableName; - LOG.debug("Rebuilding materialized view " + dbDotTable); + LOG.debug("Rebuilding materialized view " + tableName.getDbTable()); super.analyzeInternal(rewrittenAST); } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/QBParseInfo.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/QBParseInfo.java index ed0da84e26..f7488a4b16 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/QBParseInfo.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/QBParseInfo.java @@ -30,6 +30,7 @@ import org.antlr.runtime.tree.Tree; import org.apache.hadoop.hive.common.StringInternUtils; +import org.apache.hadoop.hive.common.TableName; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer.AnalyzeRewriteContext; @@ -200,8 +201,7 @@ public boolean isDestToOpTypeInsertOverwrite(String clause) { * See also {@link #getInsertOverwriteTables()} */ public boolean isInsertIntoTable(String dbName, String table) { - String fullName = dbName + "." 
+ table; - return insertIntoTables.containsKey(fullName.toLowerCase()); + return insertIntoTables.containsKey(TableName.getDbTable(dbName, table).toLowerCase()); } /** diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/RewriteSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/RewriteSemanticAnalyzer.java index 33247f0745..31fde804de 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/RewriteSemanticAnalyzer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/RewriteSemanticAnalyzer.java @@ -24,6 +24,7 @@ import java.util.Map; import java.util.Set; +import org.apache.hadoop.hive.common.TableName; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.conf.HiveConf.ConfVars; import org.apache.hadoop.hive.metastore.TableType; @@ -176,27 +177,27 @@ protected Table getTargetTable(ASTNode tabRef) throws SemanticException { /** * @param throwException if false, return null if table doesn't exist, else throw */ - protected static Table getTable(ASTNode tabRef, Hive db, boolean throwException) throws SemanticException { - String[] tableName; - switch (tabRef.getType()) { + protected static Table getTable(ASTNode tabNode, Hive db, boolean throwException) throws SemanticException { + final TableName tName; + switch (tabNode.getType()) { case HiveParser.TOK_TABREF: - tableName = getQualifiedTableName((ASTNode) tabRef.getChild(0)); + tName = getQualifiedTableName((ASTNode) tabNode.getChild(0)); break; case HiveParser.TOK_TABNAME: - tableName = getQualifiedTableName(tabRef); + tName = getQualifiedTableName(tabNode); break; default: - throw raiseWrongType("TOK_TABREF|TOK_TABNAME", tabRef); + throw raiseWrongType("TOK_TABREF|TOK_TABNAME", tabNode); } Table mTable; try { - mTable = db.getTable(tableName[0], tableName[1], throwException); + mTable = db.getTable(tName.getDb(), tName.getTable(), throwException); } catch (InvalidTableException e) { - LOG.error("Failed to find table " + getDotName(tableName) + " got exception " + e.getMessage()); - throw new SemanticException(ErrorMsg.INVALID_TABLE.getMsg(getDotName(tableName)), e); + LOG.error("Failed to find table " + tName.getDbTable() + " got exception " + e.getMessage()); + throw new SemanticException(ErrorMsg.INVALID_TABLE.getMsg(tName.getDbTable()), e); } catch (HiveException e) { - LOG.error("Failed to find table " + getDotName(tableName) + " got exception " + e.getMessage()); + LOG.error("Failed to find table " + tName.getDbTable() + " got exception " + e.getMessage()); throw new SemanticException(e.getMessage(), e); } return mTable; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java index 8dc5b34a34..0b6aede575 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java @@ -69,6 +69,7 @@ import org.apache.hadoop.hive.common.StatsSetupConst; import org.apache.hadoop.hive.common.StatsSetupConst.StatDB; import org.apache.hadoop.hive.common.StringInternUtils; +import org.apache.hadoop.hive.common.TableName; import org.apache.hadoop.hive.common.ValidTxnList; import org.apache.hadoop.hive.common.ValidTxnWriteIdList; import org.apache.hadoop.hive.common.metrics.common.MetricsConstant; @@ -359,8 +360,7 @@ // whether this is a mv rebuild rewritten expression protected MaterializationRebuildMode mvRebuildMode = MaterializationRebuildMode.NONE; - protected String mvRebuildDbName; // Db name for materialization to rebuild - 
protected String mvRebuildName; // Name for materialization to rebuild + protected TableName mvRebuildName; // Table ref names for materialization to rebuild protected volatile boolean disableJoinMerge = false; protected final boolean defaultJoinMerge; @@ -2261,9 +2261,9 @@ private void getMetaData(QB qb, ReadEntity parentInput) // Whether we are using an acid compliant transaction manager has already been caught in // UpdateDeleteSemanticAnalyzer, so if we are updating or deleting and getting nonAcid // here, it means the table itself doesn't support it. - throw new SemanticException(ErrorMsg.ACID_OP_ON_NONACID_TABLE, ts.tableName); + throw new SemanticException(ErrorMsg.ACID_OP_ON_NONACID_TABLE, ts.tableName.getDbTable()); } else { - throw new SemanticException(ErrorMsg.ACID_OP_ON_INSERTONLYTRAN_TABLE, ts.tableName); + throw new SemanticException(ErrorMsg.ACID_OP_ON_INSERTONLYTRAN_TABLE, ts.tableName.getDbTable()); } } // TableSpec ts is got from the query (user specified), @@ -2282,7 +2282,7 @@ private void getMetaData(QB qb, ReadEntity parentInput) } if (HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVESTATSAUTOGATHER)) { // Add the table spec for the destination table. - qb.getParseInfo().addTableSpec(ts.tableName.toLowerCase(), ts); + qb.getParseInfo().addTableSpec(ts.tableName.getDbTable().toLowerCase(), ts); } break; } @@ -2304,14 +2304,13 @@ private void getMetaData(QB qb, ReadEntity parentInput) location = new Path(qb.getTableDesc().getLocation()); } else { // allocate a temporary output dir on the location of the table - String tableName = getUnescapedName((ASTNode) ast.getChild(0)); - String[] names = Utilities.getDbTableName(tableName); + final TableName tName = Utilities.getTableName(getUnescapedName((ASTNode) ast.getChild(0))); try { Warehouse wh = new Warehouse(conf); //Use destination table's db location. String destTableDb = qb.getTableDesc() != null ? qb.getTableDesc().getDatabaseName() : null; if (destTableDb == null) { - destTableDb = names[0]; + destTableDb = tName.getDb(); } location = wh.getDatabasePath(db.getDatabase(destTableDb)); } catch (MetaException e) { @@ -2335,7 +2334,7 @@ private void getMetaData(QB qb, ReadEntity parentInput) if (HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVESTATSAUTOGATHER)) { TableSpec ts = new TableSpec(db, conf, this.ast); // Add the table spec for the destination table. - qb.getParseInfo().addTableSpec(ts.tableName.toLowerCase(), ts); + qb.getParseInfo().addTableSpec(ts.tableName.getDbTable().toLowerCase(), ts); } } else { // This is the only place where isQuery is set to true; it defaults to false.
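For readers following the mechanical change in these SemanticAnalyzer hunks, a short sketch of the pattern being swapped: positional access into the String[] returned by Utilities.getDbTableName versus the named accessors on the TableName returned by Utilities.getTableName. Only methods already used in this patch are assumed; the qualified name is made up.

import org.apache.hadoop.hive.common.TableName;
import org.apache.hadoop.hive.ql.exec.Utilities;

public class DbTableNameSketch {
  public static void main(String[] args) throws Exception {
    String qualified = "warehouse.orders"; // hypothetical "db.table" input

    // Old pattern: positional array access, easy to transpose db and table.
    String[] names = Utilities.getDbTableName(qualified);
    String dbFromArray = names[0];
    String tableFromArray = names[1];

    // New pattern: one value object with named accessors.
    TableName tn = Utilities.getTableName(qualified);
    System.out.println(dbFromArray + "/" + tableFromArray
        + " -> " + tn.getDb() + "/" + tn.getTable() + " (" + tn.getDbTable() + ")");
  }
}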
@@ -6903,12 +6902,11 @@ private void genPartnCols(String dest, Operator input, QB qb, @SuppressWarnings("unchecked") private void setStatsForNonNativeTable(String dbName, String tableName) throws SemanticException { - String qTableName = DDLSemanticAnalyzer.getDotName(new String[] { dbName, - tableName }); + final TableName tName = TableName.fromString(tableName, SessionState.get().getCurrentCatalog(), dbName); AlterTableDesc alterTblDesc = new AlterTableDesc(AlterTableTypes.DROPPROPS, null, false); HashMap mapProp = new HashMap<>(); mapProp.put(StatsSetupConst.COLUMN_STATS_ACCURATE, null); - alterTblDesc.setOldName(qTableName); + alterTblDesc.setOldName(tName.getDbTable()); alterTblDesc.setProps(mapProp); alterTblDesc.setDropIfExists(true); this.rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), alterTblDesc))); @@ -7927,7 +7925,7 @@ private void handleLineage(LoadTableDesc ltd, Operator output) } else if ( queryState.getCommandType().equals(HiveOperation.CREATETABLE_AS_SELECT.getOperationName())) { Path tlocation = null; - String tName = Utilities.getDbTableName(tableDesc.getTableName())[1]; + final String tName = Utilities.getTableName(tableDesc.getTableName()).getTable(); try { Warehouse wh = new Warehouse(conf); tlocation = wh.getDefaultTablePath(db.getDatabase(tableDesc.getDatabaseName()), @@ -11365,7 +11363,7 @@ private void setupStats(TableScanDesc tsDesc, QBParseInfo qbp, Table tab, String // as the prefix for easy of read during explain and debugging. // Currently, partition spec can only be static partition. String k = org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.encodeTableName(tblName) + Path.SEPARATOR; - tsDesc.setStatsAggPrefix(tab.getDbName()+"."+k); + tsDesc.setStatsAggPrefix(TableName.getDbTable(tab.getDbName(), k)); // set up WriteEntity for replication and txn stats WriteEntity we = new WriteEntity(tab, WriteEntity.WriteType.DDL_SHARED); @@ -12619,7 +12617,7 @@ protected void saveViewDefinition() throws SemanticException { sb.append(" FROM ("); sb.append(expandedText); sb.append(") "); - sb.append(HiveUtils.unparseIdentifier(Utilities.getDbTableName(createVwDesc.getViewName())[1], conf)); + sb.append(HiveUtils.unparseIdentifier(Utilities.getTableName(createVwDesc.getViewName()).getTable(), conf)); expandedText = sb.toString(); } } else { @@ -12653,7 +12651,7 @@ protected void saveViewDefinition() throws SemanticException { sb.append(" FROM ("); sb.append(expandedText); sb.append(") "); - sb.append(HiveUtils.unparseIdentifier(Utilities.getDbTableName(createVwDesc.getViewName())[1], conf)); + sb.append(HiveUtils.unparseIdentifier(Utilities.getTableName(createVwDesc.getViewName()).getTable(), conf)); expandedText = sb.toString(); } @@ -13174,8 +13172,7 @@ boolean hasConstraints(final List partCols, final List cols = new ArrayList(); @@ -13213,7 +13210,7 @@ ASTNode analyzeCreateTable( RowFormatParams rowFormatParams = new RowFormatParams(); StorageFormat storageFormat = new StorageFormat(conf); - LOG.info("Creating table " + dbDotTab + " position=" + ast.getCharPositionInLine()); + LOG.info("Creating table " + tName.getDbTable() + " position=" + ast.getCharPositionInLine()); int numCh = ast.getChildCount(); /* @@ -13377,7 +13374,7 @@ ASTNode analyzeCreateTable( // check for existence of table if (ifNotExists) { try { - Table table = getTable(qualifiedTabName, false); + Table table = getTable(tName, false); if (table != null) { // table exists return null; } @@ -13415,11 +13412,11 @@ ASTNode analyzeCreateTable( throw new SemanticException( 
"Partition columns can only declared using their name and types in regular CREATE TABLE statements"); } - tblProps = validateAndAddDefaultProperties( - tblProps, isExt, storageFormat, dbDotTab, sortCols, isMaterialization, isTemporary, isTransactional); - addDbAndTabToOutputs(qualifiedTabName, TableType.MANAGED_TABLE, isTemporary, tblProps); + tblProps = validateAndAddDefaultProperties(tblProps, isExt, storageFormat, tName.getDbTable(), sortCols, + isMaterialization, isTemporary, isTransactional); + addDbAndTabToOutputs(tName, TableType.MANAGED_TABLE, isTemporary, tblProps); - CreateTableDesc crtTblDesc = new CreateTableDesc(dbDotTab, isExt, isTemporary, cols, partCols, + CreateTableDesc crtTblDesc = new CreateTableDesc(tName.getDbTable(), isExt, isTemporary, cols, partCols, bucketCols, sortCols, numBuckets, rowFormatParams.fieldDelim, rowFormatParams.fieldEscape, rowFormatParams.collItemDelim, rowFormatParams.mapKeyDelim, rowFormatParams.lineDelim, @@ -13440,14 +13437,14 @@ ASTNode analyzeCreateTable( case ctt: // CREATE TRANSACTIONAL TABLE if (isExt) { throw new SemanticException( - qualifiedTabName[1] + " cannot be declared transactional because it's an external table"); + tName.getTable() + " cannot be declared transactional because it's an external table"); } - tblProps = validateAndAddDefaultProperties(tblProps, isExt, storageFormat, dbDotTab, sortCols, isMaterialization, - isTemporary, isTransactional); - addDbAndTabToOutputs(qualifiedTabName, TableType.MANAGED_TABLE, false, tblProps); + tblProps = validateAndAddDefaultProperties(tblProps, isExt, storageFormat, tName.getDbTable(), sortCols, + isMaterialization, isTemporary, isTransactional); + addDbAndTabToOutputs(tName, TableType.MANAGED_TABLE, false, tblProps); CreateTableDesc crtTranTblDesc = - new CreateTableDesc(dbDotTab, isExt, isTemporary, cols, partCols, bucketCols, sortCols, numBuckets, + new CreateTableDesc(tName.getDbTable(), isExt, isTemporary, cols, partCols, bucketCols, sortCols, numBuckets, rowFormatParams.fieldDelim, rowFormatParams.fieldEscape, rowFormatParams.collItemDelim, rowFormatParams.mapKeyDelim, rowFormatParams.lineDelim, comment, storageFormat.getInputFormat(), storageFormat.getOutputFormat(), location, storageFormat.getSerde(), storageFormat.getStorageHandler(), @@ -13463,9 +13460,9 @@ ASTNode analyzeCreateTable( break; case CTLT: // create table like - tblProps = validateAndAddDefaultProperties( - tblProps, isExt, storageFormat, dbDotTab, sortCols, isMaterialization, isTemporary, isTransactional); - addDbAndTabToOutputs(qualifiedTabName, TableType.MANAGED_TABLE, isTemporary, tblProps); + tblProps = validateAndAddDefaultProperties(tblProps, isExt, storageFormat, tName.getDbTable(), sortCols, + isMaterialization, isTemporary, isTransactional); + addDbAndTabToOutputs(tName, TableType.MANAGED_TABLE, isTemporary, tblProps); if (isTemporary) { Table likeTable = getTable(likeTableName, false); @@ -13474,7 +13471,7 @@ ASTNode analyzeCreateTable( + "and source table in CREATE TABLE LIKE is partitioned."); } } - CreateTableLikeDesc crtTblLikeDesc = new CreateTableLikeDesc(dbDotTab, isExt, isTemporary, + CreateTableLikeDesc crtTblLikeDesc = new CreateTableLikeDesc(tName.getDbTable(), isExt, isTemporary, storageFormat.getInputFormat(), storageFormat.getOutputFormat(), location, storageFormat.getSerde(), storageFormat.getSerdeProps(), tblProps, ifNotExists, likeTableName, isUserStorageFormat); @@ -13486,27 +13483,25 @@ ASTNode analyzeCreateTable( if (isTemporary) { if (!ctx.isExplainSkipExecution() && 
!isMaterialization) { - String dbName = qualifiedTabName[0]; - String tblName = qualifiedTabName[1]; SessionState ss = SessionState.get(); if (ss == null) { - throw new SemanticException("No current SessionState, cannot create temporary table " - + dbName + "." + tblName); + throw new SemanticException( + String.join("", "No current SessionState, cannot create temporary table ", tName.getDbTable())); } Map tables = SessionHiveMetaStoreClient. - getTempTablesForDatabase(dbName, tblName); - if (tables != null && tables.containsKey(tblName)) { - throw new SemanticException("Temporary table " + dbName + "." + tblName - + " already exists"); + getTempTablesForDatabase(tName.getDb(), tName.getTable()); + if (tables != null && tables.containsKey(tName.getTable())) { + throw new SemanticException(String.join("", "Temporary table ", tName.getDbTable(), " already exists")); } } } else { // Verify that the table does not already exist // dumpTable is only used to check the conflict for non-temporary tables try { - Table dumpTable = db.newTable(dbDotTab); - if (null != db.getTable(dumpTable.getDbName(), dumpTable.getTableName(), false) && !ctx.isExplainSkipExecution()) { - throw new SemanticException(ErrorMsg.TABLE_ALREADY_EXISTS.getMsg(dbDotTab)); + Table dumpTable = db.newTable(tName.getDbTable()); + if (null != db.getTable(dumpTable.getDbName(), dumpTable.getTableName(), false) && !ctx + .isExplainSkipExecution()) { + throw new SemanticException(ErrorMsg.TABLE_ALREADY_EXISTS.getMsg(tName.getDbTable())); } } catch (HiveException e) { throw new SemanticException(e); @@ -13547,10 +13542,10 @@ ASTNode analyzeCreateTable( "Partition columns can only declared using their names in CTAS statements"); } - tblProps = validateAndAddDefaultProperties( - tblProps, isExt, storageFormat, dbDotTab, sortCols, isMaterialization, isTemporary, isTransactional); - addDbAndTabToOutputs(qualifiedTabName, TableType.MANAGED_TABLE, isTemporary, tblProps); - tableDesc = new CreateTableDesc(qualifiedTabName[0], dbDotTab, isExt, isTemporary, cols, + tblProps = validateAndAddDefaultProperties(tblProps, isExt, storageFormat, tName.getDbTable(), sortCols, + isMaterialization, isTemporary, isTransactional); + addDbAndTabToOutputs(tName, TableType.MANAGED_TABLE, isTemporary, tblProps); + tableDesc = new CreateTableDesc(tName.getDb(), tName.getTable(), isExt, isTemporary, cols, partColNames, bucketCols, sortCols, numBuckets, rowFormatParams.fieldDelim, rowFormatParams.fieldEscape, rowFormatParams.collItemDelim, rowFormatParams.mapKeyDelim, rowFormatParams.lineDelim, comment, storageFormat.getInputFormat(), @@ -13572,12 +13567,12 @@ ASTNode analyzeCreateTable( } /** Adds entities for create table/create view. 
*/ - private void addDbAndTabToOutputs(String[] qualifiedTabName, TableType type, + private void addDbAndTabToOutputs(TableName tableName, TableType type, boolean isTemporary, Map tblProps) throws SemanticException { - Database database = getDatabase(qualifiedTabName[0]); + Database database = getDatabase(tableName.getDb()); outputs.add(new WriteEntity(database, WriteEntity.WriteType.DDL_SHARED)); - Table t = new Table(qualifiedTabName[0], qualifiedTabName[1]); + Table t = new Table(tableName.getDb(), tableName.getTable()); t.setParameters(tblProps); t.setTableType(type); t.setTemporary(isTemporary); @@ -13585,8 +13580,7 @@ private void addDbAndTabToOutputs(String[] qualifiedTabName, TableType type, } protected ASTNode analyzeCreateView(ASTNode ast, QB qb, PlannerContext plannerCtx) throws SemanticException { - String[] qualTabName = getQualifiedTableName((ASTNode) ast.getChild(0)); - String dbDotTable = getDotName(qualTabName); + final TableName tName = getQualifiedTableName((ASTNode) ast.getChild(0)); List cols = null; boolean ifNotExists = false; boolean rewriteEnabled = true; @@ -13602,7 +13596,7 @@ protected ASTNode analyzeCreateView(ASTNode ast, QB qb, PlannerContext plannerCt RowFormatParams rowFormatParams = new RowFormatParams(); StorageFormat storageFormat = new StorageFormat(conf); - LOG.info("Creating view " + dbDotTable + " position=" + LOG.info("Creating view " + tName.getDbTable() + " position=" + ast.getCharPositionInLine()); int numCh = ast.getChildCount(); for (int num = 1; num < numCh; num++) { @@ -13670,9 +13664,9 @@ protected ASTNode analyzeCreateView(ASTNode ast, QB qb, PlannerContext plannerCt // Verify that the table does not already exist // dumpTable is only used to check the conflict for non-temporary tables try { - Table dumpTable = db.newTable(dbDotTable); + Table dumpTable = db.newTable(tName.getDbTable()); if (null != db.getTable(dumpTable.getDbName(), dumpTable.getTableName(), false) && !ctx.isExplainSkipExecution()) { - throw new SemanticException(ErrorMsg.TABLE_ALREADY_EXISTS.getMsg(dbDotTable)); + throw new SemanticException(ErrorMsg.TABLE_ALREADY_EXISTS.getMsg(tName.getDbTable())); } } catch (HiveException e) { throw new SemanticException(e); @@ -13689,21 +13683,21 @@ protected ASTNode analyzeCreateView(ASTNode ast, QB qb, PlannerContext plannerCt if (isMaterialized) { createVwDesc = new CreateViewDesc( - dbDotTable, cols, comment, tblProps, partColNames, + tName.getDbTable(), cols, comment, tblProps, partColNames, ifNotExists, isRebuild, rewriteEnabled, isAlterViewAs, storageFormat.getInputFormat(), storageFormat.getOutputFormat(), location, storageFormat.getSerde(), storageFormat.getStorageHandler(), storageFormat.getSerdeProps()); - addDbAndTabToOutputs(qualTabName, TableType.MATERIALIZED_VIEW, false, tblProps); + addDbAndTabToOutputs(tName, TableType.MATERIALIZED_VIEW, false, tblProps); queryState.setCommandType(HiveOperation.CREATE_MATERIALIZED_VIEW); } else { createVwDesc = new CreateViewDesc( - dbDotTable, cols, comment, tblProps, partColNames, + tName.getDbTable(), cols, comment, tblProps, partColNames, ifNotExists, orReplace, isAlterViewAs, storageFormat.getInputFormat(), storageFormat.getOutputFormat(), storageFormat.getSerde()); rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), createVwDesc))); - addDbAndTabToOutputs(qualTabName, TableType.VIRTUAL_VIEW, false, tblProps); + addDbAndTabToOutputs(tName, TableType.VIRTUAL_VIEW, false, tblProps); queryState.setCommandType(HiveOperation.CREATEVIEW); } qb.setViewDesc(createVwDesc); 
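The addDbAndTabToOutputs and analyzeCreateView changes above keep the same write-entity bookkeeping but now read the pieces from a TableName. A compact sketch of that flow, assuming only the Table and TableName calls visible in this patch; the view name and properties are illustrative.

import java.util.HashMap;
import java.util.Map;

import org.apache.hadoop.hive.common.TableName;
import org.apache.hadoop.hive.metastore.TableType;
import org.apache.hadoop.hive.ql.metadata.Table;

public class CreateOutputsSketch {
  // Mirrors the shape of addDbAndTabToOutputs: build the not-yet-created table entity
  // from the parsed TableName so both the database and the table land in the outputs.
  static Table pendingOutputTable(TableName tableName, TableType type, Map<String, String> props) {
    Table t = new Table(tableName.getDb(), tableName.getTable());
    t.setParameters(props);
    t.setTableType(type);
    return t;
  }

  public static void main(String[] args) {
    TableName viewName = TableName.fromString("daily_report", null, "reports"); // hypothetical
    Table t = pendingOutputTable(viewName, TableType.VIRTUAL_VIEW, new HashMap<>());
    System.out.println(t.getDbName() + "." + t.getTableName());
  }
}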
@@ -15209,15 +15203,15 @@ public boolean isValidQueryCaching() { */ protected String getFullTableNameForSQL(ASTNode n) throws SemanticException { switch (n.getType()) { - case HiveParser.TOK_TABNAME: - String[] tableName = getQualifiedTableName(n); - return getDotName(new String[] { - HiveUtils.unparseIdentifier(tableName[0], this.conf), - HiveUtils.unparseIdentifier(tableName[1], this.conf) }); - case HiveParser.TOK_TABREF: - return getFullTableNameForSQL((ASTNode) n.getChild(0)); - default: - throw raiseWrongType("TOK_TABNAME", n); + case HiveParser.TOK_TABNAME: + final TableName initDbTbl = getQualifiedTableName(n); + final TableName tName = TableName.fromString(HiveUtils.unparseIdentifier(initDbTbl.getTable(), this.conf), + SessionState.get().getCurrentCatalog(), HiveUtils.unparseIdentifier(initDbTbl.getDb(), this.conf)); + return tName.getDbTable(); + case HiveParser.TOK_TABREF: + return getFullTableNameForSQL((ASTNode) n.getChild(0)); + default: + throw raiseWrongType("TOK_TABNAME", n); } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java index cc676c55f8..85e5f256a5 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java @@ -24,6 +24,7 @@ import org.apache.commons.collections.*; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hive.common.HiveStatsUtils; +import org.apache.hadoop.hive.common.TableName; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.metastore.Warehouse; import org.apache.hadoop.hive.metastore.api.MetaException; @@ -448,12 +449,12 @@ private Path getDefaultCtasLocation(final ParseContext pCtx) throws SemanticExce } else if (pCtx.getQueryProperties().isMaterializedView()) { protoName = pCtx.getCreateViewDesc().getViewName(); } - String[] names = Utilities.getDbTableName(protoName); - if (!db.databaseExists(names[0])) { - throw new SemanticException("ERROR: The database " + names[0] + " does not exist."); + final TableName tn = Utilities.getTableName(protoName); + if (!db.databaseExists(tn.getDb())) { + throw new SemanticException("ERROR: The database " + tn.getDb() + " does not exist."); } Warehouse wh = new Warehouse(conf); - return wh.getDefaultTablePath(db.getDatabase(names[0]), names[1], isExternal); + return wh.getDefaultTablePath(db.getDatabase(tn.getDb()), tn.getTable(), isExternal); } catch (HiveException e) { throw new SemanticException(e); } catch (MetaException e) { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/authorization/HiveAuthorizationTaskFactoryImpl.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/authorization/HiveAuthorizationTaskFactoryImpl.java index 18ed6fb418..bb4b99024b 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/authorization/HiveAuthorizationTaskFactoryImpl.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/authorization/HiveAuthorizationTaskFactoryImpl.java @@ -24,6 +24,7 @@ import java.util.Map; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hive.common.TableName; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.metastore.api.PrincipalType; import org.apache.hadoop.hive.ql.ErrorMsg; @@ -62,7 +63,7 @@ // Assumes one instance of this + single-threaded compilation for each query. 
private final Hive db; - public HiveAuthorizationTaskFactoryImpl(HiveConf conf, Hive db) { + public HiveAuthorizationTaskFactoryImpl(HiveConf conf, Hive db) { this.db = db; } @@ -242,8 +243,8 @@ protected PrivilegeObjectDesc parsePrivObject(ASTNode ast) throws SemanticExcept ASTNode gchild = (ASTNode)child.getChild(0); if (child.getType() == HiveParser.TOK_TABLE_TYPE) { subject.setTable(true); - String[] qualified = BaseSemanticAnalyzer.getQualifiedTableName(gchild); - subject.setObject(BaseSemanticAnalyzer.getDotName(qualified)); + final TableName tName = BaseSemanticAnalyzer.getQualifiedTableName(gchild); + subject.setObject(tName.getDbTable()); } else if (child.getType() == HiveParser.TOK_URI_TYPE || child.getType() == HiveParser.TOK_SERVER_TYPE) { throw new SemanticException("Hive authorization does not support the URI or SERVER objects"); } else { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/UpdatedMetaDataTracker.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/UpdatedMetaDataTracker.java index 1f206984ff..5bcdaef262 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/UpdatedMetaDataTracker.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/UpdatedMetaDataTracker.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.hive.ql.parse.repl.load; +import org.apache.hadoop.hive.common.TableName; import org.apache.hadoop.hive.ql.parse.SemanticException; import org.apache.hive.common.util.HiveStringUtils; import java.util.ArrayList; @@ -38,8 +39,9 @@ private String tableName; private List> partitionsList; - UpdateMetaData(String replState, String dbName, String tableName, Map partSpec) { + UpdateMetaData(String replState, String dbName, String tableName, Map partSpec) throws SemanticException { this.replState = replState; + TableName.fromString(tableName, null, dbName); this.dbName = dbName; this.tableName = tableName; this.partitionsList = new ArrayList<>(); @@ -140,7 +142,7 @@ private String getKey(String dbName, String tableName) { if (tableName == null) { return dbName + ".*"; } - return dbName + "." + tableName; + return TableName.fromString(tableName, null, dbName).getDbTable(); } private String normalizeIdentifier(String name) { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AddForeignKeyHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AddForeignKeyHandler.java index bba769244b..32b33233ad 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AddForeignKeyHandler.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AddForeignKeyHandler.java @@ -22,6 +22,7 @@ import java.util.Collections; import java.util.List; +import org.apache.hadoop.hive.common.TableName; import org.apache.hadoop.hive.metastore.api.SQLForeignKey; import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey; import org.apache.hadoop.hive.metastore.api.SQLUniqueConstraint; @@ -67,8 +68,8 @@ fk.setFktable_name(actualTblName); } - AlterTableDesc addConstraintsDesc = new AlterTableDesc(actualDbName + "." 
+ actualTblName, new ArrayList(), fks, - new ArrayList(), context.eventOnlyReplicationSpec()); + AlterTableDesc addConstraintsDesc = new AlterTableDesc(TableName.getDbTable(actualDbName, actualTblName), + new ArrayList(), fks, new ArrayList(), context.eventOnlyReplicationSpec()); Task addConstraintsTask = TaskFactory.get( new DDLWork(readEntitySet, writeEntitySet, addConstraintsDesc), context.hiveConf); tasks.add(addConstraintsTask); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AddNotNullConstraintHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AddNotNullConstraintHandler.java index 90d9008a31..8271e1036b 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AddNotNullConstraintHandler.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AddNotNullConstraintHandler.java @@ -22,6 +22,7 @@ import java.util.Collections; import java.util.List; +import org.apache.hadoop.hive.common.TableName; import org.apache.hadoop.hive.metastore.api.SQLCheckConstraint; import org.apache.hadoop.hive.metastore.api.SQLDefaultConstraint; import org.apache.hadoop.hive.metastore.api.SQLForeignKey; @@ -65,7 +66,7 @@ nn.setTable_name(actualTblName); } - AlterTableDesc addConstraintsDesc = new AlterTableDesc(actualDbName + "." + actualTblName, + AlterTableDesc addConstraintsDesc = new AlterTableDesc(TableName.getDbTable(actualDbName, actualTblName), new ArrayList(), new ArrayList(), new ArrayList(), diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AddPrimaryKeyHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AddPrimaryKeyHandler.java index e8966ad7c4..65cd8df180 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AddPrimaryKeyHandler.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AddPrimaryKeyHandler.java @@ -22,6 +22,7 @@ import java.util.Collections; import java.util.List; +import org.apache.hadoop.hive.common.TableName; import org.apache.hadoop.hive.metastore.api.SQLForeignKey; import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey; import org.apache.hadoop.hive.metastore.api.SQLUniqueConstraint; @@ -62,8 +63,8 @@ pk.setTable_name(actualTblName); } - AlterTableDesc addConstraintsDesc = new AlterTableDesc(actualDbName + "." 
+ actualTblName, pks, new ArrayList(), - new ArrayList(), context.eventOnlyReplicationSpec()); + AlterTableDesc addConstraintsDesc = new AlterTableDesc(TableName.getDbTable(actualDbName, actualTblName), pks, + new ArrayList(), new ArrayList(), context.eventOnlyReplicationSpec()); Task addConstraintsTask = TaskFactory.get( new DDLWork(readEntitySet, writeEntitySet, addConstraintsDesc), context.hiveConf); tasks.add(addConstraintsTask); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AddUniqueConstraintHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AddUniqueConstraintHandler.java index 81f1c5ab20..aab3bf5d43 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AddUniqueConstraintHandler.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AddUniqueConstraintHandler.java @@ -22,6 +22,7 @@ import java.util.Collections; import java.util.List; +import org.apache.hadoop.hive.common.TableName; import org.apache.hadoop.hive.metastore.api.SQLForeignKey; import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey; import org.apache.hadoop.hive.metastore.api.SQLUniqueConstraint; @@ -62,7 +63,7 @@ uk.setTable_name(actualTblName); } - AlterTableDesc addConstraintsDesc = new AlterTableDesc(actualDbName + "." + actualTblName, new ArrayList(), new ArrayList(), + AlterTableDesc addConstraintsDesc = new AlterTableDesc(TableName.getDbTable(actualDbName, actualTblName), new ArrayList(), new ArrayList(), uks, context.eventOnlyReplicationSpec()); Task addConstraintsTask = TaskFactory.get( new DDLWork(readEntitySet, writeEntitySet, addConstraintsDesc), context.hiveConf); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/DropConstraintHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/DropConstraintHandler.java index 5f9f879f6f..8abc188e7f 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/DropConstraintHandler.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/DropConstraintHandler.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.hive.ql.parse.repl.load.message; +import org.apache.hadoop.hive.common.TableName; import org.apache.hadoop.hive.metastore.messaging.DropConstraintMessage; import org.apache.hadoop.hive.ql.exec.Task; import org.apache.hadoop.hive.ql.exec.TaskFactory; @@ -37,7 +38,7 @@ String actualTblName = context.isTableNameEmpty() ? msg.getTable() : context.tableName; String constraintName = msg.getConstraint(); - AlterTableDesc dropConstraintsDesc = new AlterTableDesc(actualDbName + "." 
+ actualTblName, constraintName, + AlterTableDesc dropConstraintsDesc = new AlterTableDesc(TableName.getDbTable(actualDbName, actualTblName), constraintName, context.eventOnlyReplicationSpec()); Task dropConstraintsTask = TaskFactory.get( new DDLWork(readEntitySet, writeEntitySet, dropConstraintsDesc), context.hiveConf); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/DropPartitionHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/DropPartitionHandler.java index b95a35a688..73ed8a20b0 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/DropPartitionHandler.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/DropPartitionHandler.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.hive.ql.parse.repl.load.message; +import org.apache.hadoop.hive.common.TableName; import org.apache.hadoop.hive.metastore.messaging.DropPartitionMessage; import org.apache.hadoop.hive.ql.exec.Task; import org.apache.hadoop.hive.ql.exec.TaskFactory; @@ -43,7 +44,7 @@ Map> partSpecs = ReplUtils.genPartSpecs(new Table(msg.getTableObj()), msg.getPartitions()); if (partSpecs.size() > 0) { - DropTableDesc dropPtnDesc = new DropTableDesc(actualDbName + "." + actualTblName, + DropTableDesc dropPtnDesc = new DropTableDesc(TableName.getDbTable(actualDbName, actualTblName), partSpecs, null, true, context.eventOnlyReplicationSpec()); Task dropPtnTask = TaskFactory.get( new DDLWork(readEntitySet, writeEntitySet, dropPtnDesc), context.hiveConf diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/DropTableHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/DropTableHandler.java index 62784e950d..772cfdcb00 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/DropTableHandler.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/DropTableHandler.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.hive.ql.parse.repl.load.message; +import org.apache.hadoop.hive.common.TableName; import org.apache.hadoop.hive.metastore.messaging.DropTableMessage; import org.apache.hadoop.hive.ql.exec.Task; import org.apache.hadoop.hive.ql.exec.TaskFactory; @@ -35,10 +36,8 @@ DropTableMessage msg = deserializer.getDropTableMessage(context.dmd.getPayload()); String actualDbName = context.isDbNameEmpty() ? msg.getDB() : context.dbName; String actualTblName = context.isTableNameEmpty() ? msg.getTable() : context.tableName; - DropTableDesc dropTableDesc = new DropTableDesc( - actualDbName + "." 
+ actualTblName, - null, true, true, context.eventOnlyReplicationSpec(), false - ); + DropTableDesc dropTableDesc = new DropTableDesc(TableName.getDbTable(actualDbName, actualTblName), + null, true, true, context.eventOnlyReplicationSpec(), false); Task dropTableTask = TaskFactory.get( new DDLWork(readEntitySet, writeEntitySet, dropTableDesc), context.hiveConf ); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/RenamePartitionHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/RenamePartitionHandler.java index 1125f6909d..5d97d1e15b 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/RenamePartitionHandler.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/RenamePartitionHandler.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.hive.ql.parse.repl.load.message; +import org.apache.hadoop.hive.common.TableName; import org.apache.hadoop.hive.metastore.api.FieldSchema; import org.apache.hadoop.hive.metastore.messaging.AlterPartitionMessage; import org.apache.hadoop.hive.ql.exec.Task; @@ -43,7 +44,7 @@ Map newPartSpec = new LinkedHashMap<>(); Map oldPartSpec = new LinkedHashMap<>(); - String tableName = actualDbName + "." + actualTblName; + String tableName = TableName.getDbTable(actualDbName, actualTblName); try { Iterator beforeIterator = msg.getPtnObjBefore().getValuesIterator(); Iterator afterIterator = msg.getPtnObjAfter().getValuesIterator(); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/TruncatePartitionHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/TruncatePartitionHandler.java index dec6ed5ccc..a6ec71c99d 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/TruncatePartitionHandler.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/TruncatePartitionHandler.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.hive.ql.parse.repl.load.message; +import org.apache.hadoop.hive.common.TableName; import org.apache.hadoop.hive.metastore.api.FieldSchema; import org.apache.hadoop.hive.metastore.messaging.AlterPartitionMessage; import org.apache.hadoop.hive.ql.exec.Task; @@ -56,7 +57,7 @@ } TruncateTableDesc truncateTableDesc = new TruncateTableDesc( - actualDbName + "." + actualTblName, partSpec, + TableName.getDbTable(actualDbName, actualTblName), partSpec, context.eventOnlyReplicationSpec()); truncateTableDesc.setWriteId(msg.getWriteId()); Task truncatePtnTask = TaskFactory.get( diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/TruncateTableHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/TruncateTableHandler.java index f037cbb08b..17f7d026d4 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/TruncateTableHandler.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/TruncateTableHandler.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.hive.ql.parse.repl.load.message; +import org.apache.hadoop.hive.common.TableName; import org.apache.hadoop.hive.metastore.messaging.AlterTableMessage; import org.apache.hadoop.hive.ql.exec.Task; import org.apache.hadoop.hive.ql.exec.TaskFactory; @@ -35,8 +36,7 @@ String actualDbName = context.isDbNameEmpty() ? msg.getDB() : context.dbName; String actualTblName = context.isTableNameEmpty() ? msg.getTable() : context.tableName; - TruncateTableDesc truncateTableDesc = new TruncateTableDesc( - actualDbName + "." 
+ actualTblName, + TruncateTableDesc truncateTableDesc = new TruncateTableDesc(TableName.getDbTable(actualDbName, actualTblName), null, context.eventOnlyReplicationSpec()); truncateTableDesc.setWriteId(msg.getWriteId()); Task truncateTableTask = TaskFactory.get( diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/AlterTableDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/AlterTableDesc.java index 93641af215..0a675c0adb 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/plan/AlterTableDesc.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/AlterTableDesc.java @@ -38,7 +38,6 @@ import com.google.common.collect.ImmutableList; import java.io.Serializable; import java.util.ArrayList; -import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Map; @@ -119,7 +118,7 @@ boolean first; String afterCol; boolean expectView; - HashMap partSpec; + Map partSpec; private String newLocation; boolean protectModeEnable; ProtectModeType protectModeType; @@ -159,7 +158,7 @@ public AlterTableDesc() { * @param newType * @throws SemanticException */ - public AlterTableDesc(String tblName, HashMap partSpec, + public AlterTableDesc(String tblName, Map partSpec, String oldColName, String newColName, String newType, String newComment, boolean first, String afterCol, boolean isCascade) throws SemanticException { super(); @@ -175,7 +174,7 @@ public AlterTableDesc(String tblName, HashMap partSpec, this.isCascade = isCascade; } - public AlterTableDesc(String tblName, HashMap partSpec, + public AlterTableDesc(String tblName, Map partSpec, String oldColName, String newColName, String newType, String newComment, boolean first, String afterCol, boolean isCascade, List primaryKeyCols, List foreignKeyCols, List uniqueConstraintCols, @@ -226,7 +225,7 @@ public AlterTableDesc(String oldName, String newName, boolean expectView, Replic * new columns to be added * @throws SemanticException */ - public AlterTableDesc(String name, HashMap partSpec, List newCols, + public AlterTableDesc(String name, Map partSpec, List newCols, AlterTableTypes alterType, boolean isCascade) throws SemanticException { op = alterType; setOldName(name); @@ -262,7 +261,7 @@ public AlterTableDesc(AlterTableTypes alterType) { * @param partSpec * Partition specifier with map of key and values. 
*/ - public AlterTableDesc(AlterTableTypes alterType, HashMap partSpec, boolean expectView) { + public AlterTableDesc(AlterTableTypes alterType, Map partSpec, boolean expectView) { op = alterType; this.partSpec = partSpec; this.expectView = expectView; @@ -280,7 +279,7 @@ public AlterTableDesc(AlterTableTypes alterType, HashMap partSpe * @throws SemanticException */ public AlterTableDesc(String name, String inputFormat, String outputFormat, - String serdeName, String storageHandler, HashMap partSpec) throws SemanticException { + String serdeName, String storageHandler, Map partSpec) throws SemanticException { super(); op = AlterTableTypes.ADDFILEFORMAT; setOldName(name); @@ -292,7 +291,7 @@ public AlterTableDesc(String name, String inputFormat, String outputFormat, } public AlterTableDesc(String tableName, int numBuckets, - List bucketCols, List sortCols, HashMap partSpec) throws SemanticException { + List bucketCols, List sortCols, Map partSpec) throws SemanticException { setOldName(tableName); op = AlterTableTypes.ADDCLUSTERSORTCOLUMN; numberBuckets = numBuckets; @@ -301,7 +300,7 @@ public AlterTableDesc(String tableName, int numBuckets, this.partSpec = partSpec; } - public AlterTableDesc(String tableName, boolean sortingOff, HashMap partSpec) throws SemanticException { + public AlterTableDesc(String tableName, boolean sortingOff, Map partSpec) throws SemanticException { setOldName(tableName); op = AlterTableTypes.ADDCLUSTERSORTCOLUMN; isTurnOffSorting = sortingOff; @@ -309,7 +308,7 @@ public AlterTableDesc(String tableName, boolean sortingOff, HashMap partSpec) throws SemanticException { + Map partSpec) throws SemanticException { op = AlterTableTypes.ALTERLOCATION; setOldName(tableName); this.newLocation = newLocation; @@ -317,7 +316,7 @@ public AlterTableDesc(String tableName, String newLocation, } public AlterTableDesc(String tableName, Map, String> locations, - HashMap partSpec) throws SemanticException { + Map partSpec) throws SemanticException { op = AlterTableTypes.ALTERSKEWEDLOCATION; setOldName(tableName); this.skewedLocations = locations; @@ -333,7 +332,7 @@ public AlterTableDesc(String tableName, boolean turnOffSkewed, this.skewedColValues = new ArrayList>(skewedColValues); } - public AlterTableDesc(String tableName, HashMap partSpec, int numBuckets) throws SemanticException { + public AlterTableDesc(String tableName, Map partSpec, int numBuckets) throws SemanticException { op = AlterTableTypes.ALTERBUCKETNUM; setOldName(tableName); this.partSpec = partSpec; @@ -415,7 +414,7 @@ public String getOldName() { */ public void setOldName(String oldName) throws SemanticException { // Make sure we qualify the name from the outset so there's no ambiguity. 
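The hunk below is the qualification step that comment describes; a hedged sketch of the behaviour it relies on, namely that TableName.fromString accepts either a bare table name or an already dotted db.table string. The names are invented, and the dotted-name handling is inferred from the CreateTableDesc.toTable hunk later in this patch rather than stated by it.

import org.apache.hadoop.hive.common.TableName;

public class QualifiedNameSketch {
  public static void main(String[] args) {
    // A bare name is assumed to pick up the supplied default database.
    TableName bare = TableName.fromString("ledger", null, "finance");
    // A dotted name is assumed to carry its own database and win over the default.
    TableName dotted = TableName.fromString("audit.ledger", null, "finance");

    System.out.println(bare.getDbTable());   // expected: finance.ledger
    System.out.println(dotted.getDbTable()); // expected: audit.ledger
  }
}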
- this.oldName = String.join(".", Utilities.getDbTableName(oldName)); + this.oldName = Utilities.getTableName(oldName).getDbTable(); } /** @@ -769,14 +768,14 @@ public void setExpectView(boolean expectView) { /** * @return part specification */ - public HashMap getPartSpec() { + public Map getPartSpec() { return partSpec; } /** * @param partSpec */ - public void setPartSpec(HashMap partSpec) { + public void setPartSpec(Map partSpec) { this.partSpec = partSpec; } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/BasicStatsWork.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/BasicStatsWork.java index 40def601e6..931c4c9247 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/plan/BasicStatsWork.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/BasicStatsWork.java @@ -195,7 +195,7 @@ public String getTableName() { if (work.getLoadTableDesc() != null) { return work.getLoadTableDesc().getTable().getTableName(); } else if (work.getTableSpecs() != null) { - return work.getTableSpecs().tableName; + return work.getTableSpecs().tableName.getDbTable(); } else if (getLoadFileDesc().getCtasCreateTableDesc() != null) { return getLoadFileDesc().getCtasCreateTableDesc().getTableName(); } else { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/ColumnStatsUpdateWork.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/ColumnStatsUpdateWork.java index 1219b620f9..ea7ee46348 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/plan/ColumnStatsUpdateWork.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/ColumnStatsUpdateWork.java @@ -21,7 +21,9 @@ import java.io.Serializable; import java.util.Map; +import org.apache.hadoop.hive.common.TableName; import org.apache.hadoop.hive.metastore.api.ColumnStatistics; +import org.apache.hadoop.hive.ql.parse.SemanticException; import org.apache.hadoop.hive.ql.plan.DDLDesc.DDLDescWithWriteId; import org.apache.hadoop.hive.ql.plan.Explain.Level; @@ -39,8 +41,7 @@ private static final long serialVersionUID = 1L; private final String partName; private final Map mapProp; - private final String dbName; - private final String tableName; + private final TableName tableName; private final String colName; private final String colType; private final ColumnStatistics colStats; @@ -51,11 +52,14 @@ public ColumnStatsUpdateWork(String partName, String dbName, String tableName, String colName, - String colType) { + String colType) throws SemanticException { this.partName = partName; this.mapProp = mapProp; - this.dbName = dbName; - this.tableName = tableName; + try { + this.tableName = TableName.fromString(tableName, null, dbName); + } catch (IllegalArgumentException e) { + throw new SemanticException(e.getCause()); + } this.colName = colName; this.colType = colType; this.colStats = null; @@ -65,8 +69,7 @@ public ColumnStatsUpdateWork(ColumnStatistics colStats) { this.colStats = colStats; this.partName = null; this.mapProp = null; - this.dbName = null; - this.tableName = null; + this.tableName = new TableName(null, null, null); this.colName = null; this.colType = null; } @@ -84,11 +87,7 @@ public String getPartName() { return mapProp; } - public String dbName() { - return dbName; - } - - public String getTableName() { + public TableName getTableName() { return tableName; } @@ -109,7 +108,7 @@ public void setWriteId(long writeId) { @Override public String getFullTableName() { - return dbName + "." 
+ tableName; + return tableName.getDbTable(); } @Override diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/CreateTableDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/CreateTableDesc.java index c71ff6d713..bd9048c875 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/plan/CreateTableDesc.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/CreateTableDesc.java @@ -27,6 +27,7 @@ import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hive.common.StatsSetupConst; +import org.apache.hadoop.hive.common.TableName; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.metastore.PartitionManagementTask; import org.apache.hadoop.hive.metastore.TableType; @@ -719,16 +720,14 @@ public boolean isCTAS() { } public Table toTable(HiveConf conf) throws HiveException { - String databaseName = getDatabaseName(); - String tableName = getTableName(); - - if (databaseName == null || tableName.contains(".")) { - String[] names = Utilities.getDbTableName(tableName); - databaseName = names[0]; - tableName = names[1]; + final TableName tName; + try { + tName = TableName.fromString(getTableName(), null, getDatabaseName()); + } catch (IllegalArgumentException e) { + throw new HiveException(e.getCause()); } - Table tbl = new Table(databaseName, tableName); + Table tbl = new Table(tName.getDb(), tName.getTable()); if (getTblProps() != null) { tbl.getTTable().getParameters().putAll(getTblProps()); @@ -756,11 +755,10 @@ public Table toTable(HiveConf conf) throws HiveException { if (getSerName() == null) { if (storageHandler == null) { serDeClassName = PlanUtils.getDefaultSerDe().getName(); - LOG.info("Default to " + serDeClassName + " for table " + tableName); + LOG.info("Default to " + serDeClassName + " for table " + tName.getTable()); } else { serDeClassName = storageHandler.getSerDeClass().getName(); - LOG.info("Use StorageHandler-supplied " + serDeClassName - + " for table " + tableName); + LOG.info("Use StorageHandler-supplied " + serDeClassName + " for table " + tName.getTable()); } } else { // let's validate that the serde exists diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/CreateViewDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/CreateViewDesc.java index 7130aba597..430e091b7e 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/plan/CreateViewDesc.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/CreateViewDesc.java @@ -25,10 +25,10 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hive.common.StatsSetupConst; +import org.apache.hadoop.hive.common.TableName; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.metastore.TableType; import org.apache.hadoop.hive.metastore.api.FieldSchema; -import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils; import org.apache.hadoop.hive.ql.exec.DDLTask; import org.apache.hadoop.hive.ql.exec.Utilities; import org.apache.hadoop.hive.ql.metadata.HiveException; @@ -332,11 +332,9 @@ public ReplicationSpec getReplicationSpec(){ } public Table toTable(HiveConf conf) throws HiveException { - String[] names = Utilities.getDbTableName(getViewName()); - String databaseName = names[0]; - String tableName = names[1]; + final TableName tName = Utilities.getTableName(getViewName()); - Table tbl = new Table(databaseName, tableName); + Table tbl = new Table(tName.getDb(), tName.getTable()); tbl.setViewOriginalText(getViewOriginalText()); tbl.setViewExpandedText(getViewExpandedText()); if (isMaterialized()) { diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/plan/ImportTableDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/ImportTableDesc.java index 5c30fca2d3..143b051306 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/plan/ImportTableDesc.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/ImportTableDesc.java @@ -24,6 +24,7 @@ import java.util.Map; import com.google.common.collect.ImmutableSet; +import org.apache.hadoop.hive.common.TableName; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.metastore.TableType; import org.apache.hadoop.hive.metastore.api.FieldSchema; @@ -35,9 +36,9 @@ import org.apache.hadoop.hive.ql.hooks.WriteEntity; import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.metadata.Table; -import org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer; import org.apache.hadoop.hive.ql.parse.ReplicationSpec; import org.apache.hadoop.hive.ql.parse.SemanticException; +import org.apache.hadoop.hive.ql.session.SessionState; /** * ImportTableDesc. @@ -56,75 +57,76 @@ public ImportTableDesc(String dbName, Table table) throws Exception { this.table = table; switch (getDescType()) { - case TABLE: - this.createTblDesc = new CreateTableDesc(dbName, - table.getTableName(), - false, // isExternal: set to false here, can be overwritten by the IMPORT stmt - false, - table.getSd().getCols(), - table.getPartitionKeys(), - table.getSd().getBucketCols(), - table.getSd().getSortCols(), - table.getSd().getNumBuckets(), - null, null, null, null, null, // these 5 delims passed as serde params + case TABLE: + this.createTblDesc = new CreateTableDesc(dbName, + table.getTableName(), + false, // isExternal: set to false here, can be overwritten by the IMPORT stmt + false, + table.getSd().getCols(), + table.getPartitionKeys(), + table.getSd().getBucketCols(), + table.getSd().getSortCols(), + table.getSd().getNumBuckets(), + // these next 5 delims passed as serde params + null, null, null, null, null, + null, // comment passed as table params + table.getSd().getInputFormat(), + table.getSd().getOutputFormat(), + null, // location: set to null here, can be overwritten by the IMPORT stmt + table.getSd().getSerdeInfo().getSerializationLib(), + null, // storagehandler passed as table params + table.getSd().getSerdeInfo().getParameters(), + table.getParameters(), false, + (null == table.getSd().getSkewedInfo()) ? null : table.getSd().getSkewedInfo() + .getSkewedColNames(), + (null == table.getSd().getSkewedInfo()) ? null : table.getSd().getSkewedInfo() + .getSkewedColValues(), + null, + null, + null, + null, + null, + null, + table.getColStats()); + this.createTblDesc.setStoredAsSubDirectories(table.getSd().isStoredAsSubDirectories()); + break; + case VIEW: + final TableName tName = + TableName.fromString(table.getTableName(), SessionState.get().getCurrentCatalog(), dbName); + if (table.isMaterializedView()) { + this.createViewDesc = new CreateViewDesc(tName.getDbTable(), + table.getAllCols(), null, // comment passed as table params + table.getParameters(), + table.getPartColNames(), + false,false,false,false, table.getSd().getInputFormat(), table.getSd().getOutputFormat(), null, // location: set to null here, can be overwritten by the IMPORT stmt table.getSd().getSerdeInfo().getSerializationLib(), null, // storagehandler passed as table params - table.getSd().getSerdeInfo().getParameters(), - table.getParameters(), false, - (null == table.getSd().getSkewedInfo()) ? 
null : table.getSd().getSkewedInfo() - .getSkewedColNames(), - (null == table.getSd().getSkewedInfo()) ? null : table.getSd().getSkewedInfo() - .getSkewedColValues(), - null, - null, - null, - null, - null, - null, - table.getColStats()); - this.createTblDesc.setStoredAsSubDirectories(table.getSd().isStoredAsSubDirectories()); - break; - case VIEW: - String[] qualViewName = { dbName, table.getTableName() }; - String dbDotView = BaseSemanticAnalyzer.getDotName(qualViewName); - if (table.isMaterializedView()) { - this.createViewDesc = new CreateViewDesc(dbDotView, - table.getAllCols(), - null, // comment passed as table params - table.getParameters(), - table.getPartColNames(), - false,false,false,false, - table.getSd().getInputFormat(), - table.getSd().getOutputFormat(), - null, // location: set to null here, can be overwritten by the IMPORT stmt - table.getSd().getSerdeInfo().getSerializationLib(), - null, // storagehandler passed as table params - table.getSd().getSerdeInfo().getParameters()); + table.getSd().getSerdeInfo().getParameters()); // TODO: If the DB name from the creation metadata for any of the tables has changed, // we should update it. Currently it refers to the source database name. - this.createViewDesc.setTablesUsed(table.getCreationMetadata() != null ? - table.getCreationMetadata().getTablesUsed() : ImmutableSet.of()); - } else { - this.createViewDesc = new CreateViewDesc(dbDotView, - table.getAllCols(), - null, // comment passed as table params - table.getParameters(), - table.getPartColNames(), - false,false,false, - table.getSd().getInputFormat(), - table.getSd().getOutputFormat(), - table.getSd().getSerdeInfo().getSerializationLib()); - } - - this.setViewAsReferenceText(dbName, table); - this.createViewDesc.setPartCols(table.getPartCols()); - break; - default: - throw new HiveException("Invalid table type"); + this.createViewDesc.setTablesUsed(table.getCreationMetadata() != null ? 
+ table.getCreationMetadata().getTablesUsed() : ImmutableSet.of()); + } else { + this.createViewDesc = new CreateViewDesc(tName.getDbTable(), + table.getAllCols(), + null, // comment passed as table params + table.getParameters(), + table.getPartColNames(), + false,false,false, + table.getSd().getInputFormat(), + table.getSd().getOutputFormat(), + table.getSd().getSerdeInfo().getSerializationLib()); + } + + this.setViewAsReferenceText(dbName, table); + this.createViewDesc.setPartCols(table.getPartCols()); + break; + default: + throw new HiveException("Invalid table type"); } } @@ -199,9 +201,8 @@ public void setTableName(String tableName) throws SemanticException { createTblDesc.setTableName(tableName); break; case VIEW: - String[] qualViewName = { dbName, tableName }; - String dbDotView = BaseSemanticAnalyzer.getDotName(qualViewName); - createViewDesc.setViewName(dbDotView); + final TableName tName = TableName.fromString(tableName, SessionState.get().getCurrentCatalog(), dbName); + createViewDesc.setViewName(tName.getDbTable()); break; } } @@ -212,8 +213,7 @@ public String getTableName() throws SemanticException { return createTblDesc.getTableName(); case VIEW: String dbDotView = createViewDesc.getViewName(); - String[] names = Utilities.getDbTableName(dbDotView); - return names[1]; // names[0] have the Db name and names[1] have the view name + return Utilities.getTableName(dbDotView).getTable(); } return null; } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/RenamePartitionDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/RenamePartitionDesc.java index a4a31a5d5f..658dc86cd7 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/plan/RenamePartitionDesc.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/RenamePartitionDesc.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.hive.ql.plan; +import org.apache.hadoop.hive.common.TableName; import org.apache.hadoop.hive.ql.metadata.Table; import org.apache.hadoop.hive.ql.parse.ReplicationSpec; import org.apache.hadoop.hive.ql.plan.DDLDesc.DDLDescWithWriteId; @@ -32,12 +33,11 @@ private static final long serialVersionUID = 1L; - private String tableName; + private TableName tableName; private String location; private LinkedHashMap oldPartSpec; private LinkedHashMap newPartSpec; private ReplicationSpec replicationSpec; - private String fqTableName; private long writeId; /** @@ -57,17 +57,17 @@ public RenamePartitionDesc() { */ public RenamePartitionDesc(String tableName, Map oldPartSpec, Map newPartSpec, ReplicationSpec replicationSpec, Table table) { - this.tableName = tableName; - this.oldPartSpec = new LinkedHashMap(oldPartSpec); - this.newPartSpec = new LinkedHashMap(newPartSpec); + this.oldPartSpec = new LinkedHashMap<>(oldPartSpec); + this.newPartSpec = new LinkedHashMap<>(newPartSpec); this.replicationSpec = replicationSpec; - this.fqTableName = table != null ? (table.getDbName() + "." + table.getTableName()) : tableName; + this.tableName = table != null ? TableName.fromString(table.getTableName(), null, table.getDbName()) + : TableName.fromString(tableName, null, null); } /** * @return the table we're going to add the partitions to. 
*/ - public String getTableName() { + public TableName getTableName() { return tableName; } @@ -129,7 +129,7 @@ public void setWriteId(long writeId) { @Override public String getFullTableName() { - return fqTableName; + return tableName.getDbTable(); } @Override diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/StatsWork.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/StatsWork.java index 9b45f43026..78b0f09359 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/plan/StatsWork.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/StatsWork.java @@ -22,6 +22,7 @@ import java.util.HashSet; import java.util.Set; +import org.apache.hadoop.hive.common.TableName; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.conf.HiveConf.ConfVars; import org.apache.hadoop.hive.ql.exec.Task; @@ -154,7 +155,7 @@ public boolean getStatsReliable() { } public String getFullTableName() { - return table.getDbName() + "." + table.getTableName(); + return TableName.getDbTable(table.getDbName(),table.getTableName()); } public Task getSourceTask() { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/TableScanDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/TableScanDesc.java index 192c04c661..7d59da609f 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/plan/TableScanDesc.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/TableScanDesc.java @@ -26,6 +26,7 @@ import java.util.Map; import java.util.Objects; +import org.apache.hadoop.hive.common.TableName; import org.apache.hadoop.hive.common.type.DataTypePhysicalVariation; import org.apache.hadoop.hive.ql.io.AcidUtils; import org.apache.hadoop.hive.ql.metadata.Table; @@ -386,7 +387,7 @@ public boolean getIsMetadataOnly() { @Signature public String getQualifiedTable() { - return dbName + "." 
+ tableName; + return TableName.getDbTable(dbName, tableName); } public Table getTableMetadata() { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/DefaultHiveAuthorizationTranslator.java b/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/DefaultHiveAuthorizationTranslator.java index 853dcf8a81..0d64f9bbf5 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/DefaultHiveAuthorizationTranslator.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/DefaultHiveAuthorizationTranslator.java @@ -20,6 +20,7 @@ import java.util.ArrayList; import java.util.List; +import org.apache.hadoop.hive.common.TableName; import org.apache.hadoop.hive.ql.exec.Utilities; import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.plan.PrincipalDesc; @@ -64,7 +65,8 @@ public HivePrivilegeObject getHivePrivilegeObject(PrivilegeObjectDesc privSubjec dbTable = new String[] {null, null}; } else { if (privSubjectDesc.getTable()) { - dbTable = Utilities.getDbTableName(privSubjectDesc.getObject()); + final TableName tName = Utilities.getTableName(privSubjectDesc.getObject()); + dbTable = new String[] {tName.getDb(), tName.getTable()}; } else { dbTable = new String[] {privSubjectDesc.getObject(), null}; } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/stats/BasicStatsTask.java b/ql/src/java/org/apache/hadoop/hive/ql/stats/BasicStatsTask.java index 6eb1ca2645..1fab450220 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/stats/BasicStatsTask.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/stats/BasicStatsTask.java @@ -33,6 +33,7 @@ import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hive.common.StatsSetupConst; +import org.apache.hadoop.hive.common.TableName; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.conf.HiveConf.ConfVars; import org.apache.hadoop.hive.metastore.Warehouse; @@ -193,7 +194,7 @@ private String getAggregationPrefix(Table table, Partition partition) throws Met private String getAggregationPrefix0(Table table, Partition partition) throws MetaException { // prefix is of the form dbName.tblName - String prefix = table.getDbName() + "." + MetaStoreUtils.encodeTableName(table.getTableName()); + String prefix = TableName.getDbTable(table.getDbName(), MetaStoreUtils.encodeTableName(table.getTableName())); // FIXME: this is a secret contract; reusein getAggrKey() creates a more closer relation to the StatsGatherer // prefix = work.getAggKey(); prefix = prefix.toLowerCase(); @@ -250,7 +251,7 @@ private int aggregateStats(Hive db) { List partitions = getPartitionsList(db); - String tableFullName = table.getDbName() + "." + table.getTableName(); + final String tableFullName = TableName.getDbTable(table.getDbName(), table.getTableName()); List partishes = new ArrayList<>(); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsUpdaterThread.java b/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsUpdaterThread.java index 7c1944fcca..dafeab2c36 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsUpdaterThread.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsUpdaterThread.java @@ -575,7 +575,7 @@ public String makeFullPartName() { public String buildCommand() { // Catalogs cannot be parsed as part of the query. Seems to be a bug. - String cmd = "analyze table " + tableName.getDb() + "." 
+ tableName.getTable(); + String cmd = "analyze table " + TableName.getDbTable(tableName.getDb(), tableName.getTable()); assert partName == null || allParts == null; if (partName != null) { cmd += " partition(" + partName + ")"; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorMR.java b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorMR.java index f52b023b86..39f5a4115c 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorMR.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorMR.java @@ -39,13 +39,7 @@ import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hive.common.JavaUtils; -import org.apache.hadoop.hive.common.StatsSetupConst; -import org.apache.hadoop.hive.common.StringableMap; -import org.apache.hadoop.hive.common.ValidCompactorWriteIdList; -import org.apache.hadoop.hive.common.ValidReadTxnList; -import org.apache.hadoop.hive.common.ValidTxnList; -import org.apache.hadoop.hive.common.ValidWriteIdList; +import org.apache.hadoop.hive.common.*; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.conf.HiveConf.ConfVars; import org.apache.hadoop.hive.metastore.IMetaStoreClient; @@ -414,8 +408,8 @@ private void runCrudCompaction(HiveConf hiveConf, Table t, Partition p, StorageD private void runMmCompaction(HiveConf conf, Table t, Partition p, StorageDescriptor sd, ValidWriteIdList writeIds, CompactionInfo ci) throws IOException { - LOG.debug("Going to delete directories for aborted transactions for MM table " - + t.getDbName() + "." + t.getTableName()); + LOG.debug("Going to delete directories for aborted transactions for MM table {}", + TableName.fromString(t.getTableName(), null, t.getDbName()).getDbTable()); AcidUtils.Directory dir = AcidUtils.getAcidState(new Path(sd.getLocation()), conf, writeIds, Ref.from(false), false, t.getParameters()); removeFilesForMmTable(conf, dir); @@ -518,7 +512,7 @@ private String buildCrudMajorCompactionCreateTableQuery(String fullName, Table t } private String buildCrudMajorCompactionQuery(HiveConf conf, Table t, Partition p, String tmpName) { - String fullName = t.getDbName() + "." + t.getTableName(); + String fullName = TableName.fromString(t.getTableName(), null, t.getDbName()).getDbTable(); String query = "insert into table " + tmpName + " "; String filter = ""; if (p != null) { @@ -697,7 +691,7 @@ private String buildMmCompactionCtQuery( } private String buildMmCompactionQuery(HiveConf conf, Table t, Partition p, String tmpName) { - String fullName = t.getDbName() + "." + t.getTableName(); + String fullName = TableName.fromString(t.getTableName(), null, t.getDbName()).getDbTable(); // TODO: ideally we should make a special form of insert overwrite so that we: // 1) Could use fast merge path for ORC and RC. // 2) Didn't have to create a table. 
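Illustrative note, not part of the patch: the StatsWork, BasicStatsTask, StatsUpdaterThread and CompactorMR hunks above all replace hand-built dbName + "." + tableName concatenation with the TableName helpers. The following minimal, self-contained Java sketch shows the two call shapes those hunks rely on; the database and table literals are invented, and only the hive-storage-api TableName class is assumed.

import org.apache.hadoop.hive.common.TableName;

public class QualifiedNameSketch {
  public static void main(String[] args) {
    // Static helper: the direct replacement for dbName + "." + tableName concatenation.
    String viaHelper = TableName.getDbTable("acid_db", "orders");

    // Parse-then-format: what runMmCompaction and buildMmCompactionQuery now do when they
    // hold a bare table name plus the owning database (catalog passed as null, as at those call sites).
    String viaParse = TableName.fromString("orders", null, "acid_db").getDbTable();

    System.out.println(viaHelper);                    // acid_db.orders
    System.out.println(viaHelper.equals(viaParse));   // true
  }
}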
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/util/HiveStrictManagedMigration.java b/ql/src/java/org/apache/hadoop/hive/ql/util/HiveStrictManagedMigration.java index 80025b7046..d444720a50 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/util/HiveStrictManagedMigration.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/util/HiveStrictManagedMigration.java @@ -36,6 +36,7 @@ import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hive.common.FileUtils; import org.apache.hadoop.hive.common.LogUtils; +import org.apache.hadoop.hive.common.TableName; import org.apache.hadoop.hive.common.ValidTxnList; import org.apache.hadoop.hive.common.cli.CommonCliOptions; import org.apache.hadoop.hive.conf.HiveConf; @@ -1035,7 +1036,7 @@ private TxnCtx generateTxnCtxForAlter( TxnCtx result = null; try { ValidTxnList txns = msc.getValidTxns(txnId); - String fqn = table.getDbName() + "." + table.getTableName(); + String fqn = TableName.getDbTable(table.getDbName(), table.getTableName()); List writeIdsObj = msc.getValidWriteIds( Lists.newArrayList(fqn), txns.toString()); String validWriteIds = TxnCommonUtils.createValidTxnWriteIdList(txnId, writeIdsObj) diff --git a/ql/src/test/org/apache/hadoop/hive/ql/exec/TestUtilities.java b/ql/src/test/org/apache/hadoop/hive/ql/exec/TestUtilities.java index 305b467439..ab24917b59 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/exec/TestUtilities.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/exec/TestUtilities.java @@ -59,6 +59,7 @@ import org.apache.hadoop.hive.ql.io.*; import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.metadata.Table; +import org.apache.hadoop.hive.ql.parse.SemanticException; import org.apache.hadoop.hive.ql.plan.DependencyCollectionWork; import org.apache.hadoop.hive.ql.plan.DynamicPartitionCtx; import org.apache.hadoop.hive.ql.plan.ExprNodeConstantDesc; @@ -69,7 +70,6 @@ import org.apache.hadoop.hive.ql.plan.MapredWork; import org.apache.hadoop.hive.ql.plan.PartitionDesc; import org.apache.hadoop.hive.ql.plan.TableDesc; -import org.apache.hadoop.hive.ql.session.SessionState; import org.apache.hadoop.hive.ql.udf.generic.GenericUDFFromUtcTimestamp; import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory; import org.apache.hadoop.mapred.JobConf; @@ -131,31 +131,11 @@ public void testSerializeTimestamp() { } @Test - public void testgetDbTableName() throws HiveException{ - String tablename; - String [] dbtab; - SessionState.start(new HiveConf(this.getClass())); - String curDefaultdb = SessionState.get().getCurrentDatabase(); - - //test table without db portion - tablename = "tab1"; - dbtab = Utilities.getDbTableName(tablename); - assertEquals("db name", curDefaultdb, dbtab[0]); - assertEquals("table name", tablename, dbtab[1]); - - //test table with db portion - tablename = "dab1.tab1"; - dbtab = Utilities.getDbTableName(tablename); - assertEquals("db name", "dab1", dbtab[0]); - assertEquals("table name", "tab1", dbtab[1]); - - //test invalid table name - tablename = "dab1.tab1.x1"; + public void testGetTableName() throws HiveException{ try { - dbtab = Utilities.getDbTableName(tablename); + Utilities.getTableName(null); fail("exception was expected for invalid table name"); - } catch(HiveException ex){ - assertEquals("Invalid table name " + tablename, ex.getMessage()); + } catch(SemanticException ex){ } } diff --git a/ql/src/test/org/apache/hadoop/hive/ql/parse/authorization/PrivilegesTestBase.java b/ql/src/test/org/apache/hadoop/hive/ql/parse/authorization/PrivilegesTestBase.java 
index ff41919eeb..4d6a4aa0e1 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/parse/authorization/PrivilegesTestBase.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/parse/authorization/PrivilegesTestBase.java @@ -19,6 +19,7 @@ import junit.framework.Assert; +import org.apache.hadoop.hive.common.TableName; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.metastore.api.PrincipalType; import org.apache.hadoop.hive.ql.QueryState; @@ -32,7 +33,7 @@ public class PrivilegesTestBase { protected static final String DB = "default"; protected static final String TABLE = "table1"; - protected static final String TABLE_QNAME = DB + "." + TABLE; + protected static final String TABLE_QNAME = TableName.getDbTable(DB, TABLE); protected static final String USER = "user1"; public static void grantUserTable(String privStr, PrivilegeType privType, QueryState queryState, Hive db) diff --git a/ql/src/test/org/apache/hadoop/hive/ql/parse/authorization/TestHiveAuthorizationTaskFactory.java b/ql/src/test/org/apache/hadoop/hive/ql/parse/authorization/TestHiveAuthorizationTaskFactory.java index 9a8c032623..012de44df1 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/parse/authorization/TestHiveAuthorizationTaskFactory.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/parse/authorization/TestHiveAuthorizationTaskFactory.java @@ -19,6 +19,7 @@ import java.util.HashMap; +import org.apache.hadoop.hive.common.TableName; import org.junit.Assert; import org.apache.hadoop.hive.conf.HiveConf; @@ -83,7 +84,7 @@ public static void reset() { private static final String SELECT = "SELECT"; private static final String DB = "default"; private static final String TABLE = "table1"; - private static final String TABLE_QNAME = DB + "." + TABLE; + private static final String TABLE_QNAME = TableName.getDbTable(DB, TABLE); private static final String GROUP = "group1"; private static final String ROLE = "role1"; private static final String USER = "user1"; diff --git a/ql/src/test/org/apache/hadoop/hive/ql/stats/TestStatsUpdaterThread.java b/ql/src/test/org/apache/hadoop/hive/ql/stats/TestStatsUpdaterThread.java index 24acd6da3b..704c2e60b3 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/stats/TestStatsUpdaterThread.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/stats/TestStatsUpdaterThread.java @@ -29,6 +29,7 @@ import org.apache.curator.shaded.com.google.common.collect.Lists; import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.hive.common.StatsSetupConst; +import org.apache.hadoop.hive.common.TableName; import org.apache.hadoop.hive.common.ValidWriteIdList; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.metastore.HiveMetaStoreClient; @@ -151,7 +152,7 @@ public void testTxnTable() throws Exception { + "(\"transactional\"=\"true\", \"transactional_properties\"=\"insert_only\")"); executeQuery("insert into simple_stats (s) values ('test')"); List cols = Lists.newArrayList("s"); - String dbName = ss.getCurrentDatabase(), tblName = "simple_stats", fqName = dbName + "." + tblName; + String dbName = ss.getCurrentDatabase(), tblName = "simple_stats", fqName = TableName.getDbTable(dbName, tblName); ValidWriteIdList initialWriteIds = msClient.getValidWriteIds(fqName); verifyStatsUpToDate(tblName, cols, msClient, initialWriteIds.toString(), true); assertFalse(su.runOneIteration()); @@ -244,7 +245,7 @@ public void testTxnPartitions() throws Exception { drainWorkQueue(su, 0); // Overwrite the txn state to refer to an aborted txn on some partitions. 
- String dbName = ss.getCurrentDatabase(), tblName = "simple_stats", fqName = dbName + "." + tblName; + String dbName = ss.getCurrentDatabase(), tblName = "simple_stats", fqName = TableName.getDbTable(dbName, tblName); long badTxnId = msClient.openTxn("moo"); long badWriteId = msClient.allocateTableWriteId(badTxnId, dbName, tblName); msClient.abortTxns(Lists.newArrayList(badTxnId)); diff --git a/storage-api/src/java/org/apache/hadoop/hive/common/TableName.java b/storage-api/src/java/org/apache/hadoop/hive/common/TableName.java index f5cb192561..7a32e6aaff 100644 --- a/storage-api/src/java/org/apache/hadoop/hive/common/TableName.java +++ b/storage-api/src/java/org/apache/hadoop/hive/common/TableName.java @@ -22,29 +22,35 @@ * includes utilities for string parsing. */ public class TableName { + + /** Exception message thrown. */ + private static final String ILL_ARG_EXCEPTION_MSG = + "Table name must be either <tablename>, <dbname>.<tablename> " + "or <catname>.<dbname>.<tablename>"; + + /** Names of the related DB objects. */ private final String cat; private final String db; private final String table; /** * - * @param cat catalog name. Cannot be null. If you do not know it you can get it from + * @param catName catalog name. Cannot be null. If you do not know it you can get it from * SessionState.getCurrentCatalog() if you want to use the catalog from the current * session, or from MetaStoreUtils.getDefaultCatalog() if you do not have a session * or want to use the default catalog for the Hive instance. - * @param db database name. Cannot be null. If you do not now it you can get it from + * @param dbName database name. Cannot be null. If you do not know it you can get it from * SessionState.getCurrentDatabase() or use Warehouse.DEFAULT_DATABASE_NAME. - * @param table table name, cannot be null + * @param tableName table name, cannot be null */ - public TableName(String cat, String db, String table) { - this.cat = cat; - this.db = db; - this.table = table; + public TableName(final String catName, final String dbName, final String tableName) { + this.cat = catName; + this.db = dbName; + this.table = tableName; } /** * Build a TableName from a string of the form [[catalog.]database.]table. - * @param name name in string form + * @param name name in string form, not null * @param defaultCatalog default catalog to use if catalog is not in the name. If you do not * know it you can get it from SessionState.getCurrentCatalog() if you * want to use the catalog from the current session, or from @@ -54,17 +60,21 @@ public TableName(String cat, String db, String table) { * not now it you can get it from SessionState.getCurrentDatabase() or * use Warehouse.DEFAULT_DATABASE_NAME. * @return TableName + * @throws IllegalArgumentException if the name is null or not a valid table name */ - public static TableName fromString(String name, String defaultCatalog, String defaultDatabase) { + public static TableName fromString(final String name, final String defaultCatalog, final String defaultDatabase) + throws IllegalArgumentException { + if (name == null) { + throw new IllegalArgumentException(String.join("", "Table value was null. ", ILL_ARG_EXCEPTION_MSG)); + } if (name.contains(DatabaseName.CAT_DB_TABLE_SEPARATOR)) { - String names[] = name.split("\\."); + String[] names = name.split("\\."); if (names.length == 2) { return new TableName(defaultCatalog, names[0], names[1]); } else if (names.length == 3) { return new TableName(names[0], names[1], names[2]); } else {
- throw new RuntimeException("Table name must be either <tablename>, <dbname>.<tablename> " + - "or <catname>.<dbname>.<tablename>"); + throw new IllegalArgumentException(ILL_ARG_EXCEPTION_MSG); } } else { @@ -89,7 +99,13 @@ public String getTable() { */ public String getDbTable() { return db + DatabaseName.CAT_DB_TABLE_SEPARATOR + table; + } + /** + * Get the name in db.table format, if db is not empty, otherwise return only the table name. + */ + public String getNotEmptyDbTable() { + return db == null || db.isEmpty() ? table : db + DatabaseName.CAT_DB_TABLE_SEPARATOR + table; } /** @@ -97,7 +113,6 @@ public String getDbTable() { */ public static String getDbTable(String dbName, String tableName) { return dbName + DatabaseName.CAT_DB_TABLE_SEPARATOR + tableName; - } public static String getQualified(String catName, String dbName, String tableName) { diff --git a/storage-api/src/test/org/apache/hadoop/hive/common/TestTableName.java b/storage-api/src/test/org/apache/hadoop/hive/common/TestTableName.java index 0a8cb2a82e..f19c7358c9 100644 --- a/storage-api/src/test/org/apache/hadoop/hive/common/TestTableName.java +++ b/storage-api/src/test/org/apache/hadoop/hive/common/TestTableName.java @@ -22,7 +22,7 @@ public class TestTableName { @Test - public void fullname() { + public void fullName() { TableName name = new TableName("cat", "db", "t"); Assert.assertEquals("cat", name.getCat()); Assert.assertEquals("db", name.getDb()); @@ -47,5 +47,24 @@ public void fromString() { Assert.assertEquals("cat", name.getCat()); Assert.assertEquals("db", name.getDb()); Assert.assertEquals("tab", name.getTable()); + + try { + TableName.fromString(null, null, null); + Assert.fail("Name can't be null"); + } catch (IllegalArgumentException e) { + Assert.assertTrue(true); + } + } + + @Test + public void testNotEmptyDbTable() { + TableName name = new TableName("cat", "db", "t"); + Assert.assertEquals("db.t", name.getNotEmptyDbTable()); + + name = new TableName("cat", null, "t"); + Assert.assertEquals("t", name.getNotEmptyDbTable()); + + name = new TableName("cat", "", "t"); + Assert.assertEquals("t", name.getNotEmptyDbTable()); } }