diff --git ql/src/java/org/apache/hadoop/hive/metastore/SynchronizedMetaStoreClient.java ql/src/java/org/apache/hadoop/hive/metastore/SynchronizedMetaStoreClient.java
index 1e279f38f8..0ab77e84c6 100644
--- ql/src/java/org/apache/hadoop/hive/metastore/SynchronizedMetaStoreClient.java
+++ ql/src/java/org/apache/hadoop/hive/metastore/SynchronizedMetaStoreClient.java
@@ -78,9 +78,9 @@ public synchronized Partition add_partition(Partition partition) throws TExcepti
     return client.add_partition(partition);
   }
 
-  public synchronized void alter_partition(String dbName, String tblName,
+  public synchronized void alter_partition(String catName, String dbName, String tblName,
       Partition newPart, EnvironmentContext environmentContext, String writeIdList) throws TException {
-    client.alter_partition(dbName, tblName, newPart, environmentContext, writeIdList);
+    client.alter_partition(catName, dbName, tblName, newPart, environmentContext, writeIdList);
   }
 
   public synchronized LockResponse checkLock(long lockid) throws TException {
diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
index 467f7280ef..58c3ae1787 100644
--- ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
@@ -1435,7 +1435,7 @@ private int alterTableAlterPart(Hive db, AlterTableAlterPartDesc alterPartitionD
    */
   private int touch(Hive db, AlterTableSimpleDesc touchDesc)
       throws HiveException {
-
+    // TODO: catalog
     Table tbl = db.getTable(touchDesc.getTableName());
     EnvironmentContext environmentContext = new EnvironmentContext();
     environmentContext.putToProperties(StatsSetupConst.DO_NOT_UPDATE_STATS, StatsSetupConst.TRUE);
@@ -1450,7 +1450,8 @@ private int touch(Hive db, AlterTableSimpleDesc touchDesc)
         throw new HiveException("Specified partition does not exist");
       }
       try {
-        db.alterPartition(touchDesc.getTableName(), part, environmentContext, true);
+        db.alterPartition(tbl.getCatalogName(), tbl.getDbName(), tbl.getTableName(),
+            part, environmentContext, true);
       } catch (InvalidOperationException e) {
        throw new HiveException(e);
      }
@@ -1799,6 +1800,7 @@ private int archive(Hive db, AlterTableSimpleDesc simpleDesc,
             authority.toString(), harPartitionDir.getPath()); // make in Path to ensure no slash at the end
         setArchived(p, harPath, partSpecInfo.values.size());
+        // TODO: catalog
         db.alterPartition(simpleDesc.getTableName(), p, null, true);
       }
     } catch (Exception e) {
@@ -2005,6 +2007,7 @@ private int unarchive(Hive db, AlterTableSimpleDesc simpleDesc)
     for(Partition p: partitions) {
       setUnArchived(p);
       try {
+        // TODO: catalog
         db.alterPartition(simpleDesc.getTableName(), p, null, true);
       } catch (InvalidOperationException e) {
         throw new HiveException(e);
@@ -4766,6 +4769,7 @@ private void dropTable(Hive db, Table tbl, DropTableDesc dropTbl) throws HiveExc
     }
 
     // drop the table
+    // TODO: API w/catalog name
     db.dropTable(dropTbl.getTableName(), dropTbl.getIfPurge());
     if (tbl != null) {
       // Remove from cache if it is a materialized view
diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java
index d2c04e22de..827721f3e8 100644
--- ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java
@@ -798,7 +798,8 @@ private void updatePartitionBucketSortColumns(Hive db, Table table, Partition pa
     }
 
     if (updateBucketCols || updateSortCols) {
-      db.alterPartition(table.getDbName(), table.getTableName(), partn, null, true);
+      db.alterPartition(table.getCatalogName(), table.getDbName(), table.getTableName(),
+          partn, null, true);
     }
   }
 
diff --git ql/src/java/org/apache/hadoop/hive/ql/hooks/UpdateInputAccessTimeHook.java ql/src/java/org/apache/hadoop/hive/ql/hooks/UpdateInputAccessTimeHook.java
index ea0b2c357c..36a92aab57 100644
--- ql/src/java/org/apache/hadoop/hive/ql/hooks/UpdateInputAccessTimeHook.java
+++ ql/src/java/org/apache/hadoop/hive/ql/hooks/UpdateInputAccessTimeHook.java
@@ -73,7 +73,7 @@ public void run(HookContext hookContext) throws Exception {
         Table t = db.getTable(dbName, tblName);
         p = db.getPartition(t, p.getSpec(), false);
         p.setLastAccessTime(lastAccessTime);
-        db.alterPartition(dbName, tblName, p, null, false);
+        db.alterPartition(null, dbName, tblName, p, null, false);
         t.setLastAccessTime(lastAccessTime);
         db.alterTable(dbName + "." + tblName, t, false, null, false);
         break;
diff --git ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
index 9b82080ffb..36dc694964 100644
--- ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
+++ ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
@@ -682,11 +682,12 @@ public void updateCreationMetadata(String dbName, String tableName, CreationMeta
    *           if the changes in metadata is not acceptable
    * @throws TException
    */
+  @Deprecated
   public void alterPartition(String tblName, Partition newPart,
       EnvironmentContext environmentContext, boolean transactional)
       throws InvalidOperationException, HiveException {
     String[] names = Utilities.getDbTableName(tblName);
-    alterPartition(names[0], names[1], newPart, environmentContext, transactional);
+    alterPartition(null, names[0], names[1], newPart, environmentContext, transactional);
   }
 
   /**
@@ -706,10 +707,13 @@ public void alterPartition(String tblName, Partition newPart,
    *           if the changes in metadata is not acceptable
    * @throws TException
    */
-  public void alterPartition(String dbName, String tblName, Partition newPart,
+  public void alterPartition(String catName, String dbName, String tblName, Partition newPart,
       EnvironmentContext environmentContext, boolean transactional)
       throws InvalidOperationException, HiveException {
     try {
+      if (catName == null) {
+        catName = getDefaultCatalog(conf);
+      }
       validatePartition(newPart);
       String location = newPart.getLocation();
       if (location != null) {
@@ -728,7 +732,7 @@ public void alterPartition(String dbName, String tblName, Partition newPart,
           LOG.warn("Cannot get a table snapshot for " + tblName);
         }
       }
-      getSynchronizedMSC().alter_partition(
+      getSynchronizedMSC().alter_partition(catName,
           dbName, tblName, newPart.getTPartition(), environmentContext,
           tableSnapshot == null ? null : tableSnapshot.getValidWriteIdList());
@@ -849,6 +853,7 @@ public void renamePartition(Table tbl, Map<String, String> oldPartSpec, Partitio
     }
   }
 
+  // TODO: this whole path won't work with catalogs
   public void alterDatabase(String dbName, Database db)
       throws HiveException {
     try {
@@ -872,6 +877,8 @@ public void createTable(Table tbl) throws HiveException {
     createTable(tbl, false);
   }
 
+  // TODO: from here down dozens of methods do not support catalog. I got tired marking them.
+
   /**
    * Creates the table with the given objects. It takes additional arguments for
    * primary keys and foreign keys associated with the table.
@@ -1071,12 +1078,12 @@ public void dropTable(String dbName, String tableName, boolean deleteData,
   public void truncateTable(String dbDotTableName, Map<String, String> partSpec) throws HiveException {
     try {
       Table table = getTable(dbDotTableName, true);
-      // TODO: we should refactor code to make sure snapshot is always obtained in the same layer e.g. Hive.java
       AcidUtils.TableSnapshot snapshot = null;
       if (AcidUtils.isTransactionalTable(table)) {
         snapshot = AcidUtils.getTableSnapshot(conf, table, true);
       }
 
+      // TODO: APIs with catalog names
       List<String> partNames = ((null == partSpec) ? null
           : getPartitionNames(table.getDbName(), table.getTableName(), partSpec, (short) -1));
       if (snapshot == null) {
@@ -1130,6 +1137,7 @@ public Table getTable(final String tableName, boolean throwException) throws Hiv
    *           if there's an internal error or if the table doesn't exist
    */
   public Table getTable(final String dbName, final String tableName) throws HiveException {
+    // TODO: catalog... etc everywhere
     if (tableName.contains(".")) {
       String[] names = Utilities.getDbTableName(tableName);
       return this.getTable(names[0], names[1], true);
@@ -2117,7 +2125,7 @@ private void setStatsPropAndAlterPartition(boolean hasFollowingStatsTask, Table
       ec.putToProperties(StatsSetupConst.DO_NOT_UPDATE_STATS, StatsSetupConst.TRUE);
     }
     LOG.debug("Altering existing partition " + newTPart.getSpec());
-    getSynchronizedMSC().alter_partition(
+    getSynchronizedMSC().alter_partition(tbl.getCatName(),
         tbl.getDbName(), tbl.getTableName(), newTPart.getTPartition(), new EnvironmentContext(),
         tableSnapshot == null ? null : tableSnapshot.getValidWriteIdList());
   }
@@ -2591,6 +2599,7 @@ public Partition createPartition(Table tbl, Map<String, String> partSpec) throws
   }
 
   public List<Partition> createPartitions(AddPartitionDesc addPartitionDesc) throws HiveException {
+    // TODO: catalog name everywhere in this method
     Table tbl = getTable(addPartitionDesc.getDbName(), addPartitionDesc.getTableName());
     int size = addPartitionDesc.getPartitionCount();
     List<org.apache.hadoop.hive.metastore.api.Partition> in =
@@ -2799,7 +2808,8 @@ private void alterPartitionSpec(Table tbl,
     if (!org.apache.commons.lang.StringUtils.isEmpty(tbl.getDbName())) {
       fullName = tbl.getFullyQualifiedName();
     }
-    alterPartition(fullName, new Partition(tbl, tpart), null, true);
+    alterPartition(tbl.getCatalogName(), tbl.getDbName(), tbl.getTableName(),
+        new Partition(tbl, tpart), null, true);
   }
 
   private void alterPartitionSpecInMemory(Table tbl,
diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/UpdateDeleteSemanticAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/parse/UpdateDeleteSemanticAnalyzer.java
index 0d80ed3c82..e7a33f5b5a 100644
--- ql/src/java/org/apache/hadoop/hive/ql/parse/UpdateDeleteSemanticAnalyzer.java
+++ ql/src/java/org/apache/hadoop/hive/ql/parse/UpdateDeleteSemanticAnalyzer.java
@@ -271,6 +271,7 @@ private void analyzeAcidExport(ASTNode ast) throws SemanticException {
     if(ctx.isExplainPlan()) {
       try {
         //so that "explain" doesn't "leak" tmp tables
+        // TODO: catalog
         db.dropTable(newTable.getDbName(), newTable.getTableName(), true, true, true);
       } catch(HiveException ex) {
         LOG.warn("Unable to drop " + newTableName + " due to: " + ex.getMessage(), ex);
diff --git ql/src/java/org/apache/hadoop/hive/ql/util/HiveStrictManagedMigration.java ql/src/java/org/apache/hadoop/hive/ql/util/HiveStrictManagedMigration.java
index 2a737bbf8a..806464ce79 100644
--- ql/src/java/org/apache/hadoop/hive/ql/util/HiveStrictManagedMigration.java
+++ ql/src/java/org/apache/hadoop/hive/ql/util/HiveStrictManagedMigration.java
@@ -241,7 +241,7 @@ static RunOptions createRunOptions(CommandLine cli) throws Exception {
     }
     String oldWarehouseRoot = cli.getOptionValue("oldWarehouseRoot");
     boolean dryRun = cli.hasOption("dryRun");
-    
+
     RunOptions runOpts = new RunOptions(
         dbRegex,
         tableRegex,
@@ -499,7 +499,7 @@ boolean shouldModifyDatabaseLocation(Database dbObj) throws IOException, MetaExc
             oldDefaultDbLocation, curWhRootPath, dbName);
       }
     }
-    }  
+    }
     return false;
   }
@@ -576,7 +576,7 @@ void createExternalDbDir(Database dbObj) throws IOException, MetaException {
   void moveTableData(Database dbObj, Table tableObj, Path newTablePath) throws HiveException, IOException, TException {
     String dbName = tableObj.getDbName();
     String tableName = tableObj.getTableName();
-    
+
     Path oldTablePath = new Path(tableObj.getSd().getLocation());
     LOG.info("Moving location of {} from {} to {}", getQualifiedName(tableObj), oldTablePath, newTablePath);
@@ -909,8 +909,8 @@ void updateTableLocation(Table table, Path newLocation) throws HiveException {
         modifiedTable, false, null, false);
   }
 
-  void updatePartitionLocation(String dbName, Table table, String partName, Partition part, Path newLocation)
-      throws HiveException, TException {
+  void updatePartitionLocation(String dbName, Table table, String partName,
+      Partition part, Path newLocation) throws HiveException, TException {
     String msg = String.format("ALTER TABLE %s PARTITION (%s) SET LOCATION '%s'",
         getQualifiedName(table), partName, newLocation.toString());
     LOG.info(msg);
@@ -920,7 +920,8 @@ void updatePartitionLocation(String dbName, Table table, String partName, Partit
         new org.apache.hadoop.hive.ql.metadata.Table(table),
         part);
     modifiedPart.setLocation(newLocation.toString());
-    hive.alterPartition(dbName, table.getTableName(), modifiedPart, null, false);
+    hive.alterPartition(
+        table.getCatName(), dbName, table.getTableName(), modifiedPart, null, false);
   }
 
   void updateTableProperties(Table table, Map<String, String> props) throws HiveException {
@@ -1100,7 +1101,7 @@ static void checkAndSetFileOwnerPermissions(FileSystem fs, FileStatus fStatus,
     if (isDir && recurse) {
       for (FileStatus subFile : fs.listStatus(path)) {
         // TODO: Use threadpool for more concurrency?
-        // TODO: check/set all files, or only directories  
+        // TODO: check/set all files, or only directories
         checkAndSetFileOwnerPermissions(fs, subFile, userName, groupName, dirPerms, filePerms, dryRun, recurse);
       }
     }
diff --git standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
index 5ae00af564..faf6810420 100644
--- standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
+++ standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
@@ -2061,11 +2061,12 @@ public void alter_partition(String catName, String dbName, String tblName, Parti
   }
 
   @Override
-  public void alter_partition(String dbName, String tblName, Partition newPart,
+  public void alter_partition(String catName, String dbName, String tblName, Partition newPart,
       EnvironmentContext environmentContext, String writeIdList)
       throws InvalidOperationException, MetaException, TException {
     AlterPartitionsRequest req = new AlterPartitionsRequest(
         dbName, tblName, Lists.newArrayList(newPart));
+    req.setCatName(catName);
     req.setEnvironmentContext(environmentContext);
     req.setValidWriteIdList(writeIdList);
     client.alter_partitions_req(req);
diff --git standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
index 91405b9a33..ac10da2f3e 100644
--- standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
+++ standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
@@ -534,6 +534,7 @@ void dropTable(String dbname, String tableName, boolean deleteData,
    * @throws TException
    *           A thrift communication error occurred
    */
+  @Deprecated // TODO: deprecate all methods without a catalog here; a single layer (e.g. Hive.java) should handle current-catalog
   void dropTable(String dbname, String tableName, boolean deleteData,
       boolean ignoreUnknownTab, boolean ifPurge) throws MetaException, TException, NoSuchObjectException;
 
@@ -2079,7 +2080,7 @@ void alter_partition(String dbName, String tblName, Partition newPart, Environme
       throws InvalidOperationException, MetaException, TException;
 
-  void alter_partition(String dbName, String tblName, Partition newPart,
+  void alter_partition(String catName, String dbName, String tblName, Partition newPart,
       EnvironmentContext environmentContext, String writeIdList)
       throws InvalidOperationException, MetaException, TException;
 
diff --git standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClientPreCatalog.java standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClientPreCatalog.java
index 35abd006d4..6e0d9c1848 100644
--- standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClientPreCatalog.java
+++ standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClientPreCatalog.java
@@ -3516,7 +3516,7 @@ public void alter_table(String catName, String databaseName, String tblName, Tab
   }
 
   @Override
-  public void alter_partition(String dbName, String tblName, Partition newPart,
+  public void alter_partition(String catName, String dbName, String tblName, Partition newPart,
       EnvironmentContext environmentContext, String writeIdList)
       throws InvalidOperationException, MetaException, TException {
     throw new UnsupportedOperationException();
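
Note on usage (not part of the patch): the changes above thread an explicit catalog name through the alter_partition call chain, with Hive.alterPartition() falling back to getDefaultCatalog(conf) when null is passed. Below is a minimal caller-side sketch of the new overload; the table name "web_logs", the partition spec, and the HiveConf setup are hypothetical illustrations, not code from this patch.

// Illustration only -- a sketch of calling the catalog-aware overload added in
// this patch. "web_logs" and the ("ds" -> "2019-01-01") spec are hypothetical.
import java.util.HashMap;
import java.util.Map;

import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.ql.metadata.Hive;
import org.apache.hadoop.hive.ql.metadata.Partition;
import org.apache.hadoop.hive.ql.metadata.Table;

public class AlterPartitionCatalogExample {
  public static void main(String[] args) throws Exception {
    HiveConf conf = new HiveConf();
    Hive db = Hive.get(conf);

    Table tbl = db.getTable("web_logs");               // hypothetical table
    Map<String, String> spec = new HashMap<>();
    spec.put("ds", "2019-01-01");                      // hypothetical partition spec
    Partition part = db.getPartition(tbl, spec, false);

    // Preferred form after this patch: pass the catalog explicitly. Passing
    // null for catName makes Hive.alterPartition() resolve
    // getDefaultCatalog(conf) internally, per the change in Hive.java above.
    db.alterPartition(tbl.getCatalogName(), tbl.getDbName(), tbl.getTableName(),
        part, /* environmentContext */ null, /* transactional */ true);

    // The old "db.table" overload still compiles but is now @Deprecated; it
    // routes through the same path with a null catalog name.
    db.alterPartition(tbl.getDbName() + "." + tbl.getTableName(), part, null, true);
  }
}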