diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenarios.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenarios.java
index 00620528ae..39d077a024 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenarios.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenarios.java
@@ -22,6 +22,7 @@
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.PathFilter;
+import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.hive.cli.CliSessionState;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
@@ -3507,6 +3508,57 @@ public void testAuthForNotificationAPIs() throws Exception {
     }
   }
 
+  @Test
+  public void testRecycleFileDropTempTable() throws IOException {
+    String dbName = createDB(testName.getMethodName(), driver);
+
+    run("CREATE TABLE " + dbName + ".normal(a int)", driver);
+    run("INSERT INTO " + dbName + ".normal values (1)", driver);
+    run("DROP TABLE " + dbName + ".normal", driver);
+
+    String cmDir = hconf.getVar(HiveConf.ConfVars.REPLCMDIR);
+    Path path = new Path(cmDir);
+    FileSystem fs = path.getFileSystem(hconf);
+    ContentSummary cs = fs.getContentSummary(path);
+    long fileCount = cs.getFileCount();
+
+    assertTrue(fileCount != 0);
+
+    run("CREATE TABLE " + dbName + ".normal(a int)", driver);
+    run("INSERT INTO " + dbName + ".normal values (1)", driver);
+
+    run("CREATE TEMPORARY TABLE " + dbName + ".temp(a int)", driver);
+    run("INSERT INTO " + dbName + ".temp values (2)", driver);
+    run("INSERT OVERWRITE TABLE " + dbName + ".temp select * from " + dbName + ".normal", driver);
+
+    cs = fs.getContentSummary(path);
+    long fileCountAfter = cs.getFileCount();
+
+    assertTrue(fileCount == fileCountAfter);
+
+    run("INSERT INTO " + dbName + ".temp values (3)", driver);
+    run("TRUNCATE TABLE " + dbName + ".temp", driver);
+
+    cs = fs.getContentSummary(path);
+    fileCountAfter = cs.getFileCount();
+    assertTrue(fileCount == fileCountAfter);
+
+    run("INSERT INTO " + dbName + ".temp values (4)", driver);
+    run("ALTER TABLE " + dbName + ".temp RENAME to " + dbName + ".temp1", driver);
+    verifyRun("SELECT count(*) from " + dbName + ".temp1", new String[]{"1"}, driver);
+
+    cs = fs.getContentSummary(path);
+    fileCountAfter = cs.getFileCount();
+    assertTrue(fileCount == fileCountAfter);
+
+    run("INSERT INTO " + dbName + ".temp1 values (5)", driver);
+    run("DROP TABLE " + dbName + ".temp1", driver);
+
+    cs = fs.getContentSummary(path);
+    fileCountAfter = cs.getFileCount();
+    assertTrue(fileCount == fileCountAfter);
+  }
+
   private NotificationEvent createDummyEvent(String dbname, String tblname, long evid) {
     MessageFactory msgFactory = MessageFactory.getInstance();
     Table t = new Table();
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
index 23983d85b3..eecef18ee1 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
@@ -1881,7 +1881,7 @@ else if(!isAcidIUDoperation && isFullAcidTable) {
       // base_x.  (there is Insert Overwrite and Load Data Overwrite)
       boolean isAutoPurge = "true".equalsIgnoreCase(tbl.getProperty("auto.purge"));
       replaceFiles(tbl.getPath(), loadPath, destPath, oldPartPath, getConf(),
-          isSrcLocal, isAutoPurge, newFiles, filter, isMmTableWrite?true:false);
+          isSrcLocal, isAutoPurge, newFiles, filter, isMmTableWrite?true:false, !tbl.isTemporary());
     } else {
       FileSystem fs = tbl.getDataLocation().getFileSystem(conf);
       copyFiles(conf, loadPath, destPath, fs, isSrcLocal, isAcidIUDoperation,
@@ -2426,7 +2426,7 @@ else if(!isAcidIUDoperation && isFullAcidTable) {
       //todo: should probably do the same for MM IOW
       boolean isAutopurge = "true".equalsIgnoreCase(tbl.getProperty("auto.purge"));
       replaceFiles(tblPath, loadPath, destPath, tblPath,
-          sessionConf, isSrcLocal, isAutopurge, newFiles, filter, isMmTable?true:false);
+          sessionConf, isSrcLocal, isAutopurge, newFiles, filter, isMmTable?true:false, !tbl.isTemporary());
     } else {
       try {
         FileSystem fs = tbl.getDataLocation().getFileSystem(sessionConf);
@@ -3949,7 +3949,7 @@ private static void moveAcidFiles(String deltaFileType, PathFilter pathFilter, F
    */
   protected void replaceFiles(Path tablePath, Path srcf, Path destf, Path oldPath, HiveConf conf,
           boolean isSrcLocal, boolean purge, List<Path> newFiles, PathFilter deletePathFilter,
-          boolean isMmTableOverwrite) throws HiveException {
+          boolean isMmTableOverwrite, boolean isNeedRecycle) throws HiveException {
     try {
 
       FileSystem destFs = destf.getFileSystem(conf);
@@ -3970,7 +3970,7 @@ protected void replaceFiles(Path tablePath, Path srcf, Path destf, Path oldPath,
       if (oldPath != null) {
         // Note: we assume lbLevels is 0 here. Same as old code for non-MM.
         //       For MM tables, this can only be a LOAD command. Does LOAD even support LB?
-        deleteOldPathForReplace(destf, oldPath, conf, purge, deletePathFilter, isMmTableOverwrite, 0);
+        deleteOldPathForReplace(destf, oldPath, conf, purge, deletePathFilter, isMmTableOverwrite, 0, isNeedRecycle);
       }
 
       // first call FileUtils.mkdir to make sure that destf directory exists, if not, it creates
@@ -4016,7 +4016,7 @@
   }
 
   private void deleteOldPathForReplace(Path destPath, Path oldPath, HiveConf conf, boolean purge,
-      PathFilter pathFilter, boolean isMmTableOverwrite, int lbLevels) throws HiveException {
+      PathFilter pathFilter, boolean isMmTableOverwrite, int lbLevels, boolean isNeedRecycle) throws HiveException {
     Utilities.FILE_OP_LOGGER.debug("Deleting old paths for replace in " + destPath
         + " and old path " + oldPath);
     boolean isOldPathUnderDestf = false;
@@ -4030,7 +4030,7 @@ private void deleteOldPathForReplace(Path destPath, Path oldPath, HiveConf conf,
       isOldPathUnderDestf = isSubDir(oldPath, destPath, oldFs, destFs, false);
       if (isOldPathUnderDestf || isMmTableOverwrite) {
         if (lbLevels == 0 || !isMmTableOverwrite) {
-          cleanUpOneDirectoryForReplace(oldPath, oldFs, pathFilter, conf, purge);
+          cleanUpOneDirectoryForReplace(oldPath, oldFs, pathFilter, conf, purge, isNeedRecycle);
         }
       }
     } catch (IOException e) {
@@ -4047,8 +4047,8 @@
   }
 
   private void cleanUpOneDirectoryForReplace(Path path, FileSystem fs,
-      PathFilter pathFilter, HiveConf conf, boolean purge) throws IOException, HiveException {
-    if (conf.getBoolVar(HiveConf.ConfVars.REPLCMENABLED)) {
+      PathFilter pathFilter, HiveConf conf, boolean purge, boolean isNeedRecycle) throws IOException, HiveException {
+    if (isNeedRecycle && conf.getBoolVar(HiveConf.ConfVars.REPLCMENABLED)) {
       recycleDirToCmPath(path, purge);
     }
     FileStatus[] statuses = fs.listStatus(path, pathFilter);
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java
index a22d06836b..d79b6ed059 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java
@@ -643,7 +643,7 @@ private void dropTempTable(org.apache.hadoop.hive.metastore.api.Table table, boo
       if (envContext != null){
         ifPurge = Boolean.parseBoolean(envContext.getProperties().get("ifPurge"));
       }
-      getWh().deleteDir(tablePath, true, ifPurge);
+      getWh().deleteDir(tablePath, true, ifPurge, false);
     } catch (Exception err) {
       LOG.error("Failed to delete temp table directory: " + tablePath, err);
       // Forgive error
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/Warehouse.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/Warehouse.java
index 2d52e0edda..20c10607bb 100755
--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/Warehouse.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/Warehouse.java
@@ -228,7 +228,14 @@ public boolean deleteDir(Path f, boolean recursive) throws MetaException {
   }
 
   public boolean deleteDir(Path f, boolean recursive, boolean ifPurge) throws MetaException {
-    cm.recycle(f, RecycleType.MOVE, ifPurge);
+    return deleteDir(f, recursive, ifPurge, true);
+  }
+
+  public boolean deleteDir(Path f, boolean recursive, boolean ifPurge, boolean needCmRecycle) throws MetaException {
+    // no need to create the CM recycle file for temporary tables
+    if (needCmRecycle) {
+      cm.recycle(f, RecycleType.MOVE, ifPurge);
+    }
     FileSystem fs = getFs(f);
     return fsHandler.deleteDir(fs, f, recursive, ifPurge, conf);
   }
diff --git a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/minihms/AbstractMetaStoreService.java b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/minihms/AbstractMetaStoreService.java
index 1cc2843702..b549c7cf87 100644
--- a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/minihms/AbstractMetaStoreService.java
+++ b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/minihms/AbstractMetaStoreService.java
@@ -148,8 +148,8 @@ public void createFile(Path path, String content) throws IOException {
    * @throws MetaException IO failure
    */
  public void cleanWarehouseDirs() throws MetaException {
-    warehouse.deleteDir(getWarehouseRoot(), true, true);
-    warehouse.deleteDir(trashDir, true, true);
+    warehouse.deleteDir(getWarehouseRoot(), true, true, false);
+    warehouse.deleteDir(trashDir, true, true, false);
  }
 
 /**
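Note on the shape of the change: the patch threads a single boolean from the call sites that know the data belongs to a temporary table (dropTempTable in SessionHiveMetaStoreClient, the replaceFiles paths in Hive.java) down to the point where files would otherwise be copied into the REPLCMDIR change-management directory, so dropping or overwriting a temp table no longer leaves CM copies behind. The old Warehouse.deleteDir(Path, boolean, boolean) overload delegates to the new four-argument one with needCmRecycle=true, which is why no other caller needs to change. The following is a minimal standalone sketch of that overload-delegation pattern, not Hive's actual classes: DeleteDirSketch, recycleToCmDir, and doDelete are hypothetical stand-ins for Warehouse, ReplChangeManager's recycle, and the filesystem delete.

import java.nio.file.Path;
import java.nio.file.Paths;

// Sketch of the pattern applied to Warehouse.deleteDir: the legacy overload
// delegates with needCmRecycle defaulting to true, so existing call sites
// keep the recycle-then-delete behavior, while temp-table call sites opt out.
public class DeleteDirSketch {
  private final boolean replCmEnabled; // stands in for HiveConf.ConfVars.REPLCMENABLED

  public DeleteDirSketch(boolean replCmEnabled) {
    this.replCmEnabled = replCmEnabled;
  }

  // Old signature: unchanged behavior for all existing callers.
  public boolean deleteDir(Path f, boolean recursive, boolean ifPurge) {
    return deleteDir(f, recursive, ifPurge, true);
  }

  // New signature: temp-table callers pass needCmRecycle=false and skip
  // the change-management copy entirely.
  public boolean deleteDir(Path f, boolean recursive, boolean ifPurge, boolean needCmRecycle) {
    if (needCmRecycle && replCmEnabled) {
      recycleToCmDir(f, ifPurge); // stand-in for cm.recycle(f, RecycleType.MOVE, ifPurge)
    }
    return doDelete(f, recursive, ifPurge); // stand-in for fsHandler.deleteDir(...)
  }

  private void recycleToCmDir(Path f, boolean ifPurge) {
    System.out.println("recycle " + f + " into CM dir (ifPurge=" + ifPurge + ")");
  }

  private boolean doDelete(Path f, boolean recursive, boolean ifPurge) {
    System.out.println("delete " + f + " (recursive=" + recursive + ")");
    return true;
  }

  public static void main(String[] args) {
    DeleteDirSketch wh = new DeleteDirSketch(true);
    wh.deleteDir(Paths.get("/warehouse/db/managed_tbl"), true, false);    // recycles, then deletes
    wh.deleteDir(Paths.get("/warehouse/db/tmp_tbl"), true, false, false); // temp table: deletes only
  }
}

The test above exercises exactly this contract: it records the CM directory's file count via ContentSummary, then checks that INSERT OVERWRITE, TRUNCATE, RENAME, and DROP on a temporary table leave that count unchanged.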