diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenarios.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenarios.java
index 7ce66983d0..8370f087d4 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenarios.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenarios.java
@@ -18,10 +18,11 @@
 package org.apache.hadoop.hive.ql.parse;
 
 import org.apache.commons.io.FileUtils;
+import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.PathFilter;
 import org.apache.hadoop.hive.cli.CliSessionState;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
@@ -3497,6 +3498,35 @@ public void testAuthForNotificationAPIs() throws Exception {
     }
   }
 
+  @Test
+  public void testRecycleFileDropTempTable() throws IOException {
+    String testName = "tempTable";
+    String dbName = createDB(testName, driver);
+
+    run("CREATE TABLE " + dbName + ".normal(a int)", driver);
+    run("INSERT INTO " + dbName + ".normal values (1)", driver);
+    run("DROP TABLE " + dbName + ".normal", driver);
+
+    // The managed table drop should have recycled its data file into the CM root
+    String cmDir = hconf.getVar(HiveConf.ConfVars.REPLCMDIR);
+    Path path = new Path(cmDir);
+    FileSystem fs = path.getFileSystem(hconf);
+    ContentSummary cs = fs.getContentSummary(path);
+    long fileCount = cs.getFileCount();
+
+    assertTrue(fileCount != 0);
+
+    run("CREATE TEMPORARY TABLE " + dbName + ".temp(a int)", driver);
+    run("INSERT INTO " + dbName + ".temp values (1)", driver);
+    run("DROP TABLE " + dbName + ".temp", driver);
+
+    // The temp table drop should leave the CM root unchanged
+    cs = fs.getContentSummary(path);
+    long fileCountAfter = cs.getFileCount();
+
+    assertEquals(fileCount, fileCountAfter);
+  }
+
   private NotificationEvent createDummyEvent(String dbname, String tblname, long evid) {
     MessageFactory msgFactory = MessageFactory.getInstance();
     Table t = new Table();
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java
index 80c7804dc1..20dd436649 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java
@@ -643,7 +643,9 @@ private void dropTempTable(org.apache.hadoop.hive.metastore.api.Table table, boo
       if (envContext != null){
         ifPurge = Boolean.parseBoolean(envContext.getProperties().get("ifPurge"));
       }
-      getWh().deleteDir(tablePath, true, ifPurge);
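+      // Temp table data is local to this session and is never replicated,
+      // so there is nothing for the ChangeManager to recycle on drop.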
+      getWh().deleteDir(tablePath, true, ifPurge, false);
     } catch (Exception err) {
       LOG.error("Failed to delete temp table directory: " + tablePath, err);
       // Forgive error
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
index 0683440575..9fa5e06551 100644
--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
@@ -2037,7 +2037,7 @@ private void deleteTableData(Path tablePath, boolean ifPurge) {
     if (tablePath != null) {
       try {
-        wh.deleteDir(tablePath, true, ifPurge);
+        wh.deleteDir(tablePath, true, ifPurge, true);
       } catch (Exception e) {
         LOG.error("Failed to delete table directory: " + tablePath +
             " " + e.getMessage());
@@ -2067,7 +2067,7 @@ private void deletePartitionData(List<Path> partPaths, boolean ifPurge) {
     if (partPaths != null && !partPaths.isEmpty()) {
       for (Path partPath : partPaths) {
         try {
-          wh.deleteDir(partPath, true, ifPurge);
+          wh.deleteDir(partPath, true, ifPurge, true);
         } catch (Exception e) {
           LOG.error("Failed to delete partition directory: " + partPath +
               " " + e.getMessage());
@@ -3508,10 +3508,10 @@ private boolean drop_partition_common(RawStore ms, String db_name, String tbl_na
       // The original directory was saved in params
       if (isArchived) {
         assert (archiveParentDir != null);
-        wh.deleteDir(archiveParentDir, true, mustPurge);
+        wh.deleteDir(archiveParentDir, true, mustPurge, true);
       } else {
         assert (partPath != null);
-        wh.deleteDir(partPath, true, mustPurge);
+        wh.deleteDir(partPath, true, mustPurge, true);
         deleteParentRecursive(partPath.getParent(), part_vals.size() - 1, mustPurge);
       }
       // ok even if the data is not deleted
@@ -3543,7 +3543,7 @@ private static boolean isMustPurge(EnvironmentContext envContext, Table tbl) {
   private void deleteParentRecursive(Path parent, int depth, boolean mustPurge) throws IOException, MetaException {
     if (depth > 0 && parent != null && wh.isWritable(parent)) {
       if (wh.isDir(parent) && wh.isEmpty(parent)) {
-        wh.deleteDir(parent, true, mustPurge);
+        wh.deleteDir(parent, true, mustPurge, true);
       }
       deleteParentRecursive(parent.getParent(), depth - 1, mustPurge);
     }
@@ -3693,7 +3693,7 @@ public DropPartitionsResult drop_partitions_req(
       // Archived partitions have har:/to_har_file as their location.
       // The original directory was saved in params
       for (Path path : archToDelete) {
-        wh.deleteDir(path, true, mustPurge);
+        wh.deleteDir(path, true, mustPurge, true);
       }
       for (PathAndPartValSize p : dirsToDelete) {
-        wh.deleteDir(p.path, true, mustPurge);
+        wh.deleteDir(p.path, true, mustPurge, true);
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/Warehouse.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/Warehouse.java
index 2d52e0edda..797519d5c7 100755
--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/Warehouse.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/Warehouse.java
@@ -224,11 +224,20 @@ void addToChangeManagement(Path file) throws MetaException {
   }
 
   public boolean deleteDir(Path f, boolean recursive) throws MetaException {
-    return deleteDir(f, recursive, false);
+    return deleteDir(f, recursive, false, true);
   }
 
-  public boolean deleteDir(Path f, boolean recursive, boolean ifPurge) throws MetaException {
-    cm.recycle(f, RecycleType.MOVE, ifPurge);
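+  /**
+   * @param needCmRecycle whether to first recycle the directory into the
+   *                      ChangeManager root so that replication can still
+   *                      replay the data; pass false only when the data is
+   *                      not subject to replication, e.g. temporary tables
+   */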
+  public boolean deleteDir(Path f, boolean recursive, boolean ifPurge, boolean needCmRecycle) throws MetaException {
+    // no need to create the CM recycle file for temporary tables
+    if (needCmRecycle) {
+      cm.recycle(f, RecycleType.MOVE, ifPurge);
+    }
     FileSystem fs = getFs(f);
     return fsHandler.deleteDir(fs, f, recursive, ifPurge, conf);
   }