diff --git hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatPartitionPublish.java hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatPartitionPublish.java
index be7134f..8e68ed3 100644
--- hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatPartitionPublish.java
+++ hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatPartitionPublish.java
@@ -89,6 +89,7 @@ public static void setup() throws Exception {
     File workDir = handleWorkDir();
     conf.set("yarn.scheduler.capacity.root.queues", "default");
     conf.set("yarn.scheduler.capacity.root.default.capacity", "100");
+    conf.set("fs.pfile.impl", "org.apache.hadoop.fs.ProxyLocalFileSystem");
     fs = FileSystem.get(conf);
     System.setProperty("hadoop.log.dir", new File(workDir, "/logs").getAbsolutePath());
diff --git hcatalog/webhcat/svr/src/test/java/org/apache/hive/hcatalog/templeton/tool/TestTempletonUtils.java hcatalog/webhcat/svr/src/test/java/org/apache/hive/hcatalog/templeton/tool/TestTempletonUtils.java
index af952f2..49155b3 100644
--- hcatalog/webhcat/svr/src/test/java/org/apache/hive/hcatalog/templeton/tool/TestTempletonUtils.java
+++ hcatalog/webhcat/svr/src/test/java/org/apache/hive/hcatalog/templeton/tool/TestTempletonUtils.java
@@ -315,10 +315,10 @@ public void testPropertiesParsing() throws Exception {
   @Test
   public void testFindContainingJar() throws Exception {
     String result = TempletonUtils.findContainingJar(ShimLoader.class, ".*hive-shims.*");
-    Assert.assertNotNull(result);
+    Assert.assertNotNull("cannot find jar for ShimLoader class in .*hive-shims.*", result);
     result = TempletonUtils.findContainingJar(HadoopShimsSecure.class, ".*hive-shims.*");
-    Assert.assertNotNull(result);
+    Assert.assertNotNull("cannot find jar for HadoopShimsSecure class in .*hive-shims.*", result);
     result = TempletonUtils.findContainingJar(HadoopShimsSecure.class, ".*unknownjar.*");
-    Assert.assertNull(result);
+    Assert.assertNull("unexpectedly found jar for HadoopShimsSecure class: " + result, result);
   }
 }
diff --git itests/hive-unit/src/test/java/org/apache/hive/jdbc/miniHS2/TestHiveServer2.java itests/hive-unit/src/test/java/org/apache/hive/jdbc/miniHS2/TestHiveServer2.java
index da51a55..5e1b260 100644
--- itests/hive-unit/src/test/java/org/apache/hive/jdbc/miniHS2/TestHiveServer2.java
+++ itests/hive-unit/src/test/java/org/apache/hive/jdbc/miniHS2/TestHiveServer2.java
@@ -63,7 +63,7 @@ public void testConnection() throws Exception {
     String tabName = "testTab1";
     CLIServiceClient serviceClient = miniHS2.getServiceClient();
    SessionHandle sessHandle = serviceClient.openSession("foo", "bar");
-    serviceClient.executeStatement(sessHandle, "DROP TABLE IF EXISTS tab", confOverlay);
+    serviceClient.executeStatement(sessHandle, "DROP TABLE IF EXISTS " + tabName, confOverlay);
     serviceClient.executeStatement(sessHandle, "CREATE TABLE " + tabName + " (id INT)", confOverlay);
     OperationHandle opHandle = serviceClient.executeStatement(sessHandle, "SHOW TABLES", confOverlay);
     RowSet rowSet = serviceClient.fetchResults(opHandle);
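The three WebHCat assertion changes above use JUnit's message-bearing overloads, which print the supplied context only when the assertion fails. A minimal standalone illustration (the class name is hypothetical, not part of the patch):

    import org.junit.Assert;

    public class AssertMessageSketch {
      public static void main(String[] args) {
        // Stand-in for TempletonUtils.findContainingJar(...): null means "no jar matched".
        String result = null;
        // On failure this reports the offending value instead of a bare AssertionError,
        // which is exactly what the TestTempletonUtils change above is after.
        Assert.assertNull("unexpectedly found jar for HadoopShimsSecure class: " + result, result);
      }
    }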
diff --git metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
index 84ef5f9..9379cfb 100644
--- metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
+++ metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
@@ -1407,7 +1407,7 @@ private void drop_table_core(final RawStore ms, final String dbname, final Strin
         // Delete the data in the partitions which have other locations
         deletePartitionData(partPaths);
         // Delete the data in the table
-        deleteTableData(tblPath);
+        deleteTableData(tblPath, envContext);
         // ok even if the data is not deleted
       }
       for (MetaStoreEventListener listener : listeners) {
@@ -1424,9 +1424,24 @@ private void drop_table_core(final RawStore ms, final String dbname, final Strin
    * @param tablePath
    */
   private void deleteTableData(Path tablePath) {
+    deleteTableData(tablePath, null);
+  }
+
+  /**
+   * Deletes the data in a table's location; if the delete fails, logs an error.
+   *
+   * @param tablePath
+   * @param envContext
+   */
+  private void deleteTableData(Path tablePath, EnvironmentContext envContext) {
+
     if (tablePath != null) {
       try {
-        wh.deleteDir(tablePath, true);
+        boolean ifPurge = false;
+        if (envContext != null) {
+          ifPurge = Boolean.parseBoolean(envContext.getProperties().get("ifPurge"));
+        }
+        wh.deleteDir(tablePath, true, ifPurge);
       } catch (Exception e) {
         LOG.error("Failed to delete table directory: " + tablePath + " " + e.getMessage());
@@ -1441,10 +1456,25 @@ private void deleteTableData(Path tablePath) {
    * @param partPaths
    */
   private void deletePartitionData(List<Path> partPaths) {
+    deletePartitionData(partPaths, null);
+  }
+
+  /**
+   * Given a list of partitions' locations, tries to delete each one
+   * and logs an error for each delete that fails.
+   *
+   * @param partPaths
+   * @param envContext
+   */
+  private void deletePartitionData(List<Path> partPaths, EnvironmentContext envContext) {
     if (partPaths != null && !partPaths.isEmpty()) {
       for (Path partPath : partPaths) {
         try {
-          wh.deleteDir(partPath, true);
+          boolean ifPurge = false;
+          if (envContext != null) {
+            ifPurge = Boolean.parseBoolean(envContext.getProperties().get("ifPurge"));
+          }
+          wh.deleteDir(partPath, true, ifPurge);
         } catch (Exception e) {
           LOG.error("Failed to delete partition directory: " + partPath + " " + e.getMessage());
diff --git metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
index 237166e..bedce33 100644
--- metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
+++ metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
@@ -799,6 +799,34 @@ public void dropTable(String dbname, String name, boolean deleteData,
     dropTable(dbname, name, deleteData, ignoreUnknownTab, null);
   }
 
+  /**
+   * @param dbname
+   * @param name
+   * @param deleteData
+   *          delete the underlying data or just delete the table in metadata
+   * @param ifPurge
+   *          completely purge the table (skipping trash) while removing data from warehouse
+   * @throws NoSuchObjectException
+   * @throws ExistingDependentsException
+   * @throws MetaException
+   * @throws TException
+   * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#drop_table(java.lang.String,
+   *      java.lang.String, boolean)
+   */
+  public void dropTable(String dbname, String name, boolean deleteData,
+      boolean ignoreUnknownTab, boolean ifPurge) throws MetaException, TException,
+      NoSuchObjectException, UnsupportedOperationException {
+    // build a new EnvironmentContext carrying ifPurge
+    EnvironmentContext envContext = null;
+    if (ifPurge) {
+      Map<String, String> warehouseOptions = null;
+      warehouseOptions = new HashMap<String, String>();
+      warehouseOptions.put("ifPurge", "TRUE");
+      envContext = new EnvironmentContext(warehouseOptions);
+    }
+    dropTable(dbname, name, deleteData, ignoreUnknownTab, envContext);
+  }
+
   public void dropTable(String dbname, String name, boolean deleteData,
       boolean ignoreUnknownTab, EnvironmentContext envContext)
       throws MetaException, TException, NoSuchObjectException, UnsupportedOperationException {
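The two files above are the heart of the change: rather than extending the Thrift drop_table API, the client carries the purge request through the existing EnvironmentContext parameter, and the metastore unpacks it before deciding how to delete. A condensed sketch of that round trip, using only the patch's own API (variable names are illustrative):

    import java.util.HashMap;
    import java.util.Map;

    import org.apache.hadoop.hive.metastore.api.EnvironmentContext;

    public class PurgeFlagRoundTrip {
      public static void main(String[] args) {
        // Client side (what HiveMetaStoreClient#dropTable does when ifPurge == true):
        Map<String, String> opts = new HashMap<String, String>();
        opts.put("ifPurge", "TRUE");
        EnvironmentContext ctx = new EnvironmentContext(opts);

        // Server side (HiveMetaStore#deleteTableData / #deletePartitionData):
        // a missing or non-"true" property parses to false, so trash stays the default.
        boolean ifPurge = ctx.getProperties() != null
            && Boolean.parseBoolean(ctx.getProperties().get("ifPurge"));
        System.out.println("ifPurge = " + ifPurge); // prints: ifPurge = true
      }
    }

Because the flag rides in a string map, old clients and old servers interoperate cleanly: each side simply ignores a property it does not know.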
      throws MetaException, TException, NoSuchObjectException, UnsupportedOperationException {
diff --git metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreFsImpl.java metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreFsImpl.java
index cff0718..c3598db 100644
--- metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreFsImpl.java
+++ metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreFsImpl.java
@@ -37,12 +37,14 @@
   @Override
   public boolean deleteDir(FileSystem fs, Path f, boolean recursive,
-      Configuration conf) throws MetaException {
+      boolean ifPurge, Configuration conf) throws MetaException {
     LOG.info("deleting  " + f);
     HadoopShims hadoopShim = ShimLoader.getHadoopShims();
 
     try {
-      if (hadoopShim.moveToAppropriateTrash(fs, f, conf)) {
+      if (ifPurge) {
+        LOG.info("Not moving " + f + " to trash");
+      } else if (hadoopShim.moveToAppropriateTrash(fs, f, conf)) {
         LOG.info("Moved to trash: " + f);
         return true;
       }
diff --git metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
index 143d1c7..96e1c26 100644
--- metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
+++ metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
@@ -201,6 +201,27 @@ void dropTable(String dbname, String tableName, boolean deleteData,
       NoSuchObjectException;
 
   /**
+   * Drop the table and choose whether to save the data in the trash.
+   *
+   * @param dbname
+   *          The database for this table
+   * @param tableName
+   *          The table to drop
+   * @param ifPurge
+   *          completely purge the table (skipping trash) while removing data from warehouse
+   * @throws MetaException
+   *           Could not drop table properly.
+   * @throws NoSuchObjectException
+   *           The table wasn't found.
+   * @throws TException
+   *           A thrift communication error occurred
+   * @throws ExistingDependentsException
+   */
+  public void dropTable(String dbname, String tableName, boolean deleteData,
+      boolean ignoreUnknownTab, boolean ifPurge) throws MetaException, TException,
+      NoSuchObjectException;
+
+  /**
    * Drop the table in the DEFAULT database.
    *
    * @param tableName
diff --git metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreFS.java metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreFS.java
index a141793..4f525a4 100644
--- metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreFS.java
+++ metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreFS.java
@@ -32,11 +32,12 @@
    * delete a directory
    *
    * @param f
    * @param recursive
+   * @param ifPurge
    * @return true on success
    * @throws MetaException
    */
   public boolean deleteDir(FileSystem fs, Path f, boolean recursive,
-      Configuration conf) throws MetaException;
+      boolean ifPurge, Configuration conf) throws MetaException;
 }
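HiveMetaStoreFsImpl#deleteDir above is where the flag finally changes behavior: purge short-circuits the move-to-trash attempt and falls through to a plain recursive delete. A simplified sketch of the same decision, written against Hadoop's org.apache.hadoop.fs.Trash instead of the Hive shim layer (an assumption made here only to keep the example self-contained):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.Trash;

    final class DeleteDirSketch {
      static boolean deleteDir(FileSystem fs, Path f, boolean recursive,
          boolean ifPurge, Configuration conf) throws Exception {
        // Only consider trash when the caller did not ask for a purge.
        if (!ifPurge && Trash.moveToAppropriateTrash(fs, f, conf)) {
          return true; // data remains recoverable under the user's .Trash directory
        }
        // Purge requested, or trash disabled/unavailable: delete outright.
        return fs.delete(f, recursive);
      }
    }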
diff --git metastore/src/java/org/apache/hadoop/hive/metastore/Warehouse.java metastore/src/java/org/apache/hadoop/hive/metastore/Warehouse.java
index 613b709..33b8dcf 100755
--- metastore/src/java/org/apache/hadoop/hive/metastore/Warehouse.java
+++ metastore/src/java/org/apache/hadoop/hive/metastore/Warehouse.java
@@ -224,8 +224,12 @@ public boolean renameDir(Path sourcePath, Path destPath, boolean inheritPerms) t
   }
 
   public boolean deleteDir(Path f, boolean recursive) throws MetaException {
+    return deleteDir(f, recursive, false);
+  }
+
+  public boolean deleteDir(Path f, boolean recursive, boolean ifPurge) throws MetaException {
     FileSystem fs = getFs(f);
-    return fsHandler.deleteDir(fs, f, recursive, conf);
+    return fsHandler.deleteDir(fs, f, recursive, ifPurge, conf);
   }
 
   public boolean isEmpty(Path path) throws IOException, MetaException {
diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
index cd017d8..5124428 100644
--- ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
@@ -3660,7 +3660,7 @@ private void dropTable(Hive db, Table tbl, DropTableDesc dropTbl) throws HiveExc
     }
 
     // drop the table
-    db.dropTable(dropTbl.getTableName());
+    db.dropTable(dropTbl.getTableName(), dropTbl.getIfPurge());
     if (tbl != null) {
       // We have already locked the table in DDLSemanticAnalyzer, don't do it again here
       work.getOutputs().add(new WriteEntity(tbl, WriteEntity.WriteType.DDL_NO_LOCK));
diff --git ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
index 6f225f3..2480ac3 100644
--- ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
+++ ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
@@ -854,6 +854,23 @@ public boolean dropIndex(String db_name, String tbl_name, String index_name, boo
 
   /**
    * Drops table along with the data in it. If the table doesn't exist then it
+   * is a no-op. If the ifPurge option is specified, it is passed to the
+   * HDFS command that removes table data from the warehouse so that it skips the trash.
+   *
+   * @param tableName
+   *          table to drop
+   * @param ifPurge
+   *          completely purge the table (skipping trash) while removing data from warehouse
+   * @throws HiveException
+   *           thrown if the drop fails
+   */
+  public void dropTable(String tableName, boolean ifPurge) throws HiveException {
+    String[] names = Utilities.getDbTableName(tableName);
+    dropTable(names[0], names[1], true, true, ifPurge);
+  }
+
+  /**
+   * Drops table along with the data in it. If the table doesn't exist then it
    * is a no-op
    *
    * @param tableName
@@ -862,8 +879,7 @@ public boolean dropIndex(String db_name, String tbl_name, String index_name, boo
    *           thrown if the drop fails
    */
   public void dropTable(String tableName) throws HiveException {
-    String[] names = Utilities.getDbTableName(tableName);
-    dropTable(names[0], names[1], true, true);
+    dropTable(tableName, false);
   }
 
   /**
@@ -878,7 +894,7 @@ public void dropTable(String tableName) throws HiveException {
    *           thrown if the drop fails
    */
   public void dropTable(String dbName, String tableName) throws HiveException {
-    dropTable(dbName, tableName, true, true);
+    dropTable(dbName, tableName, true, true, false);
   }
 
   /**
@@ -894,9 +910,26 @@ public void dropTable(String dbName, String tableName) throws HiveException {
    */
   public void dropTable(String dbName, String tableName, boolean deleteData,
       boolean ignoreUnknownTab) throws HiveException {
+    dropTable(dbName, tableName, deleteData, ignoreUnknownTab, false);
+  }
+  /**
+   * Drops the table.
+   *
+   * @param dbName
+   * @param tableName
+   * @param deleteData
+   *          deletes the underlying data along with metadata
+   * @param ignoreUnknownTab
+   *          an exception is thrown if this is false and the table doesn't exist
+   * @param ifPurge
+   *          completely purge the table, skipping trash, while removing data from warehouse
+   * @throws HiveException
+   */
+  public void dropTable(String dbName, String tableName, boolean deleteData,
+      boolean ignoreUnknownTab, boolean ifPurge) throws HiveException {
     try {
-      getMSC().dropTable(dbName, tableName, deleteData, ignoreUnknownTab);
+      getMSC().dropTable(dbName, tableName, deleteData, ignoreUnknownTab, ifPurge);
     } catch (NoSuchObjectException e) {
       if (!ignoreUnknownTab) {
         throw new HiveException(e);
@@ -1710,6 +1743,7 @@ public boolean dropPartition(String db_name, String tbl_name,
   public List<Partition> dropPartitions(String dbName, String tblName,
       List<DropTableDesc.PartSpec> partSpecs, boolean deleteData, boolean ignoreProtection,
       boolean ifExists) throws HiveException {
+    // TODO: add support for ifPurge
     try {
       Table tbl = getTable(dbName, tblName);
       List<ObjectPair<Integer, byte[]>> partExprs =
diff --git ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java
index 4cf98d8..551d73f 100644
--- ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java
+++ ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java
@@ -349,7 +349,11 @@ private void dropTempTable(org.apache.hadoop.hive.metastore.api.Table table, boo
     // Delete table data
     if (deleteData && !MetaStoreUtils.isExternalTable(table)) {
       try {
-        getWh().deleteDir(tablePath, true);
+        boolean ifPurge = false;
+        if (envContext != null) {
+          ifPurge = Boolean.parseBoolean(envContext.getProperties().get("ifPurge"));
+        }
+        getWh().deleteDir(tablePath, true, ifPurge);
       } catch (Exception err) {
         LOG.error("Failed to delete temp table directory: " + tablePath, err);
         // Forgive error
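With the Hive-level overloads above in place, embedded API users get the same knob as SQL users. A minimal sketch, assuming an environment where a HiveConf and metastore connection are already usable (session setup is elided; the table name is illustrative):

    import org.apache.hadoop.hive.conf.HiveConf;
    import org.apache.hadoop.hive.ql.metadata.Hive;
    import org.apache.hadoop.hive.ql.metadata.HiveException;

    public class DropWithPurge {
      public static void main(String[] args) throws HiveException {
        Hive db = Hive.get(new HiveConf());
        // Equivalent to "DROP TABLE default.some_table PURGE": the data is
        // deleted immediately rather than moved into the trash checkpoint.
        db.dropTable("default", "some_table", /* deleteData */ true,
            /* ignoreUnknownTab */ true, /* ifPurge */ true);
      }
    }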
diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
index f31a409..b338bdf 100644
--- ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
+++ ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
@@ -884,7 +884,8 @@ private void analyzeDropTable(ASTNode ast, boolean expectView)
       outputs.add(new WriteEntity(tab, WriteEntity.WriteType.DDL_EXCLUSIVE));
     }
 
-    DropTableDesc dropTblDesc = new DropTableDesc(tableName, expectView, ifExists);
+    boolean ifPurge = (ast.getFirstChildWithType(HiveParser.KW_PURGE) != null);
+    DropTableDesc dropTblDesc = new DropTableDesc(tableName, expectView, ifExists, ifPurge);
     rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), dropTblDesc), conf));
   }
 
diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g
index 32db0c7..cb8877c 100644
--- ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g
+++ ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g
@@ -479,8 +479,8 @@ import java.util.HashMap;
     xlateMap.put("KW_SUBQUERY", "SUBQUERY");
     xlateMap.put("KW_REWRITE", "REWRITE");
     xlateMap.put("KW_UPDATE", "UPDATE");
-    xlateMap.put("KW_VALUES", "VALUES");
+    xlateMap.put("KW_PURGE", "PURGE");
 
     // Operators
     xlateMap.put("DOT", ".");
@@ -929,7 +929,7 @@ dropIndexStatement
 dropTableStatement
 @init { pushMsg("drop statement", state); }
 @after { popMsg(state); }
-    : KW_DROP KW_TABLE ifExists? tableName -> ^(TOK_DROPTABLE tableName ifExists?)
+    : KW_DROP KW_TABLE ifExists? tableName KW_PURGE? -> ^(TOK_DROPTABLE tableName ifExists? KW_PURGE?)
     ;
 
 alterStatement
diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/DropTableDesc.java ql/src/java/org/apache/hadoop/hive/ql/plan/DropTableDesc.java
index ba30e1f..1e9b543 100644
--- ql/src/java/org/apache/hadoop/hive/ql/plan/DropTableDesc.java
+++ ql/src/java/org/apache/hadoop/hive/ql/plan/DropTableDesc.java
@@ -52,6 +52,7 @@ public int getPrefixLength() {
   ArrayList<PartSpec> partSpecs;
   boolean expectView;
   boolean ifExists;
+  boolean ifPurge;
   boolean ignoreProtection;
 
   public DropTableDesc() {
@@ -59,12 +60,14 @@ public DropTableDesc() {
 
   /**
    * @param tableName
+   * @param ifPurge
    */
-  public DropTableDesc(String tableName, boolean expectView, boolean ifExists) {
+  public DropTableDesc(String tableName, boolean expectView, boolean ifExists, boolean ifPurge) {
     this.tableName = tableName;
     this.partSpecs = null;
     this.expectView = expectView;
     this.ifExists = ifExists;
+    this.ifPurge = ifPurge;
     this.ignoreProtection = false;
   }
 
@@ -149,4 +152,19 @@ public boolean getIfExists() {
   public void setIfExists(boolean ifExists) {
     this.ifExists = ifExists;
   }
+
+  /**
+   * @return whether Purge was specified
+   */
+  public boolean getIfPurge() {
+    return ifPurge;
+  }
+
+  /**
+   * @param ifPurge
+   *          set whether Purge was specified
+   */
+  public void setIfPurge(boolean ifPurge) {
+    this.ifPurge = ifPurge;
+  }
 }
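End to end, the grammar, analyzer, and descriptor changes above surface PURGE as an optional trailing keyword on DROP TABLE, and it composes with IF EXISTS:

    -- default behavior: table data is moved to the current user's trash (when trash is enabled)
    DROP TABLE IF EXISTS t1;

    -- PURGE: table data is deleted immediately, bypassing trash
    DROP TABLE IF EXISTS t2 PURGE;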
diff --git ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHive.java ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHive.java
index 406aae9..9fc99fc 100755
--- ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHive.java
+++ ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHive.java
@@ -18,9 +18,12 @@
 
 package org.apache.hadoop.hive.ql.metadata;
 
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_CHECKPOINT_INTERVAL_KEY;
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_KEY;
 import static org.apache.hadoop.hive.metastore.MetaStoreUtils.DEFAULT_DATABASE_NAME;
 
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.HashMap;
 import java.util.LinkedList;
 import java.util.List;
@@ -28,7 +31,10 @@
 
 import junit.framework.TestCase;
 
+import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.TrashPolicy;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
 import org.apache.hadoop.hive.metastore.MetaStoreUtils;
@@ -63,6 +69,9 @@ protected void setUp() throws Exception {
     super.setUp();
     hiveConf = new HiveConf(this.getClass());
+    // enable trash so it can be tested
+    hiveConf.setFloat(FS_TRASH_CHECKPOINT_INTERVAL_KEY, 30);
+    hiveConf.setFloat(FS_TRASH_INTERVAL_KEY, 30);
     SessionState.start(hiveConf);
     try {
       hm = Hive.get(hiveConf);
@@ -79,6 +88,9 @@ protected void tearDown() throws Exception {
     try {
       super.tearDown();
+      // disable trash
+      hiveConf.setFloat(FS_TRASH_CHECKPOINT_INTERVAL_KEY, 0);
+      hiveConf.setFloat(FS_TRASH_INTERVAL_KEY, 0);
       Hive.closeCurrent();
     } catch (Exception e) {
       System.err.println(StringUtils.stringifyException(e));
@@ -294,7 +306,7 @@ public void testGetAndDropTables() throws Throwable {
     try {
       String dbName = "db_for_testgettables";
       String table1Name = "table1";
-      hm.dropDatabase(dbName, true, true);
+      hm.dropDatabase(dbName, true, true, true);
 
       Database db = new Database();
       db.setName(dbName);
@@ -330,16 +342,92 @@ public void testGetAndDropTables() throws Throwable {
 
       // Drop all tables
       for (String tableName : hm.getAllTables(dbName)) {
+        Table table = hm.getTable(dbName, tableName);
         hm.dropTable(dbName, tableName);
+        assertFalse(fs.exists(table.getPath()));
       }
       hm.dropDatabase(dbName);
     } catch (Throwable e) {
       System.err.println(StringUtils.stringifyException(e));
-      System.err.println("testGetTables() failed");
+      System.err.println("testGetAndDropTables() failed");
       throw e;
     }
   }
 
+  public void testDropTableTrash() throws Throwable {
+    try {
+      String dbName = "db_for_testdroptable";
+      hm.dropDatabase(dbName, true, true, true);
+
+      Database db = new Database();
+      db.setName(dbName);
+      hm.createDatabase(db);
+
+      List<String> ts = new ArrayList<String>(2);
+      String tableBaseName = "droptable";
+      ts.add(tableBaseName + "1");
+      ts.add(tableBaseName + "2");
+      Table tbl1 = createTestTable(dbName, ts.get(0));
+      hm.createTable(tbl1);
+      Table tbl2 = createTestTable(dbName, ts.get(1));
+      hm.createTable(tbl2);
+      // test dropping tables and trash behavior
+      Table table1 = hm.getTable(dbName, ts.get(0));
+      assertNotNull(table1);
+      assertEquals(ts.get(0), table1.getTableName());
+      Path path1 = table1.getPath();
+      FileSystem fs = path1.getFileSystem(hiveConf);
+      assertTrue(fs.exists(path1));
+      // drop table and check that trash works
+      TrashPolicy tp = TrashPolicy.getInstance(hiveConf, fs, fs.getHomeDirectory());
+      assertNotNull("TrashPolicy instance should not be null", tp);
+      assertTrue("TrashPolicy is not enabled for filesystem: " + fs.getUri(), tp.isEnabled());
+      Path trashDir = tp.getCurrentTrashDir();
+      assertNotNull("trash directory should not be null", trashDir);
+      Path trash1 = Path.mergePaths(trashDir, path1);
+      Path pathglob = trash1.suffix("*");
+      FileStatus[] before = fs.globStatus(pathglob);
+      hm.dropTable(dbName, ts.get(0));
+      assertFalse(fs.exists(path1));
+      FileStatus[] after = fs.globStatus(pathglob);
+      assertTrue("trash dir before and after DROP TABLE noPURGE are not different",
+          before.length != after.length);
+
+      // drop a table without saving to trash by setting the purge option
+      Table table2 = hm.getTable(dbName, ts.get(1));
+      assertNotNull(table2);
+      assertEquals(ts.get(1), table2.getTableName());
+      Path path2 = table2.getPath();
+      assertTrue(fs.exists(path2));
+      Path trash2 = Path.mergePaths(trashDir, path2);
+      System.out.println("trashDir2 is " + trash2);
+      pathglob = trash2.suffix("*");
+      before = fs.globStatus(pathglob);
+      hm.dropTable(dbName, ts.get(1), true, true, true); // deleteData, ignoreUnknownTable, ifPurge
+      assertFalse(fs.exists(path2));
+      after = fs.globStatus(pathglob);
+      Arrays.sort(before);
+      Arrays.sort(after);
+      assertEquals("trash dir before and after DROP TABLE PURGE are different",
+          before.length, after.length);
+      assertTrue("trash dir before and after DROP TABLE PURGE are different",
+          Arrays.equals(before, after));
+
+      // Drop all tables
+      for (String tableName : hm.getAllTables(dbName)) {
+        Table table = hm.getTable(dbName, tableName);
+        hm.dropTable(dbName, tableName);
+        assertFalse(fs.exists(table.getPath()));
+      }
+      hm.dropDatabase(dbName);
+    } catch (Throwable e) {
+      System.err.println(StringUtils.stringifyException(e));
+      System.err.println("testDropTableTrash() failed");
+      throw e;
+    }
+  }
+
   public void testPartition() throws Throwable {
     try {
       String tableName = "table_for_testpartition";
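The trash assertions in testDropTableTrash are only meaningful when Hadoop's trash feature is on, which is why setUp raises the intervals. For reference, a plain-Configuration sketch of the two knobs involved (values are minutes; this mirrors the test's setup rather than adding anything new):

    import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_CHECKPOINT_INTERVAL_KEY;
    import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_KEY;

    import org.apache.hadoop.conf.Configuration;

    public class TrashConfigSketch {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // fs.trash.interval > 0 enables trash: deleted paths are retained
        // for this many minutes before their checkpoint is reclaimed.
        conf.setFloat(FS_TRASH_INTERVAL_KEY, 30);
        // fs.trash.checkpoint.interval bounds how often the checkpointer
        // runs; 0 would mean "fall back to fs.trash.interval".
        conf.setFloat(FS_TRASH_CHECKPOINT_INTERVAL_KEY, 30);
        System.out.println(conf.get(FS_TRASH_INTERVAL_KEY)); // 30.0
      }
    }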
diff --git ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHiveRemote.java ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHiveRemote.java
index 1a5ba87..ee95e16 100644
--- ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHiveRemote.java
+++ ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHiveRemote.java
@@ -84,6 +84,13 @@ protected void setUp() throws Exception {
   }
 
   /**
+   * Cannot control trash in remote metastore, so skip this test.
+   */
+  @Override
+  public void testDropTableTrash() {
+  }
+
+  /**
    * Finds a free port.
    *
    * @return a free port
diff --git ql/src/test/queries/clientpositive/drop_table_purge.q ql/src/test/queries/clientpositive/drop_table_purge.q
new file mode 100644
index 0000000..f094a5b
--- /dev/null
+++ ql/src/test/queries/clientpositive/drop_table_purge.q
@@ -0,0 +1,4 @@
+SET hive.metastore.batch.retrieve.max=1;
+CREATE TABLE IF NOT EXISTS temp(col STRING);
+
+DROP TABLE temp PURGE;
diff --git ql/src/test/results/clientpositive/drop_table_purge.q.out ql/src/test/results/clientpositive/drop_table_purge.q.out
new file mode 100644
index 0000000..14f53b6
--- /dev/null
+++ ql/src/test/results/clientpositive/drop_table_purge.q.out
@@ -0,0 +1,16 @@
+PREHOOK: query: CREATE TABLE IF NOT EXISTS temp(col STRING)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@temp
+POSTHOOK: query: CREATE TABLE IF NOT EXISTS temp(col STRING)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@temp
+PREHOOK: query: DROP TABLE temp PURGE
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@temp
+PREHOOK: Output: default@temp
+POSTHOOK: query: DROP TABLE temp PURGE
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@temp
+POSTHOOK: Output: default@temp