From 2e86b6cd8a894355771029966ba6f59ee5f447ec Mon Sep 17 00:00:00 2001
From: Mahesh Kumar Behera
Date: Thu, 10 May 2018 22:33:58 +0530
Subject: [PATCH] HIVE-19488 : enable CM root based on db parameter, identifying a db as source of replication

---
 .../org/apache/hadoop/hive/ql/exec/DDLTask.java    |   8 +-
 .../hive/ql/parse/MetaDataExportListener.java      |   3 +-
 .../hadoop/hive/ql/txn/compactor/Cleaner.java      |   7 +-
 .../hadoop/hive/metastore/HiveAlterHandler.java    |   8 +-
 .../hadoop/hive/metastore/HiveMetaStore.java       | 101 ++++++++++-----------
 .../hadoop/hive/metastore/HiveMetaStoreClient.java |   8 ++
 .../hadoop/hive/metastore/ReplChangeManager.java   |  26 ++++++
 .../apache/hadoop/hive/metastore/Warehouse.java    |  10 +-
 .../metastore/HiveMetaStoreClientPreCatalog.java   |   9 +-
 9 files changed, 114 insertions(+), 66 deletions(-)

diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
index abde9f786f..e7c738a78b 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
@@ -1587,10 +1587,10 @@ private void moveDir(FileSystem fs, Path from, Path to) throws HiveException {
     }
   }
 
-  private void deleteDir(Path dir) throws HiveException {
+  private void deleteDir(Path dir, Database db) throws HiveException {
     try {
       Warehouse wh = new Warehouse(conf);
-      wh.deleteDir(dir, true);
+      wh.deleteDir(dir, true, false, db);
     } catch (MetaException e) {
       throw new HiveException(e);
     }
@@ -1845,7 +1845,7 @@ private int archive(Hive db, AlterTableSimpleDesc simpleDesc,
     // If a failure occurs here, the directory containing the original files
     // will not be deleted. The user will run ARCHIVE again to clear this up
     if(pathExists(intermediateOriginalDir)) {
-      deleteDir(intermediateOriginalDir);
+      deleteDir(intermediateOriginalDir, db.getDatabase(tbl.getDbName()));
     }
 
     if(recovery) {
@@ -2051,7 +2051,7 @@ private int unarchive(Hive db, AlterTableSimpleDesc simpleDesc)
     // If a failure happens here, the intermediate archive files won't be
     // deleted. The user will need to call unarchive again to clear those up.
     if(pathExists(intermediateArchivedDir)) {
-      deleteDir(intermediateArchivedDir);
+      deleteDir(intermediateArchivedDir, db.getDatabase(tbl.getDbName()));
     }
 
     if(recovery) {
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/MetaDataExportListener.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/MetaDataExportListener.java
index 8fccf369f8..bbedf98c55 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/MetaDataExportListener.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/MetaDataExportListener.java
@@ -23,6 +23,7 @@
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.metastore.IHMSHandler;
+import org.apache.hadoop.hive.metastore.ReplChangeManager;
 import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -90,7 +91,7 @@ private void export_meta_data(PreDropTableEvent tableEvent) throws MetaException
       EximUtil.createExportDump(fs, outFile, mTbl, null, null,
           new HiveConf(conf, MetaDataExportListener.class));
       if (moveMetadataToTrash == true) {
-        wh.deleteDir(metaPath, true);
+        wh.deleteDir(metaPath, true, false, null);
       }
     } catch (IOException e) {
       throw new MetaException(e.getMessage());
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Cleaner.java b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Cleaner.java
index fe6d2d663d..4364286d31 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Cleaner.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Cleaner.java
@@ -18,8 +18,11 @@
 package org.apache.hadoop.hive.ql.txn.compactor;
 
 import org.apache.hadoop.hive.common.FileUtils;
+import org.apache.hadoop.hive.metastore.HiveMetaStore;
 import org.apache.hadoop.hive.metastore.ReplChangeManager;
 import org.apache.hadoop.hive.metastore.txn.TxnStore;
+import org.apache.hadoop.hive.ql.metadata.Hive;
+import org.apache.hadoop.hive.ql.metadata.HiveException;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.fs.FileStatus;
@@ -349,7 +352,9 @@ private void removeFiles(String location, ValidWriteIdList writeIdList, Compacti
     for (Path dead : filesToDelete) {
       LOG.debug("Going to delete path " + dead.toString());
-      replChangeManager.recycle(dead, ReplChangeManager.RecycleType.MOVE, true);
+      if (ReplChangeManager.isReplPolicySet(Hive.get().getDatabase(ci.dbname))) {
+        replChangeManager.recycle(dead, ReplChangeManager.RecycleType.MOVE, true);
+      }
       fs.delete(dead, true);
     }
   }
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java
index 0be0aaa10c..5b70c0f333 100644
--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java
@@ -242,7 +242,8 @@ public void alterTable(RawStore msdb, Warehouse wh, String catName, String dbnam
               " already exists : " + destPath);
         }
         // check that src exists and also checks permissions necessary, rename src to dest
-        if (srcFs.exists(srcPath) && wh.renameDir(srcPath, destPath, true)) {
+        if (srcFs.exists(srcPath) && wh.renameDir(srcPath, destPath,
+            ReplChangeManager.isReplPolicySet(msdb.getDatabase(catName, dbname)))) {
           dataWasMoved = true;
         }
       } catch (IOException | MetaException e) {
@@ -624,7 +625,7 @@ public Partition alterPartition(final RawStore msdb, Warehouse wh, final String
           }
 
           //rename the data directory
-          wh.renameDir(srcPath, destPath, true);
+          wh.renameDir(srcPath, destPath, ReplChangeManager.isReplPolicySet(msdb.getDatabase(catName, dbname)));
           LOG.info("Partition directory rename from " + srcPath + " to " + destPath + " done.");
           dataWasMoved = true;
         }
@@ -635,6 +636,9 @@ public Partition alterPartition(final RawStore msdb, Warehouse wh, final String
       } catch (MetaException me) {
         LOG.error("Cannot rename partition directory from " + srcPath + " to " + destPath, me);
         throw me;
+      } catch (NoSuchObjectException e) {
+        LOG.error("Cannot rename partition directory from " + srcPath + " to " + destPath, e);
+        throw new MetaException(e.getMessage());
       }
 
       new_part.getSd().setLocation(newPartLoc);
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
index 269798c702..33dce80fda 100644
--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
@@ -973,6 +973,7 @@ public void create_catalog(CreateCatalogRequest rqst)
       startFunction("create_catalog", ": " + catalog.toString());
       boolean success = false;
       Exception ex = null;
+      Database db = null;
       try {
         try {
           getMS().getCatalog(catalog.getName());
@@ -991,6 +992,11 @@
         RawStore ms = getMS();
         Path catPath = new Path(catalog.getLocationUri());
+
+        // Create a default database inside the catalog
+        db = new Database(DEFAULT_DATABASE_NAME, "Default database for catalog " +
+            catalog.getName(), catalog.getLocationUri(), Collections.emptyMap());
+
         boolean madeDir = false;
         Map<String, String> transactionalListenersResponses = Collections.emptyMap();
         try {
@@ -1006,9 +1012,6 @@
           ms.openTransaction();
           ms.createCatalog(catalog);
 
-          // Create a default database inside the catalog
-          Database db = new Database(DEFAULT_DATABASE_NAME, "Default database for catalog " +
-              catalog.getName(), catalog.getLocationUri(), Collections.emptyMap());
           db.setCatalogName(catalog.getName());
           create_database_core(ms, db);
 
@@ -1024,7 +1027,7 @@
         if (!success) {
           ms.rollbackTransaction();
           if (madeDir) {
-            wh.deleteDir(catPath, true);
+            wh.deleteDir(catPath, true, false, ReplChangeManager.isReplPolicySet(db));
           }
         }
 
@@ -1154,7 +1157,8 @@ private void dropCatalogCore(String catName)
         success = ms.commitTransaction();
       } finally {
         if (success) {
-          wh.deleteDir(wh.getDnsPath(new Path(cat.getLocationUri())), false);
+          // TODO: Need to separate out the db based folder to filter out databases from recycling.
+          wh.deleteDir(wh.getDnsPath(new Path(cat.getLocationUri())), false, false, true);
         } else {
           ms.rollbackTransaction();
         }
@@ -1216,7 +1220,7 @@ private void create_database_core(RawStore ms, final Database db)
       if (!success) {
         ms.rollbackTransaction();
         if (madeDir) {
-          wh.deleteDir(dbPath, true);
+          wh.deleteDir(dbPath, true, false, db);
         }
       }
@@ -1505,14 +1509,14 @@ private void drop_database_core(RawStore ms, String catName,
           ms.rollbackTransaction();
         } else if (deleteData) {
           // Delete the data in the partitions which have other locations
-          deletePartitionData(partitionPaths);
+          deletePartitionData(partitionPaths, false, db);
           // Delete the data in the tables which have other locations
           for (Path tablePath : tablePaths) {
-            deleteTableData(tablePath);
+            deleteTableData(tablePath, false, db);
           }
           // Delete the data in the database
           try {
-            wh.deleteDir(new Path(db.getLocationUri()), true);
+            wh.deleteDir(new Path(db.getLocationUri()), true, false, db);
           } catch (Exception e) {
             LOG.error("Failed to delete database directory: " + db.getLocationUri() +
                 " " + e.getMessage());
@@ -1889,7 +1893,7 @@ private void create_table_core(final RawStore ms, final Table tbl,
         if (!success) {
           ms.rollbackTransaction();
           if (madeDir) {
-            wh.deleteDir(tblPath, true);
+            wh.deleteDir(tblPath, true, ms.getDatabase(tbl.getCatName(), tbl.getDbName()));
           }
         }
@@ -2383,11 +2387,12 @@ private boolean drop_table_core(final RawStore ms, final String catName, final S
       if (!success) {
         ms.rollbackTransaction();
       } else if (deleteData && !isExternal) {
+        Database db = ms.getDatabase(tbl.getCatName(), tbl.getDbName());
         // Data needs deletion. Check if trash may be skipped.
         // Delete the data in the partitions which have other locations
-        deletePartitionData(partPaths, ifPurge);
+        deletePartitionData(partPaths, ifPurge, db);
         // Delete the data in the table
-        deleteTableData(tblPath, ifPurge);
+        deleteTableData(tblPath, ifPurge, db);
         // ok even if the data is not deleted
       }
@@ -2402,27 +2407,19 @@ private boolean drop_table_core(final RawStore ms, final String catName, final S
       return success;
     }
 
-    /**
-     * Deletes the data in a table's location, if it fails logs an error
-     *
-     * @param tablePath
-     */
-    private void deleteTableData(Path tablePath) {
-      deleteTableData(tablePath, false);
-    }
-
     /**
      * Deletes the data in a table's location, if it fails logs an error
      *
      * @param tablePath
      * @param ifPurge completely purge the table (skipping trash) while removing
      *                data from warehouse
+     * @param db database the table belongs to
      */
-    private void deleteTableData(Path tablePath, boolean ifPurge) {
+    private void deleteTableData(Path tablePath, boolean ifPurge, Database db) {
       if (tablePath != null) {
         try {
-          wh.deleteDir(tablePath, true, ifPurge);
+          wh.deleteDir(tablePath, true, ifPurge, db);
         } catch (Exception e) {
           LOG.error("Failed to delete table directory: " + tablePath +
               " " + e.getMessage());
@@ -2430,16 +2427,6 @@ private void deleteTableData(Path tablePath, boolean ifPurge) {
       }
     }
 
-    /**
-     * Give a list of partitions' locations, tries to delete each one
-     * and for each that fails logs an error.
-     *
-     * @param partPaths
-     */
-    private void deletePartitionData(List<Path> partPaths) {
-      deletePartitionData(partPaths, false);
-    }
-
     /**
      * Give a list of partitions' locations, tries to delete each one
      * and for each that fails logs an error.
@@ -2448,11 +2435,11 @@ private void deletePartitionData(List<Path> partPaths) {
      *
      * @param ifPurge completely purge the partition (skipping trash) while
      *                removing data from warehouse
      */
-    private void deletePartitionData(List<Path> partPaths, boolean ifPurge) {
+    private void deletePartitionData(List<Path> partPaths, boolean ifPurge, Database db) {
       if (partPaths != null && !partPaths.isEmpty()) {
         for (Path partPath : partPaths) {
           try {
-            wh.deleteDir(partPath, true, ifPurge);
+            wh.deleteDir(partPath, true, ifPurge, db);
           } catch (Exception e) {
             LOG.error("Failed to delete partition directory: " + partPath +
                 " " + e.getMessage());
@@ -2694,7 +2681,7 @@ public void truncate_table(final String dbName, final String tableName, List
[... hunk body not recovered ...]
       for (Map.Entry<PartValEqWrapperLite, Boolean> e : addedPartitions.entrySet()) {
         if (e.getValue()) {
           // we just created this directory - it's not a case of pre-creation, so we nuke.
-          wh.deleteDir(new Path(e.getKey().location), true);
+          wh.deleteDir(new Path(e.getKey().location), true,
+              ms.getDatabase(tbl.getCatName(), tbl.getDbName()));
         }
       }
 
@@ -3558,7 +3552,7 @@ private int add_partitions_pspec_core(RawStore ms, String catName, String dbName
       for (Map.Entry<PartValEqWrapperLite, Boolean> e : addedPartitions.entrySet()) {
         if (e.getValue()) {
           // we just created this directory - it's not a case of pre-creation, so we nuke.
-          wh.deleteDir(new Path(e.getKey().location), true);
+          wh.deleteDir(new Path(e.getKey().location), true, ms.getDatabase(catName, dbName));
         }
       }
     }
@@ -3710,7 +3704,8 @@ private Partition add_partition_core(final RawStore ms,
       success = ms.addPartition(part);
     } finally {
       if (!success && madeDir) {
-        wh.deleteDir(new Path(part.getSd().getLocation()), true);
+        wh.deleteDir(new Path(part.getSd().getLocation()), true,
+            ms.getDatabase(tbl.getCatName(), tbl.getDbName()));
       }
     }
 
@@ -4032,13 +4027,14 @@ private boolean drop_partition_common(RawStore ms, String catName, String db_nam
       }
 
       // Archived partitions have har:/to_har_file as their location.
       // The original directory was saved in params
+      boolean isReplEnabled = ReplChangeManager.isReplPolicySet(ms.getDatabase(catName, db_name));
       if (isArchived) {
         assert (archiveParentDir != null);
-        wh.deleteDir(archiveParentDir, true, mustPurge);
+        wh.deleteDir(archiveParentDir, true, mustPurge, isReplEnabled);
       } else {
         assert (partPath != null);
-        wh.deleteDir(partPath, true, mustPurge);
-        deleteParentRecursive(partPath.getParent(), part_vals.size() - 1, mustPurge);
+        wh.deleteDir(partPath, true, mustPurge, isReplEnabled);
+        deleteParentRecursive(partPath.getParent(), part_vals.size() - 1, mustPurge, isReplEnabled);
       }
       // ok even if the data is not deleted
     }
@@ -4066,12 +4062,13 @@ private static boolean isMustPurge(EnvironmentContext envContext, Table tbl) {
           || (tbl.isSetParameters() && "true".equalsIgnoreCase(tbl.getParameters().get("auto.purge")));
     }
 
-    private void deleteParentRecursive(Path parent, int depth, boolean mustPurge) throws IOException, MetaException {
+    private void deleteParentRecursive(Path parent, int depth, boolean mustPurge, boolean needRecycle)
+        throws IOException, MetaException {
       if (depth > 0 && parent != null && wh.isWritable(parent)) {
         if (wh.isDir(parent) && wh.isEmpty(parent)) {
-          wh.deleteDir(parent, true, mustPurge);
+          wh.deleteDir(parent, true, mustPurge, needRecycle);
         }
-        deleteParentRecursive(parent.getParent(), depth - 1, mustPurge);
+        deleteParentRecursive(parent.getParent(), depth - 1, mustPurge, needRecycle);
       }
     }
 
@@ -4219,13 +4216,14 @@ public DropPartitionsResult drop_partitions_req(
           : "dropPartition() will move partition-directories to trash-directory.");
       // Archived partitions have har:/to_har_file as their location.
       // The original directory was saved in params
+      boolean isReplEnabled = ReplChangeManager.isReplPolicySet(ms.getDatabase(catName, dbName));
       for (Path path : archToDelete) {
-        wh.deleteDir(path, true, mustPurge);
+        wh.deleteDir(path, true, mustPurge, isReplEnabled);
       }
       for (PathAndPartValSize p : dirsToDelete) {
-        wh.deleteDir(p.path, true, mustPurge);
+        wh.deleteDir(p.path, true, mustPurge, isReplEnabled);
         try {
-          deleteParentRecursive(p.path.getParent(), p.partValSize - 1, mustPurge);
+          deleteParentRecursive(p.path.getParent(), p.partValSize - 1, mustPurge, isReplEnabled);
         } catch (IOException ex) {
           LOG.warn("Error from deleteParentRecursive", ex);
           throw new MetaException("Failed to delete parent: " + ex.getMessage());
@@ -6844,7 +6842,8 @@ public void drop_function(String dbName, String funcName)
         // a copy is required to allow incremental replication to work correctly.
         if (func.getResourceUris() != null && !func.getResourceUris().isEmpty()) {
           for (ResourceUri uri : func.getResourceUris()) {
-            if (uri.getUri().toLowerCase().startsWith("hdfs:")) {
+            if (uri.getUri().toLowerCase().startsWith("hdfs:") &&
+                ReplChangeManager.isReplPolicySet(get_database_core(parsedDbName[CAT_NAME], parsedDbName[DB_NAME]))) {
               wh.addToChangeManagement(new Path(uri.getUri()));
             }
           }
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
index 6af2aa5b3a..ec16104f64 100644
--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
@@ -1019,6 +1019,7 @@ public void dropDatabase(String name, boolean deleteData, boolean ignoreUnknownD
   public void dropDatabase(String catalogName, String dbName, boolean deleteData,
       boolean ignoreUnknownDb, boolean cascade)
       throws NoSuchObjectException, InvalidOperationException, MetaException, TException {
+    Database db = null;
     try {
-      getDatabase(catalogName, dbName);
+      db = getDatabase(catalogName, dbName);
     } catch (NoSuchObjectException e) {
@@ -1028,6 +1029,13 @@ public void dropDatabase(String catalogName, String dbName, boolean deleteData,
       }
       return;
     }
 
+    // In testing mode we allow the database to be dropped even if replication is enabled.
+    if (!conf.getBoolean(ConfVars.HIVE_IN_TEST.getVarname(), false) &&
+        ReplChangeManager.isReplPolicySet(db)) {
+      LOG.error("Replication is enabled for database " + dbName + ", so it cannot be dropped");
+      throw new InvalidOperationException("Replication is enabled for database " + dbName + ", so it cannot be dropped");
+    }
+
     if (cascade) {
       // Note that this logic may drop some of the tables of the database
       // even if the drop database fail for any reason
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/ReplChangeManager.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/ReplChangeManager.java
index 79ba7ff35b..da2d6ceaf9 100644
--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/ReplChangeManager.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/ReplChangeManager.java
@@ -19,6 +19,7 @@
 package org.apache.hadoop.hive.metastore;
 
 import java.io.IOException;
+import java.util.Map;
 import java.util.concurrent.Executors;
 import java.util.concurrent.ScheduledExecutorService;
 import java.util.concurrent.TimeUnit;
@@ -33,6 +34,7 @@
 import org.apache.hadoop.fs.Trash;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hive.metastore.api.Database;
 import org.apache.hadoop.hive.metastore.api.MetaException;
 import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
 import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars;
@@ -467,4 +469,28 @@ static void scheduleCMClearer(Configuration conf) {
           0, MetastoreConf.getTimeVar(conf, ConfVars.REPLCMINTERVAL, TimeUnit.SECONDS), TimeUnit.SECONDS);
     }
   }
+
+  public static boolean isReplPolicySet(Database db) {
+    // Cannot tell without the database object, so assume replication is not enabled.
+    if (db == null) {
+      return false;
+    }
+
+    // For the default database, replication is always enabled.
+    return (db.getName().toLowerCase().equals(org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_DATABASE_NAME) ||
+        getReplPolicyIdString(db) != null);
+  }
+
+  public static String getReplPolicyIdString(Database db) {
+    if (db != null) {
+      Map<String, String> m = db.getParameters();
+      if ((m != null) && (m.containsKey("repl.source.for"))) {
+        String replPolicyId = m.get("repl.source.for");
+        LOG.debug("Repl policy for database {} is {}", db.getName(), replPolicyId);
+        return replPolicyId;
+      }
+      LOG.info("Repl policy is not set for database {}", db.getName());
+    }
+    return null;
+  }
 }
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/Warehouse.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/Warehouse.java
index 88cbfcdc4b..2af3f796d8 100755
--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/Warehouse.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/Warehouse.java
@@ -301,18 +301,16 @@ void addToChangeManagement(Path file) throws MetaException {
     }
   }
 
-  public boolean deleteDir(Path f, boolean recursive) throws MetaException {
-    return deleteDir(f, recursive, false);
+  public boolean deleteDir(Path f, boolean recursive, Database db) throws MetaException {
+    return deleteDir(f, recursive, false, db);
   }
 
-  public boolean deleteDir(Path f, boolean recursive, boolean ifPurge) throws MetaException {
-    return deleteDir(f, recursive, ifPurge, true);
+  public boolean deleteDir(Path f, boolean recursive, boolean ifPurge, Database db) throws MetaException {
+    return deleteDir(f, recursive, ifPurge, ReplChangeManager.isReplPolicySet(db));
   }
 
   public boolean deleteDir(Path f, boolean recursive, boolean ifPurge, boolean needCmRecycle) throws MetaException {
-    // no need to create the CM recycle file for temporary tables
     if (needCmRecycle) {
-
       try {
         cm.recycle(f, RecycleType.MOVE, ifPurge);
       } catch (IOException e) {
diff --git a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClientPreCatalog.java b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClientPreCatalog.java
index 7186addacd..8e4a6f461f 100644
--- a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClientPreCatalog.java
+++ b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClientPreCatalog.java
@@ -905,8 +905,9 @@ public void dropDatabase(String name, boolean deleteData, boolean ignoreUnknownD
   @Override
   public void dropDatabase(String name, boolean deleteData, boolean ignoreUnknownDb, boolean cascade)
       throws NoSuchObjectException, InvalidOperationException, MetaException, TException {
+    Database db;
     try {
-      getDatabase(name);
+      db = getDatabase(name);
     } catch (NoSuchObjectException e) {
       if (!ignoreUnknownDb) {
         throw e;
@@ -914,6 +915,12 @@ public void dropDatabase(String name, boolean deleteData, boolean ignoreUnknownD
       }
       return;
     }
 
+    if (!conf.getBoolean(ConfVars.HIVE_IN_TEST.getVarname(), false) &&
+        ReplChangeManager.isReplPolicySet(db)) {
+      LOG.error("Replication is enabled for database " + name + ", so it cannot be dropped");
+      throw new InvalidOperationException("Replication is enabled for database " + name + ", so it cannot be dropped");
+    }
+
     if (cascade) {
       List<String> tableList = getAllTables(name);
       for (String table : tableList) {
-- 
2.14.3 (Apple Git-98)
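
Notes:

All of the new guards funnel through ReplChangeManager.isReplPolicySet(). A minimal sketch of
how it behaves, using the same four-argument thrift Database constructor the patch itself uses
in create_catalog; the database names, locations, and policy id below are made up:

  import java.util.Collections;
  import org.apache.hadoop.hive.metastore.ReplChangeManager;
  import org.apache.hadoop.hive.metastore.api.Database;

  public class ReplPolicyCheckSketch {
    public static void main(String[] args) {
      // Hypothetical database marked as a replication source via the
      // "repl.source.for" parameter that getReplPolicyIdString() reads.
      Database replSource = new Database("sales", "demo db", "/warehouse/sales.db",
          Collections.singletonMap("repl.source.for", "repl_policy_1"));
      // Hypothetical database with no replication policy set.
      Database scratch = new Database("scratch", "demo db", "/warehouse/scratch.db",
          Collections.<String, String>emptyMap());

      System.out.println(ReplChangeManager.isReplPolicySet(replSource)); // true
      System.out.println(ReplChangeManager.isReplPolicySet(scratch));    // false: not "default", no policy
    }
  }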
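
A database would typically become a replication source by having "repl.source.for" set in its
parameters. A sketch of doing that through the metastore client API (assumes a reachable
metastore and HiveConf on the classpath; the database name and policy id are hypothetical):

  import org.apache.hadoop.hive.conf.HiveConf;
  import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
  import org.apache.hadoop.hive.metastore.api.Database;

  public class MarkReplSourceSketch {
    public static void main(String[] args) throws Exception {
      HiveMetaStoreClient client = new HiveMetaStoreClient(new HiveConf());
      Database db = client.getDatabase("sales");
      db.putToParameters("repl.source.for", "repl_policy_1"); // thrift-generated map setter
      client.alterDatabase("sales", db);
      // From here on, dropDatabase("sales", ...) fails with InvalidOperationException
      // unless hive.in.test is set, and warehouse deletes recycle data into the CM root.
      client.close();
    }
  }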