diff --git beeline/src/java/org/apache/hive/beeline/HiveSchemaTool.java beeline/src/java/org/apache/hive/beeline/HiveSchemaTool.java
index a90127b038..a469cd49e6 100644
--- beeline/src/java/org/apache/hive/beeline/HiveSchemaTool.java
+++ beeline/src/java/org/apache/hive/beeline/HiveSchemaTool.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hive.beeline;
 
+import com.google.common.annotations.VisibleForTesting;
 import org.apache.commons.cli.CommandLine;
 import org.apache.commons.cli.CommandLineParser;
 import org.apache.commons.cli.GnuParser;
@@ -33,6 +34,7 @@
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
+import org.apache.hadoop.hive.metastore.DatabaseProduct;
 import org.apache.hadoop.hive.metastore.HiveMetaException;
 import org.apache.hadoop.hive.metastore.IMetaStoreSchemaInfo;
 import org.apache.hadoop.hive.metastore.MetaStoreSchemaInfoFactory;
@@ -42,6 +44,7 @@
 import org.apache.hadoop.hive.metastore.tools.HiveSchemaHelper;
 import org.apache.hadoop.hive.metastore.tools.HiveSchemaHelper.MetaStoreConnectionInfo;
 import org.apache.hadoop.hive.metastore.tools.HiveSchemaHelper.NestedScriptParser;
+import org.apache.hadoop.hive.metastore.tools.SQLGenerator;
 import org.apache.hadoop.hive.shims.ShimLoader;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -71,6 +74,8 @@
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
+import static org.apache.hadoop.hive.metastore.utils.StringUtils.normalizeIdentifier;
+
 public class HiveSchemaTool {
   private String userName = null;
   private String passWord = null;
@@ -85,6 +90,7 @@
   private final String metaDbType;
   private final IMetaStoreSchemaInfo metaStoreSchemaInfo;
   private boolean needsQuotedIdentifier;
+  private String quoteCharacter;
 
   static final private Logger LOG = LoggerFactory.getLogger(HiveSchemaTool.class.getName());
 
@@ -100,7 +106,9 @@ public HiveSchemaTool(String hiveHome, HiveConf hiveConf, String dbType, String
     this.hiveConf = hiveConf;
     this.dbType = dbType;
     this.metaDbType = metaDbType;
-    this.needsQuotedIdentifier = getDbCommandParser(dbType, metaDbType).needsQuotedIdentifier();
+    NestedScriptParser parser = getDbCommandParser(dbType, metaDbType);
+    this.needsQuotedIdentifier = parser.needsQuotedIdentifier();
+    this.quoteCharacter = parser.getQuoteCharacter();
     this.metaStoreSchemaInfo = MetaStoreSchemaInfoFactory.get(hiveConf, hiveHome, dbType);
   }
 
@@ -878,6 +886,204 @@ boolean validateColumnNullValues(Connection conn) throws HiveMetaException {
     }
   }
 
+  @VisibleForTesting
+  void createCatalog(String catName, String location, String description, boolean ifNotExists)
+      throws HiveMetaException {
+    catName = normalizeIdentifier(catName);
+    System.out.println("Create catalog " + catName + " at location " + location);
+
+    Connection conn = getConnectionToMetastore(true);
+    boolean success = false;
+    try {
+      conn.setAutoCommit(false);
+      try (Statement stmt = conn.createStatement()) {
+        // If they set ifNotExists check for existence first, and bail if it exists. This is
+        // more reliable than attempting to parse the error message from the SQLException.
+        if (ifNotExists) {
+          String query = "select " + quoteIf("NAME") + " from " + quoteIf("CTLGS") +
+              " where " + quoteIf("NAME") + " = '" + catName + "'";
+          LOG.debug("Going to run " + query);
+          ResultSet rs = stmt.executeQuery(query);
+          if (rs.next()) {
+            System.out.println("Catalog " + catName + " already exists");
+            return;
+          }
+        }
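+        // Pin the current max CTLG_ID while computing the next id: addForUpdateClause wraps
+        // the select in the database's FOR UPDATE syntax (where the product supports one) so
+        // two concurrent invocations cannot allocate the same id.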
+        SQLGenerator sqlGenerator = new SQLGenerator(
+            DatabaseProduct.determineDatabaseProduct(
+                conn.getMetaData().getDatabaseProductName()
+            ), hiveConf);
+        String query = sqlGenerator.addForUpdateClause("select max(" + quoteIf("CTLG_ID") + ") " +
+            "from " + quoteIf("CTLGS"));
+        LOG.debug("Going to run " + query);
+        ResultSet rs = stmt.executeQuery(query);
+        if (!rs.next()) {
+          throw new HiveMetaException("No catalogs found, have you upgraded the database?");
+        }
+        int catNum = rs.getInt(1) + 1;
+
+        String update = "insert into " + quoteIf("CTLGS") +
+            "(" + quoteIf("CTLG_ID") + ", " + quoteIf("NAME") + ", " + quoteAlways("DESC") + ", " + quoteIf("LOCATION_URI") + ") " +
+            " values (" + catNum + ", '" + catName + "', '" + description + "', '" + location + "')";
+        LOG.debug("Going to run " + update);
+        stmt.execute(update);
+        conn.commit();
+        success = true;
+      }
+    } catch (MetaException|SQLException e) {
+      throw new HiveMetaException("Failed to add catalog", e);
+    } finally {
+      try {
+        if (!success) conn.rollback();
+      } catch (SQLException e) {
+        // Not really much we can do here.
+        LOG.error("Failed to rollback, everything will probably go bad from here.");
+      }
+    }
+  }
+
+  @VisibleForTesting
+  void moveDatabase(String fromCatName, String toCatName, String dbName) throws HiveMetaException {
+    fromCatName = normalizeIdentifier(fromCatName);
+    toCatName = normalizeIdentifier(toCatName);
+    dbName = normalizeIdentifier(dbName);
+    System.out.println("Moving database " + dbName + " from catalog " + fromCatName +
+        " to catalog " + toCatName);
+    Connection conn = getConnectionToMetastore(true);
+    boolean success = false;
+    try {
+      conn.setAutoCommit(false);
+      try (Statement stmt = conn.createStatement()) {
+        updateCatalogNameInTable(stmt, "DBS", "CTLG_NAME", "NAME", fromCatName, toCatName, dbName, false);
+        updateCatalogNameInTable(stmt, "TAB_COL_STATS", "CAT_NAME", "DB_NAME", fromCatName, toCatName, dbName, true);
+        updateCatalogNameInTable(stmt, "PART_COL_STATS", "CAT_NAME", "DB_NAME", fromCatName, toCatName, dbName, true);
+        updateCatalogNameInTable(stmt, "PARTITION_EVENTS", "CAT_NAME", "DB_NAME", fromCatName, toCatName, dbName, true);
+        updateCatalogNameInTable(stmt, "NOTIFICATION_LOG", "CAT_NAME", "DB_NAME", fromCatName, toCatName, dbName, true);
+        conn.commit();
+        success = true;
+      }
+    } catch (SQLException e) {
+      throw new HiveMetaException("Failed to move database", e);
+    } finally {
+      try {
+        if (!success) conn.rollback();
+      } catch (SQLException e) {
+        // Not really much we can do here.
+        LOG.error("Failed to rollback, everything will probably go bad from here.");
+      }
+    }
+  }
+
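+  // Point the catalog-name column of one metastore table at the new catalog. The DBS row
+  // must exist, so that caller demands exactly one updated row; stats and event tables only
+  // have rows once stats or events have been recorded, so those callers pass zeroUpdatesOk.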
+  private void updateCatalogNameInTable(Statement stmt, String tableName, String catColName,
+                                        String dbColName, String fromCatName,
+                                        String toCatName, String dbName, boolean zeroUpdatesOk)
+      throws HiveMetaException, SQLException {
+    String update = "update " + quoteIf(tableName) + " " +
+        "set " + quoteIf(catColName) + " = '" + toCatName + "' " +
+        "where " + quoteIf(catColName) + " = '" + fromCatName + "' and " + quoteIf(dbColName) + " = '" + dbName + "'";
+    LOG.debug("Going to run " + update);
+    int numUpdated = stmt.executeUpdate(update);
+    if (numUpdated != 1 && !(zeroUpdatesOk && numUpdated == 0)) {
+      throw new HiveMetaException("Failed to properly update the " + tableName +
+          " table. Expected to update 1 row but instead updated " + numUpdated);
+    }
+  }
+
+  @VisibleForTesting
+  void moveTable(String fromCat, String toCat, String fromDb, String toDb, String tableName)
+      throws HiveMetaException {
+    fromCat = normalizeIdentifier(fromCat);
+    toCat = normalizeIdentifier(toCat);
+    fromDb = normalizeIdentifier(fromDb);
+    toDb = normalizeIdentifier(toDb);
+    tableName = normalizeIdentifier(tableName);
+    Connection conn = getConnectionToMetastore(true);
+    boolean success = false;
+    try {
+      conn.setAutoCommit(false);
+      try (Statement stmt = conn.createStatement()) {
+        // Find the old database id
+        String query = "select " + quoteIf("DB_ID") +
+            " from " + quoteIf("DBS") +
+            " where " + quoteIf("NAME") + " = '" + fromDb + "' " +
+            "and " + quoteIf("CTLG_NAME") + " = '" + fromCat + "'";
+        LOG.debug("Going to run " + query);
+        ResultSet rs = stmt.executeQuery(query);
+        if (!rs.next()) {
+          throw new HiveMetaException("Unable to find database " + fromDb);
+        }
+        long oldDbId = rs.getLong(1);
+
+        // Find the new database id
+        query = "select " + quoteIf("DB_ID") +
+            " from " + quoteIf("DBS") +
+            " where " + quoteIf("NAME") + " = '" + toDb + "' " +
+            "and " + quoteIf("CTLG_NAME") + " = '" + toCat + "'";
+        LOG.debug("Going to run " + query);
+        rs = stmt.executeQuery(query);
+        if (!rs.next()) {
+          throw new HiveMetaException("Unable to find database " + toDb);
+        }
+        long newDbId = rs.getLong(1);
+
+        String update = "update " + quoteIf("TBLS") + " " +
+            "set " + quoteIf("DB_ID") + " = " + newDbId + " " +
+            "where " + quoteIf("DB_ID") + " = " + oldDbId +
+            " and " + quoteIf("TBL_NAME") + " = '" + tableName + "'";
+        LOG.debug("Going to run " + update);
+        int numUpdated = stmt.executeUpdate(update);
+        if (numUpdated != 1) {
+          throw new HiveMetaException(
+              "Failed to properly update TBLS table. Expected to update " +
+                  "1 row but instead updated " + numUpdated);
+        }
+        updateDbNameForTable(stmt, "TAB_COL_STATS", "TABLE_NAME", fromCat, toCat, fromDb, toDb, tableName);
+        updateDbNameForTable(stmt, "PART_COL_STATS", "TABLE_NAME", fromCat, toCat, fromDb, toDb, tableName);
+        updateDbNameForTable(stmt, "PARTITION_EVENTS", "TBL_NAME", fromCat, toCat, fromDb, toDb, tableName);
+        updateDbNameForTable(stmt, "NOTIFICATION_LOG", "TBL_NAME", fromCat, toCat, fromDb, toDb, tableName);
+        conn.commit();
+        success = true;
+      }
+    } catch (SQLException se) {
+      throw new HiveMetaException("Failed to move table", se);
+    } finally {
+      try {
+        if (!success) conn.rollback();
+      } catch (SQLException e) {
+        // Not really much we can do here.
+        LOG.error("Failed to rollback, everything will probably go bad from here.");
+      }
+    }
+  }
+
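+  // A moved table may have no stats or event rows at all, so zero updated rows is accepted
+  // here; more than one would indicate duplicate entries and is treated as an error.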
+  private void updateDbNameForTable(Statement stmt, String tableName,
+                                    String tableColumnName, String fromCat, String toCat,
+                                    String fromDb, String toDb, String hiveTblName)
+      throws HiveMetaException, SQLException {
+    String update = "update " + quoteIf(tableName) + " " +
+        "set " + quoteIf("CAT_NAME") + " = '" + toCat + "', " + quoteIf("DB_NAME") + " = '" + toDb + "' " +
+        "where " + quoteIf("CAT_NAME") + " = '" + fromCat + "' " +
+        "and " + quoteIf("DB_NAME") + " = '" + fromDb + "' " +
+        "and " + quoteIf(tableColumnName) + " = '" + hiveTblName + "'";
+    LOG.debug("Going to run " + update);
+    int numUpdated = stmt.executeUpdate(update);
+    if (numUpdated > 1 || numUpdated < 0) {
+      throw new HiveMetaException("Failed to properly update the " + tableName +
+          " table. Expected to update 1 row but instead updated " + numUpdated);
+    }
+  }
+
+  // Quote if the database requires it
+  private String quoteIf(String identifier) {
+    return needsQuotedIdentifier ? quoteCharacter + identifier + quoteCharacter : identifier;
+  }
+
+  // Quote always, for fields that mimic SQL keywords, like DESC
+  private String quoteAlways(String identifier) {
+    return quoteCharacter + identifier + quoteCharacter;
+  }
+
   /**
    * Run pre-upgrade scripts corresponding to a given upgrade script,
    * if any exist. The errors from pre-upgrade are ignored.
@@ -1026,11 +1232,27 @@ private static void initOptions(Options cmdLineOptions) {
         create("initSchemaTo");
     Option infoOpt = new Option("info", "Show config and schema details");
     Option validateOpt = new Option("validate", "Validate the database");
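+    // Example invocations for the options below (the -dbType value and other flag values
+    // are illustrative):
+    //   schematool -dbType mysql -createCatalog cat1 -catalogLocation hdfs://nn:8020/cat1
+    //   schematool -dbType mysql -moveDatabase db1 -fromCatalog hive -toCatalog cat1
+    //   schematool -dbType mysql -moveTable t1 -fromCatalog hive -toCatalog hive
+    //     -fromDatabase db1 -toDatabase db2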
+    Option createCatalog = OptionBuilder
+        .hasArg()
+        .withDescription("Create a catalog, requires --catalogLocation parameter as well")
+        .create("createCatalog");
+    Option moveDatabase = OptionBuilder
+        .hasArg()
+        .withDescription("Move a database between catalogs. Argument is the database name. " +
+            "Requires --fromCatalog and --toCatalog parameters as well")
+        .create("moveDatabase");
+    Option moveTable = OptionBuilder
+        .hasArg()
+        .withDescription("Move a table to a different database. Argument is the table name. " +
+            "Requires --fromCatalog, --toCatalog, --fromDatabase, and --toDatabase " +
+            "parameters as well.")
+        .create("moveTable");
     OptionGroup optGroup = new OptionGroup();
     optGroup.addOption(upgradeOpt).addOption(initOpt).
         addOption(help).addOption(upgradeFromOpt).
-        addOption(initToOpt).addOption(infoOpt).addOption(validateOpt);
+        addOption(initToOpt).addOption(infoOpt).addOption(validateOpt)
+        .addOption(createCatalog).addOption(moveDatabase).addOption(moveTable);
     optGroup.setRequired(true);
 
     Option userNameOpt = OptionBuilder.withArgName("user")
@@ -1061,6 +1283,37 @@ private static void initOptions(Options cmdLineOptions) {
     Option serversOpt = OptionBuilder.withArgName("serverList")
         .hasArgs().withDescription("a comma-separated list of servers used in location validation in the format of scheme://authority (e.g. hdfs://localhost:8000)")
         .create("servers");
+    Option catalogLocation = OptionBuilder
+        .hasArg()
+        .withDescription("Location of new catalog, required when adding a catalog")
+        .create("catalogLocation");
+    Option catalogDescription = OptionBuilder
+        .hasArg()
+        .withDescription("Description of new catalog")
+        .create("catalogDescription");
+    Option ifNotExists = OptionBuilder
+        .withDescription("If passed, it is not an error to create a catalog that already exists")
+        .create("ifNotExists");
+    Option toCatalog = OptionBuilder
+        .hasArg()
+        .withDescription("Catalog the database or table is being moved to. " +
+            "Required when moving a database or table.")
+        .create("toCatalog");
+    Option fromCatalog = OptionBuilder
+        .hasArg()
+        .withDescription("Catalog the database or table is being moved from. " +
+            "Required when moving a database or table.")
+        .create("fromCatalog");
+    Option toDatabase = OptionBuilder
+        .hasArg()
+        .withDescription("Database the table is being moved to. " +
+            "Required when moving a table.")
+        .create("toDatabase");
+    Option fromDatabase = OptionBuilder
+        .hasArg()
+        .withDescription("Database the table is being moved from. " +
+            "Required when moving a table.")
+        .create("fromDatabase");
     cmdLineOptions.addOption(help);
     cmdLineOptions.addOption(dryRunOpt);
     cmdLineOptions.addOption(userNameOpt);
@@ -1072,6 +1325,13 @@ private static void initOptions(Options cmdLineOptions) {
     cmdLineOptions.addOption(driverOpt);
     cmdLineOptions.addOption(dbOpts);
     cmdLineOptions.addOption(serversOpt);
+    cmdLineOptions.addOption(catalogLocation);
+    cmdLineOptions.addOption(catalogDescription);
+    cmdLineOptions.addOption(ifNotExists);
+    cmdLineOptions.addOption(toCatalog);
+    cmdLineOptions.addOption(fromCatalog);
+    cmdLineOptions.addOption(toDatabase);
+    cmdLineOptions.addOption(fromDatabase);
     cmdLineOptions.addOptionGroup(optGroup);
   }
 
@@ -1188,6 +1448,17 @@ public static void main(String[] args) {
         schemaTool.doInit(schemaVer);
       } else if (line.hasOption("validate")) {
         schemaTool.doValidate();
+      } else if (line.hasOption("createCatalog")) {
+        schemaTool.createCatalog(line.getOptionValue("createCatalog"),
+            line.getOptionValue("catalogLocation"), line.getOptionValue("catalogDescription"),
+            line.hasOption("ifNotExists"));
+      } else if (line.hasOption("moveDatabase")) {
+        schemaTool.moveDatabase(line.getOptionValue("fromCatalog"),
+            line.getOptionValue("toCatalog"), line.getOptionValue("moveDatabase"));
+      } else if (line.hasOption("moveTable")) {
+        schemaTool.moveTable(line.getOptionValue("fromCatalog"), line.getOptionValue("toCatalog"),
+            line.getOptionValue("fromDatabase"), line.getOptionValue("toDatabase"),
+            line.getOptionValue("moveTable"));
       } else {
         System.err.println("no valid option supplied");
         printAndExit(cmdLineOptions);
diff --git itests/hive-unit/src/test/java/org/apache/hive/beeline/TestSchemaToolCatalogOps.java itests/hive-unit/src/test/java/org/apache/hive/beeline/TestSchemaToolCatalogOps.java
new file mode 100644
index 0000000000..db10ae64f7
--- /dev/null
+++ itests/hive-unit/src/test/java/org/apache/hive/beeline/TestSchemaToolCatalogOps.java
@@ -0,0 +1,375 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hive.beeline;
+
+import org.apache.commons.io.FileUtils;
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.metastore.HiveMetaException;
+import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
+import org.apache.hadoop.hive.metastore.IMetaStoreClient;
+import org.apache.hadoop.hive.metastore.api.Catalog;
+import org.apache.hadoop.hive.metastore.api.Database;
+import org.apache.hadoop.hive.metastore.api.Function;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.api.Partition;
+import org.apache.hadoop.hive.metastore.api.Table;
+import org.apache.hadoop.hive.metastore.client.builder.CatalogBuilder;
+import org.apache.hadoop.hive.metastore.client.builder.DatabaseBuilder;
+import org.apache.hadoop.hive.metastore.client.builder.FunctionBuilder;
+import org.apache.hadoop.hive.metastore.client.builder.PartitionBuilder;
+import org.apache.hadoop.hive.metastore.client.builder.TableBuilder;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+import org.apache.thrift.TException;
+import org.junit.AfterClass;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.Set;
+
+import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_CATALOG_NAME;
+import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_DATABASE_NAME;
+
+public class TestSchemaToolCatalogOps {
+ private static final Logger LOG = LoggerFactory.getLogger(TestSchemaToolCatalogOps.class);
+ private static HiveSchemaTool schemaTool;
+ private static HiveConf conf;
+ private IMetaStoreClient client;
+ private static String testMetastoreDB;
+
+ @BeforeClass
+ public static void initDb() throws HiveMetaException, IOException {
+ conf = new HiveConf();
+ MetastoreConf.setBoolVar(conf, MetastoreConf.ConfVars.AUTO_CREATE_ALL, false);
+ MetastoreConf.setLongVar(conf, MetastoreConf.ConfVars.HMS_HANDLER_ATTEMPTS, 1);
+ MetastoreConf.setLongVar(conf, MetastoreConf.ConfVars.THRIFT_CONNECTION_RETRIES, 1);
+ testMetastoreDB = System.getProperty("java.io.tmpdir") +
+ File.separator + "testschematoolcatopsdb";
+ MetastoreConf.setVar(conf, MetastoreConf.ConfVars.CONNECT_URL_KEY,
+ "jdbc:derby:" + testMetastoreDB + ";create=true");
+ schemaTool = new HiveSchemaTool(
+ System.getProperty("test.tmp.dir", "target/tmp"), conf, "derby", null);
+ schemaTool.setUserName(MetastoreConf.getVar(conf, MetastoreConf.ConfVars.CONNECTION_USER_NAME));
+ schemaTool.setPassWord(MetastoreConf.getPassword(conf, MetastoreConf.ConfVars.PWD));
+ schemaTool.doInit(); // Pre-install the database so all the tables are there.
+
+ }
+
+ @AfterClass
+ public static void removeDb() throws Exception {
+ File metaStoreDir = new File(testMetastoreDB);
+ if (metaStoreDir.exists()) {
+ FileUtils.forceDeleteOnExit(metaStoreDir);
+ }
+ }
+
+ @Before
+ public void createClient() throws MetaException {
+ client = new HiveMetaStoreClient(conf);
+ }
+
+ @Test
+ public void createCatalog() throws HiveMetaException, TException {
+ String catName = "my_test_catalog";
+ String location = "file:///tmp/my_test_catalog";
+ String description = "very descriptive";
+ schemaTool.createCatalog(catName, location, description, false);
+
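+    // The new catalog should now be visible through the metastore client API.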
+ Catalog cat = client.getCatalog(catName);
+ Assert.assertEquals(location, cat.getLocationUri());
+ Assert.assertEquals(description, cat.getDescription());
+ }
+
+ @Test(expected = HiveMetaException.class)
+ public void createExistingCatalog() throws HiveMetaException {
+ schemaTool.createCatalog("hive", "somewhere", "", false);
+ }
+
+ @Test
+ public void createExistingCatalogWithIfNotExists() throws HiveMetaException, TException {
+ String catName = "my_existing_test_catalog";
+ String location = "file:///tmp/my_test_catalog";
+ String description = "very descriptive";
+ schemaTool.createCatalog(catName, location, description, false);
+
+ schemaTool.createCatalog(catName, location, description, true);
+ }
+
+ @Test
+ public void moveDatabase() throws HiveMetaException, TException {
+ String toCatName = "moveDbCat";
+ String dbName = "moveDbDb";
+ String tableName = "moveDbTable";
+ String funcName = "movedbfunc";
+ String partVal = "moveDbKey";
+
+ new CatalogBuilder()
+ .setName(toCatName)
+ .setLocation("file:///tmp")
+ .create(client);
+
+ Database db = new DatabaseBuilder()
+ .setCatalogName(DEFAULT_CATALOG_NAME)
+ .setName(dbName)
+ .create(client, conf);
+
+ new FunctionBuilder()
+ .inDb(db)
+ .setName(funcName)
+ .setClass("org.apache.hive.myudf")
+ .create(client, conf);
+
+ Table table = new TableBuilder()
+ .inDb(db)
+ .setTableName(tableName)
+ .addCol("a", "int")
+ .addPartCol("p", "string")
+ .create(client, conf);
+
+ new PartitionBuilder()
+ .inTable(table)
+ .addValue(partVal)
+ .addToTable(client, conf);
+
+ schemaTool.moveDatabase(DEFAULT_CATALOG_NAME, toCatName, dbName);
+
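+    // The database and everything in it (functions, tables, partitions) should now
+    // resolve under the target catalog.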
+ Database fetchedDb = client.getDatabase(toCatName, dbName);
+ Assert.assertNotNull(fetchedDb);
+ Assert.assertEquals(toCatName.toLowerCase(), fetchedDb.getCatalogName());
+
+ Function fetchedFunction = client.getFunction(toCatName, dbName, funcName);
+ Assert.assertNotNull(fetchedFunction);
+ Assert.assertEquals(toCatName.toLowerCase(), fetchedFunction.getCatName());
+ Assert.assertEquals(dbName.toLowerCase(), fetchedFunction.getDbName());
+
+ Table fetchedTable = client.getTable(toCatName, dbName, tableName);
+ Assert.assertNotNull(fetchedTable);
+ Assert.assertEquals(toCatName.toLowerCase(), fetchedTable.getCatName());
+ Assert.assertEquals(dbName.toLowerCase(), fetchedTable.getDbName());
+
+ Partition fetchedPart =
+ client.getPartition(toCatName, dbName, tableName, Collections.singletonList(partVal));
+ Assert.assertNotNull(fetchedPart);
+ Assert.assertEquals(toCatName.toLowerCase(), fetchedPart.getCatName());
+ Assert.assertEquals(dbName.toLowerCase(), fetchedPart.getDbName());
+ Assert.assertEquals(tableName.toLowerCase(), fetchedPart.getTableName());
+ }
+
+ @Test
+ public void moveDatabaseWithExistingDbOfSameNameAlreadyInTargetCatalog()
+ throws TException, HiveMetaException {
+ String catName = "clobberCatalog";
+ new CatalogBuilder()
+ .setName(catName)
+ .setLocation("file:///tmp")
+ .create(client);
+ try {
+ schemaTool.moveDatabase(catName, DEFAULT_CATALOG_NAME, DEFAULT_DATABASE_NAME);
+ Assert.fail("Attempt to move default database should have failed.");
+ } catch (HiveMetaException e) {
+ // good
+ }
+
+ // Make sure nothing really moved
+ Set