diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenarios.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenarios.java
index 689c859804..ad46929243 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenarios.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenarios.java
@@ -91,6 +91,7 @@
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertNull;
 
 import static org.apache.hadoop.hive.metastore.ReplChangeManager.SOURCE_OF_REPLICATION;
+import org.junit.Assert;
 
 public class TestReplicationScenarios {
@@ -3185,6 +3186,27 @@ public void testLoadCmPathMissing() throws IOException {
     fs.create(path, false);
   }
 
+  @Test
+  public void testDumpFileMissing() throws IOException {
+    String dbName = createDB(testName.getMethodName(), driver);
+    run("CREATE TABLE " + dbName + ".normal(a int)", driver);
+    run("INSERT INTO " + dbName + ".normal values (1)", driver);
+
+    Path path = new Path(System.getProperty("test.warehouse.dir",""));
+    path = new Path(path, dbName.toLowerCase()+".db");
+    path = new Path(path, "normal");
+    FileSystem fs = path.getFileSystem(hconf);
+    fs.delete(path);
+
+    advanceDumpDir();
+    CommandProcessorResponse ret = driver.run("REPL DUMP " + dbName);
+    Assert.assertEquals(ret.getResponseCode(), ErrorMsg.FILE_NOT_FOUND.getErrorCode());
+
+    run("DROP TABLE " + dbName + ".normal", driver);
+    run("drop database " + dbName, true, driver);
+  }
+
+
   @Test
   public void testDumpNonReplDatabase() throws IOException {
     String dbName = createDBNonRepl(testName.getMethodName(), driver);
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosAcrossInstances.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosAcrossInstances.java
index 0f671741e5..00b9a3e3ef 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosAcrossInstances.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosAcrossInstances.java
@@ -66,6 +66,9 @@
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 import static org.apache.hadoop.hive.metastore.ReplChangeManager.SOURCE_OF_REPLICATION;
+import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse;
+import org.apache.hadoop.hive.ql.ErrorMsg;
+import org.junit.Assert;
 
 public class TestReplicationScenariosAcrossInstances {
   @Rule
@@ -1097,6 +1100,8 @@ public Boolean apply(@Nullable CallerArguments args) {
 
     // Retry with different dump should fail.
     replica.loadFailure(replicatedDbName, tuple2.dumpLocation);
+    CommandProcessorResponse ret = replica.runCommand("REPL LOAD " + replicatedDbName + " FROM '" + tuple2.dumpLocation + "'");
+    Assert.assertEquals(ret.getResponseCode(), ErrorMsg.REPL_BOOTSTRAP_LOAD_PATH_NOT_VALID.getErrorCode());
 
     // Verify if create table is not called on table t1 but called for t2 and t3.
     // Also, allow constraint creation only on t1 and t3. Foreign key creation on t2 fails.
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/WarehouseInstance.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/WarehouseInstance.java
index fc812ade41..9e519cf0b0 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/WarehouseInstance.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/WarehouseInstance.java
@@ -199,6 +199,10 @@ public WarehouseInstance run(String command) throws Throwable {
     return this;
   }
 
+  public CommandProcessorResponse runCommand(String command) throws Throwable {
+    return driver.run(command);
+  }
+
   WarehouseInstance runFailure(String command) throws Throwable {
     CommandProcessorResponse ret = driver.run(command);
     if (ret.getException() == null) {
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java b/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java
index 90d6b8f4a3..b2c9daa436 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java
@@ -502,7 +502,8 @@
   //if the error message is changed for REPL_EVENTS_MISSING_IN_METASTORE, then need modification in getNextNotification
   //method in HiveMetaStoreClient
   REPL_EVENTS_MISSING_IN_METASTORE(20016, "Notification events are missing in the meta store."),
-  REPL_BOOTSTRAP_LOAD_PATH_NOT_VALID(20017, "Target database is bootstrapped from some other path."),
+  REPL_BOOTSTRAP_LOAD_PATH_NOT_VALID(20017, "Load path {0} not valid as target database is bootstrapped " +
+      "from some other path : {1}."),
   REPL_FILE_MISSING_FROM_SRC_AND_CM_PATH(20018, "File is missing from both source and cm path."),
   REPL_LOAD_PATH_NOT_FOUND(20019, "Load path does not exist."),
   REPL_DATABASE_IS_NOT_SOURCE_OF_REPLICATION(20020,
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplDumpTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplDumpTask.java
index 7e5f8050f4..e48657c35d 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplDumpTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplDumpTask.java
@@ -121,6 +121,10 @@ protected int execute(DriverContext driverContext) {
         lastReplId = incrementalDump(dumpRoot, dmd, cmRoot);
       }
       prepareReturnValues(Arrays.asList(dumpRoot.toUri().toString(), String.valueOf(lastReplId)), dumpSchema);
+    } catch (RuntimeException e) {
+      LOG.error("failed", e);
+      setException(e);
+      return ErrorMsg.getErrorMsg(e.getMessage()).getErrorCode();
     } catch (Exception e) {
       LOG.error("failed", e);
       setException(e);
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplUtils.java
index 18a83043a6..e43c8c301a 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplUtils.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplUtils.java
@@ -19,6 +19,7 @@
 import org.apache.hadoop.hive.conf.HiveConf;
 
 import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
+import org.apache.hadoop.hive.ql.ErrorMsg;
 import org.apache.hadoop.hive.ql.exec.Task;
 import org.apache.hadoop.hive.ql.exec.TaskFactory;
 import org.apache.hadoop.hive.ql.metadata.Table;
@@ -114,9 +115,8 @@ public static boolean replCkptStatus(String dbName, Map<String, String> props, S
     if (props.get(REPL_CHECKPOINT_KEY).equals(dumpRoot)) {
       return true;
     }
-    throw new InvalidOperationException("REPL LOAD with Dump: " + dumpRoot
-        + " is not allowed as the target DB: " + dbName
-        + " is already bootstrap loaded by another Dump " + props.get(REPL_CHECKPOINT_KEY));
+    throw new InvalidOperationException(ErrorMsg.REPL_BOOTSTRAP_LOAD_PATH_NOT_VALID.format(dumpRoot,
+        props.get(REPL_CHECKPOINT_KEY)));
   }
   return false;
 }
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/LoadDatabase.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/LoadDatabase.java
index 0270d2afec..7ddae6f97c 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/LoadDatabase.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/LoadDatabase.java
@@ -78,7 +78,7 @@ public TaskTracker tasks() throws SemanticException {
       }
       return tracker;
     } catch (Exception e) {
-      throw new SemanticException(e);
+      throw new SemanticException(e.getMessage(), e);
     }
   }
 
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/PartitionExport.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/PartitionExport.java
index d73fc4f336..29aa06ca7a 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/PartitionExport.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/PartitionExport.java
@@ -20,6 +20,7 @@
 import com.google.common.util.concurrent.ThreadFactoryBuilder;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.ql.metadata.HiveException;
 import org.apache.hadoop.hive.ql.metadata.Partition;
 import org.apache.hadoop.hive.ql.metadata.PartitionIterable;
 import org.apache.hadoop.hive.ql.parse.ReplicationSpec;
@@ -29,6 +30,7 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import java.util.LinkedList;
 import java.util.List;
 import java.util.concurrent.ArrayBlockingQueue;
 import java.util.concurrent.BlockingQueue;
@@ -36,6 +38,7 @@
 import java.util.concurrent.Executors;
 import java.util.concurrent.ThreadFactory;
 import java.util.concurrent.TimeUnit;
+import java.util.concurrent.Future;
 
 import static org.apache.hadoop.hive.ql.parse.repl.dump.TableExport.Paths;
 
@@ -70,7 +73,7 @@
     this.callersSession = SessionState.get();
   }
 
-  void write(final ReplicationSpec forReplicationSpec) throws InterruptedException {
+  void write(final ReplicationSpec forReplicationSpec) throws InterruptedException, HiveException {
     ExecutorService producer = Executors.newFixedThreadPool(1,
         new ThreadFactoryBuilder().setNameFormat("partition-submitter-thread-%d").build());
     producer.submit(() -> {
@@ -89,6 +92,7 @@ void write(final ReplicationSpec forReplicationSpec) throws InterruptedException
     ThreadFactory namingThreadFactory =
        new ThreadFactoryBuilder().setNameFormat("partition-dump-thread-%d").build();
     ExecutorService consumer = Executors.newFixedThreadPool(nThreads, namingThreadFactory);
+    List<Future<?>> futures = new LinkedList<>();
 
     while (!producer.isTerminated() || !queue.isEmpty()) {
       /*
@@ -102,7 +106,7 @@ void write(final ReplicationSpec forReplicationSpec) throws InterruptedException
         continue;
       }
       LOG.debug("scheduling partition dump {}", partition.getName());
-      consumer.submit(() -> {
+      futures.add(consumer.submit(() -> {
        String partitionName = partition.getName();
        String threadName = Thread.currentThread().getName();
        LOG.debug("Thread: {}, start partition dump {}", threadName, partitionName);
@@ -115,11 +119,19 @@ void write(final ReplicationSpec forReplicationSpec) throws InterruptedException
               .export(forReplicationSpec);
          LOG.debug("Thread: {}, finish partition dump {}", threadName, partitionName);
        } catch (Exception e) {
-          throw new RuntimeException("Error while export of data files", e);
+          throw new RuntimeException(e.getMessage(), e);
        }
-      });
+      }));
     }
     consumer.shutdown();
+    for (Future<?> future : futures) {
+      try {
+        future.get();
+      } catch (Exception e) {
+        LOG.error("failed", e.getCause());
+        throw new HiveException(e.getCause().getMessage(), e.getCause());
+      }
+    }
     // may be drive this via configuration as well.
     consumer.awaitTermination(Long.MAX_VALUE, TimeUnit.SECONDS);
   }
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/io/FileOperations.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/io/FileOperations.java
index 58eae3812c..e8eaae6961 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/io/FileOperations.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/io/FileOperations.java
@@ -18,6 +18,7 @@
 package org.apache.hadoop.hive.ql.parse.repl.dump.io;
 
 import java.io.BufferedWriter;
+import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.io.OutputStreamWriter;
 import java.util.ArrayList;
@@ -46,6 +47,8 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import static org.apache.hadoop.hive.ql.ErrorMsg.FILE_NOT_FOUND;
+
 //TODO: this object is created once to call one method and then immediately destroyed.
 //So it's basically just a roundabout way to pass arguments to a static method. Simplify?
 public class FileOperations {
@@ -156,6 +159,10 @@ private void exportFilesAsList() throws SemanticException, IOException, LoginExc
         }
         done = true;
       } catch (IOException e) {
+        if (e instanceof FileNotFoundException) {
+          logger.error("exporting data files in dir : " + dataPathList + " to " + exportRootDataDir + " failed");
+          throw new FileNotFoundException(FILE_NOT_FOUND.format(e.getMessage()));
+        }
         repeat++;
         logger.info("writeFilesList failed", e);
         if (repeat >= FileUtils.MAX_IO_ERROR_RETRY) {
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java
index c2da6d362f..93ac74c68b 100644
--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java
@@ -125,13 +125,10 @@ public void alterTable(RawStore msdb, Warehouse wh, String catName, String dbnam
 
     Table oldt = null;
 
-    List<TransactionalMetaStoreEventListener> transactionalListeners = null;
-    List<MetaStoreEventListener> listeners = null;
+    List<TransactionalMetaStoreEventListener> transactionalListeners = handler.getTransactionalListeners();
+    List<MetaStoreEventListener> listeners = handler.getListeners();
     Map<String, String> txnAlterTableEventResponses = Collections.emptyMap();
 
-    transactionalListeners = handler.getTransactionalListeners();
-    listeners = handler.getListeners();
-
     try {
       boolean rename = false;
       List<Partition> parts;
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
index e9d7e7c397..05a59d58fa 100644
--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
@@ -4986,6 +4986,10 @@ private void alter_table_core(final String catName, final String dbname, final S
     try {
       Table oldt = get_table_core(catName, dbname, name);
       firePreEvent(new PreAlterTableEvent(oldt, newTable, this));
+      if (newTable.getDbName() == null) {
+        // This check is done here to support backward compatibility of exception thrown.
+        throw new InvalidOperationException("Unable to alter table" + dbname + "." + name + " as new db name is null");
+ name + " as new db name is null"); + } alterHandler.alterTable(getMS(), wh, catName, dbname, name, newTable, envContext, this); success = true; diff --git a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestTablesCreateDropAlterTruncate.java b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestTablesCreateDropAlterTruncate.java index c40b42ab48..589551cc3c 100644 --- a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestTablesCreateDropAlterTruncate.java +++ b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestTablesCreateDropAlterTruncate.java @@ -907,7 +907,7 @@ public void testAlterTableCascade() throws Exception { } } - @Test(expected = MetaException.class) + @Test(expected = InvalidOperationException.class) public void testAlterTableNullDatabaseInNew() throws Exception { Table originalTable = testTables[0]; Table newTable = originalTable.deepCopy();