diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenarios.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenarios.java index efe9fff780..c79d4c324f 100644 --- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenarios.java +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenarios.java @@ -98,6 +98,8 @@ import static org.apache.hadoop.hive.metastore.ReplChangeManager.SOURCE_OF_REPLICATION; import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_CATALOG_NAME; +import static org.apache.hadoop.hive.ql.exec.repl.ReplAck.LOAD_ACKNOWLEDGEMENT; +import static org.apache.hadoop.hive.ql.exec.repl.ReplAck.DUMP_ACKNOWLEDGEMENT; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertNotNull; @@ -315,8 +317,8 @@ public void testBasic() throws IOException { FileSystem fs = new Path(bootstrapDump.dumpLocation).getFileSystem(hconf); Path dumpPath = new Path(bootstrapDump.dumpLocation, ReplUtils.REPL_HIVE_BASE_DIR); - assertTrue(fs.exists(new Path(dumpPath, ReplUtils.DUMP_ACKNOWLEDGEMENT))); - assertTrue(fs.exists(new Path(dumpPath, ReplUtils.LOAD_ACKNOWLEDGEMENT))); + assertTrue(fs.exists(new Path(dumpPath, DUMP_ACKNOWLEDGEMENT.toString()))); + assertTrue(fs.exists(new Path(dumpPath, LOAD_ACKNOWLEDGEMENT.toString()))); verifyRun("SELECT * from " + replicatedDbName + ".unptned", unptn_data, driverMirror); verifyRun("SELECT a from " + replicatedDbName + ".ptned WHERE b=1", ptn_data_1, driverMirror); @@ -367,8 +369,8 @@ public void testBootstrapFailedDump() throws IOException { advanceDumpDir(); FileSystem fs = new Path(bootstrapDump.dumpLocation).getFileSystem(hconf); Path dumpPath = new Path(bootstrapDump.dumpLocation, ReplUtils.REPL_HIVE_BASE_DIR); - assertTrue(fs.exists(new Path(dumpPath, ReplUtils.DUMP_ACKNOWLEDGEMENT))); - assertTrue(fs.exists(new Path(dumpPath, ReplUtils.LOAD_ACKNOWLEDGEMENT))); + assertTrue(fs.exists(new Path(dumpPath, DUMP_ACKNOWLEDGEMENT.toString()))); + assertTrue(fs.exists(new Path(dumpPath, LOAD_ACKNOWLEDGEMENT.toString()))); verifyRun("SELECT * from " + replicatedDbName + ".unptned", unptnData, driverMirror); verifyRun("SELECT a from " + replicatedDbName + ".ptned WHERE b=1", ptnData1, driverMirror); @@ -452,7 +454,7 @@ public void testTaskCreationOptimization() throws Throwable { Path loadPath = new Path(dump.dumpLocation, ReplUtils.REPL_HIVE_BASE_DIR); //delete load ack to reload the same dump - loadPath.getFileSystem(hconf).delete(new Path(loadPath, ReplUtils.LOAD_ACKNOWLEDGEMENT), true); + loadPath.getFileSystem(hconf).delete(new Path(loadPath, LOAD_ACKNOWLEDGEMENT.toString()), true); loadAndVerify(dbNameReplica, dbName, dump.lastReplId); run("insert into table " + dbName + ".t2 partition(country='india') values ('delhi')", driver); @@ -466,7 +468,7 @@ public void testTaskCreationOptimization() throws Throwable { loadPath = new Path(dump.dumpLocation, ReplUtils.REPL_HIVE_BASE_DIR); //delete load ack to reload the same dump - loadPath.getFileSystem(hconf).delete(new Path(loadPath, ReplUtils.LOAD_ACKNOWLEDGEMENT), true); + loadPath.getFileSystem(hconf).delete(new Path(loadPath, LOAD_ACKNOWLEDGEMENT.toString()), true); loadAndVerify(dbNameReplica, dbName, dump.lastReplId); run("insert into table " + dbName + ".t2 partition(country='us') values ('sf')", driver); @@ -902,8 +904,8 @@ public void testIncrementalAdds() throws IOException { Tuple 
incrementalDump = incrementalLoadAndVerify(dbName, replDbName); FileSystem fs = new Path(bootstrapDump.dumpLocation).getFileSystem(hconf); Path dumpPath = new Path(incrementalDump.dumpLocation, ReplUtils.REPL_HIVE_BASE_DIR); - assertTrue(fs.exists(new Path(dumpPath, ReplUtils.DUMP_ACKNOWLEDGEMENT))); - assertTrue(fs.exists(new Path(dumpPath, ReplUtils.LOAD_ACKNOWLEDGEMENT))); + assertTrue(fs.exists(new Path(dumpPath, DUMP_ACKNOWLEDGEMENT.toString()))); + assertTrue(fs.exists(new Path(dumpPath, LOAD_ACKNOWLEDGEMENT.toString()))); // VERIFY tables and partitions on destination for equivalence. verifyRun("SELECT * from " + replDbName + ".unptned_empty", empty, driverMirror); @@ -1439,8 +1441,8 @@ public void testBootstrapWithDataInDumpDir() throws IOException { Path path = new Path(System.getProperty("test.warehouse.dir", "")); String tableRelativeSrcPath = dbName.toLowerCase()+".db" + File.separator + "unptned"; Path srcFileLocation = new Path(path, tableRelativeSrcPath + File.separator + unptnedFileName1); - String tgtFileRelativePath = ReplUtils.REPL_HIVE_BASE_DIR + File.separator + dbName.toLowerCase() + File.separator - + "unptned" + File.separator + EximUtil.DATA_PATH_NAME +File.separator + unptnedFileName1; + String tgtFileRelativePath = ReplUtils.REPL_HIVE_BASE_DIR + File.separator + EximUtil.DATA_PATH_NAME + + File.separator + dbName.toLowerCase() + File.separator + "unptned" +File.separator + unptnedFileName1; Path tgtFileLocation = new Path(dump.dumpLocation, tgtFileRelativePath); //A file in table at src location should be copied to $dumplocation/hive///data/ verifyChecksum(srcFileLocation, tgtFileLocation, true); @@ -1449,9 +1451,10 @@ public void testBootstrapWithDataInDumpDir() throws IOException { String partitionRelativeSrcPath = dbName.toLowerCase()+".db" + File.separator + "ptned" + File.separator + "b=1"; srcFileLocation = new Path(path, partitionRelativeSrcPath + File.separator + ptnedFileName1); - tgtFileRelativePath = ReplUtils.REPL_HIVE_BASE_DIR + File.separator + dbName.toLowerCase() + tgtFileRelativePath = ReplUtils.REPL_HIVE_BASE_DIR + File.separator + EximUtil.DATA_PATH_NAME + + File.separator + dbName.toLowerCase() + File.separator + "ptned" + File.separator + "b=1" + File.separator - + EximUtil.DATA_PATH_NAME +File.separator + ptnedFileName1; + + ptnedFileName1; tgtFileLocation = new Path(dump.dumpLocation, tgtFileRelativePath); //A partitioned file in table at src location should be copied to // $dumplocation/hive//
//data/ @@ -1723,7 +1726,8 @@ public void testIncrementalLoadWithOneFailedDump() throws IOException { Tuple incrementalDump = replDumpDb(dbName); //Remove the dump ack file, so that dump is treated as an invalid dump. - String ackFileRelativePath = ReplUtils.REPL_HIVE_BASE_DIR + File.separator + ReplUtils.DUMP_ACKNOWLEDGEMENT; + String ackFileRelativePath = ReplUtils.REPL_HIVE_BASE_DIR + File.separator + + DUMP_ACKNOWLEDGEMENT.toString(); Path dumpFinishedAckFilePath = new Path(incrementalDump.dumpLocation, ackFileRelativePath); Path tmpDumpFinishedAckFilePath = new Path(dumpFinishedAckFilePath.getParent(), "old_" + dumpFinishedAckFilePath.getName()); @@ -1809,7 +1813,7 @@ public void testIncrementalLoadWithPreviousDumpDeleteFailed() throws IOException FileSystem fs = FileSystem.get(fileToDelete.toUri(), hconf); fs.delete(fileToDelete, true); assertTrue(fs.exists(bootstrapDumpDir)); - assertTrue(fs.exists(new Path(bootstrapDumpDir, ReplUtils.DUMP_ACKNOWLEDGEMENT))); + assertTrue(fs.exists(new Path(bootstrapDumpDir, DUMP_ACKNOWLEDGEMENT.toString()))); loadAndVerify(replDbName, dbName, incrDump.lastReplId); diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosAcidTables.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosAcidTables.java index 2854045350..6a01c542a9 100644 --- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosAcidTables.java +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosAcidTables.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.hive.ql.parse; +import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdfs.MiniDFSCluster; @@ -32,10 +33,12 @@ import org.apache.hadoop.hive.ql.DriverFactory; import org.apache.hadoop.hive.ql.ErrorMsg; import org.apache.hadoop.hive.ql.IDriver; +import org.apache.hadoop.hive.ql.exec.repl.util.ReplUtils; import org.apache.hadoop.hive.ql.processors.CommandProcessorException; import org.apache.hadoop.hive.ql.session.SessionState; import org.apache.hadoop.hive.shims.Utils; +import org.apache.hadoop.security.UserGroupInformation; import org.junit.After; import org.junit.Assert; import org.junit.Before; @@ -51,7 +54,11 @@ import java.util.Map; import static org.apache.hadoop.hive.metastore.ReplChangeManager.SOURCE_OF_REPLICATION; +import static org.apache.hadoop.hive.ql.exec.repl.ReplAck.DUMP_ACKNOWLEDGEMENT; import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotEquals; +import static org.junit.Assert.assertTrue; /** * TestReplicationScenariosAcidTables - test replication for ACID tables. 
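Across the test changes above, the ReplUtils.DUMP_ACKNOWLEDGEMENT and LOAD_ACKNOWLEDGEMENT string constants are replaced by the ReplAck enum introduced later in this patch, whose toString() values are "_finished_dump" and "_finished_load". As a rough sketch (not part of the patch, reusing only names already visible in the TestReplicationScenarios hunks above), an acknowledgement check resolves to:

    // Sketch: how the ack assertions in the tests above resolve.
    Path hiveDumpPath = new Path(bootstrapDump.dumpLocation, ReplUtils.REPL_HIVE_BASE_DIR);
    FileSystem fs = hiveDumpPath.getFileSystem(hconf);
    // DUMP_ACKNOWLEDGEMENT.toString() == "_finished_dump", written once the dump completes;
    // LOAD_ACKNOWLEDGEMENT.toString() == "_finished_load", written once the load completes.
    assertTrue(fs.exists(new Path(hiveDumpPath, DUMP_ACKNOWLEDGEMENT.toString())));
    assertTrue(fs.exists(new Path(hiveDumpPath, LOAD_ACKNOWLEDGEMENT.toString())));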
@@ -653,4 +660,307 @@ public void testMultiDBTxn() throws Throwable { assertEquals("REPL LOAD * is not supported", e.getMessage()); } } + + @Test + public void testCheckPointingDataDumpFailure() throws Throwable { + //To force distcp copy + List dumpClause = Arrays.asList( + "'" + HiveConf.ConfVars.HIVE_EXEC_COPYFILE_MAXSIZE.varname + "'='1'", + "'" + HiveConf.ConfVars.HIVE_IN_TEST.varname + "'='false'", + "'" + HiveConf.ConfVars.HIVE_EXEC_COPYFILE_MAXNUMFILES.varname + "'='0'", + "'" + HiveConf.ConfVars.HIVE_DISTCP_DOAS_USER.varname + "'='" + + UserGroupInformation.getCurrentUser().getUserName() + "'"); + + WarehouseInstance.Tuple bootstrapDump = primary.run("use " + primaryDbName) + .run("CREATE TABLE t1(a string) STORED AS TEXTFILE") + .run("CREATE TABLE t2(a string) STORED AS TEXTFILE") + .run("insert into t1 values (1)") + .run("insert into t1 values (2)") + .run("insert into t1 values (3)") + .run("insert into t2 values (11)") + .run("insert into t2 values (21)") + .dump(primaryDbName); + + FileSystem fs = new Path(bootstrapDump.dumpLocation).getFileSystem(conf); + Path dumpPath = new Path(bootstrapDump.dumpLocation, ReplUtils.REPL_HIVE_BASE_DIR); + assertTrue(fs.exists(new Path(dumpPath, DUMP_ACKNOWLEDGEMENT.toString()))); + Path metadataPath = new Path(dumpPath, EximUtil.METADATA_PATH_NAME); + long modifiedTimeMetadata = fs.getFileStatus(metadataPath).getModificationTime(); + Path dataPath = new Path(dumpPath, EximUtil.DATA_PATH_NAME); + Path dbDataPath = new Path(dataPath, primaryDbName.toLowerCase()); + Path tablet1Path = new Path(dbDataPath, "t1"); + Path tablet2Path = new Path(dbDataPath, "t2"); + //Delete dump ack and t2 data, metadata should be rewritten, data should be same for t1 but rewritten for t2 + fs.delete(new Path(dumpPath, DUMP_ACKNOWLEDGEMENT.toString()), true); + assertFalse(fs.exists(new Path(dumpPath, DUMP_ACKNOWLEDGEMENT.toString()))); + FileStatus[] statuses = fs.listStatus(tablet2Path); + //Delete t2 data + fs.delete(statuses[0].getPath(), true); + long modifiedTimeTable1 = fs.getFileStatus(tablet1Path).getModificationTime(); + long modifiedTimeTable1CopyFile = fs.listStatus(tablet1Path)[0].getModificationTime(); + long modifiedTimeTable2 = fs.getFileStatus(tablet2Path).getModificationTime(); + //Do another dump. It should only dump table t2. 
Modification time of table t1 should be same while t2 is greater + WarehouseInstance.Tuple nextDump = primary.dump(primaryDbName, dumpClause); + assertEquals(nextDump.dumpLocation, bootstrapDump.dumpLocation); + assertTrue(fs.exists(new Path(dumpPath, DUMP_ACKNOWLEDGEMENT.toString()))); + assertEquals(modifiedTimeTable1, fs.getFileStatus(tablet1Path).getModificationTime()); + assertEquals(modifiedTimeTable1CopyFile, fs.listStatus(tablet1Path)[0].getModificationTime()); + assertTrue(modifiedTimeTable2 < fs.getFileStatus(tablet2Path).getModificationTime()); + assertTrue(modifiedTimeMetadata < fs.getFileStatus(metadataPath).getModificationTime()); + replica.load(replicatedDbName, primaryDbName) + .run("select * from " + replicatedDbName + ".t1") + .verifyResults(new String[] {"1", "2", "3"}) + .run("select * from " + replicatedDbName + ".t2") + .verifyResults(new String[]{"11", "21"}); + } + + @Test + public void testCheckPointingDataDumpFailureRegularCopy() throws Throwable { + WarehouseInstance.Tuple bootstrapDump = primary.run("use " + primaryDbName) + .run("CREATE TABLE t1(a string) STORED AS TEXTFILE") + .run("CREATE TABLE t2(a string) STORED AS TEXTFILE") + .run("insert into t1 values (1)") + .run("insert into t1 values (2)") + .run("insert into t1 values (3)") + .run("insert into t2 values (11)") + .run("insert into t2 values (21)") + .dump(primaryDbName); + FileSystem fs = new Path(bootstrapDump.dumpLocation).getFileSystem(conf); + Path dumpPath = new Path(bootstrapDump.dumpLocation, ReplUtils.REPL_HIVE_BASE_DIR); + assertTrue(fs.exists(new Path(dumpPath, DUMP_ACKNOWLEDGEMENT.toString()))); + Path metadataPath = new Path(dumpPath, EximUtil.METADATA_PATH_NAME); + long modifiedTimeMetadata = fs.getFileStatus(metadataPath).getModificationTime(); + Path dataPath = new Path(dumpPath, EximUtil.DATA_PATH_NAME); + Path dbPath = new Path(dataPath, primaryDbName.toLowerCase()); + Path tablet1Path = new Path(dbPath, "t1"); + Path tablet2Path = new Path(dbPath, "t2"); + //Delete dump ack and t2 data, metadata should be rewritten, data should be same for t1 but rewritten for t2 + fs.delete(new Path(dumpPath, DUMP_ACKNOWLEDGEMENT.toString()), true); + assertFalse(fs.exists(new Path(dumpPath, DUMP_ACKNOWLEDGEMENT.toString()))); + FileStatus[] statuses = fs.listStatus(tablet2Path); + //Delete t2 data + fs.delete(statuses[0].getPath(), true); + long modifiedTimeTable1 = fs.getFileStatus(tablet1Path).getModificationTime(); + long modifiedTimeTable1CopyFile = fs.listStatus(tablet1Path)[0].getModificationTime(); + long modifiedTimeTable2 = fs.getFileStatus(tablet2Path).getModificationTime(); + //Do another dump. It should only dump table t2. 
Modification time of table t1 should be same while t2 is greater + WarehouseInstance.Tuple nextDump = primary.dump(primaryDbName); + assertEquals(nextDump.dumpLocation, bootstrapDump.dumpLocation); + assertTrue(fs.exists(new Path(dumpPath, DUMP_ACKNOWLEDGEMENT.toString()))); + //File is copied again as we are using regular copy + assertTrue(modifiedTimeTable1 < fs.getFileStatus(tablet1Path).getModificationTime()); + assertTrue(modifiedTimeTable1CopyFile < fs.listStatus(tablet1Path)[0].getModificationTime()); + assertTrue(modifiedTimeTable2 < fs.getFileStatus(tablet2Path).getModificationTime()); + assertTrue(modifiedTimeMetadata < fs.getFileStatus(metadataPath).getModificationTime()); + replica.load(replicatedDbName, primaryDbName) + .run("select * from " + replicatedDbName + ".t1") + .verifyResults(new String[] {"1", "2", "3"}) + .run("select * from " + replicatedDbName + ".t2") + .verifyResults(new String[]{"11", "21"}); + } + + @Test + public void testCheckPointingWithSourceTableDataInserted() throws Throwable { + //To force distcp copy + List dumpClause = Arrays.asList( + "'" + HiveConf.ConfVars.HIVE_EXEC_COPYFILE_MAXSIZE.varname + "'='1'", + "'" + HiveConf.ConfVars.HIVE_IN_TEST.varname + "'='false'", + "'" + HiveConf.ConfVars.HIVE_EXEC_COPYFILE_MAXNUMFILES.varname + "'='0'", + "'" + HiveConf.ConfVars.HIVE_DISTCP_DOAS_USER.varname + "'='" + + UserGroupInformation.getCurrentUser().getUserName() + "'"); + + WarehouseInstance.Tuple bootstrapDump = primary.run("use " + primaryDbName) + .run("CREATE TABLE t1(a string) STORED AS TEXTFILE") + .run("CREATE TABLE t2(a string) STORED AS TEXTFILE") + .run("insert into t1 values (1)") + .run("insert into t1 values (2)") + .run("insert into t1 values (3)") + .run("insert into t2 values (11)") + .run("insert into t2 values (21)") + .dump(primaryDbName); + + FileSystem fs = new Path(bootstrapDump.dumpLocation).getFileSystem(conf); + Path dumpPath = new Path(bootstrapDump.dumpLocation, ReplUtils.REPL_HIVE_BASE_DIR); + assertTrue(fs.exists(new Path(dumpPath, DUMP_ACKNOWLEDGEMENT.toString()))); + Path dataPath = new Path(dumpPath, EximUtil.DATA_PATH_NAME); + Path dbPath = new Path(dataPath, primaryDbName.toLowerCase()); + Path tablet1Path = new Path(dbPath, "t1"); + Path tablet2Path = new Path(dbPath, "t2"); + long modifiedTimeTable2 = fs.getFileStatus(tablet2Path).getModificationTime(); + //Delete table 2 data + FileStatus[] statuses = fs.listStatus(tablet2Path); + //Delete t2 data + fs.delete(statuses[0].getPath(), true); + fs.delete(new Path(dumpPath, DUMP_ACKNOWLEDGEMENT.toString()), true); + long modifiedTimeTable1CopyFile = fs.listStatus(tablet1Path)[0].getModificationTime(); + + //Do another dump. It should only dump table t2. Also insert new data in existing tables. 
+ // New data should be there in target + primary.run("use " + primaryDbName) + .run("insert into t2 values (13)") + .run("insert into t2 values (24)") + .run("insert into t1 values (4)") + .dump(primaryDbName, dumpClause); + + replica.load(replicatedDbName, primaryDbName) + .run("use " + replicatedDbName) + .run("select * from t1") + .verifyResults(new String[]{"1", "2", "3", "4"}) + .run("select * from t2") + .verifyResults(new String[]{"11", "21", "13", "24"}); + assertEquals(modifiedTimeTable1CopyFile, fs.listStatus(tablet1Path)[0].getModificationTime()); + assertTrue(modifiedTimeTable2 < fs.getFileStatus(tablet2Path).getModificationTime()); + } + + @Test + public void testCheckPointingWithNewTablesAdded() throws Throwable { + //To force distcp copy + List dumpClause = Arrays.asList( + "'" + HiveConf.ConfVars.HIVE_EXEC_COPYFILE_MAXSIZE.varname + "'='1'", + "'" + HiveConf.ConfVars.HIVE_IN_TEST.varname + "'='false'", + "'" + HiveConf.ConfVars.HIVE_EXEC_COPYFILE_MAXNUMFILES.varname + "'='0'", + "'" + HiveConf.ConfVars.HIVE_DISTCP_DOAS_USER.varname + "'='" + + UserGroupInformation.getCurrentUser().getUserName() + "'"); + + WarehouseInstance.Tuple bootstrapDump = primary.run("use " + primaryDbName) + .run("CREATE TABLE t1(a string) STORED AS TEXTFILE") + .run("CREATE TABLE t2(a string) STORED AS TEXTFILE") + .run("insert into t1 values (1)") + .run("insert into t1 values (2)") + .run("insert into t1 values (3)") + .run("insert into t2 values (11)") + .run("insert into t2 values (21)") + .dump(primaryDbName); + + FileSystem fs = new Path(bootstrapDump.dumpLocation).getFileSystem(conf); + Path dumpPath = new Path(bootstrapDump.dumpLocation, ReplUtils.REPL_HIVE_BASE_DIR); + assertTrue(fs.exists(new Path(dumpPath, DUMP_ACKNOWLEDGEMENT.toString()))); + Path dataPath = new Path(dumpPath, EximUtil.DATA_PATH_NAME); + Path dbPath = new Path(dataPath, primaryDbName.toLowerCase()); + Path tablet1Path = new Path(dbPath, "t1"); + Path tablet2Path = new Path(dbPath, "t2"); + long modifiedTimeTable1 = fs.getFileStatus(tablet1Path).getModificationTime(); + long modifiedTimeTable2 = fs.getFileStatus(tablet2Path).getModificationTime(); + //Delete table 2 data + FileStatus[] statuses = fs.listStatus(tablet2Path); + fs.delete(statuses[0].getPath(), true); + fs.delete(new Path(dumpPath, DUMP_ACKNOWLEDGEMENT.toString()), true); + long modifiedTimeTable1CopyFile = fs.listStatus(tablet1Path)[0].getModificationTime(); + + // Do another dump. It should only dump table t2 and next table. + // Also insert new tables. 
New tables will be there in target + primary.run("use " + primaryDbName) + .run("insert into t2 values (13)") + .run("insert into t2 values (24)") + .run("create table t3(a string) STORED AS TEXTFILE") + .run("insert into t3 values (1)") + .run("insert into t3 values (2)") + .run("insert into t3 values (3)") + .dump(primaryDbName, dumpClause); + + replica.load(replicatedDbName, primaryDbName) + .run("use " + replicatedDbName) + .run("select * from t1") + .verifyResults(new String[]{"1", "2", "3"}) + .run("select * from t2") + .verifyResults(new String[]{"11", "21", "13", "24"}) + .run("show tables") + .verifyResults(new String[]{"t1", "t2", "t3"}) + .run("select * from t3") + .verifyResults(new String[]{"1", "2", "3"}); + assertEquals(modifiedTimeTable1, fs.getFileStatus(tablet1Path).getModificationTime()); + assertEquals(modifiedTimeTable1CopyFile, fs.listStatus(tablet1Path)[0].getModificationTime()); + assertTrue(modifiedTimeTable2 < fs.getFileStatus(tablet2Path).getModificationTime()); + } + + @Test + public void testCheckPointingWithSourceTableDeleted() throws Throwable { + //To force distcp copy + List dumpClause = Arrays.asList( + "'" + HiveConf.ConfVars.HIVE_EXEC_COPYFILE_MAXSIZE.varname + "'='1'", + "'" + HiveConf.ConfVars.HIVE_IN_TEST.varname + "'='false'", + "'" + HiveConf.ConfVars.HIVE_EXEC_COPYFILE_MAXNUMFILES.varname + "'='0'", + "'" + HiveConf.ConfVars.HIVE_DISTCP_DOAS_USER.varname + "'='" + + UserGroupInformation.getCurrentUser().getUserName() + "'"); + + WarehouseInstance.Tuple bootstrapDump = primary.run("use " + primaryDbName) + .run("CREATE TABLE t1(a string) STORED AS TEXTFILE") + .run("CREATE TABLE t2(a string) STORED AS TEXTFILE") + .run("insert into t1 values (1)") + .run("insert into t1 values (2)") + .run("insert into t1 values (3)") + .run("insert into t2 values (11)") + .run("insert into t2 values (21)") + .dump(primaryDbName); + + FileSystem fs = new Path(bootstrapDump.dumpLocation).getFileSystem(conf); + Path dumpPath = new Path(bootstrapDump.dumpLocation, ReplUtils.REPL_HIVE_BASE_DIR); + assertTrue(fs.exists(new Path(dumpPath, DUMP_ACKNOWLEDGEMENT.toString()))); + + + //Delete dump ack and t2 data, Also drop table. New data will be there in target + fs.delete(new Path(dumpPath, DUMP_ACKNOWLEDGEMENT.toString()), true); + assertFalse(fs.exists(new Path(dumpPath, DUMP_ACKNOWLEDGEMENT.toString()))); + Path dataPath = new Path(dumpPath, EximUtil.DATA_PATH_NAME); + Path dbPath = new Path(dataPath, primaryDbName.toLowerCase()); + Path tablet2Path = new Path(dbPath, "t2"); + FileStatus[] statuses = fs.listStatus(tablet2Path); + //Delete t2 data. + fs.delete(statuses[0].getPath(), true); + //Drop table t1. 
Target shouldn't have t1 table as metadata dump is rewritten + primary.run("use " + primaryDbName) + .run("drop table t1") + .dump(primaryDbName, dumpClause); + + replica.load(replicatedDbName, primaryDbName) + .run("use " + replicatedDbName) + .run("show tables") + .verifyResults(new String[]{"t2"}) + .run("select * from t2") + .verifyResults(new String[]{"11", "21"}); + } + + @Test + public void testCheckPointingMetadataDumpFailure() throws Throwable { + //To force distcp copy + List dumpClause = Arrays.asList( + "'" + HiveConf.ConfVars.HIVE_EXEC_COPYFILE_MAXSIZE.varname + "'='1'", + "'" + HiveConf.ConfVars.HIVE_IN_TEST.varname + "'='false'", + "'" + HiveConf.ConfVars.HIVE_EXEC_COPYFILE_MAXNUMFILES.varname + "'='0'", + "'" + HiveConf.ConfVars.HIVE_DISTCP_DOAS_USER.varname + "'='" + + UserGroupInformation.getCurrentUser().getUserName() + "'"); + + WarehouseInstance.Tuple bootstrapDump = primary.run("use " + primaryDbName) + .run("CREATE TABLE t1(a string) STORED AS TEXTFILE") + .run("CREATE TABLE t2(a string) STORED AS TEXTFILE") + .run("insert into t1 values (1)") + .run("insert into t1 values (2)") + .run("insert into t1 values (3)") + .run("insert into t2 values (11)") + .run("insert into t2 values (21)") + .dump(primaryDbName); + FileSystem fs = new Path(bootstrapDump.dumpLocation).getFileSystem(conf); + Path dumpPath = new Path(bootstrapDump.dumpLocation, ReplUtils.REPL_HIVE_BASE_DIR); + assertTrue(fs.exists(new Path(dumpPath, DUMP_ACKNOWLEDGEMENT.toString()))); + + //Delete dump ack and metadata ack, everything should be rewritten in a new dump dir + fs.delete(new Path(dumpPath, DUMP_ACKNOWLEDGEMENT.toString()), true); + fs.delete(new Path(dumpPath, "_dumpmetadata"), true); + assertFalse(fs.exists(new Path(dumpPath, DUMP_ACKNOWLEDGEMENT.toString()))); + //Insert new data + primary.run("insert into "+ primaryDbName +".t1 values (12)"); + primary.run("insert into "+ primaryDbName +".t1 values (13)"); + //Do another dump. 
It should dump everything in a new dump dir + // checkpointing will not be used + WarehouseInstance.Tuple nextDump = primary.dump(primaryDbName, dumpClause); + replica.load(replicatedDbName, primaryDbName) + .run("use " + replicatedDbName) + .run("select * from t2") + .verifyResults(new String[]{"11", "21"}) + .run("select * from t1") + .verifyResults(new String[]{"1", "2", "3", "12", "13"}); + assertNotEquals(nextDump.dumpLocation, bootstrapDump.dumpLocation); + dumpPath = new Path(nextDump.dumpLocation, ReplUtils.REPL_HIVE_BASE_DIR); + assertTrue(fs.exists(new Path(dumpPath, DUMP_ACKNOWLEDGEMENT.toString()))); + } } diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosAcrossInstances.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosAcrossInstances.java index 56b27a555e..33124c8f76 100644 --- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosAcrossInstances.java +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosAcrossInstances.java @@ -55,6 +55,7 @@ import java.util.stream.Collectors; import static org.apache.hadoop.hive.metastore.ReplChangeManager.SOURCE_OF_REPLICATION; +import static org.apache.hadoop.hive.ql.exec.repl.ReplAck.LOAD_ACKNOWLEDGEMENT; import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.CoreMatchers.is; import static org.hamcrest.MatcherAssert.assertThat; @@ -1093,13 +1094,13 @@ public void testIfBootstrapReplLoadFailWhenRetryAfterBootstrapComplete() throws // To retry with same dump delete the load ack new Path(tuple.dumpLocation).getFileSystem(conf).delete(new Path( - hiveDumpLocation, ReplUtils.LOAD_ACKNOWLEDGEMENT), true); + hiveDumpLocation, LOAD_ACKNOWLEDGEMENT.toString()), true); // Retry with same dump with which it was already loaded also fails. replica.loadFailure(replicatedDbName, primaryDbName); // To retry with same dump delete the load ack new Path(tuple.dumpLocation).getFileSystem(conf).delete(new Path( - hiveDumpLocation, ReplUtils.LOAD_ACKNOWLEDGEMENT), true); + hiveDumpLocation, LOAD_ACKNOWLEDGEMENT.toString()), true); // Retry from same dump when the database is empty is also not allowed. 
replica.run("drop table t1") .run("drop table t2") @@ -1344,7 +1345,7 @@ public void testMoveOptimizationIncrementalFailureAfterCopyReplace() throws Thro //delete load ack to reuse the dump new Path(tuple.dumpLocation).getFileSystem(conf).delete(new Path(tuple.dumpLocation + Path.SEPARATOR + ReplUtils.REPL_HIVE_BASE_DIR + Path.SEPARATOR - + ReplUtils.LOAD_ACKNOWLEDGEMENT), true); + + LOAD_ACKNOWLEDGEMENT.toString()), true); replica.load(replicatedDbName_CM, primaryDbName, withConfigs); replica.run("alter database " + replicatedDbName + " set DBPROPERTIES ('" + SOURCE_OF_REPLICATION + "' = '1,2,3')") @@ -1370,7 +1371,7 @@ public void testMoveOptimizationIncrementalFailureAfterCopy() throws Throwable { replica.load(replicatedDbName, primaryDbName, withConfigs); //delete load ack to reuse the dump new Path(bootstrapDump.dumpLocation).getFileSystem(conf).delete(new Path(bootstrapDump.dumpLocation - + Path.SEPARATOR + ReplUtils.REPL_HIVE_BASE_DIR + Path.SEPARATOR + ReplUtils.LOAD_ACKNOWLEDGEMENT), true); + + Path.SEPARATOR + ReplUtils.REPL_HIVE_BASE_DIR + Path.SEPARATOR + LOAD_ACKNOWLEDGEMENT.toString()), true); replica.load(replicatedDbName_CM, primaryDbName, withConfigs); replica.run("alter database " + replicatedDbName + " set DBPROPERTIES ('" + SOURCE_OF_REPLICATION + "' = '1,2,3')") .run("alter database " + replicatedDbName_CM + " set DBPROPERTIES ('" + SOURCE_OF_REPLICATION + "' = '1,2,3')"); @@ -1423,7 +1424,7 @@ public Boolean apply(NotificationEvent entry) { //delete load ack to reuse the dump new Path(tuple.dumpLocation).getFileSystem(conf).delete(new Path(tuple.dumpLocation + Path.SEPARATOR + ReplUtils.REPL_HIVE_BASE_DIR + Path.SEPARATOR - + ReplUtils.LOAD_ACKNOWLEDGEMENT), true); + + LOAD_ACKNOWLEDGEMENT.toString()), true); InjectableBehaviourObjectStore.setAddNotificationModifier(callerVerifier); diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosExternalTables.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosExternalTables.java index 132578991d..54b0dbf0b0 100644 --- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosExternalTables.java +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosExternalTables.java @@ -152,7 +152,7 @@ public void externalTableReplicationWithDefaultPaths() throws Throwable { .dump(primaryDbName, withClauseOptions); // verify that the external table info is written correctly for bootstrap - assertExternalFileInfo(Arrays.asList("t1", "t2"), tuple.dumpLocation, primaryDbName); + assertExternalFileInfo(Arrays.asList("t1", "t2"), tuple.dumpLocation, primaryDbName, false); @@ -184,7 +184,7 @@ public void externalTableReplicationWithDefaultPaths() throws Throwable { .dump(primaryDbName, withClauseOptions); // verify that the external table info is written correctly for incremental - assertExternalFileInfo(Arrays.asList("t1", "t2", "t3", "t4"), tuple.dumpLocation); + assertExternalFileInfo(Arrays.asList("t1", "t2", "t3", "t4"), tuple.dumpLocation, true); replica.load(replicatedDbName, primaryDbName, withClauseOptions) .run("use " + replicatedDbName) @@ -202,7 +202,7 @@ public void externalTableReplicationWithDefaultPaths() throws Throwable { .dumpWithCommand("repl dump " + primaryDbName); // verify that the external table info is written correctly for incremental - assertExternalFileInfo(Arrays.asList("t2", "t3", "t4"), tuple.dumpLocation); + assertExternalFileInfo(Arrays.asList("t2", "t3", 
"t4"), tuple.dumpLocation, true); } /** @@ -310,7 +310,7 @@ public void externalTableWithPartitions() throws Throwable { .run("insert into t2 partition(country='india') values ('bangalore')") .dump(primaryDbName, withClause); - assertExternalFileInfo(Collections.singletonList("t2"), tuple.dumpLocation, primaryDbName); + assertExternalFileInfo(Collections.singletonList("t2"), tuple.dumpLocation, primaryDbName, false); replica.load(replicatedDbName, primaryDbName, withClause) .run("use " + replicatedDbName) @@ -333,7 +333,7 @@ public void externalTableWithPartitions() throws Throwable { .run("insert into t2 partition(country='australia') values ('sydney')") .dump(primaryDbName, withClause); - assertExternalFileInfo(Collections.singletonList("t2"), tuple.dumpLocation); + assertExternalFileInfo(Collections.singletonList("t2"), tuple.dumpLocation, true); replica.load(replicatedDbName, primaryDbName, withClause) .run("use " + replicatedDbName) @@ -420,7 +420,7 @@ public void externalTableIncrementalReplication() throws Throwable { .run("alter table t1 add partition(country='us')") .dump(primaryDbName, withClause); - assertExternalFileInfo(Collections.singletonList("t1"), tuple.dumpLocation); + assertExternalFileInfo(Collections.singletonList("t1"), tuple.dumpLocation, true); // Add new data externally, to a partition, but under the partition level top directory // Also, it is added after dumping the events so data should not be seen at target after REPL LOAD. @@ -467,7 +467,7 @@ public void externalTableIncrementalReplication() throws Throwable { // Repl load with zero events but external tables location info should present. tuple = primary.dump(primaryDbName, withClause); - assertExternalFileInfo(Collections.singletonList("t1"), tuple.dumpLocation); + assertExternalFileInfo(Collections.singletonList("t1"), tuple.dumpLocation, true); replica.load(replicatedDbName, primaryDbName, withClause) .run("use " + replicatedDbName) @@ -547,7 +547,7 @@ public void bootstrapExternalTablesDuringIncrementalPhase() throws Throwable { .exists(new Path(tuple.dumpLocation, relativeExtInfoPath(null)))); // verify that the external table info is written correctly for incremental - assertExternalFileInfo(Arrays.asList("t2", "t3"), tuple.dumpLocation); + assertExternalFileInfo(Arrays.asList("t2", "t3"), tuple.dumpLocation, true); // _bootstrap directory should be created as bootstrap enabled on external tables. String hiveDumpLocation = tuple.dumpLocation + File.separator + ReplUtils.REPL_HIVE_BASE_DIR; @@ -762,7 +762,7 @@ public Table apply(@Nullable Table table) { } // Only table t2 should exist in the data location list file. - assertExternalFileInfo(Collections.singletonList("t2"), tupleInc.dumpLocation); + assertExternalFileInfo(Collections.singletonList("t2"), tupleInc.dumpLocation, true); // The newly inserted data "2" should be missing in table "t1". But, table t2 should exist and have // inserted data. 
@@ -917,20 +917,29 @@ public void replicationWithTableNameContainsKeywords() throws Throwable { return ReplicationTestUtils.externalTableWithClause(extTblBaseDir, bootstrapExtTbl, includeExtTbl); } - private void assertExternalFileInfo(List expected, String dumplocation) throws IOException { - assertExternalFileInfo(expected, dumplocation, null); + private void assertExternalFileInfo(List expected, String dumplocation, + boolean isIncremental) throws IOException { + assertExternalFileInfo(expected, dumplocation, null, isIncremental); } - private void assertExternalFileInfo(List expected, String dumplocation, String dbName) + private void assertExternalFileInfo(List expected, String dumplocation, String dbName, + boolean isIncremental) throws IOException { - Path externalTableInfoFile = new Path(dumplocation, relativeExtInfoPath(dbName)); + Path hivePath = new Path(dumplocation, ReplUtils.REPL_HIVE_BASE_DIR); + Path metadataPath = new Path(hivePath, EximUtil.METADATA_PATH_NAME); + Path externalTableInfoFile; + if (isIncremental) { + externalTableInfoFile = new Path(hivePath + relativeExtInfoPath(dbName)); + } else { + externalTableInfoFile = new Path(metadataPath + relativeExtInfoPath(dbName)); + } ReplicationTestUtils.assertExternalFileInfo(primary, expected, externalTableInfoFile); } - private String relativeExtInfoPath(String dbName) { + private String relativeExtInfoPath(String dbName) { if (dbName == null) { - return ReplUtils.REPL_HIVE_BASE_DIR + File.separator + FILE_NAME; + return File.separator + FILE_NAME; } else { - return ReplUtils.REPL_HIVE_BASE_DIR + File.separator + dbName.toLowerCase() + File.separator + FILE_NAME; + return File.separator + dbName.toLowerCase() + File.separator + FILE_NAME; } } } diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestTableLevelReplicationScenarios.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestTableLevelReplicationScenarios.java index 78251f2423..93e24ef852 100644 --- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestTableLevelReplicationScenarios.java +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestTableLevelReplicationScenarios.java @@ -20,6 +20,7 @@ import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.PathFilter; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.metastore.conf.MetastoreConf; import org.apache.hadoop.hive.metastore.messaging.json.gzip.GzipJSONMessageEncoder; @@ -242,7 +243,12 @@ private void verifyBootstrapDirInIncrementalDump(String dumpLocation, String[] b // Check if the DB dump path have any tables other than the ones listed in bootstrappedTables. 
Path dbPath = new Path(dumpPath, primaryDbName); - FileStatus[] fileStatuses = primary.miniDFSCluster.getFileSystem().listStatus(dbPath); + FileStatus[] fileStatuses = primary.miniDFSCluster.getFileSystem().listStatus(dbPath, new PathFilter() { + @Override + public boolean accept(Path path) { + return !path.getName().equalsIgnoreCase(EximUtil.DATA_PATH_NAME); + } + }); Assert.assertEquals(fileStatuses.length, bootstrappedTables.length); // Eg: _bootstrap//t2, _bootstrap//t3 etc @@ -500,13 +506,14 @@ public void testBootstrapExternalTablesWithIncludeAndExcludeList() throws Throwa .dump(replPolicy, dumpWithClause); String hiveDumpDir = tuple.dumpLocation + File.separator + ReplUtils.REPL_HIVE_BASE_DIR; + Path metaDataPath = new Path(hiveDumpDir, EximUtil.METADATA_PATH_NAME); // the _external_tables_file info should be created as external tables are to be replicated. Assert.assertTrue(primary.miniDFSCluster.getFileSystem() - .exists(new Path(new Path(hiveDumpDir, primaryDbName.toLowerCase()), FILE_NAME))); + .exists(new Path(new Path(metaDataPath, primaryDbName.toLowerCase()), FILE_NAME))); // Verify that the external table info contains only table "a2". ReplicationTestUtils.assertExternalFileInfo(primary, Arrays.asList("a2"), - new Path(new Path(hiveDumpDir, primaryDbName.toLowerCase()), FILE_NAME)); + new Path(new Path(metaDataPath, primaryDbName.toLowerCase()), FILE_NAME)); replica.load(replicatedDbName, replPolicy, loadWithClause) .run("use " + replicatedDbName) diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/ExportTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/ExportTask.java index 56f0c9308e..d3e94134ab 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/ExportTask.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/ExportTask.java @@ -46,7 +46,7 @@ public int execute() { TableExport.Paths exportPaths = new TableExport.Paths( work.getAstRepresentationForErrorMsg(), work.getExportRootDir(), conf, false); Hive db = getHive(); - LOG.debug("Exporting data to: {}", exportPaths.exportRootDir()); + LOG.debug("Exporting data to: {}", exportPaths.metadataExportRootDir()); work.acidPostProcess(db); TableExport tableExport = new TableExport(exportPaths, work.getTableSpec(), work.getReplicationSpec(), db, null, conf, work.getMmContext()); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/DirCopyWork.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/DirCopyWork.java index efef052268..46f9bb3add 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/DirCopyWork.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/DirCopyWork.java @@ -28,6 +28,7 @@ Explain.Level.DEFAULT, Explain.Level.EXTENDED }) public class DirCopyWork implements Serializable { + private static final long serialVersionUID = 1L; private final Path fullyQualifiedSourcePath; private final Path fullyQualifiedTargetPath; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplAck.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplAck.java new file mode 100644 index 0000000000..db8db5f8e7 --- /dev/null +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplAck.java @@ -0,0 +1,35 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.ql.exec.repl;
+
+/**
+ * ReplAck, used for repl acknowledgement constants.
+ */
+public enum ReplAck {
+  DUMP_ACKNOWLEDGEMENT("_finished_dump"),
+  LOAD_ACKNOWLEDGEMENT("_finished_load");
+  private String ack;
+  ReplAck(String ack) {
+    this.ack = ack;
+  }
+
+  @Override
+  public String toString() {
+    return ack;
+  }
+}
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplDumpTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplDumpTask.java
index 69f6ffef5a..2e0af02094 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplDumpTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplDumpTask.java
@@ -93,6 +93,7 @@ import java.util.ArrayList;
 import java.util.concurrent.TimeUnit;
 import static org.apache.hadoop.hive.ql.exec.repl.ReplExternalTables.Writer;
+import static org.apache.hadoop.hive.ql.exec.repl.ReplAck.LOAD_ACKNOWLEDGEMENT;
 public class ReplDumpTask extends Task<ReplDumpWork> implements Serializable {
   private static final long serialVersionUID = 1L;
@@ -135,20 +136,20 @@ public int execute() {
     Path dumpRoot = new Path(conf.getVar(HiveConf.ConfVars.REPLDIR),
         Base64.getEncoder().encodeToString(work.dbNameOrPattern.toLowerCase()
             .getBytes(StandardCharsets.UTF_8.name())));
-    Path previousHiveDumpPath = getPreviousDumpMetadataPath(dumpRoot);
+    Path previousValidHiveDumpPath = getPreviousValidDumpMetadataPath(dumpRoot);
     //If no previous dump is present or previous dump is already loaded, proceed with the dump operation.
-    if (shouldDump(previousHiveDumpPath)) {
-      Path currentDumpPath = new Path(dumpRoot, getNextDumpDir());
+    if (shouldDump(previousValidHiveDumpPath)) {
+      Path currentDumpPath = getCurrentDumpPath(dumpRoot);
       Path hiveDumpRoot = new Path(currentDumpPath, ReplUtils.REPL_HIVE_BASE_DIR);
       DumpMetaData dmd = new DumpMetaData(hiveDumpRoot, conf);
       // Initialize ReplChangeManager instance since we will require it to encode file URI.
ReplChangeManager.getInstance(conf); Path cmRoot = new Path(conf.getVar(HiveConf.ConfVars.REPLCMDIR)); Long lastReplId; - if (previousHiveDumpPath == null) { + if (previousValidHiveDumpPath == null) { lastReplId = bootStrapDump(hiveDumpRoot, dmd, cmRoot, hiveDb); } else { - work.setEventFrom(getEventFromPreviousDumpMetadata(previousHiveDumpPath)); + work.setEventFrom(getEventFromPreviousDumpMetadata(previousValidHiveDumpPath)); lastReplId = incrementalDump(hiveDumpRoot, dmd, cmRoot, hiveDb); } work.setResultValues(Arrays.asList(currentDumpPath.toUri().toString(), String.valueOf(lastReplId))); @@ -166,6 +167,16 @@ public int execute() { return 0; } + private Path getCurrentDumpPath(Path dumpRoot) throws IOException { + Path previousDumpPath = getPreviousDumpPath(dumpRoot); + if (previousDumpPath != null && !validDump(previousDumpPath) && shouldResumePreviousDump(previousDumpPath)) { + //Resume previous dump + return previousDumpPath; + } else { + return new Path(dumpRoot, getNextDumpDir()); + } + } + private void initiateDataCopyTasks() throws SemanticException, IOException { TaskTracker taskTracker = new TaskTracker(conf.getIntVar(HiveConf.ConfVars.REPL_APPROX_MAX_LOAD_TASKS)); List> childTasks = new ArrayList<>(); @@ -183,7 +194,8 @@ private void initiateDataCopyTasks() throws SemanticException, IOException { private void finishRemainingTasks() throws SemanticException, IOException { prepareReturnValues(work.getResultValues()); Path dumpAckFile = new Path(work.getCurrentDumpPath(), - ReplUtils.REPL_HIVE_BASE_DIR + File.separator + ReplUtils.DUMP_ACKNOWLEDGEMENT); + ReplUtils.REPL_HIVE_BASE_DIR + File.separator + + ReplAck.DUMP_ACKNOWLEDGEMENT.toString()); Utils.create(dumpAckFile, conf); deleteAllPreviousDumpMeta(work.getCurrentDumpPath()); } @@ -233,7 +245,7 @@ private Long getEventFromPreviousDumpMetadata(Path previousDumpPath) throws Sema return 0L; } - private Path getPreviousDumpMetadataPath(Path dumpRoot) throws IOException { + private Path getPreviousValidDumpMetadataPath(Path dumpRoot) throws IOException { FileStatus latestValidStatus = null; FileSystem fs = dumpRoot.getFileSystem(conf); if (fs.exists(dumpRoot)) { @@ -241,8 +253,8 @@ private Path getPreviousDumpMetadataPath(Path dumpRoot) throws IOException { for (FileStatus status : statuses) { LOG.info("Evaluating previous dump dir path:{}", status.getPath()); if (latestValidStatus == null) { - latestValidStatus = validDump(fs, status.getPath()) ? status : null; - } else if (validDump(fs, status.getPath()) + latestValidStatus = validDump(status.getPath()) ? 
status : null; + } else if (validDump(status.getPath()) && status.getModificationTime() > latestValidStatus.getModificationTime()) { latestValidStatus = status; } @@ -254,10 +266,14 @@ private Path getPreviousDumpMetadataPath(Path dumpRoot) throws IOException { return latestDumpDir; } - private boolean validDump(FileSystem fs, Path dumpDir) throws IOException { + private boolean validDump(Path dumpDir) throws IOException { //Check if it was a successful dump - Path hiveDumpDir = new Path(dumpDir, ReplUtils.REPL_HIVE_BASE_DIR); - return fs.exists(new Path(hiveDumpDir, ReplUtils.DUMP_ACKNOWLEDGEMENT)); + if (dumpDir != null) { + FileSystem fs = dumpDir.getFileSystem(conf); + Path hiveDumpDir = new Path(dumpDir, ReplUtils.REPL_HIVE_BASE_DIR); + return fs.exists(new Path(hiveDumpDir, ReplAck.DUMP_ACKNOWLEDGEMENT.toString())); + } + return false; } private boolean shouldDump(Path previousDumpPath) throws IOException { @@ -267,7 +283,7 @@ private boolean shouldDump(Path previousDumpPath) throws IOException { return true; } else { FileSystem fs = previousDumpPath.getFileSystem(conf); - return fs.exists(new Path(previousDumpPath, ReplUtils.LOAD_ACKNOWLEDGEMENT)); + return fs.exists(new Path(previousDumpPath, LOAD_ACKNOWLEDGEMENT.toString())); } } @@ -471,8 +487,10 @@ private Long incrementalDump(Path dumpRoot, DumpMetaData dmd, Path cmRoot, Hive // Dump the table to be bootstrapped if required. if (shouldBootstrapDumpTable(table)) { HiveWrapper.Tuple
tableTuple = new HiveWrapper(hiveDb, dbName).table(table); + Path dbDataRoot = new Path(dbRoot, EximUtil.DATA_PATH_NAME); managedTableCopyPaths.addAll( - dumpTable(dbName, tableName, validTxnList, dbRoot, dumpRoot, bootDumpBeginReplId, + dumpTable(dbName, tableName, validTxnList, + dbRoot, dbDataRoot, bootDumpBeginReplId, hiveDb, tableTuple)); } if (tableList != null && isTableSatifiesConfig(table)) { @@ -611,16 +629,20 @@ Long bootStrapDump(Path dumpRoot, DumpMetaData dmd, Path cmRoot, Hive hiveDb) List tableList; LOG.info("Bootstrap Dump for db {}", work.dbNameOrPattern); + List extTableCopyWorks = new ArrayList<>(); + List managedTableCopyPaths = new ArrayList<>(); long timeoutInMs = HiveConf.getTimeVar(conf, HiveConf.ConfVars.REPL_BOOTSTRAP_DUMP_OPEN_TXN_TIMEOUT, TimeUnit.MILLISECONDS); long waitUntilTime = System.currentTimeMillis() + timeoutInMs; - String validTxnList = getValidTxnListForReplDump(hiveDb, waitUntilTime); - List extTableCopyWorks = new ArrayList<>(); - List managedTableCopyPaths = new ArrayList<>(); + Path metadataPath = new Path(dumpRoot, EximUtil.METADATA_PATH_NAME); + if (shouldResumePreviousDump(dmd)) { + //clear the metadata. We need to rewrite the metadata as the write id list will be changed + //We can't reuse the previous write id as it might be invalid due to compaction + metadataPath.getFileSystem(conf).delete(metadataPath, true); + } for (String dbName : Utils.matchesDb(hiveDb, work.dbNameOrPattern)) { LOG.debug("Dumping db: " + dbName); - // TODO : Currently we don't support separate table list for each database. tableList = work.replScope.includeAllTables() ? null : new ArrayList<>(); Database db = hiveDb.getDatabase(dbName); @@ -634,8 +656,9 @@ Long bootStrapDump(Path dumpRoot, DumpMetaData dmd, Path cmRoot, Hive hiveDb) Utils.getAllTables(hiveDb, dbName, work.replScope).size(), hiveDb.getAllFunctions().size()); replLogger.startLog(); - Path dbRoot = dumpDbMetadata(dbName, dumpRoot, bootDumpBeginReplId, hiveDb); - dumpFunctionMetadata(dbName, dumpRoot, hiveDb); + Path dbRoot = dumpDbMetadata(dbName, metadataPath, bootDumpBeginReplId, hiveDb); + Path dbDataRoot = new Path(new Path(dumpRoot, EximUtil.DATA_PATH_NAME), dbName); + dumpFunctionMetadata(dbName, dbRoot, hiveDb); String uniqueKey = Utils.setDbBootstrapDumpState(hiveDb, dbName); Exception caught = null; @@ -653,7 +676,8 @@ Long bootStrapDump(Path dumpRoot, DumpMetaData dmd, Path cmRoot, Hive hiveDb) LOG.debug("Adding table {} to external tables list", tblName); extTableLocations.addAll(writer.dataLocationDump(tableTuple.object)); } - managedTableCopyPaths.addAll(dumpTable(dbName, tblName, validTxnList, dbRoot, dumpRoot, bootDumpBeginReplId, + managedTableCopyPaths.addAll(dumpTable(dbName, tblName, validTxnList, dbRoot, dbDataRoot, + bootDumpBeginReplId, hiveDb, tableTuple)); } catch (InvalidTableException te) { // Bootstrap dump shouldn't fail if the table is dropped/renamed while dumping it. 
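Taken together, getCurrentDumpPath and the metadata reset in bootStrapDump implement the checkpointing exercised by the new tests: a previous dump directory without the _finished_dump ack is reused, its metadata subtree is rewritten, and data files that distcp finds already copied keep their modification time. A condensed restatement of the dump-path decision (not additional patch code, using only methods that appear in this patch):

    // Condensed restatement of the dump-path selection above.
    Path previousDumpPath = getPreviousDumpPath(dumpRoot);        // latest dump dir by modification time
    boolean resume = previousDumpPath != null
        && !validDump(previousDumpPath)                           // no _finished_dump ack yet
        && shouldResumePreviousDump(previousDumpPath);            // its DumpMetaData is readable
    Path currentDumpPath = resume ? previousDumpPath : new Path(dumpRoot, getNextDumpDir());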
@@ -677,11 +701,11 @@ Long bootStrapDump(Path dumpRoot, DumpMetaData dmd, Path cmRoot, Hive hiveDb) throw e; } else { LOG.error("failed to reset the db state for " + uniqueKey - + " on failure of repl dump", e); + + " on failure of repl dump", e); throw caught; } } - if(caught != null) { + if (caught != null) { throw caught; } } @@ -689,21 +713,36 @@ Long bootStrapDump(Path dumpRoot, DumpMetaData dmd, Path cmRoot, Hive hiveDb) } Long bootDumpEndReplId = currentNotificationId(hiveDb); LOG.info("Preparing to return {},{}->{}", - dumpRoot.toUri(), bootDumpBeginReplId, bootDumpEndReplId); + dumpRoot.toUri(), bootDumpBeginReplId, bootDumpEndReplId); dmd.setDump(DumpType.BOOTSTRAP, bootDumpBeginReplId, bootDumpEndReplId, cmRoot); dmd.write(); + work.setDirCopyIterator(extTableCopyWorks.iterator()); work.setManagedTableCopyPathIterator(managedTableCopyPaths.iterator()); return bootDumpBeginReplId; } + private boolean shouldResumePreviousDump(DumpMetaData dumpMetaData) { + try { + return dumpMetaData.getEventFrom() != null; + } catch (Exception e) { + LOG.info("No previous dump present"); + return false; + } + } + + private boolean shouldResumePreviousDump(Path dumpPath) { + Path hiveDumpPath = new Path(dumpPath, ReplUtils.REPL_HIVE_BASE_DIR); + return shouldResumePreviousDump(new DumpMetaData(hiveDumpPath, conf)); + } + long currentNotificationId(Hive hiveDb) throws TException { return hiveDb.getMSC().getCurrentNotificationEventId().getEventId(); } - Path dumpDbMetadata(String dbName, Path dumpRoot, long lastReplId, Hive hiveDb) throws Exception { - Path dbRoot = getBootstrapDbRoot(dumpRoot, dbName, false); + Path dumpDbMetadata(String dbName, Path metadataRoot, long lastReplId, Hive hiveDb) throws Exception { // TODO : instantiating FS objects are generally costly. Refactor + Path dbRoot = getBootstrapDbRoot(metadataRoot, dbName, false); FileSystem fs = dbRoot.getFileSystem(conf); Path dumpPath = new Path(dbRoot, EximUtil.METADATA_NAME); HiveWrapper.Tuple database = new HiveWrapper(hiveDb, dbName, lastReplId).database(); @@ -711,12 +750,13 @@ Path dumpDbMetadata(String dbName, Path dumpRoot, long lastReplId, Hive hiveDb) return dbRoot; } - List dumpTable(String dbName, String tblName, String validTxnList, Path dbRoot, Path dumproot, - long lastReplId, Hive hiveDb, HiveWrapper.Tuple
<Table> tuple) throws Exception { + List dumpTable(String dbName, String tblName, String validTxnList, Path dbRootMetadata, + Path dbRootData, long lastReplId, Hive hiveDb, + HiveWrapper.Tuple<Table>
tuple) throws Exception { LOG.info("Bootstrap Dump for table " + tblName); TableSpec tableSpec = new TableSpec(tuple.object); TableExport.Paths exportPaths = - new TableExport.Paths(work.astRepresentationForErrorMsg, dbRoot, tblName, conf, true); + new TableExport.Paths(work.astRepresentationForErrorMsg, dbRootMetadata, dbRootData, tblName, conf, true); String distCpDoAsUser = conf.getVar(HiveConf.ConfVars.HIVE_DISTCP_DOAS_USER); tuple.replicationSpec.setIsReplace(true); // by default for all other objects this is false if (AcidUtils.isTransactionalTable(tableSpec.tableHandle)) { @@ -827,8 +867,26 @@ private String getNextDumpDir() { } } - void dumpFunctionMetadata(String dbName, Path dumpRoot, Hive hiveDb) throws Exception { - Path functionsRoot = new Path(new Path(dumpRoot, dbName), ReplUtils.FUNCTIONS_ROOT_DIR_NAME); + private Path getPreviousDumpPath(Path dumpRoot) throws IOException { + FileSystem fs = dumpRoot.getFileSystem(conf); + if (fs.exists(dumpRoot)) { + FileStatus[] statuses = fs.listStatus(dumpRoot); + if (statuses.length > 0) { + FileStatus latestValidStatus = statuses[0]; + for (FileStatus status : statuses) { + LOG.info("Evaluating previous dump dir path:{}", status.getPath()); + if (status.getModificationTime() > latestValidStatus.getModificationTime()) { + latestValidStatus = status; + } + } + return latestValidStatus.getPath(); + } + } + return null; + } + + void dumpFunctionMetadata(String dbName, Path dbMetadataRoot, Hive hiveDb) throws Exception { + Path functionsRoot = new Path(dbMetadataRoot, ReplUtils.FUNCTIONS_ROOT_DIR_NAME); List functionNames = hiveDb.getFunctions(dbName, "*"); for (String functionName : functionNames) { HiveWrapper.Tuple tuple = functionTuple(functionName, dbName, hiveDb); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplLoadTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplLoadTask.java index 3427b59e67..a5935552c4 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplLoadTask.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplLoadTask.java @@ -45,7 +45,6 @@ import org.apache.hadoop.hive.ql.exec.repl.bootstrap.load.util.Context; import org.apache.hadoop.hive.ql.exec.repl.incremental.IncrementalLoadTasksBuilder; import org.apache.hadoop.hive.ql.exec.repl.util.AddDependencyToLeaves; -import org.apache.hadoop.hive.ql.exec.repl.util.ReplUtils; import org.apache.hadoop.hive.ql.exec.repl.util.TaskTracker; import org.apache.hadoop.hive.ql.exec.util.DAGTraversal; import org.apache.hadoop.hive.ql.metadata.Hive; @@ -65,6 +64,7 @@ import java.util.Map; import static org.apache.hadoop.hive.ql.exec.repl.bootstrap.load.LoadDatabase.AlterDatabase; +import static org.apache.hadoop.hive.ql.exec.repl.ReplAck.LOAD_ACKNOWLEDGEMENT; public class ReplLoadTask extends Task implements Serializable { private final static int ZERO_TASKS = 0; @@ -316,7 +316,7 @@ private void createReplLoadCompleteAckTask() { || (!work.isIncrementalLoad() && !work.hasBootstrapLoadTasks())) { //All repl load tasks are executed and status is 0, create the task to add the acknowledgement AckWork replLoadAckWork = new AckWork( - new Path(work.dumpDirectory, ReplUtils.LOAD_ACKNOWLEDGEMENT)); + new Path(work.dumpDirectory, LOAD_ACKNOWLEDGEMENT.toString())); Task loadAckWorkTask = TaskFactory.get(replLoadAckWork, conf); if (this.childTasks.isEmpty()) { this.childTasks.add(loadAckWorkTask); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplLoadWork.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplLoadWork.java index 
474d8c232c..56efa32cb6 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplLoadWork.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplLoadWork.java @@ -28,6 +28,7 @@ import org.apache.hadoop.hive.ql.exec.repl.incremental.IncrementalLoadEventsIterator; import org.apache.hadoop.hive.ql.exec.repl.incremental.IncrementalLoadTasksBuilder; import org.apache.hadoop.hive.ql.exec.repl.util.ReplUtils; +import org.apache.hadoop.hive.ql.parse.EximUtil; import org.apache.hadoop.hive.ql.plan.Explain; import org.apache.hadoop.hive.ql.session.LineageState; import org.apache.hadoop.hive.ql.exec.Task; @@ -91,8 +92,10 @@ public ReplLoadWork(HiveConf hiveConf, String dumpDirectory, this.constraintsIterator = null; } } else { - this.bootstrapIterator = new BootstrapEventsIterator(dumpDirectory, dbNameToLoadIn, true, hiveConf); - this.constraintsIterator = new ConstraintEventsIterator(dumpDirectory, hiveConf); + this.bootstrapIterator = new BootstrapEventsIterator(new Path(dumpDirectory, EximUtil.METADATA_PATH_NAME) + .toString(), dbNameToLoadIn, true, hiveConf); + this.constraintsIterator = new ConstraintEventsIterator( + new Path(dumpDirectory, EximUtil.METADATA_PATH_NAME).toString(), hiveConf); incrementalLoadTasksBuilder = null; } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/events/TableEvent.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/events/TableEvent.java index 10732b0f69..05ef274449 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/events/TableEvent.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/events/TableEvent.java @@ -42,4 +42,6 @@ Licensed to the Apache Software Foundation (ASF) under one * Exposing the FileSystem implementation outside which is what it should NOT do. 
*/ Path metadataPath(); + + Path dataPath(); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/events/filesystem/BootstrapEventsIterator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/events/filesystem/BootstrapEventsIterator.java index 1af6a4c637..5bbe20c8c6 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/events/filesystem/BootstrapEventsIterator.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/events/filesystem/BootstrapEventsIterator.java @@ -23,7 +23,6 @@ import org.apache.hadoop.hive.ql.exec.repl.bootstrap.load.ReplicationState; import org.apache.hadoop.hive.ql.exec.repl.bootstrap.events.BootstrapEvent; import org.apache.hadoop.hive.ql.exec.repl.util.ReplUtils; -import org.apache.hadoop.hive.ql.parse.EximUtil; import org.apache.hadoop.hive.ql.parse.repl.load.log.BootstrapLoadLogger; import org.apache.hadoop.hive.ql.parse.repl.ReplLogger; @@ -82,8 +81,11 @@ public BootstrapEventsIterator(String dumpDirectory, String dbNameToLoadIn, bool throws IOException { Path path = new Path(dumpDirectory); FileSystem fileSystem = path.getFileSystem(hiveConf); + if (!fileSystem.exists(path)) { + throw new IllegalArgumentException("No data to load in path " + dumpDirectory); + } FileStatus[] fileStatuses = - fileSystem.listStatus(new Path(dumpDirectory), ReplUtils.getBootstrapDirectoryFilter(fileSystem)); + fileSystem.listStatus(path, ReplUtils.getBootstrapDirectoryFilter(fileSystem)); if ((fileStatuses == null) || (fileStatuses.length == 0)) { throw new IllegalArgumentException("No data to load in path " + dumpDirectory); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/events/filesystem/DatabaseEventsIterator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/events/filesystem/DatabaseEventsIterator.java index 72baee6881..a311f7ae22 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/events/filesystem/DatabaseEventsIterator.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/events/filesystem/DatabaseEventsIterator.java @@ -57,7 +57,7 @@ this.hiveConf = hiveConf; FileSystem fileSystem = dbLevelPath.getFileSystem(hiveConf); // this is only there for the use case where we are doing table only replication and not database level - if (!fileSystem.exists(new Path(dbLevelPath + Path.SEPARATOR + EximUtil.METADATA_NAME))) { + if (!fileSystem.exists(new Path(dbLevelPath, EximUtil.METADATA_NAME))) { databaseEventProcessed = true; } @@ -129,7 +129,8 @@ public boolean hasNext() { continue; } if (next.getPath().toString().endsWith(EximUtil.METADATA_NAME)) { - String replacedString = next.getPath().toString().replace(dbLevelPath.toString(), ""); + String replacedString = next.getPath().toString() + .replace(dbLevelPath.toString(), ""); List filteredNames = Arrays.stream(replacedString.split(Path.SEPARATOR)) .filter(StringUtils::isNotBlank) .collect(Collectors.toList()); @@ -174,7 +175,15 @@ public BootstrapEvent next() { LOG.debug("functions directory: {}", next.toString()); return postProcessing(new FSFunctionEvent(next)); } - return postProcessing(new FSTableEvent(hiveConf, next.toString())); + return postProcessing(new FSTableEvent(hiveConf, next.toString(), + new Path(getDbLevelDataPath(), next.getName()).toString())); + } + + private Path getDbLevelDataPath() { + if (dbLevelPath.toString().contains(Path.SEPARATOR + ReplUtils.INC_BOOTSTRAP_ROOT_DIR_NAME + Path.SEPARATOR)) { + return new Path(dbLevelPath, EximUtil.DATA_PATH_NAME); + } + return new 
Path(new Path(dbLevelPath.getParent().getParent(), EximUtil.DATA_PATH_NAME), dbLevelPath.getName()); } private BootstrapEvent postProcessing(BootstrapEvent bootstrapEvent) { @@ -187,11 +196,14 @@ private BootstrapEvent postProcessing(BootstrapEvent bootstrapEvent) { private BootstrapEvent eventForReplicationState() { if (replicationState.partitionState != null) { BootstrapEvent - bootstrapEvent = new FSPartitionEvent(hiveConf, previous.toString(), replicationState); + bootstrapEvent = new FSPartitionEvent(hiveConf, previous.toString(), + new Path(getDbLevelDataPath(), previous.getName()).toString(), + replicationState); replicationState = null; return bootstrapEvent; } else if (replicationState.lastTableReplicated != null) { - FSTableEvent event = new FSTableEvent(hiveConf, previous.toString()); + FSTableEvent event = new FSTableEvent(hiveConf, previous.toString(), + new Path(new Path(dbLevelPath, EximUtil.DATA_PATH_NAME), previous.getName()).toString()); replicationState = null; return event; } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/events/filesystem/FSPartitionEvent.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/events/filesystem/FSPartitionEvent.java index a79f5b7123..2d8240848a 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/events/filesystem/FSPartitionEvent.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/events/filesystem/FSPartitionEvent.java @@ -34,9 +34,9 @@ private final ReplicationState replicationState; private final TableEvent tableEvent; - FSPartitionEvent(HiveConf hiveConf, String metadataDir, + FSPartitionEvent(HiveConf hiveConf, String metadataDir, String dataDir, ReplicationState replicationState) { - tableEvent = new FSTableEvent(hiveConf, metadataDir); + tableEvent = new FSTableEvent(hiveConf, metadataDir, dataDir); this.replicationState = replicationState; } @@ -87,4 +87,9 @@ public boolean shouldNotReplicate() { public Path metadataPath() { return tableEvent.metadataPath(); } + + @Override + public Path dataPath() { + return tableEvent.dataPath(); + } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/events/filesystem/FSTableEvent.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/events/filesystem/FSTableEvent.java index 6d38c0309f..cd3d619332 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/events/filesystem/FSTableEvent.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/events/filesystem/FSTableEvent.java @@ -52,16 +52,19 @@ import static org.apache.hadoop.hive.ql.util.HiveStrictManagedMigration.getHiveUpdater; public class FSTableEvent implements TableEvent { - private final Path fromPath; + private final Path fromPathMetadata; + private final Path fromPathData; private final MetaData metadata; private final HiveConf hiveConf; - FSTableEvent(HiveConf hiveConf, String metadataDir) { + FSTableEvent(HiveConf hiveConf, String metadataDir, String dataDir) { try { URI fromURI = EximUtil.getValidatedURI(hiveConf, PlanUtils.stripQuotes(metadataDir)); - fromPath = new Path(fromURI.getScheme(), fromURI.getAuthority(), fromURI.getPath()); + fromPathMetadata = new Path(fromURI.getScheme(), fromURI.getAuthority(), fromURI.getPath()); + URI fromURIData = EximUtil.getValidatedURI(hiveConf, PlanUtils.stripQuotes(dataDir)); + fromPathData = new Path(fromURIData.getScheme(), fromURIData.getAuthority(), fromURIData.getPath()); FileSystem fs = FileSystem.get(fromURI, hiveConf); - metadata = EximUtil.readMetaData(fs, 
new Path(fromPath, EximUtil.METADATA_NAME)); + metadata = EximUtil.readMetaData(fs, new Path(fromPathMetadata, EximUtil.METADATA_NAME)); this.hiveConf = hiveConf; } catch (Exception e) { throw new RuntimeException(e); @@ -82,7 +85,12 @@ public boolean shouldNotReplicate() { @Override public Path metadataPath() { - return fromPath; + return fromPathMetadata; + } + + @Override + public Path dataPath() { + return fromPathData; } /** @@ -150,7 +158,7 @@ public ImportTableDesc tableDesc(String dbName) throws SemanticException { //TODO: if partitions are loaded lazily via the iterator then we will have to avoid conversion of everything here as it defeats the purpose. for (Partition partition : metadata.getPartitions()) { // TODO: this should ideally not create AddPartitionDesc per partition - AlterTableAddPartitionDesc partsDesc = addPartitionDesc(fromPath, tblDesc, partition); + AlterTableAddPartitionDesc partsDesc = addPartitionDesc(fromPathMetadata, tblDesc, partition); descs.add(partsDesc); } return descs; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/table/LoadPartitions.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/table/LoadPartitions.java index 05a590a189..b98f1f3b38 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/table/LoadPartitions.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/table/LoadPartitions.java @@ -42,7 +42,6 @@ import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.metadata.Partition; import org.apache.hadoop.hive.ql.metadata.Table; -import org.apache.hadoop.hive.ql.parse.EximUtil; import org.apache.hadoop.hive.ql.parse.HiveTableName; import org.apache.hadoop.hive.ql.parse.ImportSemanticAnalyzer; import org.apache.hadoop.hive.ql.parse.SemanticException; @@ -57,7 +56,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.io.Serializable; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; @@ -243,7 +241,7 @@ private void addPartition(boolean hasMorePartitions, AlterTableAddPartitionDesc Task copyTask = ReplCopyTask.getLoadCopyTask( event.replicationSpec(), - new Path(sourceWarehousePartitionLocation, EximUtil.DATA_PATH_NAME), + new Path(event.dataPath() + Path.SEPARATOR + getPartitionName(sourceWarehousePartitionLocation)), stagingDir, context.hiveConf, false ); @@ -272,6 +270,12 @@ private void addPartition(boolean hasMorePartitions, AlterTableAddPartitionDesc return ptnRootTask; } + private String getPartitionName(Path partitionMetadataFullPath) { + //Get partition name by removing the metadata base path. 
+ //Needed for getting the data path + return partitionMetadataFullPath.toString().substring(event.metadataPath().toString().length()); + } + /** * This will create the move of partition data from temp path to actual path */ diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/table/LoadTable.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/table/LoadTable.java index 82a30319b5..bb20687f6f 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/table/LoadTable.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/table/LoadTable.java @@ -42,7 +42,6 @@ import org.apache.hadoop.hive.ql.io.AcidUtils; import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.metadata.Table; -import org.apache.hadoop.hive.ql.parse.EximUtil; import org.apache.hadoop.hive.ql.parse.ImportSemanticAnalyzer; import org.apache.hadoop.hive.ql.parse.ReplicationSpec; import org.apache.hadoop.hive.ql.parse.SemanticException; @@ -223,7 +222,7 @@ private void newTableTasks(ImportTableDesc tblDesc, Task tblRootTask, TableLo if (shouldCreateLoadTableTask) { LOG.debug("adding dependent ReplTxnTask/CopyWork/MoveWork for table"); Task loadTableTask = loadTableTask(table, replicationSpec, new Path(tblDesc.getLocation()), - event.metadataPath()); + event.dataPath()); parentTask.addDependentTask(loadTableTask); } tracker.addTask(tblRootTask); @@ -272,7 +271,7 @@ static TableLocationTuple tableLocation(ImportTableDesc tblDesc, Database parent private Task loadTableTask(Table table, ReplicationSpec replicationSpec, Path tgtPath, Path fromURI) { - Path dataPath = new Path(fromURI, EximUtil.DATA_PATH_NAME); + Path dataPath = fromURI; Path tmpPath = tgtPath; // if move optimization is enabled, copy the files directly to the target path. No need to create the staging dir. diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/util/ReplUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/util/ReplUtils.java index 211c3f014d..939cbc3a35 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/util/ReplUtils.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/util/ReplUtils.java @@ -94,10 +94,6 @@ // Configuration to enable/disable dumping ACID tables. Used only for testing and shouldn't be // seen in production or in case of tests other than the ones where it's required. public static final String REPL_DUMP_INCLUDE_ACID_TABLES = "hive.repl.dump.include.acid.tables"; - //Acknowledgement for repl dump complete - public static final String DUMP_ACKNOWLEDGEMENT = "_finished_dump"; - //Acknowledgement for repl load complete - public static final String LOAD_ACKNOWLEDGEMENT = "_finished_load"; /** * Bootstrap REPL LOAD operation type on the examined object based on ckpt state. 
*/ @@ -240,7 +236,8 @@ public static PathFilter getEventsDirectoryFilter(final FileSystem fs) { try { return fs.isDirectory(p) && !p.getName().equalsIgnoreCase(ReplUtils.INC_BOOTSTRAP_ROOT_DIR_NAME) && !p.getName().equalsIgnoreCase(ReplUtils.REPL_TABLE_LIST_DIR_NAME) - && !p.getName().equalsIgnoreCase(EximUtil.DATA_PATH_NAME); + && !p.getName().equalsIgnoreCase(EximUtil.DATA_PATH_NAME) + && !p.getName().equalsIgnoreCase(EximUtil.METADATA_PATH_NAME); } catch (IOException e) { throw new RuntimeException(e); } @@ -251,7 +248,8 @@ public static PathFilter getBootstrapDirectoryFilter(final FileSystem fs) { return p -> { try { return fs.isDirectory(p) && !p.getName().equalsIgnoreCase(ReplUtils.REPL_TABLE_LIST_DIR_NAME) - && !p.getName().equalsIgnoreCase(EximUtil.DATA_PATH_NAME); + && !p.getName().equalsIgnoreCase(EximUtil.DATA_PATH_NAME) + && !p.getName().equalsIgnoreCase(EximUtil.METADATA_PATH_NAME); } catch (IOException e) { throw new RuntimeException(e); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/EximUtil.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/EximUtil.java index bc90ea1db7..5ada55f31e 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/EximUtil.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/EximUtil.java @@ -74,6 +74,7 @@ public static final String METADATA_NAME = "_metadata"; public static final String FILES_NAME = "_files"; public static final String DATA_PATH_NAME = "data"; + public static final String METADATA_PATH_NAME = "metadata"; private static final Logger LOG = LoggerFactory.getLogger(EximUtil.class); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/ReplicationSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/ReplicationSemanticAnalyzer.java index 8802139e84..c4ff070da6 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/ReplicationSemanticAnalyzer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/ReplicationSemanticAnalyzer.java @@ -32,6 +32,7 @@ import org.apache.hadoop.hive.ql.QueryState; import org.apache.hadoop.hive.ql.exec.Task; import org.apache.hadoop.hive.ql.exec.TaskFactory; +import org.apache.hadoop.hive.ql.exec.repl.ReplAck; import org.apache.hadoop.hive.ql.exec.repl.ReplDumpWork; import org.apache.hadoop.hive.ql.exec.repl.ReplLoadWork; import org.apache.hadoop.hive.ql.exec.repl.util.ReplUtils; @@ -54,6 +55,7 @@ import static org.apache.hadoop.hive.conf.HiveConf.ConfVars.REPL_DUMP_METADATA_ONLY; import static org.apache.hadoop.hive.conf.HiveConf.ConfVars.REPL_ENABLE_MOVE_OPTIMIZATION; import static org.apache.hadoop.hive.conf.HiveConf.ConfVars.REPL_MOVE_OPTIMIZED_FILE_SCHEMES; +import static org.apache.hadoop.hive.ql.exec.repl.ReplAck.LOAD_ACKNOWLEDGEMENT; import static org.apache.hadoop.hive.ql.parse.HiveParser.TOK_DBNAME; import static org.apache.hadoop.hive.ql.parse.HiveParser.TOK_REPLACE; import static org.apache.hadoop.hive.ql.parse.HiveParser.TOK_REPL_CONFIG; @@ -424,8 +426,9 @@ private Path getCurrentLoadPath() throws IOException, SemanticException { } } Path hiveDumpPath = new Path(latestUpdatedStatus.getPath(), ReplUtils.REPL_HIVE_BASE_DIR); - if (loadPathBase.getFileSystem(conf).exists(new Path(hiveDumpPath, ReplUtils.DUMP_ACKNOWLEDGEMENT)) - && !loadPathBase.getFileSystem(conf).exists(new Path(hiveDumpPath, ReplUtils.LOAD_ACKNOWLEDGEMENT))) { + if (loadPathBase.getFileSystem(conf).exists(new Path(hiveDumpPath, + ReplAck.DUMP_ACKNOWLEDGEMENT.toString())) + && !loadPathBase.getFileSystem(conf).exists(new Path(hiveDumpPath, LOAD_ACKNOWLEDGEMENT.toString()))) { return hiveDumpPath; } } 
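The hunks up to this point converge on one layout change: ReplLoadWork now roots the bootstrap and constraint iterators at <dumpDir>/metadata, DatabaseEventsIterator rebuilds the matching location under the sibling data/ tree, and ReplicationSemanticAnalyzer only selects a hive/ dump directory that carries a dump acknowledgement but no load acknowledgement yet. The sketch below is a minimal, self-contained illustration of that layout in plain java.nio rather than Hive/Hadoop types; the class name, sample paths, and the literal marker strings (taken from the constants this patch removes from ReplUtils and assumed to be carried over unchanged into ReplAck) are illustrative only, not Hive code.

```java
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;

// Minimal sketch of the dump layout after this change. Directory and marker names are
// assumptions mirroring EximUtil.METADATA_PATH_NAME / EximUtil.DATA_PATH_NAME and the
// acknowledgement constants removed from ReplUtils in this patch.
public class DumpLayoutSketch {
  static final String METADATA_DIR = "metadata";
  static final String DATA_DIR = "data";
  static final String DUMP_ACK = "_finished_dump";
  static final String LOAD_ACK = "_finished_load";

  // Table metadata is dumped under <hiveDumpRoot>/metadata/<db>/<table>.
  static Path tableMetadataDir(Path hiveDumpRoot, String db, String table) {
    return hiveDumpRoot.resolve(METADATA_DIR).resolve(db).resolve(table);
  }

  // Table data lands in the parallel <hiveDumpRoot>/data/<db>/<table> tree;
  // DatabaseEventsIterator#getDbLevelDataPath derives the db-level data root the same way
  // (two levels up from metadata/<db>, then down into data/<db>).
  static Path tableDataDir(Path hiveDumpRoot, String db, String table) {
    return hiveDumpRoot.resolve(DATA_DIR).resolve(db).resolve(table);
  }

  // Roughly what LoadPartitions does via getPartitionName(): strip the table's metadata
  // root from the partition's metadata path and re-root the remainder under the data dir.
  static Path partitionDataDir(Path tableMetadataDir, Path tableDataDir, Path partitionMetadataDir) {
    return tableDataDir.resolve(tableMetadataDir.relativize(partitionMetadataDir));
  }

  // Same shape as the check in ReplicationSemanticAnalyzer#getCurrentLoadPath:
  // load only a dump that finished dumping and has not been loaded yet.
  static boolean isLoadCandidate(Path hiveDumpRoot) {
    return Files.exists(hiveDumpRoot.resolve(DUMP_ACK))
        && !Files.exists(hiveDumpRoot.resolve(LOAD_ACK));
  }

  public static void main(String[] args) {
    Path hiveDumpRoot = Paths.get("/tmp/repl/1/hive");  // hypothetical dump location
    Path meta = tableMetadataDir(hiveDumpRoot, "srcdb", "ptned");
    Path data = tableDataDir(hiveDumpRoot, "srcdb", "ptned");
    System.out.println(meta);                                             // .../hive/metadata/srcdb/ptned
    System.out.println(partitionDataDir(meta, data, meta.resolve("b=1"))); // .../hive/data/srcdb/ptned/b=1
    System.out.println(isLoadCandidate(hiveDumpRoot));
  }
}
```

Keeping metadata/ and data/ as sibling trees under the hive/ dump root is what lets the bootstrap iterators walk only the metadata tree while the copy tasks resolve the corresponding files through TableEvent#dataPath(), instead of appending a data suffix to each metadata directory as before. The remaining hunks below apply the same split to the dump side (PartitionExport, TableExport.Paths) and to the test mocks.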
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/PartitionExport.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/PartitionExport.java index c3b10815f6..73dc606d87 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/PartitionExport.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/PartitionExport.java @@ -23,7 +23,6 @@ import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.metadata.Partition; import org.apache.hadoop.hive.ql.metadata.PartitionIterable; -import org.apache.hadoop.hive.ql.parse.EximUtil; import org.apache.hadoop.hive.ql.parse.EximUtil.ManagedTableCopyPath; import org.apache.hadoop.hive.ql.parse.ReplicationSpec; import org.apache.hadoop.hive.ql.parse.repl.dump.io.FileOperations; @@ -118,12 +117,13 @@ // this the data copy List dataPathList = Utils.getDataPathList(partition.getDataLocation(), forReplicationSpec, hiveConf); - Path rootDataDumpDir = paths.partitionExportDir(partitionName); + Path rootDataDumpDir = paths.partitionMetadataExportDir(partitionName); new FileOperations(dataPathList, rootDataDumpDir, distCpDoAsUser, hiveConf, mmCtx) .export(isExportTask); + Path dataDumpDir = new Path(paths.dataExportRootDir(), partitionName); LOG.debug("Thread: {}, finish partition dump {}", threadName, partitionName); return new ManagedTableCopyPath(forReplicationSpec, partition.getDataLocation(), - new Path(rootDataDumpDir, EximUtil.DATA_PATH_NAME)); + dataDumpDir); } catch (Exception e) { throw new RuntimeException(e.getMessage(), e); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/TableExport.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/TableExport.java index 683f3c0362..b11afe80a1 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/TableExport.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/TableExport.java @@ -198,22 +198,29 @@ private boolean shouldExport() { public static class Paths { private final String astRepresentationForErrorMsg; private final HiveConf conf; - //variable access should not be done and use exportRootDir() instead. - private final Path _exportRootDir; + //metadataExportRootDir and dataExportRootDir variable access should not be done and use + // metadataExportRootDir() and dataExportRootDir() instead. 
+ private final Path metadataExportRootDir; + private final Path dataExportRootDir; private final FileSystem exportFileSystem; - private boolean writeData, exportRootDirCreated = false; + private boolean writeData, metadataExportRootDirCreated = false, dataExportRootDirCreated = false; - public Paths(String astRepresentationForErrorMsg, Path dbRoot, String tblName, HiveConf conf, + public Paths(String astRepresentationForErrorMsg, Path dbMetadataRoot, Path dbDataRoot, + String tblName, HiveConf conf, boolean shouldWriteData) throws SemanticException { this.astRepresentationForErrorMsg = astRepresentationForErrorMsg; this.conf = conf; this.writeData = shouldWriteData; - Path tableRoot = new Path(dbRoot, tblName); - URI exportRootDir = EximUtil.getValidatedURI(conf, tableRoot.toUri().toString()); - validateTargetDir(exportRootDir); - this._exportRootDir = new Path(exportRootDir); + Path tableRootForMetadataDump = new Path(dbMetadataRoot, tblName); + Path tableRootForDataDump = new Path(dbDataRoot, tblName); + URI metadataExportRootDirUri = EximUtil.getValidatedURI(conf, tableRootForMetadataDump.toUri().toString()); + validateTargetDir(metadataExportRootDirUri); + URI dataExportRootDirUri = EximUtil.getValidatedURI(conf, tableRootForDataDump.toUri().toString()); + validateTargetDataDir(dataExportRootDirUri); + this.metadataExportRootDir = new Path(metadataExportRootDirUri); + this.dataExportRootDir = new Path(dataExportRootDirUri); try { - this.exportFileSystem = this._exportRootDir.getFileSystem(conf); + this.exportFileSystem = this.metadataExportRootDir.getFileSystem(conf); } catch (IOException e) { throw new SemanticException(e); } @@ -223,37 +230,58 @@ public Paths(String astRepresentationForErrorMsg, String path, HiveConf conf, boolean shouldWriteData) throws SemanticException { this.astRepresentationForErrorMsg = astRepresentationForErrorMsg; this.conf = conf; - this._exportRootDir = new Path(EximUtil.getValidatedURI(conf, path)); + this.metadataExportRootDir = new Path(EximUtil.getValidatedURI(conf, path)); + this.dataExportRootDir = new Path(new Path(EximUtil.getValidatedURI(conf, path)), EximUtil.DATA_PATH_NAME); this.writeData = shouldWriteData; try { - this.exportFileSystem = _exportRootDir.getFileSystem(conf); + this.exportFileSystem = metadataExportRootDir.getFileSystem(conf); } catch (IOException e) { throw new SemanticException(e); } } - Path partitionExportDir(String partitionName) throws SemanticException { - return exportDir(new Path(exportRootDir(), partitionName)); + Path partitionMetadataExportDir(String partitionName) throws SemanticException { + return exportDir(new Path(metadataExportRootDir(), partitionName)); } /** - * Access to the {@link #_exportRootDir} should only be done via this method + * Access to the {@link #metadataExportRootDir} should only be done via this method * since the creation of the directory is delayed until we figure out if we want * to write something or not. This is specifically important to prevent empty non-native * directories being created in repl dump. 
*/ - public Path exportRootDir() throws SemanticException { - if (!exportRootDirCreated) { + public Path metadataExportRootDir() throws SemanticException { + if (!metadataExportRootDirCreated) { try { - if (!exportFileSystem.exists(this._exportRootDir) && writeData) { - exportFileSystem.mkdirs(this._exportRootDir); + if (!exportFileSystem.exists(this.metadataExportRootDir) && writeData) { + exportFileSystem.mkdirs(this.metadataExportRootDir); } - exportRootDirCreated = true; + metadataExportRootDirCreated = true; } catch (IOException e) { throw new SemanticException(e); } } - return _exportRootDir; + return metadataExportRootDir; + } + + /** + * Access to the {@link #dataExportRootDir} should only be done via this method + * since the creation of the directory is delayed until we figure out if we want + * to write something or not. This is specifically important to prevent empty non-native + * directories being created in repl dump. + */ + public Path dataExportRootDir() throws SemanticException { + if (!dataExportRootDirCreated) { + try { + if (!exportFileSystem.exists(this.dataExportRootDir) && writeData) { + exportFileSystem.mkdirs(this.dataExportRootDir); + } + dataExportRootDirCreated = true; + } catch (IOException e) { + throw new SemanticException(e); + } + } + return dataExportRootDir; } private Path exportDir(Path exportDir) throws SemanticException { @@ -269,7 +297,7 @@ private Path exportDir(Path exportDir) throws SemanticException { } private Path metaDataExportFile() throws SemanticException { - return new Path(exportRootDir(), EximUtil.METADATA_NAME); + return new Path(metadataExportRootDir(), EximUtil.METADATA_NAME); } /** @@ -277,7 +305,7 @@ private Path metaDataExportFile() throws SemanticException { * Partition's data export directory is created within the export semantics of partition. */ private Path dataExportDir() throws SemanticException { - return exportDir(new Path(exportRootDir(), EximUtil.DATA_PATH_NAME)); + return exportDir(dataExportRootDir()); } /** @@ -310,6 +338,30 @@ private void validateTargetDir(URI rootDirExportFile) throws SemanticException { throw new SemanticException(astRepresentationForErrorMsg, e); } } + + /** + * this level of validation might not be required as the root directory in which we dump will + * be different for each run hence possibility of it having data is not there. 
+ */ + private void validateTargetDataDir(URI rootDirExportFile) throws SemanticException { + try { + FileSystem fs = FileSystem.get(rootDirExportFile, conf); + Path toPath = new Path(rootDirExportFile.getScheme(), rootDirExportFile.getAuthority(), + rootDirExportFile.getPath()); + try { + FileStatus tgt = fs.getFileStatus(toPath); + // target exists + if (!tgt.isDirectory()) { + throw new SemanticException( + astRepresentationForErrorMsg + ": " + "Target is not a directory : " + + rootDirExportFile); + } + } catch (FileNotFoundException ignored) { + } + } catch (IOException e) { + throw new SemanticException(astRepresentationForErrorMsg, e); + } + } } public static class AuthEntities { @@ -343,7 +395,7 @@ public AuthEntities getAuthEntities() throws SemanticException { authEntities.inputs.add(new ReadEntity(tableSpec.tableHandle)); } } - authEntities.outputs.add(toWriteEntity(paths.exportRootDir(), conf)); + authEntities.outputs.add(toWriteEntity(paths.metadataExportRootDir(), conf)); } catch (Exception e) { throw new SemanticException(e); } diff --git a/ql/src/test/org/apache/hadoop/hive/ql/exec/repl/TestReplDumpTask.java b/ql/src/test/org/apache/hadoop/hive/ql/exec/repl/TestReplDumpTask.java index 81fac252a3..263f74f208 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/exec/repl/TestReplDumpTask.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/exec/repl/TestReplDumpTask.java @@ -84,11 +84,11 @@ String getValidTxnListForReplDump(Hive hiveDb, long waitUntilTime) { } @Override - void dumpFunctionMetadata(String dbName, Path dumpRoot, Hive hiveDb) { + void dumpFunctionMetadata(String dbName, Path dbMetadataRoot, Hive hiveDb) { } @Override - Path dumpDbMetadata(String dbName, Path dumpRoot, long lastReplId, Hive hiveDb) { + Path dumpDbMetadata(String dbName, Path metadataRoot, long lastReplId, Hive hiveDb) { return Mockito.mock(Path.class); } @@ -128,8 +128,9 @@ public void removeDBPropertyToPreventRenameWhenBootstrapDumpOfTableFails() throw private int tableDumpCount = 0; @Override - List dumpTable(String dbName, String tblName, String validTxnList, Path dbRoot, - Path replDataDir, long lastReplId, Hive hiveDb, + List dumpTable(String dbName, String tblName, String validTxnList, + Path dbRootMetadata, Path dbRootData, + long lastReplId, Hive hiveDb, HiveWrapper.Tuple
tuple) throws Exception { tableDumpCount++;