diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestExport.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestExportImport.java
similarity index 63%
rename from itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestExport.java
rename to itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestExportImport.java
index 1c7c844..1f19dfd 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestExport.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestExportImport.java
@@ -31,14 +31,16 @@ import java.io.IOException;
 
-public class TestExport {
+public class TestExportImport {
 
-  protected static final Logger LOG = LoggerFactory.getLogger(TestExport.class);
-  private static WarehouseInstance hiveWarehouse;
+  protected static final Logger LOG = LoggerFactory.getLogger(TestExportImport.class);
+  private static WarehouseInstance srcHiveWarehouse;
+  private static WarehouseInstance destHiveWarehouse;
 
   @Rule
   public final TestName testName = new TestName();
   private String dbName;
+  private String replDbName;
 
   @BeforeClass
   public static void classLevelSetup() throws Exception {
@@ -46,18 +48,22 @@ public static void classLevelSetup() throws Exception {
     conf.set("dfs.client.use.datanode.hostname", "true");
     MiniDFSCluster miniDFSCluster =
         new MiniDFSCluster.Builder(conf).numDataNodes(1).format(true).build();
-    hiveWarehouse = new WarehouseInstance(LOG, miniDFSCluster, false);
+    srcHiveWarehouse = new WarehouseInstance(LOG, miniDFSCluster, false);
+    destHiveWarehouse = new WarehouseInstance(LOG, miniDFSCluster, false);
   }
 
   @AfterClass
   public static void classLevelTearDown() throws IOException {
-    hiveWarehouse.close();
+    srcHiveWarehouse.close();
+    destHiveWarehouse.close();
   }
 
   @Before
   public void setup() throws Throwable {
     dbName = testName.getMethodName() + "_" + +System.currentTimeMillis();
-    hiveWarehouse.run("create database " + dbName);
+    replDbName = dbName + "_dupe";
+    srcHiveWarehouse.run("create database " + dbName);
+    destHiveWarehouse.run("create database " + replDbName);
   }
 
   @Test
@@ -65,7 +71,7 @@ public void shouldExportImportATemporaryTable() throws Throwable {
     String path = "hdfs:///tmp/" + dbName + "/";
     String exportPath = "'" + path + "'";
     String importDataPath = path + "/data";
-    hiveWarehouse
+    srcHiveWarehouse
         .run("use " + dbName)
         .run("create temporary table t1 (i int)")
         .run("insert into table t1 values (1),(2)")
@@ -75,4 +81,20 @@ public void shouldExportImportATemporaryTable() throws Throwable {
         .run("select * from t2")
         .verifyResults(new String[] { "1", "2" });
   }
+
+  @Test
+  public void dataImportAfterMetadataOnlyImport() throws Throwable {
+    String path = "hdfs:///tmp/" + dbName + "/";
+    String exportMDPath = "'" + path + "1/'";
+    String exportDataPath = "'" + path + "2/'";
+    srcHiveWarehouse.run("create table " + dbName + ".t1 (i int)")
+        .run("insert into table " + dbName + ".t1 values (1),(2)")
+        .run("export table " + dbName + ".t1 to " + exportMDPath + " for metadata replication('1')")
+        .run("export table " + dbName + ".t1 to " + exportDataPath + " for replication('2')");
+
+    destHiveWarehouse.run("import table " + replDbName + ".t1 from " + exportMDPath)
+        .run("import table " + replDbName + ".t1 from " + exportDataPath)
+        .run("select * from " + replDbName + ".t1")
+        .verifyResults(new String[] { "1", "2" });
+  }
 }
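The new dataImportAfterMetadataOnlyImport test above exercises a two-phase cycle: a metadata-only dump tagged with replication state '1', a full dump tagged with '2', and then both dumps imported, in order, into the replica database. A minimal, self-contained sketch of that flow follows, assuming hypothetical database and path names; the real test issues the same statements through WarehouseInstance.run(...).

// Sketch only (not part of the patch): the statement sequence the new test drives,
// printed as plain strings. Database, table and HDFS path names mirror the test;
// nothing here talks to a real cluster.
public class MetadataThenDataReplFlow {
  public static void main(String[] args) {
    String dbName = "srcdb";              // stands in for the per-test database name
    String replDbName = dbName + "_dupe"; // replica database, named as in setup()
    String exportMDPath = "'hdfs:///tmp/" + dbName + "/1/'";
    String exportDataPath = "'hdfs:///tmp/" + dbName + "/2/'";

    String[] flow = {
        // Phase 1: metadata-only dump, pinned to replication state '1'
        "export table " + dbName + ".t1 to " + exportMDPath + " for metadata replication('1')",
        // Phase 2: full dump (metadata + data), pinned to replication state '2'
        "export table " + dbName + ".t1 to " + exportDataPath + " for replication('2')",
        // Replica side: apply the metadata-only dump first, then the data dump on top of it
        "import table " + replDbName + ".t1 from " + exportMDPath,
        "import table " + replDbName + ".t1 from " + exportDataPath
    };
    for (String stmt : flow) {
      System.out.println(stmt);
    }
  }
}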
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/ReplicationSpec.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/ReplicationSpec.java
index 1c54d29..f5253d2 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/ReplicationSpec.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/ReplicationSpec.java
@@ -218,9 +218,17 @@ private void init(ASTNode node){
     // -> ^(TOK_REPLICATION $replId $isMetadataOnly)
     isInReplicationScope = true;
     eventId = PlanUtils.stripQuotes(node.getChild(0).getText());
-    if (node.getChildCount() > 1){
-      if (node.getChild(1).getText().toLowerCase().equals("metadata")) {
-        isMetadataOnly= true;
+    if ((node.getChildCount() > 1)
+        && node.getChild(1).getText().toLowerCase().equals("metadata")) {
+      isMetadataOnly= true;
+      try {
+        if (Long.parseLong(eventId) >= 0) {
+          // If metadata-only dump, then the state of the dump shouldn't be the latest event id as
+          // the data is not yet dumped and shall be dumped in future export.
+          currStateId = eventId;
+        }
+      } catch (Exception ex) {
+        // Ignore the exception and fall through the default currentStateId
       }
     }
   }
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/TableExport.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/TableExport.java
index 5eae35a..71f93ec 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/TableExport.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/TableExport.java
@@ -82,6 +82,14 @@ public AuthEntities write() throws SemanticException {
     if (tableSpec.tableHandle.isView()) {
       replicationSpec.setIsMetadataOnly(true);
     }
+    if (this.replicationSpec.getCurrentReplicationState() == null) {
+      try {
+        long currentEventId = db.getMSC().getCurrentNotificationEventId().getEventId();
+        this.replicationSpec.setCurrentReplicationState(String.valueOf(currentEventId));
+      } catch (Exception e) {
+        throw new SemanticException("Error when getting current notification event ID", e);
+      }
+    }
     PartitionIterable withPartitions = partitions();
     writeMetaData(withPartitions);
     if (!replicationSpec.isMetadataOnly()) {
@@ -93,8 +101,6 @@ public AuthEntities write() throws SemanticException {
 
   private PartitionIterable partitions() throws SemanticException {
     try {
-      long currentEventId = db.getMSC().getCurrentNotificationEventId().getEventId();
-      replicationSpec.setCurrentReplicationState(String.valueOf(currentEventId));
       if (tableSpec.tableHandle.isPartitioned()) {
         if (tableSpec.specType == TableSpec.SpecType.TABLE_ONLY) {
           // TABLE-ONLY, fetch partitions if regular export, don't if metadata-only
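Taken together, the two ql changes above control where a dump's replication state comes from: ReplicationSpec.init() pins currStateId to the export's own event id for a metadata-only dump, and TableExport.write() now consults the metastore for the current notification event id only when no state was pinned, instead of always doing so inside partitions(). A minimal sketch of that decision is below, with a hypothetical helper standing in for the metastore call.

// Sketch only (not part of the patch): how the two ql changes cooperate.
// parseTimePin mimics the new branch in ReplicationSpec.init(); dumpTimeState mimics the
// null guard added to TableExport.write(). fetchCurrentEventId() is a hypothetical
// placeholder for db.getMSC().getCurrentNotificationEventId().getEventId().
final class ReplStateSketch {

  // Parse time: a metadata-only export with a usable event id pins the dump state to that id,
  // because the data is not in this dump and will be shipped by a later export.
  static String parseTimePin(boolean isMetadataOnly, String eventId) {
    if (isMetadataOnly) {
      try {
        if (Long.parseLong(eventId) >= 0) {
          return eventId;
        }
      } catch (NumberFormatException ignored) {
        // fall through: leave the state unpinned
      }
    }
    return null;
  }

  // Dump time: ask the metastore for the current notification event id only when
  // parse time did not already pin a state.
  static String dumpTimeState(String pinnedState) {
    return (pinnedState != null) ? pinnedState : String.valueOf(fetchCurrentEventId());
  }

  // Hypothetical placeholder; the real code goes through the metastore client.
  static long fetchCurrentEventId() {
    return 42L;
  }
}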