diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosAcidTables.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosAcidTables.java
index 4be9144162..94d92c53d2 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosAcidTables.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosAcidTables.java
@@ -277,6 +277,58 @@ public void testAcidTablesBootstrapWithOpenTxnsTimeout() throws Throwable {
     verifyCompactionQueue(tables, replicatedDbName, replicaConf);
   }
 
+  @Test
+  public void testAcidTablesCreateTableIncremental() throws Throwable {
+    // Create 2 tables, one partitioned and the other not.
+    primary.run("use " + primaryDbName)
+        .run("create table t1 (id int) clustered by(id) into 3 buckets stored as orc " +
+            "tblproperties (\"transactional\"=\"true\")")
+        .run("insert into t1 values(1)")
+        .run("create table t2 (rank int) partitioned by (name string) tblproperties(\"transactional\"=\"true\", " +
+            "\"transactional_properties\"=\"insert_only\")")
+        .run("insert into t2 partition(name='Bob') values(11)")
+        .run("insert into t2 partition(name='Carl') values(10)");
+
+    WarehouseInstance.Tuple bootstrapDump = primary
+        .run("use " + primaryDbName)
+        .dump(primaryDbName);
+
+    replica.load(replicatedDbName, primaryDbName)
+        .run("use " + replicatedDbName)
+        .run("show tables")
+        .verifyResults(new String[] {"t1", "t2"})
+        .run("repl status " + replicatedDbName)
+        .verifyResult(bootstrapDump.lastReplicationId)
+        .run("select id from t1")
+        .verifyResults(new String[] {"1"})
+        .run("select rank from t2 order by rank")
+        .verifyResults(new String[] {"10", "11"});
+
+    WarehouseInstance.Tuple incrDump = primary.run("use " + primaryDbName)
+        .run("create table t3 (id int)")
+        .run("insert into t3 values (99)")
+        .run("create table t4 (standard int) partitioned by (name string) stored as orc " +
+            "tblproperties (\"transactional\"=\"true\")")
+        .run("insert into t4 partition(name='Tom') values(11)")
+        .dump(primaryDbName);
+
+    replica.load(replicatedDbName, primaryDbName)
+        .run("use " + replicatedDbName)
+        .run("show tables")
+        .verifyResults(new String[] {"t1", "t2", "t3", "t4"})
+        .run("repl status " + replicatedDbName)
+        .verifyResult(incrDump.lastReplicationId)
+        .run("select id from t1")
+        .verifyResults(new String[] {"1"})
+        .run("select rank from t2 order by rank")
+        .verifyResults(new String[] {"10", "11"})
+        .run("select id from t3")
+        .verifyResults(new String[] {"99"})
+        .run("select standard from t4 order by standard")
+        .verifyResults(new String[] {"11"});
+  }
+
+
   @Test
   public void testAcidTablesBootstrapWithOpenTxnsDiffDb() throws Throwable {
     int numTxns = 5;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java
index 614453bf1f..3b9bc6f16e 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java
@@ -1192,6 +1192,10 @@ private static void createReplImportTasks(
       // have been already created when replaying previous events. So no need to create table
       // again.
       if (x.getEventType() != DumpType.EVENT_COMMIT_TXN) {
+        // Don't set location for managed tables while creating the table.
+        if (x.getEventType() == DumpType.EVENT_CREATE_TABLE && !tblDesc.isExternal()) {
+          tblDesc.setLocation(null);
+        }
         Task t = createTableTask(tblDesc, x);
         if (dependentTasks != null) {
           dependentTasks.forEach(task -> t.addDependentTask(task));