diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationWithTableMigrationEx.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationWithTableMigrationEx.java
index 4637da1616..5f59a2a684 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationWithTableMigrationEx.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationWithTableMigrationEx.java
@@ -348,4 +348,71 @@ public void testIncLoadPenFlagWithMoveOptimization() throws Throwable {
     replica.load(replicatedDbName, tuple.dumpLocation, withClause);
     assertFalse(ReplUtils.isFirstIncPending(replica.getDatabase(replicatedDbName).getParameters()));
   }
+
+  private void verifyUserName(String userName) throws Throwable {
+    assertTrue(userName.equalsIgnoreCase(primary.getTable(primaryDbName, "tbl_own").getOwner()));
+    assertTrue(userName.equalsIgnoreCase(replica.getTable(replicatedDbName, "tbl_own").getOwner()));
+    assertTrue(userName.equalsIgnoreCase(primary.getTable(primaryDbName, "tacid").getOwner()));
+    assertTrue(userName.equalsIgnoreCase(replica.getTable(replicatedDbName, "tacid").getOwner()));
+    assertTrue(userName.equalsIgnoreCase(primary.getTable(primaryDbName, "tacidpart").getOwner()));
+    assertTrue(userName.equalsIgnoreCase(replica.getTable(replicatedDbName, "tacidpart").getOwner()));
+    assertTrue(userName.equalsIgnoreCase(primary.getTable(primaryDbName, "tbl_part").getOwner()));
+    assertTrue(userName.equalsIgnoreCase(replica.getTable(replicatedDbName, "tbl_part").getOwner()));
+    assertTrue(userName.equalsIgnoreCase(primary.getTable(primaryDbName, "view_own").getOwner()));
+    assertTrue(userName.equalsIgnoreCase(replica.getTable(replicatedDbName, "view_own").getOwner()));
+  }
+
+  private void alterUserName(String userName) throws Throwable {
+    primary.run("use " + primaryDbName)
+        .run("alter table tbl_own set owner USER " + userName)
+        .run("alter table tacid set owner USER " + userName)
+        .run("alter table tacidpart set owner USER " + userName)
+        .run("alter table tbl_part set owner USER " + userName)
+        .run("alter table view_own set owner USER " + userName);
+  }
+
+  @Test
+  public void testOwnerPropagation() throws Throwable {
+    primary.run("use " + primaryDbName)
+        .run("create table tbl_own (fld int)")
+        .run("create table tacid (id int) clustered by(id) into 3 buckets stored as orc ")
+        .run("create table tacidpart (place string) partitioned by (country string) clustered by(place) " +
+            "into 3 buckets stored as orc ")
+        .run("create table tbl_part (fld int) partitioned by (country string)")
+        .run("insert into tbl_own values (1)")
+        .run("create view view_own as select * from tbl_own");
+
+    // test bootstrap
+    alterUserName("hive");
+    WarehouseInstance.Tuple tuple = primary.dump(primaryDbName, null);
+    replica.loadWithoutExplain(replicatedDbName, tuple.dumpLocation);
+    verifyUserName("hive");
+
+    // test incremental
+    alterUserName("hive1");
+    tuple = primary.dump(primaryDbName, tuple.lastReplicationId);
+    replica.loadWithoutExplain(replicatedDbName, tuple.dumpLocation);
+    verifyUserName("hive1");
+  }
+
+  @Test
+  public void testOwnerPropagationInc() throws Throwable {
+    WarehouseInstance.Tuple tuple = primary.dump(primaryDbName, null);
+    replica.loadWithoutExplain(replicatedDbName, tuple.dumpLocation);
+
+    primary.run("use " + primaryDbName)
+        .run("create table tbl_own (fld int)")
+        .run("create table tacid (id int) clustered by(id) into 3 buckets stored as orc ")
+        .run("create table tacidpart (place string) partitioned by (country string) clustered by(place) " +
+            "into 3 buckets stored as orc ")
+        .run("create table tbl_part (fld int) partitioned by (country string)")
+        .run("insert into tbl_own values (1)")
+        .run("create view view_own as select * from tbl_own");
+
+    // test incremental when the table is created in the same load
+    alterUserName("hive");
+    tuple = primary.dump(primaryDbName, tuple.lastReplicationId);
+    replica.loadWithoutExplain(replicatedDbName, tuple.dumpLocation);
+    verifyUserName("hive");
+  }
 }
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
index 76339f1b82..a39126f52a 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
@@ -4699,6 +4699,9 @@ private int createView(Hive db, CreateViewDesc crtView) throws HiveException {
         oldview.setOutputFormatClass(crtView.getOutputFormat());
       }
       oldview.checkValidity(null);
+      if (crtView.getOwnerName() != null) {
+        oldview.setOwner(crtView.getOwnerName());
+      }
       db.alterTable(crtView.getViewName(), oldview, false, null, true);
       addIfAbsentByName(new WriteEntity(oldview, WriteEntity.WriteType.DDL_NO_LOCK));
     } else {
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/events/filesystem/FSTableEvent.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/events/filesystem/FSTableEvent.java
index 99a8d5d271..4b382f2762 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/events/filesystem/FSTableEvent.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/events/filesystem/FSTableEvent.java
@@ -123,6 +123,9 @@ public ImportTableDesc tableDesc(String dbName) throws SemanticException {
         tableDesc.setExternal(true);
       }
       tableDesc.setReplicationSpec(replicationSpec());
+      if (table.getOwner() != null) {
+        tableDesc.setOwnerName(table.getOwner());
+      }
       return tableDesc;
     } catch (Exception e) {
       throw new SemanticException(e);
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java
index ed063524de..b6b4f585a8 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java
@@ -276,8 +276,8 @@ public static boolean prepareImport(boolean isImportCmd,
     // Create table associated with the import
     // Executed if relevant, and used to contain all the other details about the table if not.
     ImportTableDesc tblDesc;
+    org.apache.hadoop.hive.metastore.api.Table tblObj = rv.getTable();
     try {
-      org.apache.hadoop.hive.metastore.api.Table tblObj = rv.getTable();
       // The table can be non acid in case of replication from a cluster with STRICT_MANAGED set to false.
       if (!TxnUtils.isTransactionalTable(tblObj) && replicationSpec.isInReplicationScope() &&
           x.getConf().getBoolVar(HiveConf.ConfVars.HIVE_STRICT_MANAGED_TABLES) &&
@@ -317,6 +317,7 @@ public static boolean prepareImport(boolean isImportCmd,
       }
       inReplicationScope = true;
       tblDesc.setReplWriteId(writeId);
+      tblDesc.setOwnerName(tblObj.getOwner());
     }
 
     if (isExternalSet) {
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/CreateTableDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/CreateTableDesc.java
index c71ff6d713..4514af1f08 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/CreateTableDesc.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/CreateTableDesc.java
@@ -115,6 +115,7 @@
   // This is not needed beyond compilation, so it is transient.
   private transient FileSinkDesc writer;
   private Long replWriteId; // to be used by repl task to get the txn and valid write id list
+  private String ownerName = null;
 
   public CreateTableDesc() {
   }
@@ -909,6 +910,10 @@ public Table toTable(HiveConf conf) throws HiveException {
             StatsSetupConst.FALSE);
       }
     }
+
+    if (ownerName != null) {
+      tbl.setOwner(ownerName);
+    }
     return tbl;
   }
 
@@ -939,4 +944,12 @@ public Long getReplWriteId() {
   public void setReplWriteId(Long replWriteId) {
     this.replWriteId = replWriteId;
   }
+
+  public String getOwnerName() {
+    return ownerName;
+  }
+
+  public void setOwnerName(String ownerName) {
+    this.ownerName = ownerName;
+  }
 }
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/CreateViewDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/CreateViewDesc.java
index 7130aba597..ce85d40653 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/CreateViewDesc.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/CreateViewDesc.java
@@ -70,6 +70,7 @@
   private Map<String, String> serdeProps; // only used for materialized views
   private Set<String> tablesUsed; // only used for materialized views
   private ReplicationSpec replicationSpec = null;
+  private String ownerName = null;
 
   /**
    * For serialization only.
@@ -412,6 +413,10 @@ public Table toTable(HiveConf conf) throws HiveException {
       }
     }
 
+    if (ownerName != null) {
+      tbl.setOwner(ownerName);
+    }
+
     // Sets the column state for the create view statement (false since it is a creation).
     // Similar to logic in CreateTableDesc.
     StatsSetupConst.setStatsStateForCreateTable(tbl.getTTable().getParameters(), null,
@@ -419,4 +424,12 @@ public Table toTable(HiveConf conf) throws HiveException {
 
     return tbl;
   }
+
+  public void setOwnerName(String ownerName) {
+    this.ownerName = ownerName;
+  }
+
+  public String getOwnerName() {
+    return this.ownerName;
+  }
 }
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/ImportTableDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/ImportTableDesc.java
index 5c30fca2d3..017e1c7f9b 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/ImportTableDesc.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/ImportTableDesc.java
@@ -366,4 +366,17 @@ public void setReplWriteId(Long replWriteId) {
       this.createTblDesc.setReplWriteId(replWriteId);
     }
   }
+
+  public void setOwnerName(String ownerName) {
+    switch (getDescType()) {
+      case TABLE:
+        createTblDesc.setOwnerName(ownerName);
+        break;
+      case VIEW:
+        createViewDesc.setOwnerName(ownerName);
+        break;
+      default:
+        throw new RuntimeException("Invalid table type : " + getDescType());
+    }
+  }
 }
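Note: a minimal sketch of the round trip this patch enables, assuming a configured
primary/replica WarehouseInstance pair as in the test class above; the calls are the
ones the patch itself uses, and "hive1" is an arbitrary user name chosen for illustration:

    // Set an owner on the source table, then dump and load into the replica.
    primary.run("use " + primaryDbName)
        .run("create table tbl_own (fld int)")
        .run("alter table tbl_own set owner USER hive1");
    WarehouseInstance.Tuple tuple = primary.dump(primaryDbName, null);
    replica.loadWithoutExplain(replicatedDbName, tuple.dumpLocation);
    // With this patch the owner flows through FSTableEvent.tableDesc() ->
    // ImportTableDesc.setOwnerName() -> CreateTableDesc/CreateViewDesc.toTable(),
    // so the replicated table reports the same owner as the source.
    assertTrue("hive1".equalsIgnoreCase(
        replica.getTable(replicatedDbName, "tbl_own").getOwner()));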