diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationOnHDFSEncryptedZones.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationOnHDFSEncryptedZones.java
index fd05e99..73102a7 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationOnHDFSEncryptedZones.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationOnHDFSEncryptedZones.java
@@ -94,6 +94,8 @@ public void targetAndSourceHaveDifferentEncryptionZoneKeys() throws Throwable {
         new HashMap<String, String>() {{
           put(HiveConf.ConfVars.HIVE_IN_TEST.varname, "false");
           put(HiveConf.ConfVars.HIVE_SERVER2_ENABLE_DOAS.varname, "false");
+          put(HiveConf.ConfVars.HIVE_DISTCP_DOAS_USER.varname,
+              UserGroupInformation.getCurrentUser().getUserName());
         }}, "test_key123");
 
     WarehouseInstance.Tuple tuple =
@@ -105,7 +107,8 @@ public void targetAndSourceHaveDifferentEncryptionZoneKeys() throws Throwable {
 
     replica
         .run("repl load " + replicatedDbName + " from '" + tuple.dumpLocation
-            + "' with('hive.repl.add.raw.reserved.namespace'='true')")
+            + "' with('hive.repl.add.raw.reserved.namespace'='true', "
+            + "'distcp.options.pugpbx'='', 'distcp.options.skipcrccheck'='')")
         .run("use " + replicatedDbName)
         .run("repl status " + replicatedDbName)
         .verifyResult(tuple.lastReplicationId)
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/WarehouseInstance.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/WarehouseInstance.java
index f95e1c6..dc31e92 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/WarehouseInstance.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/WarehouseInstance.java
@@ -123,6 +123,7 @@ private void initialize(String cmRoot, String warehouseRoot,
     hiveConf.setIntVar(HiveConf.ConfVars.METASTORETHRIFTCONNECTIONRETRIES, 3);
     hiveConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, "");
     hiveConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, "");
+    hiveConf.setBoolVar(HiveConf.ConfVars.HIVESTATSAUTOGATHER, false);
     if (!hiveConf.getVar(HiveConf.ConfVars.HIVE_TXN_MANAGER).equals("org.apache.hadoop.hive.ql.lockmgr.DbTxnManager")) {
       hiveConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false");
     }
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/io/FileOperations.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/io/FileOperations.java
index b61a945..b3e76b6 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/io/FileOperations.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/io/FileOperations.java
@@ -133,27 +133,16 @@ private void copyMmPath() throws LoginException, IOException {
     return validPaths;
   }
 
-
   /**
    * This needs the root data directory to which the data needs to be exported to.
    * The data export here is a list of files either in table/partition that are written to the _files
-   * in the exportRootDataDir provided.
+   * in the exportRootDataDir provided. In case of MM/ACID tables, we expect this pathlist to be
+   * already passed as valid paths by caller based on ValidWriteIdList. So, mmCtx is ignored here.
    */
   private void exportFilesAsList() throws SemanticException, IOException {
     try (BufferedWriter writer = writer()) {
-      if (mmCtx != null) {
-        assert dataPathList.size() == 1;
-        Path dataPath = dataPathList.get(0);
-        ValidWriteIdList ids = AcidUtils.getTableValidWriteIdList(
-            hiveConf, mmCtx.getFqTableName());
-        List<Path> validPaths = getMmValidPaths(ids, dataPath);
-        for (Path mmPath : validPaths) {
-          writeFilesList(listFilesInDir(mmPath), writer, AcidUtils.getAcidSubDir(dataPath));
-        }
-      } else {
-        for (Path dataPath : dataPathList) {
-          writeFilesList(listFilesInDir(dataPath), writer, AcidUtils.getAcidSubDir(dataPath));
-        }
+      for (Path dataPath : dataPathList) {
+        writeFilesList(listFilesInDir(dataPath), writer, AcidUtils.getAcidSubDir(dataPath));
       }
     }
   }
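
Note on the FileOperations change, with an illustrative sketch that is not part of the patch: the updated javadoc moves MM/ACID write-id filtering out of exportFilesAsList(), so the caller must now resolve dataPathList to valid delta directories before invoking it. The class and method names below (MmValidPathResolver, resolveValidPaths) are hypothetical, and the delta_<min>_<max> directory-name parsing is a simplified stand-in for the existing getMmValidPaths()/AcidUtils logic. The only APIs assumed from the patch itself are AcidUtils.getTableValidWriteIdList(conf, fqTableName) returning a ValidWriteIdList, plus the standard ValidWriteIdList.isWriteIdValid(long) and Hadoop FileSystem calls.

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.common.ValidWriteIdList;

/**
 * Hypothetical caller-side helper: resolves the directories under a table root
 * whose write ids are valid per the given ValidWriteIdList, i.e. the kind of
 * pathlist exportFilesAsList() now expects to receive in dataPathList.
 */
public final class MmValidPathResolver {

  // MM insert deltas are directories named delta_<writeId>_<writeId>; this
  // simplified parser ignores statement-id suffixes and compactor output.
  private static final String DELTA_PREFIX = "delta_";

  private MmValidPathResolver() {
  }

  public static List<Path> resolveValidPaths(Configuration conf, ValidWriteIdList writeIds,
      Path tableRoot) throws IOException {
    FileSystem fs = tableRoot.getFileSystem(conf);
    List<Path> validPaths = new ArrayList<>();
    for (FileStatus status : fs.listStatus(tableRoot)) {
      String name = status.getPath().getName();
      if (!status.isDirectory() || !name.startsWith(DELTA_PREFIX)) {
        continue;
      }
      // delta_0000005_0000005 -> minWriteId = maxWriteId = 5
      String[] range = name.substring(DELTA_PREFIX.length()).split("_");
      long minWriteId = Long.parseLong(range[0]);
      long maxWriteId = Long.parseLong(range[1]);
      // Keep only deltas whose write id is committed in the current snapshot.
      if (minWriteId == maxWriteId && writeIds.isWriteIdValid(minWriteId)) {
        validPaths.add(status.getPath());
      }
    }
    return validPaths;
  }
}

Under these assumptions, a caller would obtain the snapshot via AcidUtils.getTableValidWriteIdList(hiveConf, fqTableName), resolve the valid paths against the table root, and pass the result as dataPathList, which is exactly the contract the new javadoc documents.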