diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestReplChangeManager.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestReplChangeManager.java
index ebbb0b6..d9e206c 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestReplChangeManager.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestReplChangeManager.java
@@ -32,6 +32,7 @@
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.metastore.ReplChangeManager.RecycleType;
 import org.apache.hadoop.hive.metastore.api.Database;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
 import org.apache.hadoop.hive.metastore.api.Partition;
@@ -172,7 +173,7 @@ public void testRecyclePartTable() throws Exception {
     ReplChangeManager cm = ReplChangeManager.getInstance(hiveConf);
 
     // verify cm.recycle(db, table, part) api moves file to cmroot dir
-    int ret = cm.recycle(part1Path, false);
+    int ret = cm.recycle(part1Path, RecycleType.MOVE, false);
     Assert.assertEquals(ret, 1);
     Path cmPart1Path = ReplChangeManager.getCMPath(hiveConf, path1Chksum);
     assertTrue(cmPart1Path.getFileSystem(hiveConf).exists(cmPart1Path));
@@ -242,7 +243,7 @@ public void testRecycleNonPartTable() throws Exception {
     ReplChangeManager cm = ReplChangeManager.getInstance(hiveConf);
 
     // verify cm.recycle(Path) api moves file to cmroot dir
-    cm.recycle(filePath1, false);
+    cm.recycle(filePath1, RecycleType.MOVE, false);
     assertFalse(filePath1.getFileSystem(hiveConf).exists(filePath1));
 
     Path cmPath1 = ReplChangeManager.getCMPath(hiveConf, fileChksum1);
@@ -293,9 +294,9 @@ public void testClearer() throws Exception {
     createFile(part32, "testClearer32");
     String fileChksum32 = ReplChangeManager.checksumFor(part32, fs);
 
-    ReplChangeManager.getInstance(hiveConf).recycle(dirTbl1, false);
-    ReplChangeManager.getInstance(hiveConf).recycle(dirTbl2, false);
-    ReplChangeManager.getInstance(hiveConf).recycle(dirTbl3, true);
+    ReplChangeManager.getInstance(hiveConf).recycle(dirTbl1, RecycleType.MOVE, false);
+    ReplChangeManager.getInstance(hiveConf).recycle(dirTbl2, RecycleType.MOVE, false);
+    ReplChangeManager.getInstance(hiveConf).recycle(dirTbl3, RecycleType.MOVE, true);
 
     assertTrue(fs.exists(ReplChangeManager.getCMPath(hiveConf, fileChksum11)));
     assertTrue(fs.exists(ReplChangeManager.getCMPath(hiveConf, fileChksum12)));
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenarios.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenarios.java
index 4fa45ae..0591dfd 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenarios.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenarios.java
@@ -1655,6 +1655,131 @@ public void testInsertOverwriteOnPartitionedTableWithCM() throws IOException {
   }
 
   @Test
+  public void testRenameTableWithCM() throws IOException {
+    String testName = "renameTableWithCM";
+    LOG.info("Testing " + testName);
+    String dbName = testName + "_" + tid;
+
+    run("CREATE DATABASE " + dbName);
+    run("CREATE TABLE " + dbName + ".unptned(a string) STORED AS TEXTFILE");
+    run("CREATE TABLE " + dbName + ".ptned(a string) partitioned by (b int) STORED AS TEXTFILE");
+
+    advanceDumpDir();
+    run("REPL DUMP " + dbName);
+    String replDumpLocn = getResult(0, 0);
+    String replDumpId = getResult(0, 1, true);
+    LOG.info("Bootstrap-Dump: Dumped to {} with id {}", replDumpLocn, replDumpId);
+    run("REPL LOAD " + dbName + "_dupe FROM '" + replDumpLocn + "'");
+
+    String[] unptn_data = new String[] { "ten", "twenty" };
+    String[] ptn_data_1 = new String[] { "fifteen", "fourteen" };
+    String[] ptn_data_2 = new String[] { "fifteen", "seventeen" };
+
+    run("INSERT INTO TABLE " + dbName + ".unptned values('" + unptn_data[0] + "')");
+    run("INSERT INTO TABLE " + dbName + ".unptned values('" + unptn_data[1] + "')");
+
+    run("INSERT INTO TABLE " + dbName + ".ptned partition(b=1) values('" + ptn_data_1[0] + "')");
+    run("INSERT INTO TABLE " + dbName + ".ptned partition(b=1) values('" + ptn_data_1[1] + "')");
+
+    run("ALTER TABLE " + dbName + ".ptned ADD PARTITION (b=2)");
+    run("INSERT INTO TABLE " + dbName + ".ptned partition(b=2) values('" + ptn_data_2[0] + "')");
+    run("INSERT INTO TABLE " + dbName + ".ptned partition(b=2) values('" + ptn_data_2[1] + "')");
+
+    // Get the last repl ID corresponding to all insert events except RENAME.
+    advanceDumpDir();
+    run("REPL DUMP " + dbName + " FROM " + replDumpId);
+    String lastDumpIdWithoutRename = getResult(0, 1);
+
+    run("ALTER TABLE " + dbName + ".unptned RENAME TO " + dbName + ".unptned_renamed");
+    run("ALTER TABLE " + dbName + ".ptned RENAME TO " + dbName + ".ptned_renamed");
+
+    advanceDumpDir();
+    run("REPL DUMP " + dbName + " FROM " + replDumpId + " TO " + lastDumpIdWithoutRename);
+    String incrementalDumpLocn = getResult(0, 0);
+    String incrementalDumpId = getResult(0, 1, true);
+    LOG.info("Incremental-Dump: Dumped to {} with id {} from {}", incrementalDumpLocn, incrementalDumpId, replDumpId);
+    replDumpId = incrementalDumpId;
+
+    run("REPL LOAD " + dbName + "_dupe FROM '" + incrementalDumpLocn + "'");
+    verifyRun("SELECT a from " + dbName + "_dupe.unptned ORDER BY a", unptn_data);
+    verifyRun("SELECT a from " + dbName + "_dupe.ptned where (b=1) ORDER BY a", ptn_data_1);
+    verifyRun("SELECT a from " + dbName + "_dupe.ptned where (b=2) ORDER BY a", ptn_data_2);
+
+    advanceDumpDir();
+    run("REPL DUMP " + dbName + " FROM " + replDumpId);
+    incrementalDumpLocn = getResult(0, 0);
+    incrementalDumpId = getResult(0, 1, true);
+    LOG.info("Incremental-Dump: Dumped to {} with id {} from {}", incrementalDumpLocn, incrementalDumpId, replDumpId);
+    replDumpId = incrementalDumpId;
+
+    run("REPL LOAD " + dbName + "_dupe FROM '" + incrementalDumpLocn + "'");
+    verifyFail("SELECT a from " + dbName + "_dupe.unptned ORDER BY a");
+    verifyFail("SELECT a from " + dbName + "_dupe.ptned where (b=1) ORDER BY a");
+    verifyRun("SELECT a from " + dbName + "_dupe.unptned_renamed ORDER BY a", unptn_data);
+    verifyRun("SELECT a from " + dbName + "_dupe.ptned_renamed where (b=1) ORDER BY a", ptn_data_1);
+    verifyRun("SELECT a from " + dbName + "_dupe.ptned_renamed where (b=2) ORDER BY a", ptn_data_2);
+  }
+
+  @Test
+  public void testRenamePartitionWithCM() throws IOException {
+    String testName = "renamePartitionWithCM";
+    LOG.info("Testing " + testName);
+    String dbName = testName + "_" + tid;
+
+    run("CREATE DATABASE " + dbName);
+    run("CREATE TABLE " + dbName + ".ptned(a string) partitioned by (b int) STORED AS TEXTFILE");
+
+    advanceDumpDir();
+    run("REPL DUMP " + dbName);
+    String replDumpLocn = getResult(0, 0);
+    String replDumpId = getResult(0, 1, true);
+    LOG.info("Bootstrap-Dump: Dumped to {} with id {}", replDumpLocn, replDumpId);
+    run("REPL LOAD " + dbName + "_dupe FROM '" + replDumpLocn + "'");
+
+    String[] empty = new String[] {};
+    String[] ptn_data_1 = new String[] { "fifteen", "fourteen" };
+    String[] ptn_data_2 = new String[] { "fifteen", "seventeen" };
+
+    run("INSERT INTO TABLE " + dbName + ".ptned partition(b=1) values('" + ptn_data_1[0] + "')");
+    run("INSERT INTO TABLE " + dbName + ".ptned partition(b=1) values('" + ptn_data_1[1] + "')");
+
+    run("ALTER TABLE " + dbName + ".ptned ADD PARTITION (b=2)");
+    run("INSERT INTO TABLE " + dbName + ".ptned partition(b=2) values('" + ptn_data_2[0] + "')");
+    run("INSERT INTO TABLE " + dbName + ".ptned partition(b=2) values('" + ptn_data_2[1] + "')");
+
+    // Get the last repl ID corresponding to all insert events except RENAME.
+    advanceDumpDir();
+    run("REPL DUMP " + dbName + " FROM " + replDumpId);
+    String lastDumpIdWithoutRename = getResult(0, 1);
+
+    run("ALTER TABLE " + dbName + ".ptned PARTITION (b=2) RENAME TO PARTITION (b=10)");
+
+    advanceDumpDir();
+    run("REPL DUMP " + dbName + " FROM " + replDumpId + " TO " + lastDumpIdWithoutRename);
+    String incrementalDumpLocn = getResult(0, 0);
+    String incrementalDumpId = getResult(0, 1, true);
+    LOG.info("Incremental-Dump: Dumped to {} with id {} from {}", incrementalDumpLocn, incrementalDumpId, replDumpId);
+    replDumpId = incrementalDumpId;
+
+    run("REPL LOAD " + dbName + "_dupe FROM '" + incrementalDumpLocn + "'");
+    verifyRun("SELECT a from " + dbName + "_dupe.ptned where (b=1) ORDER BY a", ptn_data_1);
+    verifyRun("SELECT a from " + dbName + "_dupe.ptned where (b=2) ORDER BY a", ptn_data_2);
+    verifyRun("SELECT a from " + dbName + "_dupe.ptned where (b=10) ORDER BY a", empty);
+
+    advanceDumpDir();
+    run("REPL DUMP " + dbName + " FROM " + replDumpId);
+    incrementalDumpLocn = getResult(0, 0);
+    incrementalDumpId = getResult(0, 1, true);
+    LOG.info("Incremental-Dump: Dumped to {} with id {} from {}", incrementalDumpLocn, incrementalDumpId, replDumpId);
+    replDumpId = incrementalDumpId;
+
+    run("REPL LOAD " + dbName + "_dupe FROM '" + incrementalDumpLocn + "'");
+    verifyRun("SELECT a from " + dbName + "_dupe.ptned where (b=1) ORDER BY a", ptn_data_1);
+    verifyRun("SELECT a from " + dbName + "_dupe.ptned where (b=10) ORDER BY a", ptn_data_2);
+    verifyRun("SELECT a from " + dbName + "_dupe.ptned where (b=2) ORDER BY a", empty);
+  }
+
+  @Test
   public void testViewsReplication() throws IOException {
     String testName = "viewsReplication";
     String dbName = createDB(testName);
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java
index 7c1be8c..ea90a8e 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java
@@ -220,10 +220,10 @@ public void alterTable(RawStore msdb, Warehouse wh, String dbname,
               + newDbName + "." + newTblName + " already exists : " + destPath);
         }
         // check that src exists and also checks permissions necessary, rename src to dest
-        if (srcFs.exists(srcPath) && srcFs.rename(srcPath, destPath)) {
+        if (srcFs.exists(srcPath) && wh.renameDir(srcPath, destPath, true)) {
           dataWasMoved = true;
         }
-      } catch (IOException e) {
+      } catch (IOException | MetaException e) {
         LOG.error("Alter Table operation for " + dbname + "." + name + " failed.", e);
         throw new InvalidOperationException("Alter Table operation for " + dbname + "."
             + name + " failed to move data due to: '" + getSimpleMessage(e)
@@ -346,13 +346,13 @@ public void alterTable(RawStore msdb, Warehouse wh, String dbname,
   }
 
   /**
-   * RemoteExceptionS from hadoop RPC wrap the stack trace into e.getMessage() which makes
-   * logs/stack traces confusing.
+   * MetaException that encapsulates the error message of a RemoteException from hadoop RPC,
+   * which wraps the stack trace into e.getMessage() and makes logs/stack traces confusing.
    * @param ex
    * @return
    */
-  String getSimpleMessage(IOException ex) {
-    if(ex instanceof RemoteException) {
+  String getSimpleMessage(Exception ex) {
+    if(ex instanceof MetaException) {
       String msg = ex.getMessage();
       if(msg == null || !msg.contains("\n")) {
         return msg;
@@ -515,7 +515,7 @@ public Partition alterPartition(final RawStore msdb, Warehouse wh, final String
       }
 
       //rename the data directory
-      wh.renameDir(srcPath, destPath);
+      wh.renameDir(srcPath, destPath, true);
       LOG.info("Partition directory rename from " + srcPath + " to " + destPath + " done.");
       dataWasMoved = true;
     }
@@ -569,7 +569,7 @@ public Partition alterPartition(final RawStore msdb, Warehouse wh, final String
         LOG.error("Revert the data move in renaming a partition.");
         try {
           if (destFs.exists(destPath)) {
-            wh.renameDir(destPath, srcPath);
+            wh.renameDir(destPath, srcPath, false);
           }
         } catch (MetaException me) {
           LOG.error("Failed to restore partition data from " + destPath + " to " + srcPath
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
index 4938fef..9765ec2 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
@@ -3173,7 +3173,7 @@ public Partition exchange_partition(Map partitionSpecs,
          * TODO: Use the hard link feature of hdfs
          * once https://issues.apache.org/jira/browse/HDFS-3370 is done
          */
-        pathCreated = wh.renameDir(sourcePath, destPath);
+        pathCreated = wh.renameDir(sourcePath, destPath, false);
         // Setting success to false to make sure that if the listener fails, rollback happens.
         success = false;
 
@@ -3200,7 +3200,7 @@ public Partition exchange_partition(Map partitionSpecs,
       if (!success || !pathCreated) {
         ms.rollbackTransaction();
         if (pathCreated) {
-          wh.renameDir(destPath, sourcePath);
+          wh.renameDir(destPath, sourcePath, false);
         }
       }
 
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/ReplChangeManager.java b/metastore/src/java/org/apache/hadoop/hive/metastore/ReplChangeManager.java
index 6f17d23..b1edc8f 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/ReplChangeManager.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/ReplChangeManager.java
@@ -58,6 +58,11 @@
   static final String REMAIN_IN_TRASH_TAG = "user.remain-in-trash";
   private static final String URI_FRAGMENT_SEPARATOR = "#";
 
+  public enum RecycleType {
+    MOVE,
+    COPY
+  }
+
   public static ReplChangeManager getInstance(HiveConf hiveConf) throws MetaException {
     if (instance == null) {
       instance = new ReplChangeManager(hiveConf);
@@ -101,43 +106,18 @@ public boolean accept(Path p){
     }
   };
 
-  void addFile(Path path) throws MetaException {
-    if (!enabled) {
-      return;
-    }
-    try {
-      if (fs.isDirectory(path)) {
-        throw new IllegalArgumentException(path + " cannot be a directory");
-      }
-      Path cmPath = getCMPath(hiveConf, checksumFor(path, fs));
-      boolean copySuccessful = FileUtils
-          .copy(path.getFileSystem(hiveConf), path, cmPath.getFileSystem(hiveConf), cmPath, false,
-              false, hiveConf);
-      if (!copySuccessful) {
-        LOG.debug("A file with the same content of " + path.toString() + " already exists, ignore");
-      } else {
-        fs.setOwner(cmPath, msUser, msGroup);
-        try {
-          fs.setXAttr(cmPath, ORIG_LOC_TAG, path.toString().getBytes());
-        } catch (UnsupportedOperationException e) {
-          LOG.warn("Error setting xattr for " + path.toString());
-        }
-      }
-    } catch (Exception exception) {
-      throw new MetaException(StringUtils.stringifyException(exception));
-    }
-  }
-
   /***
    * Move a path into cmroot. If the path is a directory (of a partition, or table if nonpartitioned),
    * recursively move files inside directory to cmroot. Note the table must be managed table
    * @param path a single file or directory
-   * @param ifPurge if the file should skip Trash when delete
+   * @param type whether the files should be copied or moved to cmroot.
+   *             Copy is costly but preserves the source files.
+   * @param ifPurge if the source file should skip Trash when it is moved/deleted.
+   *                This is consulted only if type is MOVE.
    * @return int
    * @throws MetaException
   */
-  int recycle(Path path, boolean ifPurge) throws MetaException {
+  int recycle(Path path, RecycleType type, boolean ifPurge) throws MetaException {
     if (!enabled) {
       return 0;
     }
@@ -148,14 +128,11 @@ int recycle(Path path, boolean ifPurge) throws MetaException {
       if (fs.isDirectory(path)) {
         FileStatus[] files = fs.listStatus(path, hiddenFileFilter);
         for (FileStatus file : files) {
-          count += recycle(file.getPath(), ifPurge);
+          count += recycle(file.getPath(), type, ifPurge);
         }
       } else {
-        Path cmPath = getCMPath(hiveConf, checksumFor(path, fs));
-
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Moving " + path.toString() + " to " + cmPath.toString());
-        }
+        String fileCheckSum = checksumFor(path, fs);
+        Path cmPath = getCMPath(hiveConf, fileCheckSum);
 
         // set timestamp before moving to cmroot, so we can
         // avoid race condition CM remove the file before setting
@@ -163,17 +140,42 @@ int recycle(Path path, boolean ifPurge) throws MetaException {
         long now = System.currentTimeMillis();
         fs.setTimes(path, now, now);
 
-        boolean succ = fs.rename(path, cmPath);
-        // Ignore if a file with same content already exist in cmroot
-        // We might want to setXAttr for the new location in the future
-        if (!succ) {
-          if (LOG.isDebugEnabled()) {
-            LOG.debug("A file with the same content of " + path.toString() + " already exists, ignore");
-          }
-          // Need to extend the tenancy if we saw a newer file with the same content
-          fs.setTimes(cmPath, now, now);
+        boolean success = false;
+        if (fs.exists(cmPath) && fileCheckSum.equalsIgnoreCase(checksumFor(cmPath, fs))) {
+          // If a file with the same checksum already exists in cmPath, just skip the copy/move.
+          // Also, mark the operation unsuccessful to record that a file with the same name
+          // already exists, which ensures the timestamp of cmPath is updated below to avoid
+          // clean-up by the CM cleaner.
+          success = false;
         } else {
+          switch (type) {
+            case MOVE: {
+              if (LOG.isDebugEnabled()) {
+                LOG.debug("Moving {} to {}", path.toString(), cmPath.toString());
+              }
+              // Rename fails if a file with the same name already exists.
+              success = fs.rename(path, cmPath);
+              break;
+            }
+            case COPY: {
+              if (LOG.isDebugEnabled()) {
+                LOG.debug("Copying {} to {}", path.toString(), cmPath.toString());
+              }
+              // It is possible that a file with the same checksum exists in cmPath but its content
+              // is partially copied or corrupted. In this case, just overwrite the existing file
+              // with the new one.
+              success = FileUtils.copy(fs, path, fs, cmPath, false, true, hiveConf);
+              break;
+            }
+            default:
+              // Operation fails as invalid input
+              break;
+          }
+        }
+        // Ignore if a file with same content already exist in cmroot
+        // We might want to setXAttr for the new location in the future
+        if (success) {
          // set the file owner to hive (or the id metastore run as)
          fs.setOwner(cmPath, msUser, msGroup);
 
@@ -184,19 +186,26 @@ int recycle(Path path, boolean ifPurge) throws MetaException {
          try {
            fs.setXAttr(cmPath, ORIG_LOC_TAG, path.toString().getBytes());
          } catch (UnsupportedOperationException e) {
-            LOG.warn("Error setting xattr for " + path.toString());
+            LOG.warn("Error setting xattr for {}", path.toString());
          }
 
          count++;
+        } else {
+          if (LOG.isDebugEnabled()) {
+            LOG.debug("A file with the same content of {} already exists, ignore", path.toString());
+          }
+          // Need to extend the tenancy if we saw a newer file with the same content
+          fs.setTimes(cmPath, now, now);
        }
 
+        // Tag if we want to remain in trash after deletion.
         // If multiple files share the same content, then
-        if (!ifPurge) {
+        // any file claim remain in trash would be granted
+        if ((type == RecycleType.MOVE) && !ifPurge) {
          try {
            fs.setXAttr(cmPath, REMAIN_IN_TRASH_TAG, new byte[]{0});
          } catch (UnsupportedOperationException e) {
-            LOG.warn("Error setting xattr for " + cmPath.toString());
+            LOG.warn("Error setting xattr for {}", cmPath.toString());
          }
        }
      }
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/Warehouse.java b/metastore/src/java/org/apache/hadoop/hive/metastore/Warehouse.java
index fcbcf62..778550b 100755
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/Warehouse.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/Warehouse.java
@@ -45,6 +45,7 @@
 import org.apache.hadoop.hive.common.HiveStatsUtils;
 import org.apache.hadoop.hive.common.JavaUtils;
 import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.metastore.ReplChangeManager.RecycleType;
 import org.apache.hadoop.hive.metastore.api.Database;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
 import org.apache.hadoop.hive.metastore.api.MetaException;
@@ -196,8 +197,13 @@ public boolean mkdirs(Path f) throws MetaException {
     return false;
   }
 
-  public boolean renameDir(Path sourcePath, Path destPath) throws MetaException {
+  public boolean renameDir(Path sourcePath, Path destPath, boolean needCmRecycle) throws MetaException {
     try {
+      if (needCmRecycle) {
+        // Copy the source files to cmroot. As the client will move the source files to another
+        // location, we should make a copy of the files to cmroot instead of moving them.
+        cm.recycle(sourcePath, RecycleType.COPY, true);
+      }
       FileSystem fs = getFs(sourcePath);
       return FileUtils.rename(fs, sourcePath, destPath, conf);
     } catch (Exception ex) {
@@ -207,7 +213,7 @@ public boolean renameDir(Path sourcePath, Path destPath) throws MetaException {
   }
 
   void addToChangeManagement(Path file) throws MetaException {
-    cm.addFile(file);
+    cm.recycle(file, RecycleType.COPY, true);
   }
 
   public boolean deleteDir(Path f, boolean recursive) throws MetaException {
@@ -215,13 +221,13 @@ public boolean deleteDir(Path f, boolean recursive) throws MetaException {
   }
 
   public boolean deleteDir(Path f, boolean recursive, boolean ifPurge) throws MetaException {
-    cm.recycle(f, ifPurge);
+    cm.recycle(f, RecycleType.MOVE, ifPurge);
     FileSystem fs = getFs(f);
     return fsHandler.deleteDir(fs, f, recursive, ifPurge, conf);
   }
 
   public void recycleDirToCmPath(Path f, boolean ifPurge) throws MetaException {
-    cm.recycle(f, ifPurge);
+    cm.recycle(f, RecycleType.MOVE, ifPurge);
     return;
   }
diff --git a/ql/src/test/results/clientpositive/encrypted/encryption_move_tbl.q.out b/ql/src/test/results/clientpositive/encrypted/encryption_move_tbl.q.out
index 580da46..06f8408 100644
--- a/ql/src/test/results/clientpositive/encrypted/encryption_move_tbl.q.out
+++ b/ql/src/test/results/clientpositive/encrypted/encryption_move_tbl.q.out
@@ -77,7 +77,7 @@ PREHOOK: query: ALTER TABLE default.encrypted_table RENAME TO encrypted_db.encry
 PREHOOK: type: ALTERTABLE_RENAME
 PREHOOK: Input: default@encrypted_table
 PREHOOK: Output: default@encrypted_table
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. Unable to alter table. Alter Table operation for default.encrypted_table failed to move data due to: '/build/ql/test/data/warehouse/encrypted_table can't be moved into an encryption zone.' See hive log file for details.
+FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. Unable to alter table. Alter Table operation for default.encrypted_table failed to move data due to: 'Got exception: org.apache.hadoop.ipc.RemoteException /build/ql/test/data/warehouse/encrypted_table can't be moved into an encryption zone.' See hive log file for details.
 PREHOOK: query: SHOW TABLES
 PREHOOK: type: SHOWTABLES
 PREHOOK: Input: database:default
@@ -206,7 +206,7 @@ PREHOOK: query: ALTER TABLE encrypted_db_outloc.renamed_encrypted_table RENAME T
 PREHOOK: type: ALTERTABLE_RENAME
 PREHOOK: Input: encrypted_db_outloc@renamed_encrypted_table
 PREHOOK: Output: encrypted_db_outloc@renamed_encrypted_table
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. Unable to alter table. Alter Table operation for encrypted_db_outloc.renamed_encrypted_table failed to move data due to: '/build/ql/test/data/specified_db_location/renamed_encrypted_table can't be moved from an encryption zone.' See hive log file for details.
+FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. Unable to alter table. Alter Table operation for encrypted_db_outloc.renamed_encrypted_table failed to move data due to: 'Got exception: org.apache.hadoop.ipc.RemoteException /build/ql/test/data/specified_db_location/renamed_encrypted_table can't be moved from an encryption zone.' See hive log file for details.
 PREHOOK: query: SHOW TABLES
 PREHOOK: type: SHOWTABLES
 PREHOOK: Input: database:encrypted_db_outloc
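
Reviewer note: the sketch below is meant to make the new MOVE/COPY semantics of ReplChangeManager.recycle() easy to see at a glance. It is an illustration, not the Hive implementation: the class RecycleSketch and method recycleFile() are hypothetical, the checksum handling is reduced to a plain exists() check, and Hadoop's FileUtil.copy() stands in for Hive's FileUtils.copy().

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;

public class RecycleSketch {

  public enum RecycleType { MOVE, COPY }

  // Recycle one file into the change-management root. MOVE renames the source
  // away (cheap, consumes the source); COPY duplicates it (costly, preserves
  // the source), which is what the rename code paths in this patch need.
  public static boolean recycleFile(FileSystem fs, Path src, Path cmPath,
      RecycleType type, Configuration conf) throws IOException {
    long now = System.currentTimeMillis();
    // Refresh the source timestamp before recycling so a concurrent CM cleaner
    // pass does not collect the file mid-operation (same ordering as the patch).
    fs.setTimes(src, now, now);
    if (fs.exists(cmPath)) {
      // Same content already retained: only extend its tenancy in cmroot.
      fs.setTimes(cmPath, now, now);
      return false;
    }
    switch (type) {
      case MOVE:
        // Fails if cmPath appeared concurrently, mirroring fs.rename in the patch.
        return fs.rename(src, cmPath);
      case COPY:
        // overwrite=true guards against a partially written cmPath left behind
        // by an earlier failed copy.
        return FileUtil.copy(fs, src, fs, cmPath, false /* deleteSource */,
            true /* overwrite */, conf);
      default:
        return false;
    }
  }

  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    // Hypothetical paths, for illustration only; Hive derives the cmroot file
    // name from the file checksum via getCMPath().
    Path src = new Path("/warehouse/db1/t1/part-00000");
    Path cmPath = new Path("/cmroot/3b5e1fa0");
    System.out.println(recycleFile(fs, src, cmPath, RecycleType.COPY, conf));
  }
}

The design point is the same one the patch makes: recycle() must not consume the source when the caller still needs it, which is exactly the RENAME case.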
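
Reviewer note: a second sketch, this time of why Warehouse.renameDir() recycles with RecycleType.COPY before renaming. The source tree must still exist when fs.rename() runs, so MOVE is not an option on this path. RenameDirSketch, copyTreeToCmRoot(), and the flat name-keyed cmRoot layout are hypothetical; the real change manager keys retained files by checksum instead.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;

public class RenameDirSketch {

  // Retain a copy of every file under src in cmRoot, then rename src to dest.
  // COPY (not MOVE) because the rename below still needs the source in place.
  public static boolean renameDir(FileSystem fs, Path src, Path dest,
      boolean needCmRecycle, Path cmRoot, Configuration conf) throws IOException {
    if (needCmRecycle) {
      copyTreeToCmRoot(fs, src, cmRoot, conf);
    }
    return fs.rename(src, dest);
  }

  // Recursively copy every file under src into cmRoot. Files are keyed by name
  // here for brevity; Hive keys them by checksum via getCMPath().
  private static void copyTreeToCmRoot(FileSystem fs, Path src, Path cmRoot,
      Configuration conf) throws IOException {
    for (FileStatus st : fs.listStatus(src)) {
      if (st.isDirectory()) {
        copyTreeToCmRoot(fs, st.getPath(), cmRoot, conf);
      } else {
        Path cmPath = new Path(cmRoot, st.getPath().getName());
        FileUtil.copy(fs, st.getPath(), fs, cmPath, false, true, conf);
      }
    }
  }
}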