diff --git a/common/src/java/org/apache/hadoop/hive/common/FileUtils.java b/common/src/java/org/apache/hadoop/hive/common/FileUtils.java index 24657b164d..309009478a 100644 --- a/common/src/java/org/apache/hadoop/hive/common/FileUtils.java +++ b/common/src/java/org/apache/hadoop/hive/common/FileUtils.java @@ -740,18 +740,26 @@ public static boolean rename(FileSystem fs, Path sourcePath, } /** - * @param fs1 - * @param fs2 + * Check if path1 and path2 are on the same file system + * @param fs1 file system + * @param path1 path on file system fs1 + * @param fs2 file system + * @param path2 path on file system fs2 * @return return true if both file system arguments point to same file system */ - public static boolean equalsFileSystem(FileSystem fs1, FileSystem fs2) { + public static boolean equalsFileSystem(FileSystem fs1, Path path1, + FileSystem fs2, Path path2) + throws IOException { //When file system cache is disabled, you get different FileSystem objects // for same file system, so '==' can't be used in such cases //FileSystem api doesn't have a .equals() function implemented, so using //the uri for comparison. FileSystem already uses uri+Configuration for //equality in its CACHE . 
//Once equality has been added in HDFS-9159, we should make use of it - return fs1.getUri().equals(fs2.getUri()); + URI resolvedPath1 = fs1.resolvePath(path1).toUri(); + URI resolvedPath2 = fs2.resolvePath(path2).toUri(); + return resolvedPath1.getScheme().equalsIgnoreCase(resolvedPath2.getScheme()) && + (resolvedPath1.getAuthority() == null ? resolvedPath2.getAuthority() == null : resolvedPath1.getAuthority().equalsIgnoreCase(resolvedPath2.getAuthority())); } /** diff --git a/common/src/test/org/apache/hadoop/hive/common/TestFileUtils.java b/common/src/test/org/apache/hadoop/hive/common/TestFileUtils.java index 15e74db366..d5c244ca3e 100644 --- a/common/src/test/org/apache/hadoop/hive/common/TestFileUtils.java +++ b/common/src/test/org/apache/hadoop/hive/common/TestFileUtils.java @@ -35,6 +35,7 @@ import java.util.HashSet; import java.util.Set; +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.ContentSummary; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.LocalFileSystem; @@ -264,4 +265,30 @@ public void testCopyWithDistCpAs() throws IOException { equalsIgnoreCase("Distcp is called with doAsUser and delete source set as true")); } } + + @Test + public void testEqualsFileSystem() throws IOException { + HiveConf conf = new HiveConf(TestFileUtils.class); + + FileSystem mockFs1 = mock(FileSystem.class); + FileSystem mockFs2 = mock(FileSystem.class); + Path path1 = new Path("viewfs://ns/path1"); + Path path2 = new Path("viewfs://ns/path2"); + + when(mockFs1.resolvePath(path1)) + .thenReturn(new Path("hdfs://ns1/path1")); + when(mockFs2.resolvePath(path2)) + .thenReturn(new Path("hdfs://ns1/path2")); + + Assert.assertTrue(FileUtils.equalsFileSystem(mockFs1, path1, mockFs2, path2)); + + when(mockFs1.resolvePath(path1)) + .thenReturn(new Path("hdfs://ns1/path1")); + when(mockFs2.resolvePath(path2)) + .thenReturn(new Path("hdfs://ns2/path2")); + + Assert.assertFalse(FileUtils.equalsFileSystem(mockFs1, path1, mockFs2, path2)); + + } + } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java index 9ef88aca8a..1e79d6bd43 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java @@ -2338,7 +2338,7 @@ private Partition loadPartitionInternal(Path loadPath, Table tbl, Map partCols, List vals, } /** - * Determine if two objects reference the same file system. - * @param fs1 first file system - * @param fs2 second file system + * Check if path1 and path2 are on the same file system + * @param fs1 file system + * @param path1 path on file system fs1 + * @param fs2 file system + * @param path2 path on file system fs2 * @return return true if both file system arguments point to same file system */ - public static boolean equalsFileSystem(FileSystem fs1, FileSystem fs2) { + public static boolean equalsFileSystem(FileSystem fs1, Path path1, + FileSystem fs2, Path path2) + throws IOException { //When file system cache is disabled, you get different FileSystem objects // for same file system, so '==' can't be used in such cases //FileSystem api doesn't have a .equals() function implemented, so using //the uri for comparison. FileSystem already uses uri+Configuration for //equality in its CACHE . 
//Once equality has been added in HDFS-9159, we should make use of it - return fs1.getUri().equals(fs2.getUri()); + URI resolvedPath1 = fs1.resolvePath(path1).toUri(); + URI resolvedPath2 = fs2.resolvePath(path2).toUri(); + return resolvedPath1.getScheme().equalsIgnoreCase(resolvedPath2.getScheme()) && + (resolvedPath1.getAuthority() == null ? resolvedPath2.getAuthority() == null : resolvedPath1.getAuthority().equalsIgnoreCase(resolvedPath2.getAuthority())); } /** diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java index d68e76d594..cd8eec8cbe 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java @@ -266,10 +266,17 @@ public void alterTable(RawStore msdb, Warehouse wh, String catName, String dbnam // check that destination does not exist otherwise we will be // overwriting data // check that src and dest are on the same file system - if (!FileUtils.equalsFileSystem(srcFs, destFs)) { - throw new InvalidOperationException("table new location " + destPath - + " is on a different file system than the old location " - + srcPath + ". This operation is not supported"); + try { + if (!FileUtils.equalsFileSystem(srcFs, srcPath, destFs, destPath)) { + throw new InvalidOperationException("table new location " + destPath + + " is on a different file system than the old location " + + srcPath + ". This operation is not supported"); + } + } catch(IOException e) { + LOG.error("Failed to check if " + srcPath + "," + destPath + + " are on same file system", e); + throw new InvalidOperationException("Alter Table operation for " + dbname + "."
+ name + + " failed due to: '" + getSimpleMessage(e) +"'"); } try { @@ -654,10 +661,17 @@ public Partition alterPartition(RawStore msdb, Warehouse wh, String catName, Str srcFs = wh.getFs(srcPath); destFs = wh.getFs(destPath); // check that src and dest are on the same file system - if (!FileUtils.equalsFileSystem(srcFs, destFs)) { - throw new InvalidOperationException("New table location " + destPath - + " is on a different file system than the old location " - + srcPath + ". This operation is not supported."); + try { + if (!FileUtils.equalsFileSystem(srcFs, srcPath, destFs, destPath)) { + throw new InvalidOperationException("New table location " + destPath + + " is on a different file system than the old location " + + srcPath + ". This operation is not supported."); + } + } catch(IOException e) { + LOG.error("Failed to check if " + srcPath + "," + destPath + + " are on same file system", e); + throw new InvalidOperationException("Alter partition operation for " + dbname + "." + name + + " failed due to: '" + getSimpleMessage(e) +"'"); } try {