diff --git a/common/pom.xml b/common/pom.xml
index e1c15ee..84bb1e5 100644
--- a/common/pom.xml
+++ b/common/pom.xml
@@ -157,7 +157,13 @@
         <artifactId>commons-logging</artifactId>
-
+
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-hdfs</artifactId>
+      <version>${hadoop.version}</version>
+      <optional>true</optional>
+    </dependency>
     <groupId>com.google.code.tempus-fugit</groupId>
diff --git a/common/src/java/org/apache/hadoop/hive/common/BlobStorageUtils.java b/common/src/java/org/apache/hadoop/hive/common/BlobStorageUtils.java
index e6a17cb..3fd001d 100644
--- a/common/src/java/org/apache/hadoop/hive/common/BlobStorageUtils.java
+++ b/common/src/java/org/apache/hadoop/hive/common/BlobStorageUtils.java
@@ -24,21 +24,23 @@
import java.util.Collection;
+
/**
* Utilities for different blob (object) storage systems
*/
public class BlobStorageUtils {
+
private static final boolean DISABLE_BLOBSTORAGE_AS_SCRATCHDIR = false;
public static boolean isBlobStoragePath(final Configuration conf, final Path path) {
return path != null && isBlobStorageScheme(conf, path.toUri().getScheme());
}
- public static boolean isBlobStorageFileSystem(final Configuration conf, final FileSystem fs) {
- return fs != null && isBlobStorageScheme(conf, fs.getScheme());
+ static boolean isBlobStorageFileSystem(final Configuration conf, final FileSystem fs) {
+ return fs != null && fs.getUri() != null && isBlobStorageScheme(conf, fs.getUri().getScheme());
}
- public static boolean isBlobStorageScheme(final Configuration conf, final String scheme) {
+ static boolean isBlobStorageScheme(final Configuration conf, final String scheme) {
    Collection<String> supportedBlobStoreSchemes =
conf.getStringCollection(HiveConf.ConfVars.HIVE_BLOBSTORE_SUPPORTED_SCHEMES.varname);
@@ -61,4 +63,8 @@ public static boolean areOptimizationsEnabled(final Configuration conf) {
HiveConf.ConfVars.HIVE_BLOBSTORE_OPTIMIZATIONS_ENABLED.defaultBoolVal
);
}
+
+ public static boolean shouldSetPerms(Configuration conf, FileSystem fs) {
+ return !(BlobStorageUtils.areOptimizationsEnabled(conf) && BlobStorageUtils.isBlobStorageFileSystem(conf, fs));
+ }
}
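
The new shouldSetPerms() gate skips permission calls only when blob-store optimizations are enabled and the target filesystem is a blob store; in every other case Hive keeps setting permissions. A minimal, self-contained sketch of that truth table (the class below is illustrative, not part of the patch):

public class ShouldSetPermsSketch {
  // Mirrors shouldSetPerms() above: set permissions unless both conditions hold.
  static boolean shouldSetPerms(boolean optimizationsEnabled, boolean isBlobStore) {
    return !(optimizationsEnabled && isBlobStore);
  }

  public static void main(String[] args) {
    System.out.println(shouldSetPerms(true, true));   // false: skip perms on s3a/swift targets
    System.out.println(shouldSetPerms(true, false));  // true: HDFS targets still get perms
    System.out.println(shouldSetPerms(false, true));  // true: optimizations off, set perms anyway
  }
}
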
diff --git a/common/src/java/org/apache/hadoop/hive/common/FileUtils.java b/common/src/java/org/apache/hadoop/hive/common/FileUtils.java
index e586015..b513eed 100644
--- a/common/src/java/org/apache/hadoop/hive/common/FileUtils.java
+++ b/common/src/java/org/apache/hadoop/hive/common/FileUtils.java
@@ -46,13 +46,11 @@
import org.apache.hadoop.fs.Trash;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.conf.HiveConfUtil;
import org.apache.hadoop.hive.io.HdfsUtils;
import org.apache.hadoop.hive.shims.HadoopShims;
import org.apache.hadoop.hive.shims.ShimLoader;
import org.apache.hadoop.hive.shims.Utils;
import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.util.Shell;
import org.apache.hadoop.util.StringUtils;
import org.apache.hive.common.util.ShutdownHookManager;
import org.slf4j.Logger;
@@ -509,11 +507,29 @@ public static boolean isOwnerOfFileHierarchy(FileSystem fs, FileStatus fileStatu
/**
* Creates the directory and all necessary parent directories.
+ *
+ * @param fs FileSystem to use
+ * @param f path to create.
+ * @param conf Hive configuration
+ *
+ * @return true if directory created successfully. False otherwise, including if it exists.
+ *
+ * @throws IOException exception in creating the directory
+ */
+ public static boolean mkdir(FileSystem fs, Path f, Configuration conf) throws IOException {
+ return mkdir(fs, f, PermissionsInheritor.shouldInheritPerms(conf, fs), conf);
+ }
+
+ /**
+ * Creates the directory and all necessary parent directories.
+ *
* @param fs FileSystem to use
* @param f path to create.
* @param inheritPerms whether directory inherits the permission of the last-existing parent path
* @param conf Hive configuration
+ *
* @return true if directory created successfully. False otherwise, including if it exists.
+ *
* @throws IOException exception in creating the directory
*/
public static boolean mkdir(FileSystem fs, Path f, boolean inheritPerms, Configuration conf) throws IOException {
@@ -601,9 +617,10 @@ static boolean copy(FileSystem srcFS, Path src,
copied = FileUtil.copy(srcFS, src, dstFS, dst, deleteSource, overwrite, conf);
}
- boolean inheritPerms = conf.getBoolVar(HiveConf.ConfVars.HIVE_WAREHOUSE_SUBDIR_INHERIT_PERMS);
+ boolean inheritPerms = PermissionsInheritor.shouldInheritPerms(conf, dstFS);
if (copied && inheritPerms) {
- HdfsUtils.setFullFileStatus(conf, new HdfsUtils.HadoopFileStatus(conf, dstFS, dst.getParent()), dstFS, dst, true);
+ PermissionsInheritor.inheritPerms(conf, new HdfsUtils.HadoopFileStatus(conf, dstFS, dst.getParent()), dstFS, dst,
+ true);
}
return copied;
}
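
With the new two-argument mkdir overload, call sites stop reading HIVE_WAREHOUSE_SUBDIR_INHERIT_PERMS themselves and let the helper consult the configuration plus the destination filesystem. A hedged usage sketch, mirroring the Context.java change further down (ensureDir is a hypothetical caller, not part of the patch):

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.common.FileUtils;

public class MkdirSketch {
  // Delegates the inherit-perms decision to FileUtils/PermissionsInheritor.
  public static void ensureDir(FileSystem fs, Path dir, Configuration conf) throws IOException {
    // Equivalent to mkdir(fs, dir, PermissionsInheritor.shouldInheritPerms(conf, fs), conf).
    if (!FileUtils.mkdir(fs, dir, conf)) {
      throw new IllegalStateException("Cannot create directory '" + dir + "'");
    }
  }
}
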
diff --git a/common/src/java/org/apache/hadoop/hive/common/PermissionsInheritor.java b/common/src/java/org/apache/hadoop/hive/common/PermissionsInheritor.java
new file mode 100644
index 0000000..6dcb56e
--- /dev/null
+++ b/common/src/java/org/apache/hadoop/hive/common/PermissionsInheritor.java
@@ -0,0 +1,44 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.common;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.io.HdfsUtils;
+
+
+public class PermissionsInheritor {
+
+ public static boolean shouldInheritPerms(Configuration conf, FileSystem fs) {
+ return HiveConf.getBoolVar(conf,
+ HiveConf.ConfVars.HIVE_WAREHOUSE_SUBDIR_INHERIT_PERMS) && BlobStorageUtils.shouldSetPerms(conf, fs);
+ }
+
+ public static void inheritPerms(Configuration conf, HdfsUtils.HadoopFileStatus sourceStatus, FileSystem fs, Path target,
+ boolean recursive) {
+ inheritPerms(conf, sourceStatus, null, fs, target, recursive);
+ }
+
+ public static void inheritPerms(Configuration conf, HdfsUtils.HadoopFileStatus sourceStatus, String targetGroup,
+ FileSystem fs, Path target, boolean recursive) {
+ HdfsUtils.setFullFileStatus(conf, sourceStatus, targetGroup, fs, target, recursive);
+ }
+}
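
PermissionsInheritor is a thin facade: shouldInheritPerms() combines the HIVE_WAREHOUSE_SUBDIR_INHERIT_PERMS flag with the blob-store short-circuit, and inheritPerms() forwards to HdfsUtils.setFullFileStatus(). A sketch of the copy-path pattern this enables, as FileUtils.copy() above uses it (copyWithPerms and its arguments are made up for illustration):

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.common.PermissionsInheritor;
import org.apache.hadoop.hive.io.HdfsUtils;

public class InheritPermsSketch {
  // Hypothetical copy step: inherit group/perms/ACLs from the parent directory.
  static void copyWithPerms(Configuration conf, FileSystem dstFs, Path dst) throws IOException {
    if (PermissionsInheritor.shouldInheritPerms(conf, dstFs)) {
      HdfsUtils.HadoopFileStatus parentStatus =
          new HdfsUtils.HadoopFileStatus(conf, dstFs, dst.getParent());
      PermissionsInheritor.inheritPerms(conf, parentStatus, dstFs, dst, true /* recursive */);
    }
  }
}
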
diff --git a/common/src/java/org/apache/hadoop/hive/io/HdfsUtils.java b/common/src/java/org/apache/hadoop/hive/io/HdfsUtils.java
new file mode 100644
index 0000000..568d2b7
--- /dev/null
+++ b/common/src/java/org/apache/hadoop/hive/io/HdfsUtils.java
@@ -0,0 +1,248 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.io;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+
+import com.google.common.annotations.VisibleForTesting;
+
+import org.apache.commons.lang.ArrayUtils;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FsShell;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.AclEntry;
+import org.apache.hadoop.fs.permission.AclEntryScope;
+import org.apache.hadoop.fs.permission.AclEntryType;
+import org.apache.hadoop.fs.permission.AclStatus;
+import org.apache.hadoop.fs.permission.FsAction;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
+
+import org.apache.hadoop.hive.common.BlobStorageUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.base.Joiner;
+import com.google.common.base.Objects;
+import com.google.common.base.Predicate;
+import com.google.common.collect.Iterables;
+
+public class HdfsUtils {
+
+ private static final Logger LOG = LoggerFactory.getLogger("shims.HdfsUtils");
+
+ // TODO: this relies on HDFS not changing the format; we assume if we could get inode ID, this
+ // is still going to work. Otherwise, file IDs can be turned off. Later, we should use
+ // a public utility method in HDFS to obtain the inode-based path.
+ private static final String HDFS_ID_PATH_PREFIX = "/.reserved/.inodes/";
+
+ public static Path getFileIdPath(
+ FileSystem fileSystem, Path path, long fileId) {
+ return (fileSystem instanceof DistributedFileSystem)
+ ? new Path(HDFS_ID_PATH_PREFIX + fileId) : path;
+ }
+
+ /**
+ * Copy the permissions, group, and ACLs from a source {@link HadoopFileStatus} to a target {@link Path}. This method
+ * will only log a warning if permissions cannot be set; no exception will be thrown.
+ *
+ * @param conf the {@link Configuration} used when setting permissions and ACLs
+ * @param sourceStatus the source {@link HadoopFileStatus} to copy permissions and ACLs from
+ * @param fs the {@link FileSystem} that contains the target {@link Path}
+ * @param target the {@link Path} to copy permissions, group, and ACLs to
+ * @param recursion recursively set permissions and ACLs on the target {@link Path}
+ */
+ public static void setFullFileStatus(Configuration conf, HdfsUtils.HadoopFileStatus sourceStatus,
+ FileSystem fs, Path target, boolean recursion) {
+ if (BlobStorageUtils.shouldSetPerms(conf, fs)) {
+ setFullFileStatus(conf, sourceStatus, null, fs, target, recursion);
+ }
+ }
+
+ /**
+ * Copy the permissions, group, and ACLs from a source {@link HadoopFileStatus} to a target {@link Path}. This method
+ * will only log a warning if permissions cannot be set; no exception will be thrown.
+ *
+ * @param conf the {@link Configuration} used when setting permissions and ACLs
+ * @param sourceStatus the source {@link HadoopFileStatus} to copy permissions and ACLs from
+ * @param targetGroup the group of the target {@link Path}, if this is set and it is equal to the source group, an
+ * extra set group operation is avoided
+ * @param fs the {@link FileSystem} that contains the target {@link Path}
+ * @param target the {@link Path} to copy permissions, group, and ACLs to
+ * @param recursion recursively set permissions and ACLs on the target {@link Path}
+ */
+ public static void setFullFileStatus(Configuration conf, HdfsUtils.HadoopFileStatus sourceStatus,
+ String targetGroup, FileSystem fs, Path target, boolean recursion) {
+ if (BlobStorageUtils.shouldSetPerms(conf, fs)) {
+ setFullFileStatus(conf, sourceStatus, targetGroup, fs, target, recursion, recursion ? new FsShell() : null);
+ }
+ }
+
+ @VisibleForTesting
+ static void setFullFileStatus(Configuration conf, HdfsUtils.HadoopFileStatus sourceStatus,
+ String targetGroup, FileSystem fs, Path target, boolean recursion, FsShell fsShell) {
+ try {
+ FileStatus fStatus = sourceStatus.getFileStatus();
+ String group = fStatus.getGroup();
+ boolean aclEnabled = Objects.equal(conf.get("dfs.namenode.acls.enabled"), "true");
+ FsPermission sourcePerm = fStatus.getPermission();
+ List<AclEntry> aclEntries = null;
+ if (aclEnabled) {
+ if (sourceStatus.getAclEntries() != null) {
+ LOG.trace(sourceStatus.getAclStatus().toString());
+ aclEntries = new ArrayList<>(sourceStatus.getAclEntries());
+ removeBaseAclEntries(aclEntries);
+
+ //the ACL APIs also expect the traditional user/group/other permission in the form of ACL
+ aclEntries.add(newAclEntry(AclEntryScope.ACCESS, AclEntryType.USER, sourcePerm.getUserAction()));
+ aclEntries.add(newAclEntry(AclEntryScope.ACCESS, AclEntryType.GROUP, sourcePerm.getGroupAction()));
+ aclEntries.add(newAclEntry(AclEntryScope.ACCESS, AclEntryType.OTHER, sourcePerm.getOtherAction()));
+ }
+ }
+
+ if (recursion) {
+ //use FsShell to change group, permissions, and extended ACL's recursively
+ fsShell.setConf(conf);
+ //If the file has no group, there is no need to call chgrp
+ if (group != null && !group.isEmpty()) {
+ run(fsShell, new String[]{"-chgrp", "-R", group, target.toString()});
+ }
+ if (aclEnabled) {
+ if (null != aclEntries) {
+ //Attempt extended Acl operations only if it's enabled, but don't fail the operation regardless.
+ try {
+ //construct the -setfacl command
+ String aclEntry = Joiner.on(",").join(aclEntries);
+ run(fsShell, new String[]{"-setfacl", "-R", "--set", aclEntry, target.toString()});
+
+ } catch (Exception e) {
+ LOG.info("Skipping ACL inheritance: File system for path " + target + " " +
+ "does not support ACLs but dfs.namenode.acls.enabled is set to true. ");
+ LOG.debug("The details are: " + e, e);
+ }
+ }
+ } else {
+ String permission = Integer.toString(sourcePerm.toShort(), 8);
+ run(fsShell, new String[]{"-chmod", "-R", permission, target.toString()});
+ }
+ } else {
+ if (group != null && !group.isEmpty()) {
+ if (targetGroup == null ||
+ !group.equals(targetGroup)) {
+ fs.setOwner(target, null, group);
+ }
+ }
+ if (aclEnabled) {
+ if (null != aclEntries) {
+ fs.setAcl(target, aclEntries);
+ }
+ } else {
+ fs.setPermission(target, sourcePerm);
+ }
+ }
+ } catch (Exception e) {
+ LOG.warn(
+ "Unable to inherit permissions for file " + target + " from file " + sourceStatus.getFileStatus().getPath(),
+ e.getMessage());
+ LOG.debug("Exception while inheriting permissions", e);
+ }
+ }
+
+ /**
+ * Create a new AclEntry with scope, type and permission (no name).
+ *
+ * @param scope
+ * AclEntryScope scope of the ACL entry
+ * @param type
+ * AclEntryType ACL entry type
+ * @param permission
+ * FsAction set of permissions in the ACL entry
+ * @return AclEntry new AclEntry
+ */
+ private static AclEntry newAclEntry(AclEntryScope scope, AclEntryType type,
+ FsAction permission) {
+ return new AclEntry.Builder().setScope(scope).setType(type)
+ .setPermission(permission).build();
+ }
+
+ /**
+ * Removes basic permission acls (unnamed acls) from the list of acl entries
+ * @param entries acl entries to remove from.
+ */
+ private static void removeBaseAclEntries(List<AclEntry> entries) {
+ Iterables.removeIf(entries, new Predicate<AclEntry>() {
+ @Override
+ public boolean apply(AclEntry input) {
+ if (input.getName() == null) {
+ return true;
+ }
+ return false;
+ }
+ });
+ }
+
+ private static void run(FsShell shell, String[] command) throws Exception {
+ LOG.debug(ArrayUtils.toString(command));
+ int retval = shell.run(command);
+ LOG.debug("Return value is :" + retval);
+ }
+
+ public static class HadoopFileStatus {
+
+ private final FileStatus fileStatus;
+ private final AclStatus aclStatus;
+
+ public HadoopFileStatus(Configuration conf, FileSystem fs, Path file) throws IOException {
+
+ FileStatus fileStatus = fs.getFileStatus(file);
+ AclStatus aclStatus = null;
+ if (Objects.equal(conf.get("dfs.namenode.acls.enabled"), "true")) {
+ //Attempt extended Acl operations only if it's enabled, but don't fail the operation regardless.
+ try {
+ aclStatus = fs.getAclStatus(file);
+ } catch (Exception e) {
+ LOG.info("Skipping ACL inheritance: File system for path " + file + " " +
+ "does not support ACLs but dfs.namenode.acls.enabled is set to true. ");
+ LOG.debug("The details are: " + e, e);
+ }
+ }
+ this.fileStatus = fileStatus;
+ this.aclStatus = aclStatus;
+ }
+
+ public FileStatus getFileStatus() {
+ return fileStatus;
+ }
+
+ public List<AclEntry> getAclEntries() {
+ return aclStatus == null ? null : Collections.unmodifiableList(aclStatus.getEntries());
+ }
+
+ @VisibleForTesting
+ AclStatus getAclStatus() {
+ return this.aclStatus;
+ }
+ }
+}
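
In the recursive branch, setFullFileStatus() shells out through FsShell and hands -chmod the source permission as an octal string. A small standalone sketch of that conversion (only FsPermission, the real Hadoop class, is assumed):

import org.apache.hadoop.fs.permission.FsPermission;

public class PermOctalSketch {
  public static void main(String[] args) {
    FsPermission sourcePerm = new FsPermission((short) 0755); // rwxr-xr-x
    // Radix-8 printing of toShort() yields "755", the argument passed to
    // the recursive -chmod FsShell invocation above.
    System.out.println(Integer.toString(sourcePerm.toShort(), 8));
  }
}
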
diff --git a/common/src/test/org/apache/hadoop/hive/common/TestBlobStorageUtils.java b/common/src/test/org/apache/hadoop/hive/common/TestBlobStorageUtils.java
index 84a0d86..918ec95 100644
--- a/common/src/test/org/apache/hadoop/hive/common/TestBlobStorageUtils.java
+++ b/common/src/test/org/apache/hadoop/hive/common/TestBlobStorageUtils.java
@@ -64,18 +64,18 @@ public void testValidAndInvalidFileSystems() {
/* Valid FileSystem schemes */
- doReturn("s3a").when(fs).getScheme();
+ doReturn(URI.create("s3a:///")).when(fs).getUri();
assertTrue(isBlobStorageFileSystem(conf, fs));
- doReturn("swift").when(fs).getScheme();
+ doReturn(URI.create("swift:///")).when(fs).getUri();
assertTrue(isBlobStorageFileSystem(conf, fs));
/* Invalid FileSystem schemes */
- doReturn("hdfs").when(fs).getScheme();
+ doReturn(URI.create("hdfs:///")).when(fs).getUri();
assertFalse(isBlobStorageFileSystem(conf, fs));
- doReturn("").when(fs).getScheme();
+ doReturn(URI.create("")).when(fs).getUri();
assertFalse(isBlobStorageFileSystem(conf, fs));
assertFalse(isBlobStorageFileSystem(conf, null));
diff --git a/common/src/test/org/apache/hadoop/hive/common/TestPermissionsInheritor.java b/common/src/test/org/apache/hadoop/hive/common/TestPermissionsInheritor.java
new file mode 100644
index 0000000..453b32e
--- /dev/null
+++ b/common/src/test/org/apache/hadoop/hive/common/TestPermissionsInheritor.java
@@ -0,0 +1,57 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.common;
+
+import java.net.URI;
+
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.hive.conf.HiveConf;
+
+import org.junit.Before;
+import org.junit.Test;
+
+import static org.junit.Assert.assertFalse;
+
+import static org.junit.Assert.assertTrue;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+
+public class TestPermissionsInheritor {
+
+ private final HiveConf conf = new HiveConf();
+
+ @Before
+ public void setUp() {
+ conf.set(HiveConf.ConfVars.HIVE_BLOBSTORE_SUPPORTED_SCHEMES.varname, HiveConf.ConfVars.HIVE_BLOBSTORE_SUPPORTED_SCHEMES.getDefaultValue());
+ }
+
+ @Test
+ public void testShouldInheritPerms() {
+ conf.set(HiveConf.ConfVars.HIVE_WAREHOUSE_SUBDIR_INHERIT_PERMS.varname, "true");
+ FileSystem fs = mock(FileSystem.class);
+ for (String blobScheme : conf.getStringCollection(HiveConf.ConfVars.HIVE_BLOBSTORE_SUPPORTED_SCHEMES.varname)) {
+ when(fs.getUri()).thenReturn(URI.create(blobScheme + ":///"));
+ assertFalse(PermissionsInheritor.shouldInheritPerms(conf, fs));
+ }
+
+ when(fs.getUri()).thenReturn(URI.create("hdfs:///"));
+ assertTrue(PermissionsInheritor.shouldInheritPerms(conf, fs));
+ }
+}
diff --git a/common/src/test/org/apache/hadoop/hive/io/TestHdfsUtils.java b/common/src/test/org/apache/hadoop/hive/io/TestHdfsUtils.java
new file mode 100644
index 0000000..79507e4
--- /dev/null
+++ b/common/src/test/org/apache/hadoop/hive/io/TestHdfsUtils.java
@@ -0,0 +1,203 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.io;
+
+import java.io.IOException;
+import java.net.URI;
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FsShell;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.AclEntry;
+import org.apache.hadoop.fs.permission.AclStatus;
+import org.apache.hadoop.fs.permission.FsPermission;
+
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.junit.Test;
+
+import static org.mockito.Matchers.any;
+import static org.mockito.Matchers.anyList;
+import static org.mockito.Mockito.doThrow;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.never;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
+
+public class TestHdfsUtils {
+
+ /**
+ * Tests that {@link HdfsUtils#setFullFileStatus(Configuration, HdfsUtils.HadoopFileStatus, String, FileSystem, Path, boolean)}
+ * does not throw an exception when setting the group and without recursion.
+ */
+ @Test
+ public void testSetFullFileStatusFailInheritGroup() throws IOException {
+ Configuration conf = new Configuration();
+ conf.set("dfs.namenode.acls.enabled", "false");
+
+ HdfsUtils.HadoopFileStatus mockHadoopFileStatus = mock(HdfsUtils.HadoopFileStatus.class);
+ FileStatus mockSourceStatus = mock(FileStatus.class);
+ FileSystem fs = mock(FileSystem.class);
+
+ when(mockSourceStatus.getGroup()).thenReturn("fakeGroup1");
+ when(mockHadoopFileStatus.getFileStatus()).thenReturn(mockSourceStatus);
+ doThrow(RuntimeException.class).when(fs).setOwner(any(Path.class), any(String.class), any(String.class));
+
+ HdfsUtils.setFullFileStatus(conf, mockHadoopFileStatus, "fakeGroup2", fs, new Path("fakePath"), false);
+ verify(fs).setOwner(any(Path.class), any(String.class), any(String.class));
+ }
+
+ /**
+ * Tests that {@link HdfsUtils#setFullFileStatus(Configuration, HdfsUtils.HadoopFileStatus, FileSystem, Path, boolean)}
+ * does not throw an exception when setting ACLs and without recursion.
+ */
+ @Test
+ public void testSetFullFileStatusFailInheritAcls() throws IOException {
+ Configuration conf = new Configuration();
+ conf.set("dfs.namenode.acls.enabled", "true");
+
+ HdfsUtils.HadoopFileStatus mockHadoopFileStatus = mock(HdfsUtils.HadoopFileStatus.class);
+ FileStatus mockSourceStatus = mock(FileStatus.class);
+ AclStatus mockAclStatus = mock(AclStatus.class);
+ FileSystem mockFs = mock(FileSystem.class);
+
+ when(mockSourceStatus.getPermission()).thenReturn(new FsPermission((short) 777));
+ when(mockAclStatus.toString()).thenReturn("");
+ when(mockHadoopFileStatus.getFileStatus()).thenReturn(mockSourceStatus);
+ when(mockHadoopFileStatus.getAclEntries()).thenReturn(new ArrayList<AclEntry>());
+ when(mockHadoopFileStatus.getAclStatus()).thenReturn(mockAclStatus);
+ doThrow(RuntimeException.class).when(mockFs).setAcl(any(Path.class), any(List.class));
+
+ HdfsUtils.setFullFileStatus(conf, mockHadoopFileStatus, mockFs, new Path("fakePath"), false);
+ verify(mockFs).setAcl(any(Path.class), any(List.class));
+ }
+
+ /**
+ * Tests that {@link HdfsUtils#setFullFileStatus(Configuration, HdfsUtils.HadoopFileStatus, FileSystem, Path, boolean)}
+ * does not throw an exception when setting permissions and without recursion.
+ */
+ @Test
+ public void testSetFullFileStatusFailInheritPerms() throws IOException {
+ Configuration conf = new Configuration();
+ conf.set("dfs.namenode.acls.enabled", "false");
+
+ HdfsUtils.HadoopFileStatus mockHadoopFileStatus = mock(HdfsUtils.HadoopFileStatus.class);
+ FileStatus mockSourceStatus = mock(FileStatus.class);
+ FileSystem mockFs = mock(FileSystem.class);
+
+ when(mockSourceStatus.getPermission()).thenReturn(new FsPermission((short) 777));
+ when(mockHadoopFileStatus.getFileStatus()).thenReturn(mockSourceStatus);
+ doThrow(RuntimeException.class).when(mockFs).setPermission(any(Path.class), any(FsPermission.class));
+
+ HdfsUtils.setFullFileStatus(conf, mockHadoopFileStatus, mockFs, new Path("fakePath"), false);
+ verify(mockFs).setPermission(any(Path.class), any(FsPermission.class));
+ }
+
+ /**
+ * Tests that {@link HdfsUtils#setFullFileStatus(Configuration, HdfsUtils.HadoopFileStatus, String, FileSystem, Path, boolean)}
+ * does not throw an exception when setting the group and with recursion.
+ */
+ @Test
+ public void testSetFullFileStatusFailInheritGroupRecursive() throws Exception {
+ Configuration conf = new Configuration();
+ conf.set("dfs.namenode.acls.enabled", "false");
+
+ String fakeSourceGroup = "fakeGroup1";
+ String fakeTargetGroup = "fakeGroup2";
+ Path fakeTarget = new Path("fakePath");
+ HdfsUtils.HadoopFileStatus mockHadoopFileStatus = mock(HdfsUtils.HadoopFileStatus.class);
+ FileStatus mockSourceStatus = mock(FileStatus.class);
+ FsShell mockFsShell = mock(FsShell.class);
+
+ when(mockSourceStatus.getGroup()).thenReturn(fakeSourceGroup);
+ when(mockHadoopFileStatus.getFileStatus()).thenReturn(mockSourceStatus);
+ doThrow(RuntimeException.class).when(mockFsShell).run(any(String[].class));
+
+ HdfsUtils.setFullFileStatus(conf, mockHadoopFileStatus, fakeTargetGroup, mock(FileSystem.class), fakeTarget,
+ true, mockFsShell);
+ verify(mockFsShell).run(new String[]{"-chgrp", "-R", fakeSourceGroup, fakeTarget.toString()});
+ }
+
+ /**
+ * Tests that {@link HdfsUtils#setFullFileStatus(Configuration, HdfsUtils.HadoopFileStatus, FileSystem, Path, boolean)}
+ * does not throw an exception when setting ACLs and with recursion.
+ */
+ @Test
+ public void testSetFullFileStatusFailInheritAclsRecursive() throws Exception {
+ Configuration conf = new Configuration();
+ conf.set("dfs.namenode.acls.enabled", "true");
+
+ Path fakeTarget = new Path("fakePath");
+ HdfsUtils.HadoopFileStatus mockHadoopFileStatus = mock(HdfsUtils.HadoopFileStatus.class);
+ FileStatus mockSourceStatus = mock(FileStatus.class);
+ FsShell mockFsShell = mock(FsShell.class);
+ AclStatus mockAclStatus = mock(AclStatus.class);
+
+ when(mockSourceStatus.getPermission()).thenReturn(new FsPermission((short) 777));
+ when(mockAclStatus.toString()).thenReturn("");
+ when(mockHadoopFileStatus.getFileStatus()).thenReturn(mockSourceStatus);
+ when(mockHadoopFileStatus.getAclEntries()).thenReturn(new ArrayList<AclEntry>());
+ when(mockHadoopFileStatus.getAclStatus()).thenReturn(mockAclStatus);
+ doThrow(RuntimeException.class).when(mockFsShell).run(any(String[].class));
+
+ HdfsUtils.setFullFileStatus(conf, mockHadoopFileStatus, "", mock(FileSystem.class), fakeTarget, true, mockFsShell);
+ verify(mockFsShell).run(new String[]{"-setfacl", "-R", "--set", any(String.class), fakeTarget.toString()});
+ }
+
+ /**
+ * Tests that {@link HdfsUtils#setFullFileStatus(Configuration, HdfsUtils.HadoopFileStatus, FileSystem, Path, boolean)}
+ * does not throw an exception when setting permissions and with recursion.
+ */
+ @Test
+ public void testSetFullFileStatusFailInheritPermsRecursive() throws Exception {
+ Configuration conf = new Configuration();
+ conf.set("dfs.namenode.acls.enabled", "false");
+
+ Path fakeTarget = new Path("fakePath");
+ HdfsUtils.HadoopFileStatus mockHadoopFileStatus = mock(HdfsUtils.HadoopFileStatus.class);
+ FileStatus mockSourceStatus = mock(FileStatus.class);
+ FsShell mockFsShell = mock(FsShell.class);
+
+ when(mockSourceStatus.getPermission()).thenReturn(new FsPermission((short) 777));
+ when(mockHadoopFileStatus.getFileStatus()).thenReturn(mockSourceStatus);
+ doThrow(RuntimeException.class).when(mockFsShell).run(any(String[].class));
+
+ HdfsUtils.setFullFileStatus(conf, mockHadoopFileStatus, "", mock(FileSystem.class), fakeTarget,
+ true, mockFsShell);
+ verify(mockFsShell).run(new String[]{"-chmod", "-R", any(String.class), fakeTarget.toString()});
+ }
+
+ @Test
+ public void testSkipSetFullFileStatusIfBlobStore() throws IOException {
+ Configuration conf = new Configuration();
+ conf.set(HiveConf.ConfVars.HIVE_BLOBSTORE_SUPPORTED_SCHEMES.varname, "s3a");
+ FileSystem fs = mock(FileSystem.class);
+ when(fs.getUri()).thenReturn(URI.create("s3a:///"));
+ HdfsUtils.setFullFileStatus(conf, null, null, fs, null, false);
+
+ verify(fs, never()).getFileStatus(any(Path.class));
+ verify(fs, never()).listStatus(any(Path[].class));
+ verify(fs, never()).setPermission(any(Path.class), any(FsPermission.class));
+ verify(fs, never()).setAcl(any(Path.class), anyList());
+ verify(fs, never()).setOwner(any(Path.class), any(String.class), any(String.class));
+ }
+}
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/Warehouse.java b/metastore/src/java/org/apache/hadoop/hive/metastore/Warehouse.java
index a65a2e7..fda253d 100755
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/Warehouse.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/Warehouse.java
@@ -33,8 +33,11 @@
import java.util.regex.Pattern;
import org.apache.commons.lang.StringUtils;
+import org.apache.hadoop.hive.common.PermissionsInheritor;
+
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.FileStatus;
@@ -184,11 +187,10 @@ public static String getQualifiedName(Partition partition) {
}
public boolean mkdirs(Path f, boolean inheritPermCandidate) throws MetaException {
- boolean inheritPerms = HiveConf.getBoolVar(conf,
- HiveConf.ConfVars.HIVE_WAREHOUSE_SUBDIR_INHERIT_PERMS) && inheritPermCandidate;
FileSystem fs = null;
try {
fs = getFs(f);
+ boolean inheritPerms = PermissionsInheritor.shouldInheritPerms(conf, fs) && inheritPermCandidate;
return FileUtils.mkdir(fs, f, inheritPerms, conf);
} catch (IOException e) {
MetaStoreUtils.logAndThrowMetaException(e);
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/Context.java b/ql/src/java/org/apache/hadoop/hive/ql/Context.java
index d1d2789..08bba3d 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/Context.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/Context.java
@@ -355,9 +355,7 @@ private Path getStagingDir(Path inputPath, boolean mkdir) {
if (mkdir) {
try {
- boolean inheritPerms = HiveConf.getBoolVar(conf,
- HiveConf.ConfVars.HIVE_WAREHOUSE_SUBDIR_INHERIT_PERMS);
- if (!FileUtils.mkdir(fs, dir, inheritPerms, conf)) {
+ if (!FileUtils.mkdir(fs, dir, conf)) {
throw new IllegalStateException("Cannot create staging directory '" + dir.toString() + "'");
}
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/CopyTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/CopyTask.java
index cbe0aca..2683f29 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/CopyTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/CopyTask.java
@@ -70,8 +70,7 @@ public int execute(DriverContext driverContext) {
}
}
- boolean inheritPerms = conf.getBoolVar(HiveConf.ConfVars.HIVE_WAREHOUSE_SUBDIR_INHERIT_PERMS);
- if (!FileUtils.mkdir(dstFs, toPath, inheritPerms, conf)) {
+ if (!FileUtils.mkdir(dstFs, toPath, conf)) {
console.printError("Cannot make target directory: " + toPath.toString());
return 2;
}
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java
index 1802f37..1e17a46 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java
@@ -32,6 +32,7 @@
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.common.FileUtils;
import org.apache.hadoop.hive.common.HiveStatsUtils;
+import org.apache.hadoop.hive.common.PermissionsInheritor;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.io.HdfsUtils;
import org.apache.hadoop.hive.metastore.MetaStoreUtils;
@@ -181,8 +182,8 @@ private Path createTargetPath(Path targetPath, FileSystem fs) throws IOException
actualPath = actualPath.getParent();
}
fs.mkdirs(mkDirPath);
- if (HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_WAREHOUSE_SUBDIR_INHERIT_PERMS)) {
- HdfsUtils.setFullFileStatus(conf, new HdfsUtils.HadoopFileStatus(conf, fs, actualPath), fs, mkDirPath, true);
+ if (PermissionsInheritor.shouldInheritPerms(conf, fs)) {
+ PermissionsInheritor.inheritPerms(conf, new HdfsUtils.HadoopFileStatus(conf, fs, actualPath), fs, mkDirPath, true);
}
}
return deletePath;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/ReplCopyTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/ReplCopyTask.java
index 4686e2c..b4e6639 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/ReplCopyTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/ReplCopyTask.java
@@ -18,6 +18,7 @@
package org.apache.hadoop.hive.ql.exec;
+import org.apache.hadoop.hive.common.PermissionsInheritor;
import org.apache.hadoop.hive.metastore.ReplChangeManager;
import org.apache.hadoop.hive.metastore.api.MetaException;
import org.apache.hadoop.hive.ql.parse.EximUtil;
@@ -106,8 +107,7 @@ protected int execute(DriverContext driverContext) {
srcFiles.addAll(Arrays.asList(srcs));
LOG.debug("ReplCopyTask numFiles:" + (srcFiles == null ? "null" : srcFiles.size()));
- boolean inheritPerms = conf.getBoolVar(HiveConf.ConfVars.HIVE_WAREHOUSE_SUBDIR_INHERIT_PERMS);
- if (!FileUtils.mkdir(dstFs, toPath, inheritPerms, conf)) {
+ if (!FileUtils.mkdir(dstFs, toPath, conf)) {
console.printError("Cannot make target directory: " + toPath.toString());
return 2;
}
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
index f64cfda..5753d62 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
@@ -66,6 +66,7 @@
import org.apache.hadoop.hive.common.FileUtils;
import org.apache.hadoop.hive.common.HiveStatsUtils;
import org.apache.hadoop.hive.common.ObjectPair;
+import org.apache.hadoop.hive.common.PermissionsInheritor;
import org.apache.hadoop.hive.common.StatsSetupConst;
import org.apache.hadoop.hive.common.classification.InterfaceAudience.LimitedPrivate;
import org.apache.hadoop.hive.common.classification.InterfaceStability.Unstable;
@@ -2889,8 +2890,7 @@ private static void copyFiles(final HiveConf conf, final FileSystem destFs,
if (!fullDestStatus.getFileStatus().isDirectory()) {
throw new HiveException(destf + " is not a directory.");
}
- final boolean inheritPerms = HiveConf.getBoolVar(conf,
- HiveConf.ConfVars.HIVE_WAREHOUSE_SUBDIR_INHERIT_PERMS);
+ final boolean inheritPerms = PermissionsInheritor.shouldInheritPerms(conf, destFs);
    final List<Future<ObjectPair<Path, Path>>> futures = new LinkedList<>();
final ExecutorService pool = conf.getInt(ConfVars.HIVE_MOVE_FILES_THREAD_COUNT.varname, 25) > 0 ?
Executors.newFixedThreadPool(conf.getInt(ConfVars.HIVE_MOVE_FILES_THREAD_COUNT.varname, 25),
@@ -2939,8 +2939,9 @@ private static void copyFiles(final HiveConf conf, final FileSystem destFs,
Path destPath = mvFile(conf, srcFs, srcP, destFs, destf, isSrcLocal, isRenameAllowed);
if (inheritPerms) {
- HdfsUtils.setFullFileStatus(conf, fullDestStatus, srcGroup, destFs, destPath, false);
+ PermissionsInheritor.inheritPerms(conf, fullDestStatus, srcGroup, destFs, destPath, false);
}
+
if (null != newFiles) {
newFiles.add(destPath);
}
@@ -2952,7 +2953,7 @@ private static void copyFiles(final HiveConf conf, final FileSystem destFs,
}
if (null == pool) {
if (inheritPerms) {
- HdfsUtils.setFullFileStatus(conf, fullDestStatus, null, destFs, destf, true);
+ PermissionsInheritor.inheritPerms(conf, fullDestStatus, destFs, destf, true);
}
} else {
pool.shutdown();
@@ -3105,8 +3106,7 @@ public static boolean moveFile(final HiveConf conf, Path srcf, final Path destf,
}
//needed for perm inheritance.
- final boolean inheritPerms = HiveConf.getBoolVar(conf,
- HiveConf.ConfVars.HIVE_WAREHOUSE_SUBDIR_INHERIT_PERMS);
+ final boolean inheritPerms = PermissionsInheritor.shouldInheritPerms(conf, destFs);
HdfsUtils.HadoopFileStatus destStatus = null;
// If source path is a subdirectory of the destination path:
@@ -3118,7 +3118,7 @@ public static boolean moveFile(final HiveConf conf, Path srcf, final Path destf,
boolean destIsSubDir = isSubDir(srcf, destf, srcFs, destFs, isSrcLocal);
try {
if (inheritPerms || replace) {
- try{
+ try {
destStatus = new HdfsUtils.HadoopFileStatus(conf, destFs, destf);
//if destf is an existing directory:
//if replace is true, delete followed by rename(mv) is equivalent to replace
@@ -3142,7 +3142,7 @@ public static boolean moveFile(final HiveConf conf, Path srcf, final Path destf,
// For local src file, copy to hdfs
destFs.copyFromLocalFile(srcf, destf);
if (inheritPerms) {
- HdfsUtils.setFullFileStatus(conf, destStatus, destFs, destf, true);
+ PermissionsInheritor.inheritPerms(conf, destStatus, destFs, destf, true);
}
return true;
} else {
@@ -3178,7 +3178,7 @@ public Void call() throws Exception {
final String group = srcStatus.getGroup();
if(destFs.rename(srcStatus.getPath(), destFile)) {
if (inheritPerms) {
- HdfsUtils.setFullFileStatus(conf, desiredStatus, group, destFs, destFile, false);
+ PermissionsInheritor.inheritPerms(conf, desiredStatus, group, destFs, destFile, false);
}
} else {
throw new IOException("rename for src path: " + srcStatus.getPath() + " to dest path:"
@@ -3191,7 +3191,7 @@ public Void call() throws Exception {
}
if (null == pool) {
if (inheritPerms) {
- HdfsUtils.setFullFileStatus(conf, desiredStatus, null, destFs, destf, true);
+ PermissionsInheritor.inheritPerms(conf, desiredStatus, destFs, destf, true);
}
} else {
pool.shutdown();
@@ -3209,7 +3209,7 @@ public Void call() throws Exception {
} else {
if (destFs.rename(srcf, destf)) {
if (inheritPerms) {
- HdfsUtils.setFullFileStatus(conf, destStatus, destFs, destf, true);
+ PermissionsInheritor.inheritPerms(conf, destStatus, destFs, destf, true);
}
return true;
}
@@ -3261,12 +3261,10 @@ static protected boolean needToCopy(Path srcf, Path destf, FileSystem srcFs, Fil
*/
static protected void copyFiles(HiveConf conf, Path srcf, Path destf,
      FileSystem fs, boolean isSrcLocal, boolean isAcid, List<Path> newFiles) throws HiveException {
- boolean inheritPerms = HiveConf.getBoolVar(conf,
- HiveConf.ConfVars.HIVE_WAREHOUSE_SUBDIR_INHERIT_PERMS);
try {
// create the destination if it does not exist
if (!fs.exists(destf)) {
- FileUtils.mkdir(fs, destf, inheritPerms, conf);
+ FileUtils.mkdir(fs, destf, conf);
}
} catch (IOException e) {
throw new HiveException(
@@ -3460,9 +3458,7 @@ protected void replaceFiles(Path tablePath, Path srcf, Path destf, Path oldPath,
// first call FileUtils.mkdir to make sure that destf directory exists, if not, it creates
// destf with inherited permissions
- boolean inheritPerms = HiveConf.getBoolVar(conf, HiveConf.ConfVars
- .HIVE_WAREHOUSE_SUBDIR_INHERIT_PERMS);
- boolean destfExist = FileUtils.mkdir(destFs, destf, inheritPerms, conf);
+ boolean destfExist = FileUtils.mkdir(destFs, destf, conf);
if(!destfExist) {
throw new IOException("Directory " + destf.toString()
+ " does not exist and could not be created.");
diff --git a/shims/common/src/main/java/org/apache/hadoop/hive/io/HdfsUtils.java b/shims/common/src/main/java/org/apache/hadoop/hive/io/HdfsUtils.java
deleted file mode 100644
index 277738f..0000000
--- a/shims/common/src/main/java/org/apache/hadoop/hive/io/HdfsUtils.java
+++ /dev/null
@@ -1,239 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.io;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.List;
-
-import com.google.common.annotations.VisibleForTesting;
-
-import org.apache.commons.lang.ArrayUtils;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.FsShell;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.permission.AclEntry;
-import org.apache.hadoop.fs.permission.AclEntryScope;
-import org.apache.hadoop.fs.permission.AclEntryType;
-import org.apache.hadoop.fs.permission.AclStatus;
-import org.apache.hadoop.fs.permission.FsAction;
-import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.hdfs.DistributedFileSystem;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.google.common.base.Joiner;
-import com.google.common.base.Objects;
-import com.google.common.base.Predicate;
-import com.google.common.collect.Iterables;
-
-public class HdfsUtils {
- private static final Logger LOG = LoggerFactory.getLogger("shims.HdfsUtils");
-
- // TODO: this relies on HDFS not changing the format; we assume if we could get inode ID, this
- // is still going to work. Otherwise, file IDs can be turned off. Later, we should use
- // a public utility method in HDFS to obtain the inode-based path.
- private static final String HDFS_ID_PATH_PREFIX = "/.reserved/.inodes/";
-
- public static Path getFileIdPath(
- FileSystem fileSystem, Path path, long fileId) {
- return (fileSystem instanceof DistributedFileSystem)
- ? new Path(HDFS_ID_PATH_PREFIX + fileId) : path;
- }
-
- /**
- * Copy the permissions, group, and ACLs from a source {@link HadoopFileStatus} to a target {@link Path}. This method
- * will only log a warning if permissions cannot be set; no exception will be thrown.
- *
- * @param conf the {@link Configuration} used when setting permissions and ACLs
- * @param sourceStatus the source {@link HadoopFileStatus} to copy permissions and ACLs from
- * @param fs the {@link FileSystem} that contains the target {@link Path}
- * @param target the {@link Path} to copy permissions, group, and ACLs to
- * @param recursion recursively set permissions and ACLs on the target {@link Path}
- */
- public static void setFullFileStatus(Configuration conf, HdfsUtils.HadoopFileStatus sourceStatus,
- FileSystem fs, Path target, boolean recursion) {
- setFullFileStatus(conf, sourceStatus, null, fs, target, recursion);
- }
-
- /**
- * Copy the permissions, group, and ACLs from a source {@link HadoopFileStatus} to a target {@link Path}. This method
- * will only log a warning if permissions cannot be set; no exception will be thrown.
- *
- * @param conf the {@link Configuration} used when setting permissions and ACLs
- * @param sourceStatus the source {@link HadoopFileStatus} to copy permissions and ACLs from
- * @param targetGroup the group of the target {@link Path}, if this is set and it is equal to the source group, an
- * extra set group operation is avoided
- * @param fs the {@link FileSystem} that contains the target {@link Path}
- * @param target the {@link Path} to copy permissions, group, and ACLs to
- * @param recursion recursively set permissions and ACLs on the target {@link Path}
- */
- public static void setFullFileStatus(Configuration conf, HdfsUtils.HadoopFileStatus sourceStatus,
- String targetGroup, FileSystem fs, Path target, boolean recursion) {
- setFullFileStatus(conf, sourceStatus, targetGroup, fs, target, recursion, recursion ? new FsShell() : null);
- }
-
- @VisibleForTesting
- static void setFullFileStatus(Configuration conf, HdfsUtils.HadoopFileStatus sourceStatus,
- String targetGroup, FileSystem fs, Path target, boolean recursion, FsShell fsShell) {
- try {
- FileStatus fStatus = sourceStatus.getFileStatus();
- String group = fStatus.getGroup();
- boolean aclEnabled = Objects.equal(conf.get("dfs.namenode.acls.enabled"), "true");
- FsPermission sourcePerm = fStatus.getPermission();
- List<AclEntry> aclEntries = null;
- if (aclEnabled) {
- if (sourceStatus.getAclEntries() != null) {
- LOG.trace(sourceStatus.getAclStatus().toString());
- aclEntries = new ArrayList<>(sourceStatus.getAclEntries());
- removeBaseAclEntries(aclEntries);
-
- //the ACL APIs also expect the traditional user/group/other permission in the form of ACL
- aclEntries.add(newAclEntry(AclEntryScope.ACCESS, AclEntryType.USER, sourcePerm.getUserAction()));
- aclEntries.add(newAclEntry(AclEntryScope.ACCESS, AclEntryType.GROUP, sourcePerm.getGroupAction()));
- aclEntries.add(newAclEntry(AclEntryScope.ACCESS, AclEntryType.OTHER, sourcePerm.getOtherAction()));
- }
- }
-
- if (recursion) {
- //use FsShell to change group, permissions, and extended ACL's recursively
- fsShell.setConf(conf);
- //If the file has no group, there is no need to call chgrp
- if (group != null && !group.isEmpty()) {
- run(fsShell, new String[]{"-chgrp", "-R", group, target.toString()});
- }
- if (aclEnabled) {
- if (null != aclEntries) {
- //Attempt extended Acl operations only if it's enabled, but don't fail the operation regardless.
- try {
- //construct the -setfacl command
- String aclEntry = Joiner.on(",").join(aclEntries);
- run(fsShell, new String[]{"-setfacl", "-R", "--set", aclEntry, target.toString()});
-
- } catch (Exception e) {
- LOG.info("Skipping ACL inheritance: File system for path " + target + " " +
- "does not support ACLs but dfs.namenode.acls.enabled is set to true. ");
- LOG.debug("The details are: " + e, e);
- }
- }
- } else {
- String permission = Integer.toString(sourcePerm.toShort(), 8);
- run(fsShell, new String[]{"-chmod", "-R", permission, target.toString()});
- }
- } else {
- if (group != null && !group.isEmpty()) {
- if (targetGroup == null ||
- !group.equals(targetGroup)) {
- fs.setOwner(target, null, group);
- }
- }
- if (aclEnabled) {
- if (null != aclEntries) {
- fs.setAcl(target, aclEntries);
- }
- } else {
- fs.setPermission(target, sourcePerm);
- }
- }
- } catch (Exception e) {
- LOG.warn(
- "Unable to inherit permissions for file " + target + " from file " + sourceStatus.getFileStatus().getPath(),
- e.getMessage());
- LOG.debug("Exception while inheriting permissions", e);
- }
- }
-
- /**
- * Create a new AclEntry with scope, type and permission (no name).
- *
- * @param scope
- * AclEntryScope scope of the ACL entry
- * @param type
- * AclEntryType ACL entry type
- * @param permission
- * FsAction set of permissions in the ACL entry
- * @return AclEntry new AclEntry
- */
- private static AclEntry newAclEntry(AclEntryScope scope, AclEntryType type,
- FsAction permission) {
- return new AclEntry.Builder().setScope(scope).setType(type)
- .setPermission(permission).build();
- }
- /**
- * Removes basic permission acls (unnamed acls) from the list of acl entries
- * @param entries acl entries to remove from.
- */
- private static void removeBaseAclEntries(List<AclEntry> entries) {
- Iterables.removeIf(entries, new Predicate<AclEntry>() {
- @Override
- public boolean apply(AclEntry input) {
- if (input.getName() == null) {
- return true;
- }
- return false;
- }
- });
- }
-
- private static void run(FsShell shell, String[] command) throws Exception {
- LOG.debug(ArrayUtils.toString(command));
- int retval = shell.run(command);
- LOG.debug("Return value is :" + retval);
- }
-public static class HadoopFileStatus {
-
- private final FileStatus fileStatus;
- private final AclStatus aclStatus;
-
- public HadoopFileStatus(Configuration conf, FileSystem fs, Path file) throws IOException {
-
- FileStatus fileStatus = fs.getFileStatus(file);
- AclStatus aclStatus = null;
- if (Objects.equal(conf.get("dfs.namenode.acls.enabled"), "true")) {
- //Attempt extended Acl operations only if it's enabled, but don't fail the operation regardless.
- try {
- aclStatus = fs.getAclStatus(file);
- } catch (Exception e) {
- LOG.info("Skipping ACL inheritance: File system for path " + file + " " +
- "does not support ACLs but dfs.namenode.acls.enabled is set to true. ");
- LOG.debug("The details are: " + e, e);
- }
- }this.fileStatus = fileStatus;
- this.aclStatus = aclStatus;
- }
-
- public FileStatus getFileStatus() {
- return fileStatus;
- }
-
- public List<AclEntry> getAclEntries() {
- return aclStatus == null ? null : Collections.unmodifiableList(aclStatus.getEntries());
- }
-
- @VisibleForTesting
- AclStatus getAclStatus() {
- return this.aclStatus;
- }
-}
-}
diff --git a/shims/common/src/main/test/org/apache/hadoop/hive/io/TestHdfsUtils.java b/shims/common/src/main/test/org/apache/hadoop/hive/io/TestHdfsUtils.java
deleted file mode 100644
index 86a132c..0000000
--- a/shims/common/src/main/test/org/apache/hadoop/hive/io/TestHdfsUtils.java
+++ /dev/null
@@ -1,184 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.io;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.FsShell;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.permission.AclEntry;
-import org.apache.hadoop.fs.permission.AclStatus;
-import org.apache.hadoop.fs.permission.FsPermission;
-
-import org.junit.Test;
-
-import static org.mockito.Matchers.any;
-import static org.mockito.Mockito.doThrow;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.verify;
-import static org.mockito.Mockito.when;
-
-public class TestHdfsUtils {
-
- /**
- * Tests that {@link HdfsUtils#setFullFileStatus(Configuration, HdfsUtils.HadoopFileStatus, String, FileSystem, Path, boolean)}
- * does not throw an exception when setting the group and without recursion.
- */
- @Test
- public void testSetFullFileStatusFailInheritGroup() throws IOException {
- Configuration conf = new Configuration();
- conf.set("dfs.namenode.acls.enabled", "false");
-
- HdfsUtils.HadoopFileStatus mockHadoopFileStatus = mock(HdfsUtils.HadoopFileStatus.class);
- FileStatus mockSourceStatus = mock(FileStatus.class);
- FileSystem fs = mock(FileSystem.class);
-
- when(mockSourceStatus.getGroup()).thenReturn("fakeGroup1");
- when(mockHadoopFileStatus.getFileStatus()).thenReturn(mockSourceStatus);
- doThrow(RuntimeException.class).when(fs).setOwner(any(Path.class), any(String.class), any(String.class));
-
- HdfsUtils.setFullFileStatus(conf, mockHadoopFileStatus, "fakeGroup2", fs, new Path("fakePath"), false);
- verify(fs).setOwner(any(Path.class), any(String.class), any(String.class));
- }
-
- /**
- * Tests that {@link HdfsUtils#setFullFileStatus(Configuration, HdfsUtils.HadoopFileStatus, FileSystem, Path, boolean)}
- * does not throw an exception when setting ACLs and without recursion.
- */
- @Test
- public void testSetFullFileStatusFailInheritAcls() throws IOException {
- Configuration conf = new Configuration();
- conf.set("dfs.namenode.acls.enabled", "true");
-
- HdfsUtils.HadoopFileStatus mockHadoopFileStatus = mock(HdfsUtils.HadoopFileStatus.class);
- FileStatus mockSourceStatus = mock(FileStatus.class);
- AclStatus mockAclStatus = mock(AclStatus.class);
- FileSystem mockFs = mock(FileSystem.class);
-
- when(mockSourceStatus.getPermission()).thenReturn(new FsPermission((short) 777));
- when(mockAclStatus.toString()).thenReturn("");
- when(mockHadoopFileStatus.getFileStatus()).thenReturn(mockSourceStatus);
- when(mockHadoopFileStatus.getAclEntries()).thenReturn(new ArrayList<AclEntry>());
- when(mockHadoopFileStatus.getAclStatus()).thenReturn(mockAclStatus);
- doThrow(RuntimeException.class).when(mockFs).setAcl(any(Path.class), any(List.class));
-
- HdfsUtils.setFullFileStatus(conf, mockHadoopFileStatus, mockFs, new Path("fakePath"), false);
- verify(mockFs).setAcl(any(Path.class), any(List.class));
- }
-
- /**
- * Tests that {@link HdfsUtils#setFullFileStatus(Configuration, HdfsUtils.HadoopFileStatus, FileSystem, Path, boolean)}
- * does not throw an exception when setting permissions and without recursion.
- */
- @Test
- public void testSetFullFileStatusFailInheritPerms() throws IOException {
- Configuration conf = new Configuration();
- conf.set("dfs.namenode.acls.enabled", "false");
-
- HdfsUtils.HadoopFileStatus mockHadoopFileStatus = mock(HdfsUtils.HadoopFileStatus.class);
- FileStatus mockSourceStatus = mock(FileStatus.class);
- FileSystem mockFs = mock(FileSystem.class);
-
- when(mockSourceStatus.getPermission()).thenReturn(new FsPermission((short) 777));
- when(mockHadoopFileStatus.getFileStatus()).thenReturn(mockSourceStatus);
- doThrow(RuntimeException.class).when(mockFs).setPermission(any(Path.class), any(FsPermission.class));
-
- HdfsUtils.setFullFileStatus(conf, mockHadoopFileStatus, mockFs, new Path("fakePath"), false);
- verify(mockFs).setPermission(any(Path.class), any(FsPermission.class));
- }
-
- /**
- * Tests that {@link HdfsUtils#setFullFileStatus(Configuration, HdfsUtils.HadoopFileStatus, String, FileSystem, Path, boolean)}
- * does not throw an exception when setting the group and with recursion.
- */
- @Test
- public void testSetFullFileStatusFailInheritGroupRecursive() throws Exception {
- Configuration conf = new Configuration();
- conf.set("dfs.namenode.acls.enabled", "false");
-
- String fakeSourceGroup = "fakeGroup1";
- String fakeTargetGroup = "fakeGroup2";
- Path fakeTarget = new Path("fakePath");
- HdfsUtils.HadoopFileStatus mockHadoopFileStatus = mock(HdfsUtils.HadoopFileStatus.class);
- FileStatus mockSourceStatus = mock(FileStatus.class);
- FsShell mockFsShell = mock(FsShell.class);
-
- when(mockSourceStatus.getGroup()).thenReturn(fakeSourceGroup);
- when(mockHadoopFileStatus.getFileStatus()).thenReturn(mockSourceStatus);
- doThrow(RuntimeException.class).when(mockFsShell).run(any(String[].class));
-
- HdfsUtils.setFullFileStatus(conf, mockHadoopFileStatus, fakeTargetGroup, mock(FileSystem.class), fakeTarget,
- true, mockFsShell);
- verify(mockFsShell).run(new String[]{"-chgrp", "-R", fakeSourceGroup, fakeTarget.toString()});
- }
-
- /**
- * Tests that {@link HdfsUtils#setFullFileStatus(Configuration, HdfsUtils.HadoopFileStatus, FileSystem, Path, boolean)}
- * does not throw an exception when setting ACLs and with recursion.
- */
- @Test
- public void testSetFullFileStatusFailInheritAclsRecursive() throws Exception {
- Configuration conf = new Configuration();
- conf.set("dfs.namenode.acls.enabled", "true");
-
- Path fakeTarget = new Path("fakePath");
- HdfsUtils.HadoopFileStatus mockHadoopFileStatus = mock(HdfsUtils.HadoopFileStatus.class);
- FileStatus mockSourceStatus = mock(FileStatus.class);
- FsShell mockFsShell = mock(FsShell.class);
- AclStatus mockAclStatus = mock(AclStatus.class);
-
- when(mockSourceStatus.getPermission()).thenReturn(new FsPermission((short) 777));
- when(mockAclStatus.toString()).thenReturn("");
- when(mockHadoopFileStatus.getFileStatus()).thenReturn(mockSourceStatus);
- when(mockHadoopFileStatus.getAclEntries()).thenReturn(new ArrayList<AclEntry>());
- when(mockHadoopFileStatus.getAclStatus()).thenReturn(mockAclStatus);
- doThrow(RuntimeException.class).when(mockFsShell).run(any(String[].class));
-
- HdfsUtils.setFullFileStatus(conf, mockHadoopFileStatus, "", mock(FileSystem.class), fakeTarget, true, mockFsShell);
- verify(mockFsShell).run(new String[]{"-setfacl", "-R", "--set", any(String.class), fakeTarget.toString()});
- }
-
- /**
- * does not throw an exception when setting permissions and with recursion.
- * does not thrown an exception when setting permissions and with recursion.
- */
- @Test
- public void testSetFullFileStatusFailInheritPermsRecursive() throws Exception {
- Configuration conf = new Configuration();
- conf.set("dfs.namenode.acls.enabled", "false");
-
- Path fakeTarget = new Path("fakePath");
- HdfsUtils.HadoopFileStatus mockHadoopFileStatus = mock(HdfsUtils.HadoopFileStatus.class);
- FileStatus mockSourceStatus = mock(FileStatus.class);
- FsShell mockFsShell = mock(FsShell.class);
-
- when(mockSourceStatus.getPermission()).thenReturn(new FsPermission((short) 777));
- when(mockHadoopFileStatus.getFileStatus()).thenReturn(mockSourceStatus);
- doThrow(RuntimeException.class).when(mockFsShell).run(any(String[].class));
-
- HdfsUtils.setFullFileStatus(conf, mockHadoopFileStatus, "", mock(FileSystem.class), fakeTarget,
- true, mockFsShell);
- verify(mockFsShell).run(new String[]{"-chmod", "-R", any(String.class), fakeTarget.toString()});
- }
-}