diff --git common/pom.xml common/pom.xml
index 8d4b1ea..d656567 100644
--- common/pom.xml
+++ common/pom.xml
@@ -147,6 +147,12 @@
${hadoop-23.version}
true
+
+ org.apache.hadoop
+ hadoop-hdfs
+ ${hadoop-23.version}
+ true
+
diff --git hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/FileOutputCommitterContainer.java hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/FileOutputCommitterContainer.java
index 4fb09e7..378ede1 100644
--- hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/FileOutputCommitterContainer.java
+++ hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/FileOutputCommitterContainer.java
@@ -37,6 +37,7 @@
import org.apache.hadoop.fs.permission.AclEntryType;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.protocol.AclException;
import org.apache.hadoop.hive.common.FileUtils;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.IMetaStoreClient;
@@ -795,15 +796,15 @@ private void registerPartitions(JobContext context) throws IOException{
FsPermission perms = tblStat.getPermission();
ArrayList acls = null;
- if (conf.getBoolean("dfs.namenode.acls.enabled", false)) {
- try {
- AclStatus stat = fs.getAclStatus(tblPath);
- if (hasExtendedAcls(stat)) {
- acls = getDefaultAclEntries(stat, perms);
- }
- } catch (UnsupportedOperationException e) {
- LOG.debug("Skipping ACLs", e);
+ try {
+ AclStatus stat = fs.getAclStatus(tblPath);
+ if (hasExtendedAcls(stat)) {
+ acls = getDefaultAclEntries(stat, perms);
}
+ } catch (AclException e) {
+ LOG.debug("Skipping ACLs", e);
+ } catch (UnsupportedOperationException e) {
+ LOG.debug("Skipping ACLs", e);
}
List partitionsToAdd = new ArrayList();
diff --git metastore/pom.xml metastore/pom.xml
index 262aaf7..c38ed9d 100644
--- metastore/pom.xml
+++ metastore/pom.xml
@@ -159,6 +159,13 @@
org.apache.hadoop
+ hadoop-hdfs
+ ${hadoop-23.version}
+ test
+ true
+
+
+ org.apache.hadoop
hadoop-mapreduce-client-core
${hadoop-23.version}
true
diff --git shims/0.23/src/main/java/org/apache/hadoop/hive/shims/Hadoop23Shims.java shims/0.23/src/main/java/org/apache/hadoop/hive/shims/Hadoop23Shims.java
index cecc6b5..64f24ea 100644
--- shims/0.23/src/main/java/org/apache/hadoop/hive/shims/Hadoop23Shims.java
+++ shims/0.23/src/main/java/org/apache/hadoop/hive/shims/Hadoop23Shims.java
@@ -66,6 +66,7 @@
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.client.HdfsAdmin;
+import org.apache.hadoop.hdfs.protocol.AclException;
import org.apache.hadoop.hdfs.protocol.EncryptionZone;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.io.LongWritable;
@@ -255,10 +256,6 @@ public String getJobLauncherHttpAddress(Configuration conf) {
return conf.get("yarn.resourcemanager.webapp.address");
}
- protected boolean isExtendedAclEnabled(Configuration conf) {
- return Objects.equal(conf.get("dfs.namenode.acls.enabled"), "true");
- }
-
@Override
public long getDefaultBlockSize(FileSystem fs, Path path) {
return fs.getDefaultBlockSize(path);
@@ -730,14 +727,14 @@ public HdfsFileStatus getFullFileStatus(Configuration conf, FileSystem fs,
Path file) throws IOException {
FileStatus fileStatus = fs.getFileStatus(file);
AclStatus aclStatus = null;
- if (isExtendedAclEnabled(conf)) {
- //Attempt extended Acl operations only if its enabled, but don't fail the operation regardless.
- try {
- aclStatus = fs.getAclStatus(file);
- } catch (Exception e) {
- LOG.info("Skipping ACL inheritance: File system for path " + file + " " +
- "does not support ACLs but dfs.namenode.acls.enabled is set to true: " + e, e);
- }
+
+ //Attempt extended Acl operations unconditionally, but don't fail the overall operation if the filesystem does not support ACLs.
+ try {
+ aclStatus = fs.getAclStatus(file);
+ } catch (AclException e) {
+ LOG.debug("Skipping extended ACLs", e);
+ } catch (UnsupportedOperationException e) {
+ LOG.debug("Skipping extended ACLs", e);
}
return new Hadoop23FileStatus(fileStatus, aclStatus);
}
@@ -752,21 +749,10 @@ public void setFullFileStatus(Configuration conf, HdfsFileStatus sourceStatus,
fsShell.setConf(conf);
run(fsShell, new String[]{"-chgrp", "-R", group, target.toString()});
- if (isExtendedAclEnabled(conf)) {
- //Attempt extended Acl operations only if its enabled, 8791but don't fail the operation regardless.
- try {
- if (hasExtendedAcls((Hadoop23FileStatus) sourceStatus)) {
- List aclEntries = getDefaultAclEntries((Hadoop23FileStatus) sourceStatus);
- String aclEntry = Joiner.on(",").join(aclEntries);
- run(fsShell, new String[]{"-setfacl", "-R", "--set", aclEntry, target.toString()});
- } else {
- String permission = Integer.toString(sourceStatus.getFileStatus().getPermission().toShort(), 8);
- run(fsShell, new String[]{"-chmod", "-R", permission, target.toString()});
- }
- } catch (Exception e) {
- LOG.info("Skipping ACL inheritance: File system for path " + target + " " +
- "does not support ACLs but dfs.namenode.acls.enabled is set to true: " + e, e);
- }
+ if (hasExtendedAcls((Hadoop23FileStatus) sourceStatus)) {
+ List aclEntries = getDefaultAclEntries((Hadoop23FileStatus) sourceStatus);
+ String aclEntry = Joiner.on(",").join(aclEntries);
+ run(fsShell, new String[]{"-setfacl", "-R", "--set", aclEntry, target.toString()});
} else {
String permission = Integer.toString(sourceStatus.getFileStatus().getPermission().toShort(), 8);
run(fsShell, new String[]{"-chmod", "-R", permission, target.toString()});