diff --git a/common/src/java/org/apache/hadoop/hive/common/FileUtils.java b/common/src/java/org/apache/hadoop/hive/common/FileUtils.java
index fb28573..9a6ad58 100644
--- a/common/src/java/org/apache/hadoop/hive/common/FileUtils.java
+++ b/common/src/java/org/apache/hadoop/hive/common/FileUtils.java
@@ -661,7 +661,7 @@ public static boolean renameWithPerms(FileSystem fs, Path sourcePath,
     //rename the directory
     if (fs.rename(sourcePath, destPath)) {
       try {
-        HdfsUtils.setFullFileStatus(conf, new HdfsUtils.HadoopFileStatus(conf, fs, destPath.getParent()), fs, destPath, true);
+        HdfsUtils.setFullFileStatus(conf, new HdfsUtils.HadoopFileStatus(conf, fs, destPath.getParent()), null, fs, destPath, true, true, true);
       } catch (Exception e) {
         LOG.warn("Error setting permissions or group of " + destPath, e);
       }
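Note: `renameWithPerms` targets a directory that was just renamed into place, so it opts into the new `isDir = true, rename = true` behavior of the widened `setFullFileStatus` overload (see the HdfsUtils.java hunks at the end of this patch). As a hedged illustration of the underlying HDFS mechanism — this sketch is not part of the patch, and the paths and method name are hypothetical — a parent's DEFAULT entries become ACCESS entries on a child directory like so:

```java
import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclEntryScope;

public class AclInheritSketch {
  /**
   * Copy a parent's DEFAULT entries onto a child directory as ACCESS entries,
   * mimicking what HDFS does natively when a child is created under a parent
   * carrying default ACLs. Paths here are hypothetical placeholders.
   */
  static void mirrorDefaults(FileSystem fs, Path parent, Path childDir) throws Exception {
    List<AclEntry> forChild = new ArrayList<>();
    for (AclEntry e : fs.getAclStatus(parent).getEntries()) {
      if (e.getScope() == AclEntryScope.DEFAULT) {
        forChild.add(new AclEntry.Builder()
            .setScope(AclEntryScope.ACCESS)   // default:user:foo:r-x becomes user:foo:r-x
            .setType(e.getType())
            .setName(e.getName())
            .setPermission(e.getPermission())
            .build());
        forChild.add(e);                      // keep the default entry for grandchildren
      }
    }
    if (!forChild.isEmpty()) {
      fs.modifyAclEntries(childDir, forChild); // merge rather than replace the child's ACL
    }
  }
}
```

HDFS applies this mapping automatically only at create time; the patch has to re-apply it by hand because a rename preserves the moved directory's existing ACLs instead of re-evaluating the destination parent's defaults.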
diff --git a/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/FileOutputCommitterContainer.java b/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/FileOutputCommitterContainer.java
index 9056f11..ff43f69 100644
--- a/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/FileOutputCommitterContainer.java
+++ b/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/FileOutputCommitterContainer.java
@@ -32,6 +32,10 @@ import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.AclEntry;
+import org.apache.hadoop.fs.permission.AclEntryScope;
+import org.apache.hadoop.fs.permission.AclEntryType;
+import org.apache.hadoop.fs.permission.AclStatus;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hive.common.FileUtils;
 import org.apache.hadoop.hive.common.StatsSetupConst;
@@ -334,7 +338,7 @@ private Partition constructPartition(
     String partLocnRoot, String dynPartPath, Map<String, String> partKVs,
     HCatSchema outputSchema, Map<String, String> params,
     Table table, FileSystem fs,
-    String grpName, FsPermission perms) throws IOException {
+    String grpName, FsPermission perms, List<AclEntry> acls) throws IOException {

     Partition partition = new Partition();
     partition.setDbName(table.getDbName());
@@ -371,7 +375,7 @@ private Partition constructPartition(
     for (FieldSchema partKey : table.getPartitionKeys()) {
       if (i++ != 0) {
         fs.mkdirs(partPath); // Attempt to make the path in case it does not exist before we check
-        applyGroupAndPerms(fs, partPath, perms, grpName, false);
+        applyGroupAndPerms(fs, partPath, perms, acls, grpName, false);
       }
       partPath = constructPartialPartPath(partPath, partKey.getName().toLowerCase(), partKVs);
     }
@@ -381,7 +385,7 @@ private Partition constructPartition(
     // Need not bother in case of HDFS as permission is taken care of by setting UMask
     fs.mkdirs(partPath); // Attempt to make the path in case it does not exist before we check
     if (!ShimLoader.getHadoopShims().getHCatShim().isFileInHDFS(fs, partPath)) {
-      applyGroupAndPerms(fs, partPath, perms, grpName, true);
+      applyGroupAndPerms(fs, partPath, perms, acls, grpName, true);
     }

     // Set the location in the StorageDescriptor
@@ -401,7 +405,7 @@ private Partition constructPartition(
   }

   private void applyGroupAndPerms(FileSystem fs, Path dir, FsPermission permission,
-      String group, boolean recursive)
+      List<AclEntry> acls, String group, boolean recursive)
     throws IOException {
     if(LOG.isDebugEnabled()) {
       LOG.debug("applyGroupAndPerms : " + dir +
@@ -409,10 +413,13 @@ private void applyGroupAndPerms(FileSystem fs, Path dir, FsPermission permission
         " group: " + group + " recursive: " + recursive);
     }
     fs.setPermission(dir, permission);
+    if ((acls != null) && (acls.size() > 0)) {
+      fs.setAcl(dir, acls);
+    }
     if (recursive) {
       for (FileStatus fileStatus : fs.listStatus(dir)) {
         if (fileStatus.isDir()) {
-          applyGroupAndPerms(fs, fileStatus.getPath(), permission, group, true);
+          applyGroupAndPerms(fs, fileStatus.getPath(), permission, acls, group, true);
         } else {
           fs.setPermission(fileStatus.getPath(), permission);
         }
@@ -796,18 +803,30 @@ private void registerPartitions(JobContext context) throws IOException{
     FileStatus tblStat = fs.getFileStatus(tblPath);
     String grpName = tblStat.getGroup();
     FsPermission perms = tblStat.getPermission();
+    ArrayList<AclEntry> acls = null;
+
+    if (conf.getBoolean("dfs.namenode.acls.enabled", false)) {
+      try {
+        AclStatus stat = fs.getAclStatus(tblPath);
+        if (hasExtendedAcls(stat)) {
+          acls = getDefaultAclEntries(stat, perms);
+        }
+      } catch (UnsupportedOperationException e) {
+        LOG.debug("Skipping ACLs", e);
+      }
+    }

     List<Partition> partitionsToAdd = new ArrayList<Partition>();
     if (!dynamicPartitioningUsed) {
       partitionsToAdd.add(constructPartition(context, jobInfo, tblPath.toString(), null,
         jobInfo.getPartitionValues(), jobInfo.getOutputSchema(), getStorerParameterMap(storer),
-        table, fs, grpName, perms));
+        table, fs, grpName, perms, acls));
     } else {
       for (Entry<String, Map<String, String>> entry : partitionsDiscoveredByPath.entrySet()) {
         partitionsToAdd.add(constructPartition(context, jobInfo,
           getPartitionRootLocation(entry.getKey(), entry.getValue().size()), entry.getKey(),
           entry.getValue(), jobInfo.getOutputSchema(), getStorerParameterMap(storer), table,
-          fs, grpName, perms));
+          fs, grpName, perms, acls));
       }
     }
@@ -943,7 +962,7 @@ private void registerPartitions(JobContext context) throws IOException{
       // Set permissions appropriately for each of the partitions we just created
       // so as to have their permissions mimic the table permissions
       for (Partition p : partitionsAdded){
-        applyGroupAndPerms(fs,new Path(p.getSd().getLocation()),tblStat.getPermission(),tblStat.getGroup(),true);
+        applyGroupAndPerms(fs, new Path(p.getSd().getLocation()), tblStat.getPermission(), acls, tblStat.getGroup(), true);
       }
     }
@@ -1017,5 +1036,80 @@ private void cancelDelegationTokens(JobContext context) throws IOException{
     }
   }

+  private ArrayList<AclEntry> getDefaultAclEntries(AclStatus stat, FsPermission perms) {
+    ArrayList<AclEntry> defaults = new ArrayList<AclEntry>();
+
+    boolean[] hasDefaults = { false, false, false, false };
+
+    for (AclEntry e : stat.getEntries()) {
+      if (e.getScope() == AclEntryScope.DEFAULT) {
+        AclEntry acl = new AclEntry.Builder().setName(e.getName()).setScope(AclEntryScope.ACCESS)
+            .setType(e.getType()).setPermission(e.getPermission()).build();
+
+        defaults.add(acl);
+        defaults.add(e);
+
+        if (e.getName() == null) {
+          if (e.getType() == AclEntryType.USER) {
+            hasDefaults[0] = true;
+          }
+          if (e.getType() == AclEntryType.GROUP) {
+            hasDefaults[1] = true;
+          }
+          if (e.getType() == AclEntryType.OTHER) {
+            hasDefaults[2] = true;
+          }
+          if (e.getType() == AclEntryType.MASK) {
+            hasDefaults[3] = true;
+          }
+        }
+      }
+    }
+
+    if (!hasDefaults[0]) {
+      AclEntry acl = new AclEntry.Builder().setScope(AclEntryScope.ACCESS)
+          .setType(AclEntryType.USER).setPermission(perms.getUserAction()).build();
+      defaults.add(acl);
+
+      hasDefaults[0] = true;
+    }
+
+    if (!hasDefaults[1]) {
+      for (AclEntry e : stat.getEntries()) {
+        if ((e.getScope() == AclEntryScope.ACCESS) && (e.getType() == AclEntryType.GROUP) && (e.getName() == null)) {
+          AclEntry acl = new AclEntry.Builder().setScope(AclEntryScope.ACCESS)
+              .setType(AclEntryType.GROUP).setPermission(e.getPermission()).build();
+          defaults.add(acl);
+
+          hasDefaults[1] = true;
+        }
+      }
+    }
+
+    if (!hasDefaults[2]) {
+      AclEntry acl = new AclEntry.Builder().setScope(AclEntryScope.ACCESS)
+          .setType(AclEntryType.OTHER).setPermission(perms.getOtherAction()).build();
+      defaults.add(acl);
+
+      hasDefaults[2] = true;
+    }
+
+    if (!hasDefaults[3]) {
+      AclEntry acl = new AclEntry.Builder().setScope(AclEntryScope.ACCESS)
+          .setType(AclEntryType.MASK).setPermission(perms.getGroupAction()).build();
+      defaults.add(acl);
+
+      hasDefaults[3] = true;
+    }
+
+    return defaults;
+  }
+
+  private boolean hasExtendedAcls(AclStatus status) {
+    if (status != null) {
+      return status.getEntries().size() > 0;
+    }
+    return false;
+  }
 }
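`getDefaultAclEntries` turns the table directory's AclStatus into a complete spec for `fs.setAcl` on each new partition directory: every DEFAULT entry is duplicated as an ACCESS entry (and kept, so deeper directories inherit too), and missing unnamed user/other/mask entries are backfilled from the table's permission bits. A worked example under stated assumptions — the owner, group, and permissions below are hypothetical, and the expected output is traced by hand from the method body above:

```java
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclEntryScope;
import org.apache.hadoop.fs.permission.AclEntryType;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;

public class DefaultAclExample {
  public static void main(String[] args) {
    // Hypothetical table directory: perms rwxr-x--x, one extended default entry.
    FsPermission perms = new FsPermission((short) 0751);
    AclStatus stat = new AclStatus.Builder()
        .owner("hive").group("hadoop")
        .addEntry(new AclEntry.Builder().setScope(AclEntryScope.DEFAULT)
            .setType(AclEntryType.USER).setName("bob")
            .setPermission(FsAction.READ_EXECUTE).build())
        .build();
    System.out.println(stat);

    // For this input, getDefaultAclEntries(stat, perms) yields, in spec terms:
    //   user:bob:r-x          ACCESS copy of the default entry
    //   default:user:bob:r-x  kept so grandchildren keep inheriting
    //   user::rwx             backfilled from perms.getUserAction()
    //   other::--x            backfilled from perms.getOtherAction()
    //   mask::r-x             backfilled from perms.getGroupAction()
    // Note: the unnamed group entry is only backfilled when the source AclStatus
    // itself reports an unnamed ACCESS group entry, so it is absent here.
  }
}
```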
diff --git a/itests/hive-unit-hadoop2/src/test/java/org/apache/hadoop/hive/ql/security/TestExtendedAcls.java b/itests/hive-unit-hadoop2/src/test/java/org/apache/hadoop/hive/ql/security/TestExtendedAcls.java
index b798379..867e2a3 100644
--- a/itests/hive-unit-hadoop2/src/test/java/org/apache/hadoop/hive/ql/security/TestExtendedAcls.java
+++ b/itests/hive-unit-hadoop2/src/test/java/org/apache/hadoop/hive/ql/security/TestExtendedAcls.java
@@ -18,12 +18,14 @@
 package org.apache.hadoop.hive.ql.security;

 import static org.apache.hadoop.fs.permission.AclEntryScope.ACCESS;
+import static org.apache.hadoop.fs.permission.AclEntryScope.DEFAULT;
 import static org.apache.hadoop.fs.permission.AclEntryType.GROUP;
 import static org.apache.hadoop.fs.permission.AclEntryType.OTHER;
 import static org.apache.hadoop.fs.permission.AclEntryType.USER;

 import java.util.List;

+import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.AclEntryScope;
@@ -31,12 +33,10 @@
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
 import org.junit.Assert;
 import org.junit.BeforeClass;

 import com.google.common.collect.ImmutableList;
-import com.google.common.collect.Lists;

 public class TestExtendedAcls extends FolderPermissionBase {
@@ -56,6 +56,19 @@ public static void setup() throws Exception {
       aclEntry(ACCESS, USER, "bar", FsAction.READ_WRITE),
       aclEntry(ACCESS, USER, "foo", FsAction.READ_EXECUTE),
       aclEntry(ACCESS, GROUP, "bar", FsAction.READ_WRITE),
+      aclEntry(ACCESS, GROUP, "foo", FsAction.READ_EXECUTE),
+      aclEntry(DEFAULT, USER, "bar", FsAction.READ_WRITE),
+      aclEntry(DEFAULT, USER, "foo", FsAction.READ_EXECUTE),
+      aclEntry(DEFAULT, GROUP, "bar", FsAction.READ_WRITE),
+      aclEntry(DEFAULT, GROUP, "foo", FsAction.READ_EXECUTE));
+
+  private final ImmutableList<AclEntry> aclSpec1ForFile = ImmutableList.of(
+      aclEntry(ACCESS, USER, FsAction.ALL),
+      aclEntry(ACCESS, GROUP, FsAction.ALL),
+      aclEntry(ACCESS, OTHER, FsAction.ALL),
+      aclEntry(ACCESS, USER, "bar", FsAction.READ_WRITE),
+      aclEntry(ACCESS, USER, "foo", FsAction.READ_EXECUTE),
+      aclEntry(ACCESS, GROUP, "bar", FsAction.READ_WRITE),
       aclEntry(ACCESS, GROUP, "foo", FsAction.READ_EXECUTE));

   private final ImmutableList<AclEntry> aclSpec2 = ImmutableList.of(
@@ -65,16 +78,29 @@ public static void setup() throws Exception {
       aclEntry(ACCESS, USER, "bar2", FsAction.READ_WRITE),
       aclEntry(ACCESS, USER, "foo2", FsAction.READ_EXECUTE),
       aclEntry(ACCESS, GROUP, "bar2", FsAction.READ),
+      aclEntry(ACCESS, GROUP, "foo2", FsAction.READ_EXECUTE),
+      aclEntry(DEFAULT, USER, "bar2", FsAction.READ_WRITE),
+      aclEntry(DEFAULT, USER, "foo2", FsAction.READ_EXECUTE),
+      aclEntry(DEFAULT, GROUP, "bar2", FsAction.READ),
+      aclEntry(DEFAULT, GROUP, "foo2", FsAction.READ_EXECUTE));
+
+  private final ImmutableList<AclEntry> aclSpec2ForFile = ImmutableList.of(
+      aclEntry(ACCESS, USER, FsAction.ALL),
+      aclEntry(ACCESS, GROUP, FsAction.ALL),
+      aclEntry(ACCESS, OTHER, FsAction.READ_EXECUTE),
+      aclEntry(ACCESS, USER, "bar2", FsAction.READ_WRITE),
+      aclEntry(ACCESS, USER, "foo2", FsAction.READ_EXECUTE),
+      aclEntry(ACCESS, GROUP, "bar2", FsAction.READ),
       aclEntry(ACCESS, GROUP, "foo2", FsAction.READ_EXECUTE));

   @Override
-  public void setPermission(String locn, int permIndex) throws Exception {
+  public void setPermission(String locn, int permIndex, boolean isFile) throws Exception {
     switch (permIndex) {
     case 0:
-      setAcl(locn, aclSpec1);
+      setAcl(locn, isFile ? aclSpec1ForFile : aclSpec1, isFile);
       break;
     case 1:
-      setAcl(locn, aclSpec2);
+      setAcl(locn, isFile ? aclSpec2ForFile : aclSpec2, isFile);
       break;
     default:
       throw new RuntimeException("Only 2 permissions by this test");
@@ -82,21 +108,27 @@ public void setPermission(String locn, int permIndex) throws Exception {
   }

   @Override
+  public void setPermission(String locn, int permIndex) throws Exception {
+    setPermission(locn, permIndex, false);
+  }
+
+  @Override
   public void verifyPermission(String locn, int permIndex) throws Exception {
+    FileStatus fstat = fs.getFileStatus(new Path(locn));
+    FsPermission perm = fstat.getPermission();
+
     switch (permIndex) {
     case 0:
-      FsPermission perm = fs.getFileStatus(new Path(locn)).getPermission();
       Assert.assertEquals("Location: " + locn, "rwxrwxrwx", String.valueOf(perm));

       List<AclEntry> actual = getAcl(locn);
-      verifyAcls(aclSpec1, actual);
+      verifyAcls(aclSpec1, actual, fstat.isFile());
       break;
     case 1:
-      perm = fs.getFileStatus(new Path(locn)).getPermission();
       Assert.assertEquals("Location: " + locn, "rwxrwxr-x", String.valueOf(perm));

       List<AclEntry> acls = getAcl(locn);
-      verifyAcls(aclSpec2, acls);
+      verifyAcls(aclSpec2, acls, fstat.isFile());
       break;
     default:
       throw new RuntimeException("Only 2 permissions by this test: " + permIndex);
@@ -139,9 +171,14 @@ private AclEntry aclEntry(AclEntryScope scope, AclEntryType type,
         .setPermission(permission).build();
   }

-  private void verifyAcls(List<AclEntry> expectedList, List<AclEntry> actualList) {
+  private void verifyAcls(List<AclEntry> expectedList, List<AclEntry> actualList, boolean isFile) {
     for (AclEntry expected : expectedList) {
       if (expected.getName() != null) {
+        if (isFile && expected.getScope() == DEFAULT) {
+          // Files will not inherit default extended ACL rules from their parent, so ignore them.
+          continue;
+        }
+
         //the non-named acl's are coming as regular permission, and not as aclEntries.
         boolean found = false;
         for (AclEntry actual : actualList) {
@@ -156,7 +193,7 @@ private void verifyAcls(List<AclEntry> expectedList, List<AclEntry> actualList)
     }
   }

-  private void setAcl(String locn, List<AclEntry> aclSpec) throws Exception {
+  private void setAcl(String locn, List<AclEntry> aclSpec, boolean isFile) throws Exception {
     fs.setAcl(new Path(locn), aclSpec);
   }
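The reason this test keeps separate `*ForFile` specs with no DEFAULT entries: HDFS only permits default ACLs on directories, so a `setAcl` call on a file whose spec contains a DEFAULT-scoped entry fails on the NameNode. A hypothetical guard helper (not part of the patch) makes the constraint explicit:

```java
import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclEntryScope;

public final class FileAclSpecs {
  private FileAclSpecs() {}

  /**
   * Strip DEFAULT-scope entries before applying an ACL spec to a file.
   * Default ACLs are only valid on directories; passing one for a file
   * makes setAcl throw on the NameNode.
   */
  public static void setAclSafely(FileSystem fs, Path path, List<AclEntry> spec)
      throws Exception {
    if (fs.getFileStatus(path).isFile()) {
      List<AclEntry> accessOnly = new ArrayList<>();
      for (AclEntry e : spec) {
        if (e.getScope() != AclEntryScope.DEFAULT) {
          accessOnly.add(e);
        }
      }
      fs.setAcl(path, accessOnly);
    } else {
      fs.setAcl(path, spec);
    }
  }
}
```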
diff --git a/itests/hive-unit-hadoop2/src/test/java/org/apache/hadoop/hive/ql/security/TestInheritPermsExtendedAcls.java b/itests/hive-unit-hadoop2/src/test/java/org/apache/hadoop/hive/ql/security/TestInheritPermsExtendedAcls.java
new file mode 100644
index 0000000..d6da08e
--- /dev/null
+++ b/itests/hive-unit-hadoop2/src/test/java/org/apache/hadoop/hive/ql/security/TestInheritPermsExtendedAcls.java
@@ -0,0 +1,201 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.ql.security;
+
+import static org.apache.hadoop.fs.permission.AclEntryScope.ACCESS;
+import static org.apache.hadoop.fs.permission.AclEntryScope.DEFAULT;
+import static org.apache.hadoop.fs.permission.AclEntryType.GROUP;
+import static org.apache.hadoop.fs.permission.AclEntryType.OTHER;
+import static org.apache.hadoop.fs.permission.AclEntryType.USER;
+import static org.apache.hadoop.fs.permission.AclEntryType.MASK;
+
+import java.util.List;
+
+import org.junit.Assert;
+import org.junit.BeforeClass;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.AclEntry;
+import org.apache.hadoop.fs.permission.AclEntryScope;
+import org.apache.hadoop.fs.permission.AclEntryType;
+import org.apache.hadoop.fs.permission.FsAction;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.io.HdfsUtils;
+import org.apache.hadoop.hive.io.HdfsUtils.HadoopFileStatus;
+
+import com.google.common.collect.ImmutableList;
+
+public class TestInheritPermsExtendedAcls extends FolderPermissionBase {
+
+  @BeforeClass
+  public static void setup() throws Exception {
+    conf = new HiveConf(TestInheritPermsExtendedAcls.class);
+    //setup the mini DFS with acl's enabled.
+    conf.set("dfs.namenode.acls.enabled", "true");
+    conf.setVar(HiveConf.ConfVars.HIVEMAPREDMODE, "nonstrict");
+    baseSetup();
+  }
+
+  private final ImmutableList<AclEntry> aclSpec1 = ImmutableList.of(
+      aclEntry(DEFAULT, USER, FsAction.ALL),
+      aclEntry(DEFAULT, GROUP, FsAction.READ),
+      aclEntry(DEFAULT, OTHER, FsAction.READ),
+      aclEntry(ACCESS, USER, FsAction.ALL),
+      aclEntry(ACCESS, GROUP, FsAction.READ_WRITE),
+      aclEntry(ACCESS, OTHER, FsAction.NONE),
+      aclEntry(DEFAULT, USER, "foo", FsAction.READ),
+      aclEntry(ACCESS, USER, "bar", FsAction.READ_WRITE),
+      aclEntry(ACCESS, USER, "foo", FsAction.READ_EXECUTE),
+      aclEntry(ACCESS, GROUP, "bar", FsAction.READ_WRITE),
+      aclEntry(ACCESS, GROUP, "foo", FsAction.READ_EXECUTE));
+
+  private final ImmutableList<AclEntry> aclSpec2 = ImmutableList.of(
+      aclEntry(DEFAULT, USER, FsAction.ALL),
+      aclEntry(DEFAULT, GROUP, FsAction.READ),
+      aclEntry(DEFAULT, OTHER, FsAction.READ_WRITE),
+      aclEntry(ACCESS, USER, FsAction.ALL),
+      aclEntry(ACCESS, GROUP, FsAction.READ_WRITE),
+      aclEntry(ACCESS, OTHER, FsAction.NONE),
+      aclEntry(DEFAULT, USER, "foo", FsAction.READ),
+      aclEntry(ACCESS, USER, "bar", FsAction.READ_WRITE),
+      aclEntry(ACCESS, USER, "foo", FsAction.READ_EXECUTE),
+      aclEntry(ACCESS, GROUP, "bar", FsAction.READ_WRITE),
+      aclEntry(ACCESS, GROUP, "foo", FsAction.READ_EXECUTE));
+
+  private final ImmutableList<AclEntry> warehouseSpec1 = ImmutableList.of(
+      aclEntry(DEFAULT, USER, FsAction.ALL),
+      aclEntry(DEFAULT, GROUP, FsAction.READ),
+      aclEntry(DEFAULT, OTHER, FsAction.READ),
+      aclEntry(DEFAULT, MASK, FsAction.READ),
+      aclEntry(ACCESS, GROUP, FsAction.READ_WRITE),
+      aclEntry(DEFAULT, USER, "foo", FsAction.READ),
+      aclEntry(ACCESS, USER, "bar", FsAction.READ_WRITE),
+      aclEntry(ACCESS, USER, "foo", FsAction.READ_EXECUTE),
+      aclEntry(ACCESS, GROUP, "bar", FsAction.READ_WRITE),
+      aclEntry(ACCESS, GROUP, "foo", FsAction.READ_EXECUTE));
+
+  /**
+   * Create a new AclEntry with scope, type and permission (no name).
+   *
+   * @param scope
+   *          AclEntryScope scope of the ACL entry
+   * @param type
+   *          AclEntryType ACL entry type
+   * @param permission
+   *          FsAction set of permissions in the ACL entry
+   * @return AclEntry new AclEntry
+   */
+  private AclEntry aclEntry(AclEntryScope scope, AclEntryType type,
+      FsAction permission) {
+    return new AclEntry.Builder().setScope(scope).setType(type)
+        .setPermission(permission).build();
+  }
+
+  /**
+   * Create a new AclEntry with scope, type, name and permission.
+   *
+   * @param scope
+   *          AclEntryScope scope of the ACL entry
+   * @param type
+   *          AclEntryType ACL entry type
+   * @param name
+   *          String optional ACL entry name
+   * @param permission
+   *          FsAction set of permissions in the ACL entry
+   * @return AclEntry new AclEntry
+   */
+  private AclEntry aclEntry(AclEntryScope scope, AclEntryType type,
+      String name, FsAction permission) {
+    return new AclEntry.Builder().setScope(scope).setType(type).setName(name)
+        .setPermission(permission).build();
+  }
+
+  @Override
+  public void setPermission(String locn, int permIndex) throws Exception {
+    Path location = new Path(locn);
+    FileStatus fstatus = fs.getFileStatus(location);
+
+    switch (permIndex) {
+    case 0:
+      fs.setAcl(location, aclSpec1);
+      break;
+    case 1:
+      if (fstatus.isDirectory()) {
+        fs.setAcl(location, aclSpec2);
+      }
+      break;
+    default:
+      throw new RuntimeException("Only 2 permissions by this test");
+    }
+  }
+
+  @Override
+  public void verifyPermission(String locn, int permIndex) throws Exception {
+    Path location = new Path(locn);
+    FileStatus fstatus = fs.getFileStatus(location);
+    FsPermission perm = fstatus.getPermission();
+
+    if (warehouseDir.toString().equals(locn)) {
+      Assert.assertEquals(permIndex + " warehouse : " + locn, "rwxrwx---", String.valueOf(perm));
+      verifyAcls(locn, warehouseSpec1, fs.getAclStatus(location).getEntries().toArray(new AclEntry[0]), false);
+    } else {
+      HadoopFileStatus parentFullFileStatus = new HdfsUtils.HadoopFileStatus(conf, fs, location.getParent());
+      List<AclEntry> parentAclEntries = parentFullFileStatus.getAclEntries();
+      HadoopFileStatus actualFullFileStatus = new HdfsUtils.HadoopFileStatus(conf, fs, location);
+      List<AclEntry> actualAclEntries = actualFullFileStatus.getAclEntries();
+
+      //check if parent directory has any default ACLs set
+      for (AclEntry defentry : parentAclEntries) {
+        if (defentry.getScope().equals(DEFAULT)
+            && ((defentry.getName() != null) || defentry.getType().equals(AclEntryType.GROUP))) {
+          for (int i = 0; i < parentAclEntries.size(); i++) {
+            AclEntry entry = parentAclEntries.get(i);
+            if (entry.getName() != null && entry.getScope().equals(ACCESS)
+                && entry.getType().equals(defentry.getType()) && entry.getName().equals(defentry.getName())) {
+              parentAclEntries.set(i, aclEntry(ACCESS, defentry.getType(), defentry.getName(), defentry.getPermission()));
+            } else if (entry.getName() == null && entry.getScope().equals(ACCESS)
+                && entry.getType().equals(defentry.getType())) {
+              parentAclEntries.set(i, aclEntry(ACCESS, defentry.getType(), defentry.getPermission()));
+            }
+          }
+        }
+      }
+      verifyAcls(locn, parentAclEntries, actualAclEntries.toArray(new AclEntry[0]), fstatus.isFile());
+    }
+  }
+
+  private void verifyAcls(String locn, List<AclEntry> expectedAclSpec, AclEntry[] actualAclSpec, boolean isFile) {
+    for (AclEntry expected : expectedAclSpec) {
+      if (expected.getName() != null) {
+        //if file, there are no default ACLs set
+        if (isFile && expected.getScope() == DEFAULT) {
+          // Files will not inherit default extended ACL rules from their parent, so ignore them.
+          continue;
+        }
+
+        boolean found = false;
+        for (AclEntry actual : actualAclSpec) {
+          if (actual.equals(expected)) {
+            found = true;
+          }
+        }
+        if (!found) {
+          Assert.fail("Following Acl does not have a match: " + expected);
+        }
+      }
+    }
+  }
+}
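When asserting against ACLs read back from HDFS, bear in mind that `AclStatus.getEntries()` returns only the extended entries; the unnamed user and other entries still live in the permission bits, and once an extended ACL exists the group bits hold the mask. A small standalone sketch (hypothetical, not from the patch) that prints a getfacl-style view:

```java
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.FsPermission;

public class ShowAcls {
  /** Print a getfacl-style view: permission bits plus extended entries. */
  static void show(FileSystem fs, Path p) throws Exception {
    FileStatus st = fs.getFileStatus(p);
    FsPermission perm = st.getPermission();
    System.out.println("# file: " + p);
    System.out.println("user::" + perm.getUserAction().SYMBOL);
    for (AclEntry e : fs.getAclStatus(p).getEntries()) {
      // e.g. "user:foo:r-x" or "default:group:bar:rw-"; the unnamed
      // ACCESS group entry appears here too once an extended ACL exists,
      // because the group permission bits then store the mask instead.
      System.out.println(e);
    }
    System.out.println("other::" + perm.getOtherAction().SYMBOL);
  }
}
```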
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/FolderPermissionBase.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/FolderPermissionBase.java
index 2ae9cc0..62fca2d 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/FolderPermissionBase.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/FolderPermissionBase.java
@@ -65,6 +65,9 @@ public boolean accept(Path p){

   public abstract void verifyPermission(String locn, int permIndex) throws Exception;

+  public void setPermission(String locn, int permIndex, boolean isFile) throws Exception {
+    setPermission(locn, permIndex);
+  }

   public void setPermission(String locn) throws Exception {
     setPermission(locn, 0);
@@ -260,17 +263,16 @@ public void testInsertStaticSinglePartition() throws Exception {
     }

     //insert overwrite test
+    //only set the permissions for table, partitions will inherit their permissions from the table's permissions
     setPermission(warehouseDir + "/" + tableName, 1);
-    setPermission(warehouseDir + "/" + tableName + "/part1=1", 1);

     ret = driver.run("insert overwrite table " + tableName + " partition(part1='1') select key,value from mysrc where part1='1' and part2='1'");
     Assert.assertEquals(0, ret.getResponseCode());

-    verifyPermission(warehouseDir + "/" + tableName, 1);
-    verifyPermission(warehouseDir + "/" + tableName + "/part1=1", 1);
+    verifyPermission(warehouseDir + "/" + tableName + "/part1=1", 0);

     Assert.assertTrue(listStatus(warehouseDir + "/" + tableName + "/part1=1").size() > 0);
     for (String child : listStatus(warehouseDir + "/" + tableName + "/part1=1")) {
-      verifyPermission(child, 1);
+      verifyPermission(child, 0);
     }
   }
@@ -297,20 +299,18 @@ public void testInsertStaticDualPartition() throws Exception {
     }

     //insert overwrite test
+    //only set the permissions for table, partitions will inherit their permissions from the table's permissions
     setPermission(warehouseDir + "/" + tableName, 1);
-    setPermission(warehouseDir + "/" + tableName + "/part1=1", 1);
-    setPermission(warehouseDir + "/" + tableName + "/part1=1/part2=1", 1);

     ret = driver.run("insert overwrite table " + tableName + " partition(part1='1', part2='1') select key,value from mysrc where part1='1' and part2='1'");
     Assert.assertEquals(0, ret.getResponseCode());

-    verifyPermission(warehouseDir + "/" + tableName, 1);
-    verifyPermission(warehouseDir + "/" + tableName + "/part1=1", 1);
-    verifyPermission(warehouseDir + "/" + tableName + "/part1=1/part2=1", 1);
+    verifyPermission(warehouseDir + "/" + tableName + "/part1=1", 0);
+    verifyPermission(warehouseDir + "/" + tableName + "/part1=1/part2=1", 0);

     Assert.assertTrue(listStatus(warehouseDir + "/" + tableName + "/part1=1/part2=1").size() > 0);
     for (String child : listStatus(warehouseDir + "/" + tableName + "/part1=1/part2=1")) {
-      verifyPermission(child, 1);
+      verifyPermission(child, 0);
     }
   }
@@ -335,7 +335,16 @@ public void testInsertDualDynamicPartitions() throws Exception {
     ret = driver.run("insert overwrite table " + tableName + " partition (part1,part2) select key,value,part1,part2 from mysrc");
     Assert.assertEquals(0, ret.getResponseCode());

-    verifyDualPartitionTable(warehouseDir + "/" + tableName, 1);
+    //cannot verify table's and partition's inherited permissions since they were explicitly set
+    Assert.assertTrue(listStatus(warehouseDir + "/" + tableName + "/part1=1/part2=1").size() > 0);
+    for (String child : listStatus(warehouseDir + "/" + tableName + "/part1=1/part2=1")) {
+      verifyPermission(child, 1);
+    }
+
+    Assert.assertTrue(listStatus(warehouseDir + "/" + tableName + "/part1=2/part2=2").size() > 0);
+    for (String child : listStatus(warehouseDir + "/" + tableName + "/part1=2/part2=2")) {
+      verifyPermission(child, 1);
+    }
   }

   @Test
@@ -351,14 +360,15 @@ public void testInsertSingleDynamicPartition() throws Exception {
     setPermission(tableLoc, 0);
     ret = driver.run("insert into table " + tableName + " partition (part1) select key,value,part1 from mysrc");
     Assert.assertEquals(0,ret.getResponseCode());
-    verifySinglePartition(tableLoc, 0);
+    verifySinglePartition(tableLoc, 0, true);

     //Insert overwrite test, with permission set 1. We need reset existing partitions to 1 since the permissions
     //should be inherited from existing partition
     setSinglePartition(tableLoc, 1);
     ret = driver.run("insert overwrite table " + tableName + " partition (part1) select key,value,part1 from mysrc");
     Assert.assertEquals(0,ret.getResponseCode());
-    verifySinglePartition(tableLoc, 1);
+    //cannot verify the partition's permissions here since they were explicitly set
+    verifySinglePartition(tableLoc, 1, false);

     //delete and re-insert using insert overwrite. There's different code paths insert vs insert overwrite for new tables.
     ret = driver.run("DROP TABLE " + tableName);
@@ -372,7 +382,7 @@ public void testInsertSingleDynamicPartition() throws Exception {
     ret = driver.run("insert overwrite table " + tableName + " partition (part1) select key,value,part1 from mysrc");
     Assert.assertEquals(0, ret.getResponseCode());

-    verifySinglePartition(tableLoc, 0);
+    verifySinglePartition(tableLoc, 0, true);
   }

   @Test
@@ -465,7 +475,7 @@ public void testLoadLocal() throws Exception {
     //case1B: load data local into overwrite non-partitioned-table
     setPermission(warehouseDir + "/" + tableName, 1);
     for (String child : listStatus(tableLoc)) {
-      setPermission(child, 1);
+      setPermission(child, 1, true);
     }
     ret = driver.run("load data local inpath '" + dataFilePath + "' overwrite into table " + tableName);
     Assert.assertEquals(0,ret.getResponseCode());
@@ -499,7 +509,7 @@ public void testLoadLocal() throws Exception {
     setPermission(tableLoc, 1);
     setPermission(partLoc, 1);
     for (String child : listStatus(partLoc)) {
-      setPermission(child, 1);
+      setPermission(child, 1, true);
     }
     ret = driver.run("LOAD DATA LOCAL INPATH '" + dataFilePath + "' OVERWRITE INTO TABLE " + tableName + " PARTITION (part1='1',part2='1')");
     Assert.assertEquals(0,ret.getResponseCode());
@@ -536,7 +546,7 @@ public void testLoad() throws Exception {
     //case1B: load data into overwrite non-partitioned-table
     setPermission(warehouseDir + "/" + tableName, 1);
     for (String child : listStatus(tableLoc)) {
-      setPermission(child, 1);
+      setPermission(child, 1, true);
     }

     fs.copyFromLocalFile(dataFilePath, new Path(location));
@@ -574,7 +584,7 @@ public void testLoad() throws Exception {
     setPermission(partLoc, 1);
     Assert.assertTrue(listStatus(partLoc).size() > 0);
     for (String child : listStatus(partLoc)) {
-      setPermission(child, 1);
+      setPermission(child, 1, true);
     }

     fs.copyFromLocalFile(dataFilePath, new Path(location));
@@ -734,9 +744,11 @@ private void setSinglePartition(String tableLoc, int index) throws Exception {
     setPermission(tableLoc + "/part1=2", index);
   }

-  private void verifySinglePartition(String tableLoc, int index) throws Exception {
-    verifyPermission(tableLoc + "/part1=1", index);
-    verifyPermission(tableLoc + "/part1=2", index);
+  private void verifySinglePartition(String tableLoc, int index, boolean partitions) throws Exception {
+    if (partitions) {
+      verifyPermission(tableLoc + "/part1=1", index);
+      verifyPermission(tableLoc + "/part1=2", index);
+    }

     Assert.assertTrue(listStatus(tableLoc + "/part1=1").size() > 0);
     for (String child : listStatus(tableLoc + "/part1=1")) {
@@ -759,7 +771,6 @@ private void setDualPartitionTable(String baseTablePath, int index) throws Excep
   }

   private void verifyDualPartitionTable(String baseTablePath, int index) throws Exception {
-    verifyPermission(baseTablePath, index);
     verifyPermission(baseTablePath + "/part1=1", index);
     verifyPermission(baseTablePath + "/part1=1/part2=1", index);
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
index e320dbf..2522b60 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
@@ -2863,7 +2863,7 @@ private static void copyFiles(final HiveConf conf, final FileSystem destFs,
           }

           if (inheritPerms) {
-            HdfsUtils.setFullFileStatus(conf, fullDestStatus, srcGroup, destFs, destPath, false);
+            HdfsUtils.setFullFileStatus(conf, fullDestStatus, srcGroup, destFs, destPath, false, false, false);
           }
           if (null != newFiles) {
             newFiles.add(destPath);
@@ -3086,7 +3086,7 @@ public Void call() throws Exception {
             final String group = srcStatus.getGroup();
             if(destFs.rename(srcStatus.getPath(), destFile)) {
               if (inheritPerms) {
-                HdfsUtils.setFullFileStatus(conf, desiredStatus, group, destFs, destFile, false);
+                HdfsUtils.setFullFileStatus(conf, desiredStatus, group, destFs, destFile, false, false, false);
               }
             } else {
               throw new IOException("rename for src path: " + srcStatus.getPath() + " to dest path:"
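Both Hive.java call sites above copy or rename individual files, hence `isDir = false, rename = false`: a file must never receive DEFAULT entries. Relatedly, the `-chmod` fallback added in the HdfsUtils.java hunks below serializes the permission with `Integer.toString(perm.toShort(), 8)`; a self-contained worked example of that conversion (values hypothetical):

```java
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;

public class OctalPermExample {
  public static void main(String[] args) {
    // rwxr-x--- is 0750 octal; toShort() returns the decimal value 488, and
    // Integer.toString(..., 8) renders it back as the octal string "750"
    // that "hdfs dfs -chmod -R 750 <path>" expects.
    FsPermission perm = new FsPermission(FsAction.ALL, FsAction.READ_EXECUTE, FsAction.NONE);
    short raw = perm.toShort();                    // 488
    System.out.println(raw);                       // prints: 488
    System.out.println(Integer.toString(raw, 8));  // prints: 750
  }
}
```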
diff --git a/shims/common/src/main/java/org/apache/hadoop/hive/io/HdfsUtils.java b/shims/common/src/main/java/org/apache/hadoop/hive/io/HdfsUtils.java
index 7b6a9bd..a8a5038 100644
--- a/shims/common/src/main/java/org/apache/hadoop/hive/io/HdfsUtils.java
+++ b/shims/common/src/main/java/org/apache/hadoop/hive/io/HdfsUtils.java
@@ -60,26 +60,71 @@ public static Path getFileIdPath(

   public static void setFullFileStatus(Configuration conf, HdfsUtils.HadoopFileStatus sourceStatus,
       FileSystem fs, Path target, boolean recursion) throws IOException {
-    setFullFileStatus(conf, sourceStatus, null, fs, target, recursion);
+    setFullFileStatus(conf, sourceStatus, null, fs, target, recursion, true, false);
   }

   public static void setFullFileStatus(Configuration conf, HdfsUtils.HadoopFileStatus sourceStatus,
-    String targetGroup, FileSystem fs, Path target, boolean recursion) throws IOException {
+      String targetGroup, FileSystem fs, Path target, boolean recursion) throws IOException {
+    // Assume source is a directory for purposes of ACLs.
+    setFullFileStatus(conf, sourceStatus, targetGroup, fs, target, recursion, true, false);
+  }
+
+  // This method should be called when dealing with ACLs on files, because default ACLs cannot
+  // be applied to files; an exception will be thrown.
+  public static void setFullFileStatus(Configuration conf, HdfsUtils.HadoopFileStatus sourceStatus,
+      String targetGroup, FileSystem fs, Path target, boolean recursion, boolean isDir,
+      boolean rename) throws IOException {
     FileStatus fStatus= sourceStatus.getFileStatus();
     String group = fStatus.getGroup();
     boolean aclEnabled = Objects.equal(conf.get("dfs.namenode.acls.enabled"), "true");
     FsPermission sourcePerm = fStatus.getPermission();
     List<AclEntry> aclEntries = null;
+    List<AclEntry> defaultAclEntries = null;
     if (aclEnabled) {
       if (sourceStatus.getAclEntries() != null) {
         LOG.trace(sourceStatus.aclStatus.toString());
         aclEntries = new ArrayList<>(sourceStatus.getAclEntries());
-        removeBaseAclEntries(aclEntries);
+        defaultAclEntries = extractBaseDefaultAclEntries(aclEntries);
+        if (defaultAclEntries.size() > 0) {
+          removeBaseAclEntries(aclEntries);
+
+          // remove base acl entries if there is a default acl set for that named user|group
+          List<AclEntry> temp = new ArrayList<AclEntry>();
+          for (AclEntry entry : aclEntries) {
+            if (defaultAclEntries.contains(entry)) {
+              for (AclEntry deleteEntry : aclEntries) {
+                if (deleteEntry.getType().equals(entry.getType())
+                    && deleteEntry.getName().equals(entry.getName())) {
+                  temp.add(deleteEntry);
+                }
+              }
+            }
+          }
+          if (temp.size() > 0) {
+            aclEntries.removeAll(temp);
+          }
+
+          // set directory's ACL entries based on parent's DEFAULT entries
+          for (AclEntry entry : defaultAclEntries) {
+            if (entry.getName() != null) {
+              aclEntries.add(newAclEntry(AclEntryScope.ACCESS, entry.getType(), entry.getName(),
+                  entry.getPermission()));
+            } else {
+              aclEntries.add(newAclEntry(AclEntryScope.ACCESS, entry.getType(),
+                  entry.getPermission()));
+            }
+          }
-
-        //the ACL api's also expect the tradition user/group/other permission in the form of ACL
-        aclEntries.add(newAclEntry(AclEntryScope.ACCESS, AclEntryType.USER, sourcePerm.getUserAction()));
-        aclEntries.add(newAclEntry(AclEntryScope.ACCESS, AclEntryType.GROUP, sourcePerm.getGroupAction()));
-        aclEntries.add(newAclEntry(AclEntryScope.ACCESS, AclEntryType.OTHER, sourcePerm.getOtherAction()));
+        } else {
+          if (aclEntries.size() == 0) {
+            aclEntries.add(newAclEntry(AclEntryScope.ACCESS, AclEntryType.GROUP,
+                sourcePerm.getGroupAction()));
+          }
+          aclEntries.add(newAclEntry(AclEntryScope.ACCESS, AclEntryType.USER,
+              sourcePerm.getUserAction()));
+          aclEntries.add(newAclEntry(AclEntryScope.ACCESS, AclEntryType.OTHER,
+              sourcePerm.getOtherAction()));
+        }
       }
     }
@@ -101,11 +146,19 @@ public static void setFullFileStatus(Configuration conf, HdfsUtils.HadoopFileSta
           String aclEntry = Joiner.on(",").join(aclEntries);
           run(fsShell, new String[]{"-setfacl", "-R", "--set", aclEntry, target.toString()});
+          //need to set DEFAULT ACLs when renaming directory
+          if (rename) {
+            String defaultAclEntry = Joiner.on(",").join(defaultAclEntries);
+            run(fsShell, new String[]{"-setfacl", "-R", "--set", defaultAclEntry, target.toString()});
+          }
         } catch (Exception e) {
           LOG.info("Skipping ACL inheritance: File system for path " + target + " " +
               "does not support ACLs but dfs.namenode.acls.enabled is set to true. ");
           LOG.debug("The details are: " + e, e);
         }
+      } else {
+        String permission = Integer.toString(sourceStatus.getFileStatus().getPermission().toShort(), 8);
+        run(fsShell, new String[]{"-chmod", "-R", permission, target.toString()});
       }
     } else {
       String permission = Integer.toString(sourcePerm.toShort(), 8);
@@ -148,6 +201,24 @@ private static AclEntry newAclEntry(AclEntryScope scope, AclEntryType type,
         .setPermission(permission).build();
   }

   /**
+   * Create a new AclEntry with scope, type, name and permission.
+   *
+   * @param scope
+   *          AclEntryScope scope of the ACL entry
+   * @param type
+   *          AclEntryType ACL entry type
+   * @param name
+   *          String optional ACL entry name
+   * @param permission
+   *          FsAction set of permissions in the ACL entry
+   * @return AclEntry new AclEntry
+   */
+  private static AclEntry newAclEntry(AclEntryScope scope, AclEntryType type,
+      String name, FsAction permission) {
+    return new AclEntry.Builder().setScope(scope).setType(type).setName(name)
+        .setPermission(permission).build();
+  }
+
+  /**
    * Removes basic permission acls (unamed acls) from the list of acl entries
    * @param entries acl entries to remove from.
    */
@@ -162,6 +233,24 @@ public boolean apply(AclEntry input) {
       }
     });
   }
+  /**
+   * Extracts the DEFAULT ACL entries from the list of acl entries
+   * @param entries acl entries to extract from
+   * @return DEFAULT scope acl entries, named and unnamed
+   */
+  private static List<AclEntry> extractBaseDefaultAclEntries(List<AclEntry> entries) {
+    List<AclEntry> defaultAclEntries = new ArrayList<AclEntry>(entries);
+    Iterables.removeIf(defaultAclEntries, new Predicate<AclEntry>() {
+      @Override
+      public boolean apply(AclEntry input) {
+        if (input.getScope().equals(AclEntryScope.ACCESS)) {
+          return true;
+        }
+        return false;
+      }
+    });
+    return defaultAclEntries;
+  }

   private static void run(FsShell shell, String[] command) throws Exception {
     LOG.debug(ArrayUtils.toString(command));