commit 10b5cc05a4ade05be08677b570fbd4c5c4f25d4e
Author: Chris Drome
Date:   Tue Jul 25 18:36:51 2017 +0000

    HIVE-13989: Extended ACLs are not handled according to specification.

diff --git common/src/java/org/apache/hadoop/hive/common/FileUtils.java common/src/java/org/apache/hadoop/hive/common/FileUtils.java
index fb28573..9a6ad58 100644
--- common/src/java/org/apache/hadoop/hive/common/FileUtils.java
+++ common/src/java/org/apache/hadoop/hive/common/FileUtils.java
@@ -661,7 +661,7 @@ public static boolean renameWithPerms(FileSystem fs, Path sourcePath,
     //rename the directory
     if (fs.rename(sourcePath, destPath)) {
       try {
-        HdfsUtils.setFullFileStatus(conf, new HdfsUtils.HadoopFileStatus(conf, fs, destPath.getParent()), fs, destPath, true);
+        HdfsUtils.setFullFileStatus(conf, new HdfsUtils.HadoopFileStatus(conf, fs, destPath.getParent()), null, fs, destPath, true, true, true);
       } catch (Exception e) {
         LOG.warn("Error setting permissions or group of " + destPath, e);
       }
diff --git hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/FileOutputCommitterContainer.java hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/FileOutputCommitterContainer.java
index 9056f11..74b2308 100644
--- hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/FileOutputCommitterContainer.java
+++ hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/FileOutputCommitterContainer.java
@@ -32,6 +32,10 @@ import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.AclEntry;
+import org.apache.hadoop.fs.permission.AclEntryScope;
+import org.apache.hadoop.fs.permission.AclEntryType;
+import org.apache.hadoop.fs.permission.AclStatus;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hive.common.FileUtils;
 import org.apache.hadoop.hive.common.StatsSetupConst;
@@ -334,7 +338,7 @@ private Partition constructPartition(
       String partLocnRoot, String dynPartPath, Map<String, String> partKVs,
       HCatSchema outputSchema, Map<String, String> params,
       Table table, FileSystem fs,
-      String grpName, FsPermission perms) throws IOException {
+      String grpName, FsPermission perms, List<AclEntry> acls) throws IOException {

     Partition partition = new Partition();
     partition.setDbName(table.getDbName());
@@ -371,7 +375,7 @@ private Partition constructPartition(
       for (FieldSchema partKey : table.getPartitionKeys()) {
         if (i++ != 0) {
           fs.mkdirs(partPath); // Attempt to make the path in case it does not exist before we check
-          applyGroupAndPerms(fs, partPath, perms, grpName, false);
+          applyGroupAndPerms(fs, partPath, perms, acls, grpName, false);
         }
         partPath = constructPartialPartPath(partPath, partKey.getName().toLowerCase(), partKVs);
       }
@@ -381,7 +385,7 @@ private Partition constructPartition(
     // Need not bother in case of HDFS as permission is taken care of by setting UMask
     fs.mkdirs(partPath); // Attempt to make the path in case it does not exist before we check
     if (!ShimLoader.getHadoopShims().getHCatShim().isFileInHDFS(fs, partPath)) {
-      applyGroupAndPerms(fs, partPath, perms, grpName, true);
+      applyGroupAndPerms(fs, partPath, perms, acls, grpName, true);
     }

     // Set the location in the StorageDescriptor
@@ -401,18 +405,21 @@ private Partition constructPartition(
   }

   private void applyGroupAndPerms(FileSystem fs, Path dir, FsPermission permission,
-      String group, boolean recursive)
+      List<AclEntry> acls, String group, boolean recursive)
     throws IOException {
     if(LOG.isDebugEnabled()) {
       LOG.debug("applyGroupAndPerms : " + dir +
-          " perms: " + permission +
+ + " perms: " + permission + " acls: " + acls + " group: " + group + " recursive: " + recursive); } fs.setPermission(dir, permission); + if ((acls != null) && (acls.size() > 0)) { + fs.setAcl(dir, acls); + } if (recursive) { for (FileStatus fileStatus : fs.listStatus(dir)) { if (fileStatus.isDir()) { - applyGroupAndPerms(fs, fileStatus.getPath(), permission, group, true); + applyGroupAndPerms(fs, fileStatus.getPath(), permission, acls, group, true); } else { fs.setPermission(fileStatus.getPath(), permission); } @@ -774,6 +781,23 @@ private void registerPartitions(JobContext context) throws IOException{ try { HiveConf hiveConf = HCatUtil.getHiveConf(conf); client = HCatUtil.getHiveMetastoreClient(hiveConf); + + FileStatus tblStat = fs.getFileStatus(tblPath); + String grpName = tblStat.getGroup(); + FsPermission perms = tblStat.getPermission(); + ArrayList acls = null; + + if (conf.getBoolean("dfs.namenode.acls.enabled", false)) { + try { + AclStatus stat = fs.getAclStatus(tblPath); + if (hasExtendedAcls(stat)) { + acls = getDefaultAclEntries(stat, perms); + } + } catch (UnsupportedOperationException e) { + LOG.debug("Skipping ACLs", e); + } + } + if (table.getPartitionKeys().size() == 0) { // Move data from temp directory the actual table directory // No metastore operation required. @@ -787,27 +811,26 @@ private void registerPartitions(JobContext context) throws IOException{ table.getParameters().remove(StatsSetupConst.COLUMN_STATS_ACCURATE); client.alter_table(table.getDbName(), table.getTableName(), table.getTTable()); } + + applyGroupAndPerms(fs, tblPath, tblStat.getPermission(), acls, grpName, true); + return; } StorerInfo storer = InternalUtil.extractStorerInfo(table.getTTable().getSd(), table.getParameters()); - FileStatus tblStat = fs.getFileStatus(tblPath); - String grpName = tblStat.getGroup(); - FsPermission perms = tblStat.getPermission(); - List partitionsToAdd = new ArrayList(); if (!dynamicPartitioningUsed) { partitionsToAdd.add(constructPartition(context, jobInfo, tblPath.toString(), null, jobInfo.getPartitionValues(), jobInfo.getOutputSchema(), getStorerParameterMap(storer), - table, fs, grpName, perms)); + table, fs, grpName, perms, acls)); } else { for (Entry> entry : partitionsDiscoveredByPath.entrySet()) { partitionsToAdd.add(constructPartition(context, jobInfo, getPartitionRootLocation(entry.getKey(), entry.getValue().size()), entry.getKey(), entry.getValue(), jobInfo.getOutputSchema(), getStorerParameterMap(storer), table, - fs, grpName, perms)); + fs, grpName, perms, acls)); } } @@ -943,7 +966,7 @@ private void registerPartitions(JobContext context) throws IOException{ // Set permissions appropriately for each of the partitions we just created // so as to have their permissions mimic the table permissions for (Partition p : partitionsAdded){ - applyGroupAndPerms(fs,new Path(p.getSd().getLocation()),tblStat.getPermission(),tblStat.getGroup(),true); + applyGroupAndPerms(fs,new Path(p.getSd().getLocation()), tblStat.getPermission(), acls,tblStat.getGroup(),true); } } @@ -1017,5 +1040,80 @@ private void cancelDelegationTokens(JobContext context) throws IOException{ } } + private ArrayList getDefaultAclEntries(AclStatus stat, FsPermission perms) { + ArrayList defaults = new ArrayList(); + + boolean[] hasDefaults = { false, false, false, false }; + for (AclEntry e : stat.getEntries()) { + if (e.getScope() == AclEntryScope.DEFAULT) { + AclEntry acl = new AclEntry.Builder().setName(e.getName()).setScope(AclEntryScope.ACCESS) + 
.setType(e.getType()).setPermission(e.getPermission()).build(); + + defaults.add(acl); + defaults.add(e); + + if (e.getName() == null) { + if (e.getType() == AclEntryType.USER) { + hasDefaults[0] = true; + } + if (e.getType() == AclEntryType.GROUP) { + hasDefaults[1] = true; + } + if (e.getType() == AclEntryType.OTHER) { + hasDefaults[2] = true; + } + if (e.getType() == AclEntryType.MASK) { + hasDefaults[3] = true; + } + } + } + } + + if (! hasDefaults[0]) { + AclEntry acl = new AclEntry.Builder().setScope(AclEntryScope.ACCESS) + .setType(AclEntryType.USER).setPermission(perms.getUserAction()).build(); + defaults.add(acl); + + hasDefaults[0] = true; + } + + if (! hasDefaults[1]) { + for (AclEntry e : stat.getEntries()) { + if ((e.getScope() == AclEntryScope.ACCESS) && (e.getType() == AclEntryType.GROUP) && (e.getName() == null)) { + AclEntry acl = new AclEntry.Builder().setScope(AclEntryScope.ACCESS) + .setType(AclEntryType.GROUP).setPermission(e.getPermission()).build(); + defaults.add(acl); + + hasDefaults[1] = true; + } + } + } + + if (! hasDefaults[2]) { + AclEntry acl = new AclEntry.Builder().setScope(AclEntryScope.ACCESS) + .setType(AclEntryType.OTHER).setPermission(perms.getOtherAction()).build(); + defaults.add(acl); + + hasDefaults[2] = true; + } + + if (! hasDefaults[3]) { + AclEntry acl = new AclEntry.Builder().setScope(AclEntryScope.ACCESS) + .setType(AclEntryType.MASK).setPermission(perms.getGroupAction()).build(); + defaults.add(acl); + + hasDefaults[3] = true; + } + + return defaults; + } + + private boolean hasExtendedAcls(AclStatus status) { + if (status != null) { + return status.getEntries().size() > 0; + } + + return false; + } } diff --git hcatalog/core/src/test/java/org/apache/hive/hcatalog/MiniCluster.java hcatalog/core/src/test/java/org/apache/hive/hcatalog/MiniCluster.java index d9d8251..0064bf6 100644 --- hcatalog/core/src/test/java/org/apache/hive/hcatalog/MiniCluster.java +++ hcatalog/core/src/test/java/org/apache/hive/hcatalog/MiniCluster.java @@ -49,17 +49,15 @@ private JobConf m_conf = null; private final static MiniCluster INSTANCE = new MiniCluster(); - private static boolean isSetup = true; + private static boolean isSetup = false; private MiniCluster() { - setupMiniDfsAndMrClusters(); } - private void setupMiniDfsAndMrClusters() { + private void setupMiniDfsAndMrClusters(Configuration config) { try { final int dataNodes = 1; // There will be 4 data nodes final int taskTrackers = 1; // There will be 4 task tracker nodes - Configuration config = new Configuration(); // Builds and starts the mini dfs and mapreduce clusters if(System.getProperty("hadoop.log.dir") == null) { @@ -97,8 +95,12 @@ private void setupMiniDfsAndMrClusters() { * mapreduce cluster. 
   */
  public static MiniCluster buildCluster() {
+    return buildCluster(new Configuration());
+  }
+
+  public static MiniCluster buildCluster(Configuration config) {
     if (!isSetup) {
-      INSTANCE.setupMiniDfsAndMrClusters();
+      INSTANCE.setupMiniDfsAndMrClusters(config);
       isSetup = true;
     }
     return INSTANCE;
@@ -133,6 +135,10 @@ private void shutdownMiniDfsAndMrClusters() {
     m_mr = null;
   }

+  public Configuration getConfiguration() {
+    return new Configuration(m_conf);
+  }
+
   public Properties getProperties() {
     errorIfNotSetup();
     Properties properties = new Properties();
diff --git itests/hcatalog-unit/pom.xml itests/hcatalog-unit/pom.xml
index e338c5a..b74935e 100644
--- itests/hcatalog-unit/pom.xml
+++ itests/hcatalog-unit/pom.xml
@@ -101,6 +101,12 @@
       <version>${project.version}</version>
       <scope>test</scope>
     </dependency>
+    <dependency>
+      <groupId>org.apache.hive</groupId>
+      <artifactId>hive-it-util</artifactId>
+      <version>${project.version}</version>
+      <scope>test</scope>
+    </dependency>
     <dependency>
       <groupId>commons-io</groupId>
diff --git itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/pig/TestHCatExtendedAcls.java itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/pig/TestHCatExtendedAcls.java
new file mode 100644
index 0000000..17b9c4e
--- /dev/null
+++ itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/pig/TestHCatExtendedAcls.java
@@ -0,0 +1,707 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.hive.hcatalog.pig;
+
+import static org.apache.hadoop.fs.permission.AclEntryScope.ACCESS;
+import static org.apache.hadoop.fs.permission.AclEntryScope.DEFAULT;
+import static org.apache.hadoop.fs.permission.AclEntryType.GROUP;
+import static org.apache.hadoop.fs.permission.AclEntryType.MASK;
+import static org.apache.hadoop.fs.permission.AclEntryType.OTHER;
+import static org.apache.hadoop.fs.permission.AclEntryType.USER;
+
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.Lists;
+import com.google.common.collect.Sets;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.AclEntry;
+import org.apache.hadoop.fs.permission.AclEntryScope;
+import org.apache.hadoop.fs.permission.AclEntryType;
+import org.apache.hadoop.fs.permission.FsAction;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hive.cli.CliSessionState;
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.ql.CommandNeedRetryException;
+import org.apache.hadoop.hive.ql.Driver;
+import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse;
+import org.apache.hadoop.hive.ql.session.SessionState;
+import org.apache.hive.hcatalog.HcatTestUtils;
+import org.apache.hive.hcatalog.MiniCluster;
+import org.apache.hive.hcatalog.common.HCatUtil;
+import org.apache.pig.ExecType;
+import org.apache.pig.PigServer;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.AfterClass;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Properties;
+import java.util.Random;
+import java.util.Set;
+
+public class TestHCatExtendedAcls {
+  private static final Logger LOG = LoggerFactory.getLogger(TestHCatExtendedAcls.class);
+  private static final String TEST_DATA_DIR = HCatUtil.makePathASafeFileName(System.getProperty("java.io.tmpdir")
+      + File.separator
+      + TestHCatExtendedAcls.class.getCanonicalName()
+      + "-" + System.currentTimeMillis());
+  private static final String TEST_WAREHOUSE_DIR = TEST_DATA_DIR + "/warehouse";
+  private static final String TEXT_DATA_FILE = TEST_DATA_DIR + "/basic.input.data";
+  private static final String DEFAULT_DATABASE_NAME = "test_acls";
+  private static final String TABLE_NAME_PREFIX = "test_acls_";
+
+  private static MiniCluster cluster = null;
+  private static FileSystem clusterFS = null;
+  private static Driver driver;
+  private static Random random = new Random();
+  private static Set<String> dbsCreated = Sets.newHashSet();
+
+  private static ImmutableList<AclEntry> dirSpec = ImmutableList.of(
+      aclEntry(ACCESS, USER, FsAction.ALL),
+      aclEntry(ACCESS, GROUP, FsAction.READ_EXECUTE),
+      aclEntry(ACCESS, OTHER, FsAction.NONE),
+      aclEntry(ACCESS, MASK, FsAction.ALL),
+      aclEntry(ACCESS, USER, "foo", FsAction.READ_EXECUTE),
+      aclEntry(ACCESS, GROUP, "bar", FsAction.ALL),
+      aclEntry(DEFAULT, USER, FsAction.ALL),
+      aclEntry(DEFAULT, GROUP, FsAction.READ_EXECUTE),
+      aclEntry(DEFAULT, OTHER, FsAction.NONE),
+      aclEntry(DEFAULT, MASK, FsAction.ALL),
+      aclEntry(DEFAULT, USER, "foo", FsAction.READ_EXECUTE),
+      aclEntry(DEFAULT, GROUP, "bar", FsAction.ALL));
+
+  private static ImmutableList<AclEntry> restrictedDirSpec =
+      ImmutableList.of(
+          aclEntry(ACCESS, USER, FsAction.ALL),
+          aclEntry(ACCESS, GROUP, FsAction.NONE),
+          aclEntry(ACCESS, OTHER, FsAction.NONE),
+          aclEntry(ACCESS, MASK, FsAction.ALL),
+          aclEntry(ACCESS, USER, "foo", FsAction.READ_EXECUTE),
+          aclEntry(DEFAULT, USER, FsAction.ALL),
+          aclEntry(DEFAULT, GROUP, FsAction.NONE),
+          aclEntry(DEFAULT, OTHER, FsAction.NONE),
+          aclEntry(DEFAULT, MASK, FsAction.ALL),
+          aclEntry(DEFAULT, USER, "foo", FsAction.READ_EXECUTE));
+
+  private static ImmutableList<AclEntry> dirSpec2 = ImmutableList.of(
+      aclEntry(ACCESS, USER, FsAction.ALL),
+      aclEntry(ACCESS, GROUP, FsAction.ALL),
+      aclEntry(ACCESS, OTHER, FsAction.NONE),
+      aclEntry(ACCESS, MASK, FsAction.ALL),
+      aclEntry(ACCESS, USER, "foo", FsAction.ALL),
+      aclEntry(ACCESS, USER, "foo2", FsAction.ALL),
+      aclEntry(ACCESS, GROUP, "bar", FsAction.ALL),
+      aclEntry(ACCESS, GROUP, "bar2", FsAction.ALL),
+      aclEntry(DEFAULT, USER, FsAction.ALL),
+      aclEntry(DEFAULT, GROUP, FsAction.READ_EXECUTE),
+      aclEntry(DEFAULT, OTHER, FsAction.NONE),
+      aclEntry(DEFAULT, MASK, FsAction.ALL),
+      aclEntry(DEFAULT, USER, "foo", FsAction.READ_EXECUTE),
+      aclEntry(DEFAULT, GROUP, "bar", FsAction.ALL));
+
+  public TestHCatExtendedAcls() {
+  }
+
+  @BeforeClass
+  public static void setupAllTests() throws Exception {
+    setUpCluster();
+    setUpLocalFileSystemDirectories();
+    setUpClusterFileSystemDirectories();
+    setUpHiveDriver();
+    createTextData();
+  }
+
+  @Before
+  public void setupSingleTest() throws Exception {
+  }
+
+  @AfterClass
+  public static void tearDownAllTests() throws Exception {
+    for (String db : dbsCreated) {
+      dropDatabase(db);
+    }
+    tearDownCluster();
+    cleanUpLocalFileSystemDirectories();
+  }
+
+  @Test
+  public void testUnpartitionedTable() throws IOException, CommandNeedRetryException, FileNotFoundException {
+    String dbName = DEFAULT_DATABASE_NAME;
+    String tableName = TABLE_NAME_PREFIX + "1";
+
+    Path warehouseDir = new Path(TEST_WAREHOUSE_DIR);
+    Path dbDir = new Path(warehouseDir, dbName + ".db");
+    Path tblDir = new Path(dbDir, tableName);
+    FileSystem fs = cluster.getFileSystem();
+
+    if (! dbsCreated.contains(dbName)) {
+      createDatabase(dbName);
+      dbsCreated.add(dbName);
+    }
+
+    // No extended ACLs on the database directory.
+    fs.setPermission(dbDir, new FsPermission((short) 0750));
+
+    createUnpartitionedTable(dbName, tableName);
+
+    // Extended ACLs on the table directory.
+    fs.setAcl(tblDir, dirSpec);
+
+    FileStatus fstat = fs.getFileStatus(tblDir);
+    List<AclEntry> acls = fs.getAclStatus(tblDir).getEntries();
+
+    verify(tblDir, new FsPermission((short) 0770), fstat.getPermission(), dirSpec, acls, false);
+
+    Properties props = new Properties();
+    props.setProperty("dfs.namenode.acls.enabled", "true");
+
+    PigServer server = getPigServer(props);
+    server.setBatchOn();
+    int i = 0;
+    server.registerQuery("A = LOAD '" + TEXT_DATA_FILE + "' AS (a:int, b:chararray, c:chararray, dt:chararray, ds:chararray);", ++i);
+    server.registerQuery("B = FOREACH A GENERATE a, b, c;", ++i);
+    server.registerQuery("STORE B INTO '" + dbName + "." + tableName + "' USING org.apache.hive.hcatalog.pig.HCatStorer();", ++i);
+    server.executeBatch();
+
+    FileStatus[] fstats = fs.listStatus(tblDir);
+
+    // All files in the table directory should inherit table ACLs.
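+    // (For an unpartitioned table, registerPartitions() above now calls
+    // applyGroupAndPerms(fs, tblPath, ..., acls, ..., true) after the data is
+    // moved in, which is what propagates the table ACLs checked here.)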
+    for (FileStatus fstat1 : fstats) {
+      acls = fs.getAclStatus(fstat1.getPath()).getEntries();
+      verify(fstat1.getPath(), new FsPermission((short) 0770), fstat1.getPermission(), dirSpec, acls, true);
+    }
+
+    for (FileStatus fstat1 : fstats) {
+      fs.delete(fstat1.getPath(), true);
+    }
+
+    fs.setAcl(tblDir, dirSpec2);
+
+    fstat = fs.getFileStatus(tblDir);
+    acls = fs.getAclStatus(tblDir).getEntries();
+
+    verify(tblDir, new FsPermission((short) 0770), fstat.getPermission(), dirSpec2, acls, false);
+
+    server.registerQuery("A = LOAD '" + TEXT_DATA_FILE + "' AS (a:int, b:chararray, c:chararray, dt:chararray, ds:chararray);", ++i);
+    server.registerQuery("B = FOREACH A GENERATE a, b, c;", ++i);
+    server.registerQuery("STORE B INTO '" + dbName + "." + tableName + "' USING org.apache.hive.hcatalog.pig.HCatStorer();", ++i);
+    server.executeBatch();
+
+    fstats = fs.listStatus(tblDir);
+
+    // All files in the table directory should inherit table ACLs.
+    for (FileStatus fstat1 : fstats) {
+      acls = fs.getAclStatus(fstat1.getPath()).getEntries();
+      verify(fstat1.getPath(), new FsPermission((short) 0770), fstat1.getPermission(), dirSpec, acls, true);
+    }
+  }
+
+  @Test
+  public void testPartitionedTableStatic() throws IOException, CommandNeedRetryException, FileNotFoundException {
+    String dbName = DEFAULT_DATABASE_NAME;
+    String tableName = TABLE_NAME_PREFIX + "2";
+
+    Path warehouseDir = new Path(TEST_WAREHOUSE_DIR);
+    Path dbDir = new Path(warehouseDir, dbName + ".db");
+    Path tblDir = new Path(dbDir, tableName);
+    FileSystem fs = cluster.getFileSystem();
+
+    if (! dbsCreated.contains(dbName)) {
+      createDatabase(dbName);
+      dbsCreated.add(dbName);
+    }
+
+    // No extended ACLs on the database directory.
+    fs.setPermission(dbDir, new FsPermission((short) 0750));
+
+    createPartitionedTable(dbName, tableName);
+
+    // Extended ACLs on the table directory.
+    fs.setAcl(tblDir, dirSpec);
+
+    FileStatus fstat = fs.getFileStatus(tblDir);
+    List<AclEntry> acls = fs.getAclStatus(tblDir).getEntries();
+
+    verify(tblDir, new FsPermission((short) 0770), fstat.getPermission(), dirSpec, acls, false);
+
+    Properties props = new Properties();
+    props.setProperty("dfs.namenode.acls.enabled", "true");
+
+    PigServer server = getPigServer(props);
+    server.setBatchOn();
+    int i = 0;
+    server.registerQuery("A = LOAD '" + TEXT_DATA_FILE + "' AS (a:int, b:chararray, c:chararray, dt:chararray, ds:chararray);", ++i);
+    server.registerQuery("B = FOREACH A GENERATE a, b, c;", ++i);
+    server.registerQuery("STORE B INTO '" + dbName + "." + tableName + "' USING org.apache.hive.hcatalog.pig.HCatStorer('dt=1,ds=1');", ++i);
+    server.executeBatch();
+
+    // Partition directories (dt=1/ds=1) and all files in the table directory should inherit table ACLs.
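+    // (On the static-partitioning path, constructPartition() creates each
+    // partition directory level and calls applyGroupAndPerms() with the
+    // default ACL entries computed from the table directory by
+    // getDefaultAclEntries().)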
+ Path partDir = new Path(tblDir, "dt=1"); + fstat = fs.getFileStatus(partDir); + acls = fs.getAclStatus(partDir).getEntries(); + + verify(partDir, new FsPermission((short) 0770), fstat.getPermission(), dirSpec, acls, false); + + partDir = new Path(partDir, "ds=1"); + fstat = fs.getFileStatus(partDir); + acls = fs.getAclStatus(partDir).getEntries(); + + verify(partDir, new FsPermission((short) 0770), fstat.getPermission(), dirSpec, acls, false); + + FileStatus[] fstats = fs.listStatus(partDir); + + Assert.assertTrue("Expected to find files in " + partDir, fstats.length > 0); + + for (FileStatus fstat1 : fstats) { + acls = fs.getAclStatus(fstat1.getPath()).getEntries(); + verify(fstat1.getPath(), new FsPermission((short) 0770), fstat1.getPermission(), dirSpec, acls, true); + } + + // Parent directory (dt=1) has restrictive ACLs compared to table. + // Child directory (dt=1/ds=2) should inherit from table instead of parent. + // NOTE: this behavior is different from hive and should be corrected to inherit from the parent instead. + partDir = new Path(tblDir, "dt=1"); + fs.setAcl(partDir, restrictedDirSpec); + + fstat = fs.getFileStatus(partDir); + acls = fs.getAclStatus(partDir).getEntries(); + + verify(partDir, new FsPermission((short) 0770), fstat.getPermission(), restrictedDirSpec, acls, false); + + server.registerQuery("A = LOAD '" + TEXT_DATA_FILE + "' AS (a:int, b:chararray, c:chararray, dt:chararray, ds:chararray);", ++i); + server.registerQuery("B = FOREACH A GENERATE a, b, c;", ++i); + server.registerQuery("STORE B INTO '" + dbName + "." + tableName + "' USING org.apache.hive.hcatalog.pig.HCatStorer('dt=1,ds=2');", ++i); + server.executeBatch(); + + partDir = new Path(partDir, "ds=2"); + fstat = fs.getFileStatus(partDir); + acls = fs.getAclStatus(partDir).getEntries(); + + verify(partDir, new FsPermission((short) 0770), fstat.getPermission(), dirSpec, acls, false); + + fstats = fs.listStatus(partDir); + + Assert.assertTrue("Expected to find files in " + partDir, fstats.length > 0); + + for (FileStatus fstat1 : fstats) { + acls = fs.getAclStatus(fstat1.getPath()).getEntries(); + verify(fstat1.getPath(), new FsPermission((short) 0770), fstat1.getPermission(), dirSpec, acls, true); + } + + partDir = new Path(tblDir, "dt=1"); + fs.setAcl(partDir, dirSpec2); + + fstat = fs.getFileStatus(partDir); + acls = fs.getAclStatus(partDir).getEntries(); + + verify(partDir, new FsPermission((short) 0770), fstat.getPermission(), dirSpec2, acls, false); + + server.registerQuery("A = LOAD '" + TEXT_DATA_FILE + "' AS (a:int, b:chararray, c:chararray, dt:chararray, ds:chararray);", ++i); + server.registerQuery("B = FOREACH A GENERATE a, b, c;", ++i); + server.registerQuery("STORE B INTO '" + dbName + "." 
+ tableName + "' USING org.apache.hive.hcatalog.pig.HCatStorer('dt=1,ds=3');", ++i); + server.executeBatch(); + + partDir = new Path(partDir, "ds=3"); + fstat = fs.getFileStatus(partDir); + acls = fs.getAclStatus(partDir).getEntries(); + + verify(partDir, new FsPermission((short) 0770), fstat.getPermission(), dirSpec, acls, false); + + fstats = fs.listStatus(partDir); + + Assert.assertTrue("Expected to find files in " + partDir, fstats.length > 0); + + for (FileStatus fstat1 : fstats) { + acls = fs.getAclStatus(fstat1.getPath()).getEntries(); + verify(fstat1.getPath(), new FsPermission((short) 0770), fstat1.getPermission(), dirSpec, acls, true); + } + } + + @Test + public void testPartitionedTableDynamic() throws IOException, CommandNeedRetryException, FileNotFoundException { + String dbName = DEFAULT_DATABASE_NAME; + String tableName = TABLE_NAME_PREFIX + "3"; + + Path warehouseDir = new Path(TEST_WAREHOUSE_DIR); + Path dbDir = new Path(warehouseDir, dbName + ".db"); + Path tblDir = new Path(dbDir, tableName); + FileSystem fs = cluster.getFileSystem(); + + if (! dbsCreated.contains(dbName)) { + createDatabase(dbName); + dbsCreated.add(dbName); + } + + // No extended ACLs on the database directory. + fs.setPermission(dbDir, new FsPermission((short) 0750)); + + createPartitionedTable(dbName, tableName); + + // Extended ACLs on the table directory. + fs.setAcl(tblDir, dirSpec); + + FileStatus fstat = fs.getFileStatus(tblDir); + List acls = fs.getAclStatus(tblDir).getEntries(); + + verify(tblDir, new FsPermission((short) 0770), fstat.getPermission(), dirSpec, acls, false); + + Properties props = new Properties(); + props.setProperty("hive.exec.dynamic.partition", "true"); + props.setProperty("hive.exec.dynamic.partition.mode", "nonstrict"); + props.setProperty("dfs.namenode.acls.enabled", "true"); + + PigServer server = getPigServer(props); + server.setBatchOn(); + int i = 0; + server.registerQuery("A = LOAD '" + TEXT_DATA_FILE + "' AS (a:int, b:chararray, c:chararray, dt:chararray, ds:chararray);", ++i); + server.registerQuery("B = FILTER A BY dt == '1' AND ds == '1';", ++i); + server.registerQuery("C = FOREACH B GENERATE a, b, c, dt, ds;", ++i); + server.registerQuery("STORE C INTO '" + dbName + "." + tableName + "' USING org.apache.hive.hcatalog.pig.HCatStorer();", ++i); + server.executeBatch(); + + + // Currently, partition directories created on the dynamic partitioned code path are incorrect. + // Partition directories (dt=1/ds=1) and all files in the table directory should inherit table ACLs. + Path partDir = new Path(tblDir, "dt=1"); + fstat = fs.getFileStatus(partDir); + acls = fs.getAclStatus(partDir).getEntries(); + + Assert.assertEquals("Expected permission to be 0750", new FsPermission((short) 0750), fstat.getPermission()); + verifyAcls(partDir, dirSpec, acls, false); + //verify(partDir, new FsPermission((short) 0770), fstat.getPermission(), tmpDirSpec, acls, false); + + // This directory is moved from the temporary location, so should match dirSpec. 
+ partDir = new Path(partDir, "ds=1"); + fstat = fs.getFileStatus(partDir); + acls = fs.getAclStatus(partDir).getEntries(); + + verify(partDir, new FsPermission((short) 0770), fstat.getPermission(), dirSpec, acls, false); + + FileStatus[] fstats = fs.listStatus(partDir); + + Assert.assertTrue("Expected to find files in " + partDir, fstats.length > 0); + + for (FileStatus fstat1 : fstats) { + acls = fs.getAclStatus(fstat1.getPath()).getEntries(); + verify(fstat1.getPath(), new FsPermission((short) 0770), fstat1.getPermission(), dirSpec, acls, true); + } + + // Parent directory (dt=1) has restrictive ACLs compared to table. + // Child directory (dt=1/ds=2) should inherit from table instead of parent. + // NOTE: this behavior is different from hive and should be corrected to inherit from the parent instead. + partDir = new Path(tblDir, "dt=1"); + fs.setAcl(partDir, restrictedDirSpec); + + fstat = fs.getFileStatus(partDir); + acls = fs.getAclStatus(partDir).getEntries(); + + verify(partDir, new FsPermission((short) 0770), fstat.getPermission(), restrictedDirSpec, acls, false); + + server.registerQuery("A = LOAD '" + TEXT_DATA_FILE + "' AS (a:int, b:chararray, c:chararray, dt:chararray, ds:chararray);", ++i); + server.registerQuery("B = FILTER A BY dt == '1' AND ds == '2';", ++i); + server.registerQuery("C = FOREACH B GENERATE a, b, c, dt, ds;", ++i); + server.registerQuery("STORE C INTO '" + dbName + "." + tableName + "' USING org.apache.hive.hcatalog.pig.HCatStorer();", ++i); + server.executeBatch(); + + partDir = new Path(partDir, "ds=2"); + fstat = fs.getFileStatus(partDir); + acls = fs.getAclStatus(partDir).getEntries(); + + verify(partDir, new FsPermission((short) 0770), fstat.getPermission(), dirSpec, acls, false); + + fstats = fs.listStatus(partDir); + + Assert.assertTrue("Expected to find files in " + partDir, fstats.length > 0); + + for (FileStatus fstat1 : fstats) { + acls = fs.getAclStatus(fstat1.getPath()).getEntries(); + verify(fstat1.getPath(), new FsPermission((short) 0770), fstat1.getPermission(), dirSpec, acls, true); + } + + partDir = new Path(tblDir, "dt=1"); + fs.setAcl(partDir, dirSpec2); + + fstat = fs.getFileStatus(partDir); + acls = fs.getAclStatus(partDir).getEntries(); + + verify(partDir, new FsPermission((short) 0770), fstat.getPermission(), dirSpec2, acls, false); + + server.registerQuery("A = LOAD '" + TEXT_DATA_FILE + "' AS (a:int, b:chararray, c:chararray, dt:chararray, ds:chararray);", ++i); + server.registerQuery("B = FILTER A BY dt == '1' AND ds == '3';", ++i); + server.registerQuery("C = FOREACH B GENERATE a, b, c, dt, ds;", ++i); + server.registerQuery("STORE C INTO '" + dbName + "." 
+ tableName + "' USING org.apache.hive.hcatalog.pig.HCatStorer();", ++i); + server.executeBatch(); + + partDir = new Path(partDir, "ds=3"); + fstat = fs.getFileStatus(partDir); + acls = fs.getAclStatus(partDir).getEntries(); + + verify(partDir, new FsPermission((short) 0770), fstat.getPermission(), dirSpec, acls, false); + + fstats = fs.listStatus(partDir); + + Assert.assertTrue("Expected to find files in " + partDir, fstats.length > 0); + + for (FileStatus fstat1 : fstats) { + acls = fs.getAclStatus(fstat1.getPath()).getEntries(); + verify(fstat1.getPath(), new FsPermission((short) 0770), fstat1.getPermission(), dirSpec, acls, true); + } + } + + private static void setUpCluster() throws Exception { + Configuration config = new Configuration(); + config.set("dfs.namenode.acls.enabled", "true"); + + cluster = MiniCluster.buildCluster(config); + clusterFS = cluster.getFileSystem(); + } + + private static void tearDownCluster() throws Exception { + cluster.shutDown(); + } + + private static void setUpLocalFileSystemDirectories() { + File f = new File(TEST_DATA_DIR); + if (f.exists()) { + FileUtil.fullyDelete(f); + } + if(!(new File(TEST_DATA_DIR).mkdirs())) { + throw new RuntimeException("Could not create test-directory " + TEST_DATA_DIR + " on local filesystem."); + } + } + + private static void cleanUpLocalFileSystemDirectories() { + File f = new File(TEST_DATA_DIR); + if (f.exists()) { + FileUtil.fullyDelete(f); + } + } + + private static void setUpClusterFileSystemDirectories() throws IOException { + FileSystem clusterFS = cluster.getFileSystem(); + Path warehouseDir = new Path(TEST_WAREHOUSE_DIR); + if (clusterFS.exists(warehouseDir)) { + clusterFS.delete(warehouseDir, true); + } + clusterFS.mkdirs(warehouseDir); + } + + private static void setUpHiveDriver() throws IOException { + HiveConf hiveConf = createHiveConf(); + driver = new Driver(hiveConf); + driver.setMaxRows(1000); + SessionState.start(new CliSessionState(hiveConf)); + } + + private static HiveConf createHiveConf() { + HiveConf hiveConf = new HiveConf(cluster.getConfiguration(), TestHCatExtendedAcls.class); + hiveConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, ""); + hiveConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, ""); + hiveConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false"); + hiveConf.set(HiveConf.ConfVars.METASTOREWAREHOUSE.varname, TEST_WAREHOUSE_DIR); + return hiveConf; + } + + /** + * Create data with schema: + * number \t string \t filler_string + * @throws Exception + */ + private static void createTextData() throws Exception { + int LOOP_SIZE = 100; + ArrayList input = Lists.newArrayListWithExpectedSize((LOOP_SIZE+1) * LOOP_SIZE); + for (int i = 1; i <= LOOP_SIZE; i++) { + String si = i + ""; + String sk = i % 2 == 0 ? "1" : "2"; + for (int j = 1; j <= LOOP_SIZE; j++) { + String sj = "S" + j + "S"; + String sm = Integer.toString((j % 3) + 1); + input.add(si + "\t" + (i*j) + "\t" + sj + "\t" + sk + "\t" + sm); + } + } + + // Add nulls. 
+    for (int i = 0; i < LOOP_SIZE; i++) {
+      String sk = i % 2 == 0 ? "1" : "2";
+      String sm = Integer.toString((i >> 1) + 1);
+      input.add("\t" + "\t" + "S" + "_null_" + i + "_S" + "\t" + sk + "\t" + sm);
+    }
+
+    HcatTestUtils.createTestDataFile(TEXT_DATA_FILE, input.toArray(new String[input.size()]));
+    FileSystem fs = cluster.getFileSystem();
+    fs.copyFromLocalFile(new Path(TEXT_DATA_FILE), new Path(TEXT_DATA_FILE));
+  }
+
+  private void createDatabase(String dbName) throws IOException, CommandNeedRetryException {
+    String cmd = "CREATE DATABASE IF NOT EXISTS " + dbName;
+    executeStatementOnDriver(cmd);
+
+    cmd = "DESC DATABASE " + dbName;
+    executeStatementOnDriver(cmd);
+  }
+
+  private void createUnpartitionedTable(String dbName, String tableName) throws IOException, CommandNeedRetryException {
+    createTable(dbName, tableName, "a INT, b STRING, c STRING", null);
+  }
+
+  private void createPartitionedTable(String dbName, String tableName) throws IOException, CommandNeedRetryException {
+    createTable(dbName, tableName, "a INT, b STRING, c STRING", "dt STRING, ds STRING");
+  }
+
+  private void createTable(String dbName, String tableName, String schema, String partitionedBy)
+      throws IOException, CommandNeedRetryException {
+    String cmd = "CREATE TABLE " + dbName + "." + tableName + "(" + schema + ") ";
+    if ((partitionedBy != null) && (!partitionedBy.trim().isEmpty())) {
+      cmd = cmd + "PARTITIONED BY (" + partitionedBy + ") ";
+    }
+    executeStatementOnDriver(cmd);
+
+    cmd = "DESC FORMATTED " + dbName + "." + tableName;
+    executeStatementOnDriver(cmd);
+  }
+
+  /**
+   * Execute Hive CLI statement
+   * @param cmd arbitrary statement to execute
+   */
+  private void executeStatementOnDriver(String cmd) throws IOException, CommandNeedRetryException {
+    LOG.debug("Executing: " + cmd);
+    CommandProcessorResponse cpr = driver.run(cmd);
+    if(cpr.getResponseCode() != 0) {
+      throw new IOException("Failed to execute \"" + cmd + "\". "
+          + "Driver returned " + cpr.getResponseCode()
+          + " Error: " + cpr.getErrorMessage());
+    }
+  }
+
+  private static void dropDatabase(String dbName) throws IOException, CommandNeedRetryException {
+    driver.run("DROP DATABASE IF EXISTS " + dbName + " CASCADE");
+  }
+
+  private PigServer getPigServer() throws IOException {
+    return getPigServer(null);
+  }
+
+  private PigServer getPigServer(Properties props) throws IOException {
+    if (props != null) {
+      return new PigServer(ExecType.LOCAL, props);
+    } else {
+      return new PigServer(ExecType.LOCAL);
+    }
+  }
+
+  /**
+   * Create a new AclEntry with scope, type and permission (no name).
+   *
+   * @param scope AclEntryScope scope of the ACL entry
+   * @param type AclEntryType ACL entry type
+   * @param permission FsAction set of permissions in the ACL entry
+   * @return new AclEntry
+   */
+  private static AclEntry aclEntry(AclEntryScope scope, AclEntryType type,
+      FsAction permission) {
+    return new AclEntry.Builder().setScope(scope).setType(type)
+        .setPermission(permission).build();
+  }
+
+  /**
+   * Create a new AclEntry with scope, type, name and permission.
+   *
+   * @param scope AclEntryScope scope of the ACL entry
+   * @param type AclEntryType ACL entry type
+   * @param name String optional ACL entry name
+   * @param permission FsAction set of permissions in the ACL entry
+   * @return new AclEntry
+   */
+  private static AclEntry aclEntry(AclEntryScope scope, AclEntryType type,
+      String name, FsAction permission) {
+    return new AclEntry.Builder().setScope(scope).setType(type).setName(name)
+        .setPermission(permission).build();
+  }
+
+  private void verify(Path path, FsPermission expectedPerm, FsPermission actualPerm,
+      List<AclEntry> expectedAcls, List<AclEntry> actualAcls, boolean isFile) {
+    FsAction maskPerm = null;
+
+    LOG.debug("Verify permissions on " + path + " expected=" + expectedPerm + " " + expectedAcls + " actual=" + actualPerm + " " + actualAcls);
+
+    for (AclEntry expected : expectedAcls) {
+      if (expected.getType() == MASK && expected.getScope() == DEFAULT) {
+        maskPerm = expected.getPermission();
+        break;
+      }
+    }
+
+    Assert.assertTrue("Permissions on " + path + " differ: expected=" + expectedPerm + " actual=" + actualPerm,
+        expectedPerm.getUserAction() == actualPerm.getUserAction());
+    Assert.assertTrue("Permissions on " + path + " differ: expected=" + expectedPerm + " actual=" + actualPerm,
+        expectedPerm.getOtherAction() == actualPerm.getOtherAction());
+    Assert.assertTrue("Mask permissions on " + path + " differ: expected=" + maskPerm + " actual=" + actualPerm,
+        maskPerm == actualPerm.getGroupAction());
+    verifyAcls(path, expectedAcls, actualAcls, isFile);
+  }
+
+  private void verifyAcls(Path path, List<AclEntry> expectedAcls, List<AclEntry> actualAcls, boolean isFile) {
+    ArrayList<AclEntry> acls = new ArrayList<AclEntry>(actualAcls);
+
+    for (AclEntry expected : expectedAcls) {
+      if (! isFile && expected.getScope() == DEFAULT) {
+        boolean found = false;
+        for (AclEntry acl : acls) {
+          if (acl.equals(expected)) {
+            acls.remove(acl);
+            found = true;
+            break;
+          }
+        }
+
+        Assert.assertTrue("ACLs on " + path + " differ: " + expected + " expected=" + expectedAcls + " actual=" + actualAcls, found);
+      } else if (expected.getName() != null || expected.getType() == GROUP) {
+        // Check file permissions for non-named ACLs, except for GROUP.
+        if (isFile && expected.getScope() == DEFAULT) {
+          // Files should not have DEFAULT ACLs.
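+          // (HDFS rejects setAcl() requests that carry DEFAULT entries for a
+          // file, so only the derived ACCESS entries are expected on files.)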
+          continue;
+        }
+
+        boolean found = false;
+        for (AclEntry acl : acls) {
+          if (acl.equals(expected)) {
+            acls.remove(acl);
+            found = true;
+            break;
+          }
+        }
+
+        Assert.assertTrue("ACLs on " + path + " differ: " + expected + " expected=" + expectedAcls + " actual=" + actualAcls, found);
+      }
+    }
+
+    Assert.assertTrue("More ACLs on " + path + " than expected: actual=" + acls, acls.size() == 0);
+  }
+}
diff --git itests/hive-unit-hadoop2/src/test/java/org/apache/hadoop/hive/ql/security/TestExtendedAcls.java itests/hive-unit-hadoop2/src/test/java/org/apache/hadoop/hive/ql/security/TestExtendedAcls.java
index b798379..87f7774 100644
--- itests/hive-unit-hadoop2/src/test/java/org/apache/hadoop/hive/ql/security/TestExtendedAcls.java
+++ itests/hive-unit-hadoop2/src/test/java/org/apache/hadoop/hive/ql/security/TestExtendedAcls.java
@@ -18,12 +18,16 @@
 package org.apache.hadoop.hive.ql.security;

 import static org.apache.hadoop.fs.permission.AclEntryScope.ACCESS;
+import static org.apache.hadoop.fs.permission.AclEntryScope.DEFAULT;
 import static org.apache.hadoop.fs.permission.AclEntryType.GROUP;
+import static org.apache.hadoop.fs.permission.AclEntryType.MASK;
 import static org.apache.hadoop.fs.permission.AclEntryType.OTHER;
 import static org.apache.hadoop.fs.permission.AclEntryType.USER;

+import java.util.ArrayList;
 import java.util.List;

+import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.AclEntryScope;
@@ -31,14 +35,15 @@
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
 import org.junit.Assert;
 import org.junit.BeforeClass;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;

 import com.google.common.collect.ImmutableList;
-import com.google.common.collect.Lists;

 public class TestExtendedAcls extends FolderPermissionBase {
+  private static final Logger LOG = LoggerFactory.getLogger(TestExtendedAcls.class);

   @BeforeClass
   public static void setup() throws Exception {
@@ -53,6 +58,24 @@ public static void setup() throws Exception {
       aclEntry(ACCESS, USER, FsAction.ALL),
       aclEntry(ACCESS, GROUP, FsAction.ALL),
       aclEntry(ACCESS, OTHER, FsAction.ALL),
+      aclEntry(ACCESS, MASK, FsAction.ALL),
+      aclEntry(ACCESS, USER, "bar", FsAction.READ_WRITE),
+      aclEntry(ACCESS, USER, "foo", FsAction.READ_EXECUTE),
+      aclEntry(ACCESS, GROUP, "bar", FsAction.READ_WRITE),
+      aclEntry(ACCESS, GROUP, "foo", FsAction.READ_EXECUTE),
+      aclEntry(DEFAULT, USER, FsAction.ALL),
+      aclEntry(DEFAULT, GROUP, FsAction.ALL),
+      aclEntry(DEFAULT, OTHER, FsAction.ALL),
+      aclEntry(DEFAULT, MASK, FsAction.ALL),
+      aclEntry(DEFAULT, USER, "bar", FsAction.READ_WRITE),
+      aclEntry(DEFAULT, USER, "foo", FsAction.READ_EXECUTE),
+      aclEntry(DEFAULT, GROUP, "bar", FsAction.READ_WRITE),
+      aclEntry(DEFAULT, GROUP, "foo", FsAction.READ_EXECUTE));
+
+  private final ImmutableList<AclEntry> aclSpec1ForFile = ImmutableList.of(
+      aclEntry(ACCESS, USER, FsAction.ALL),
+      aclEntry(ACCESS, GROUP, FsAction.ALL),
+      aclEntry(ACCESS, OTHER, FsAction.ALL),
       aclEntry(ACCESS, USER, "bar", FsAction.READ_WRITE),
       aclEntry(ACCESS, USER, "foo", FsAction.READ_EXECUTE),
       aclEntry(ACCESS, GROUP, "bar", FsAction.READ_WRITE),
@@ -62,6 +85,24 @@ public static void setup() throws Exception {
       aclEntry(ACCESS, USER, FsAction.ALL),
       aclEntry(ACCESS, GROUP, FsAction.ALL),
       aclEntry(ACCESS, OTHER, FsAction.READ_EXECUTE),
+      aclEntry(ACCESS, MASK, FsAction.ALL),
+      aclEntry(ACCESS, USER, "bar2", FsAction.READ_WRITE),
+      aclEntry(ACCESS, USER, "foo2", FsAction.READ_EXECUTE),
+      aclEntry(ACCESS, GROUP, "bar2", FsAction.READ),
+      aclEntry(ACCESS, GROUP, "foo2", FsAction.READ_EXECUTE),
+      aclEntry(DEFAULT, USER, FsAction.ALL),
+      aclEntry(DEFAULT, GROUP, FsAction.ALL),
+      aclEntry(DEFAULT, OTHER, FsAction.READ_EXECUTE),
+      aclEntry(DEFAULT, MASK, FsAction.ALL),
+      aclEntry(DEFAULT, USER, "bar2", FsAction.READ_WRITE),
+      aclEntry(DEFAULT, USER, "foo2", FsAction.READ_EXECUTE),
+      aclEntry(DEFAULT, GROUP, "bar2", FsAction.READ),
+      aclEntry(DEFAULT, GROUP, "foo2", FsAction.READ_EXECUTE));
+
+  private final ImmutableList<AclEntry> aclSpec2ForFile = ImmutableList.of(
+      aclEntry(ACCESS, USER, FsAction.ALL),
+      aclEntry(ACCESS, GROUP, FsAction.ALL),
+      aclEntry(ACCESS, OTHER, FsAction.READ_EXECUTE),
       aclEntry(ACCESS, USER, "bar2", FsAction.READ_WRITE),
       aclEntry(ACCESS, USER, "foo2", FsAction.READ_EXECUTE),
       aclEntry(ACCESS, GROUP, "bar2", FsAction.READ),
@@ -69,12 +110,15 @@ public static void setup() throws Exception {

   @Override
   public void setPermission(String locn, int permIndex) throws Exception {
+    Path location = new Path(locn);
+    FileStatus fstat = fs.getFileStatus(location);
+
     switch (permIndex) {
       case 0:
-        setAcl(locn, aclSpec1);
+        fs.setAcl(new Path(locn), fstat.isDirectory() ? aclSpec1 : aclSpec1ForFile);
         break;
       case 1:
-        setAcl(locn, aclSpec2);
+        fs.setAcl(new Path(locn), fstat.isDirectory() ? aclSpec2 : aclSpec2ForFile);
         break;
       default:
         throw new RuntimeException("Only 2 permissions by this test");
@@ -83,20 +127,22 @@ public void setPermission(String locn, int permIndex) throws Exception {

   @Override
   public void verifyPermission(String locn, int permIndex) throws Exception {
+    Path location = new Path(locn);
+    FileStatus fstat = fs.getFileStatus(location);
+    FsPermission perm = fstat.getPermission();
+
     switch (permIndex) {
       case 0:
-        FsPermission perm = fs.getFileStatus(new Path(locn)).getPermission();
         Assert.assertEquals("Location: " + locn, "rwxrwxrwx", String.valueOf(perm));

-        List<AclEntry> actual = getAcl(locn);
-        verifyAcls(aclSpec1, actual);
+        List<AclEntry> actual = fs.getAclStatus(location).getEntries();
+        verifyAcls(location, aclSpec1, actual, fstat.isFile());
         break;
       case 1:
-        perm = fs.getFileStatus(new Path(locn)).getPermission();
         Assert.assertEquals("Location: " + locn, "rwxrwxr-x", String.valueOf(perm));

-        List<AclEntry> acls = getAcl(locn);
-        verifyAcls(aclSpec2, acls);
+        List<AclEntry> acls = fs.getAclStatus(location).getEntries();
+        verifyAcls(location, aclSpec2, acls, fstat.isFile());
         break;
       default:
         throw new RuntimeException("Only 2 permissions by this test: " + permIndex);
@@ -139,28 +185,43 @@ private AclEntry aclEntry(AclEntryScope scope, AclEntryType type,
         .setPermission(permission).build();
   }

-  private void verifyAcls(List<AclEntry> expectedList, List<AclEntry> actualList) {
-    for (AclEntry expected : expectedList) {
-      if (expected.getName() != null) {
-        //the non-named acl's are coming as regular permission, and not as aclEntries.
+  private void verifyAcls(Path path, List<AclEntry> expectedAcls, List<AclEntry> actualAcls, boolean isFile) {
+    LOG.debug("Verify ACLs on " + path + " expected=" + expectedAcls + " actual=" + actualAcls);
+
+    ArrayList<AclEntry> acls = new ArrayList<AclEntry>(actualAcls);
+
+    for (AclEntry expected : expectedAcls) {
+      if (! isFile && expected.getScope() == DEFAULT) {
         boolean found = false;
-        for (AclEntry actual : actualList) {
-          if (actual.equals(expected)) {
+        for (AclEntry acl : acls) {
+          if (acl.equals(expected)) {
+            acls.remove(acl);
             found = true;
+            break;
           }
         }
-        if (!found) {
-          Assert.fail("Following Acl does not have a match: " + expected);
+
+        Assert.assertTrue("ACLs on " + path + " differ: " + expected + " expected=" + expectedAcls + " actual=" + actualAcls, found);
+      } else if (expected.getName() != null || expected.getType() == GROUP) {
+        // Check file permissions for non-named ACLs, except for GROUP.
+        if (isFile && expected.getScope() == DEFAULT) {
+          // Files should not have DEFAULT ACLs.
+          continue;
         }
+
+        boolean found = false;
+        for (AclEntry acl : acls) {
+          if (acl.equals(expected)) {
+            acls.remove(acl);
+            found = true;
+            break;
+          }
+        }
+
+        Assert.assertTrue("ACLs on " + path + " differ: " + expected + " expected=" + expectedAcls + " actual=" + actualAcls, found);
       }
     }
-  }
-
-  private void setAcl(String locn, List<AclEntry> aclSpec) throws Exception {
-    fs.setAcl(new Path(locn), aclSpec);
-  }

-  private List<AclEntry> getAcl(String locn) throws Exception {
-    return fs.getAclStatus(new Path(locn)).getEntries();
+    Assert.assertTrue("More ACLs on " + path + " than expected: actual=" + acls, acls.size() == 0);
   }
 }
diff --git itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/FolderPermissionBase.java itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/FolderPermissionBase.java
index 2ae9cc0..589adde 100644
--- itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/FolderPermissionBase.java
+++ itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/FolderPermissionBase.java
@@ -65,7 +65,6 @@ public boolean accept(Path p){

   public abstract void verifyPermission(String locn, int permIndex) throws Exception;

-
   public void setPermission(String locn) throws Exception {
     setPermission(locn, 0);
   }
@@ -159,9 +158,15 @@ public void testCreateDb() throws Exception {
       verifyPermission(child);
     }

+    ret = driver.run("DROP TABLE " + tableName);
+    Assert.assertEquals(0,ret.getResponseCode());
+
     ret = driver.run("USE default");
     Assert.assertEquals(0,ret.getResponseCode());

+    ret = driver.run("DROP DATABASE " + testDb);
+    Assert.assertEquals(0,ret.getResponseCode());
+
     //cleanup after the test.
fs.delete(warehouseDir, true); fs.mkdirs(warehouseDir); @@ -199,8 +204,14 @@ public void testCreateTable() throws Exception { verifyPermission(child); } + ret = driver.run("DROP TABLE " + tableName); + Assert.assertEquals(0,ret.getResponseCode()); + ret = driver.run("USE default"); Assert.assertEquals(0,ret.getResponseCode()); + + ret = driver.run("DROP DATABASE " + testDb); + Assert.assertEquals(0,ret.getResponseCode()); } @@ -236,6 +247,9 @@ public void testInsertNonPartTable() throws Exception { for (String child : listStatus(tableLoc)) { verifyPermission(child, 1); } + + ret = driver.run("DROP TABLE " + tableName); + Assert.assertEquals(0,ret.getResponseCode()); } @Test @@ -260,18 +274,20 @@ public void testInsertStaticSinglePartition() throws Exception { } //insert overwrite test + //only set the permissions for table, partitions will inherit their permissions from the table's permissions setPermission(warehouseDir + "/" + tableName, 1); - setPermission(warehouseDir + "/" + tableName + "/part1=1", 1); ret = driver.run("insert overwrite table " + tableName + " partition(part1='1') select key,value from mysrc where part1='1' and part2='1'"); Assert.assertEquals(0, ret.getResponseCode()); - verifyPermission(warehouseDir + "/" + tableName, 1); - verifyPermission(warehouseDir + "/" + tableName + "/part1=1", 1); + verifyPermission(warehouseDir + "/" + tableName + "/part1=1", 0); Assert.assertTrue(listStatus(warehouseDir + "/" + tableName + "/part1=1").size() > 0); for (String child : listStatus(warehouseDir + "/" + tableName + "/part1=1")) { - verifyPermission(child, 1); + verifyPermission(child, 0); } + + ret = driver.run("DROP TABLE " + tableName); + Assert.assertEquals(0,ret.getResponseCode()); } @Test @@ -297,21 +313,22 @@ public void testInsertStaticDualPartition() throws Exception { } //insert overwrite test + //only set the permissions for table, partitions will inherit their permissions from the table's permissions setPermission(warehouseDir + "/" + tableName, 1); - setPermission(warehouseDir + "/" + tableName + "/part1=1", 1); - setPermission(warehouseDir + "/" + tableName + "/part1=1/part2=1", 1); ret = driver.run("insert overwrite table " + tableName + " partition(part1='1', part2='1') select key,value from mysrc where part1='1' and part2='1'"); Assert.assertEquals(0, ret.getResponseCode()); - verifyPermission(warehouseDir + "/" + tableName, 1); - verifyPermission(warehouseDir + "/" + tableName + "/part1=1", 1); - verifyPermission(warehouseDir + "/" + tableName + "/part1=1/part2=1", 1); + verifyPermission(warehouseDir + "/" + tableName + "/part1=1", 0); + verifyPermission(warehouseDir + "/" + tableName + "/part1=1/part2=1", 0); Assert.assertTrue(listStatus(warehouseDir + "/" + tableName + "/part1=1/part2=1").size() > 0); for (String child : listStatus(warehouseDir + "/" + tableName + "/part1=1/part2=1")) { - verifyPermission(child, 1); + verifyPermission(child, 0); } + + ret = driver.run("DROP TABLE " + tableName); + Assert.assertEquals(0,ret.getResponseCode()); } @Test @@ -335,7 +352,19 @@ public void testInsertDualDynamicPartitions() throws Exception { ret = driver.run("insert overwrite table " + tableName + " partition (part1,part2) select key,value,part1,part2 from mysrc"); Assert.assertEquals(0, ret.getResponseCode()); - verifyDualPartitionTable(warehouseDir + "/" + tableName, 1); + //cannot verify table's and partition's inherited permissions since they were explicitly set + Assert.assertTrue(listStatus(warehouseDir + "/" + tableName + "/part1=1/part2=1").size() > 0); + for 
(String child : listStatus(warehouseDir + "/" + tableName + "/part1=1/part2=1")) { + verifyPermission(child, 1); + } + + Assert.assertTrue(listStatus(warehouseDir + "/" + tableName + "/part1=2/part2=2").size() > 0); + for (String child : listStatus(warehouseDir + "/" + tableName + "/part1=2/part2=2")) { + verifyPermission(child, 1); + } + + ret = driver.run("DROP TABLE " + tableName); + Assert.assertEquals(0,ret.getResponseCode()); } @Test @@ -351,14 +380,15 @@ public void testInsertSingleDynamicPartition() throws Exception { setPermission(tableLoc, 0); ret = driver.run("insert into table " + tableName + " partition (part1) select key,value,part1 from mysrc"); Assert.assertEquals(0,ret.getResponseCode()); - verifySinglePartition(tableLoc, 0); + verifySinglePartition(tableLoc, 0, true); //Insert overwrite test, with permission set 1. We need reset existing partitions to 1 since the permissions //should be inherited from existing partition setSinglePartition(tableLoc, 1); ret = driver.run("insert overwrite table " + tableName + " partition (part1) select key,value,part1 from mysrc"); Assert.assertEquals(0,ret.getResponseCode()); - verifySinglePartition(tableLoc, 1); + //cannot verify the partition's permissions here since they were explicitly set + verifySinglePartition(tableLoc, 1, false); //delete and re-insert using insert overwrite. There's different code paths insert vs insert overwrite for new tables. ret = driver.run("DROP TABLE " + tableName); @@ -372,7 +402,10 @@ public void testInsertSingleDynamicPartition() throws Exception { ret = driver.run("insert overwrite table " + tableName + " partition (part1) select key,value,part1 from mysrc"); Assert.assertEquals(0, ret.getResponseCode()); - verifySinglePartition(tableLoc, 0); + verifySinglePartition(tableLoc, 0, true); + + ret = driver.run("DROP TABLE " + tableName); + Assert.assertEquals(0,ret.getResponseCode()); } @Test @@ -417,6 +450,12 @@ public void testPartition() throws Exception { verifyPermission(warehouseDir + "/" + tableName2 + "/part1=2", 0); verifyPermission(warehouseDir + "/" + tableName2 + "/part1=2/part2=2", 0); verifyPermission(warehouseDir + "/" + tableName2 + "/part1=2/part2=2/part3=2", 1); + + ret = driver.run("DROP TABLE " + tableName2); + Assert.assertEquals(0,ret.getResponseCode()); + + ret = driver.run("DROP TABLE " + tableName); + Assert.assertEquals(0,ret.getResponseCode()); } @Test @@ -438,6 +477,9 @@ public void testExternalTable() throws Exception { for (String child : listStatus(myLocation)) { verifyPermission(child); } + + ret = driver.run("DROP TABLE " + tableName); + Assert.assertEquals(0,ret.getResponseCode()); } @Test @@ -475,6 +517,9 @@ public void testLoadLocal() throws Exception { verifyPermission(child, 1); } + ret = driver.run("DROP TABLE " + tableName); + Assert.assertEquals(0,ret.getResponseCode()); + //case 2 is partitioned table. tableName = "loadlocalpartition"; @@ -508,6 +553,9 @@ public void testLoadLocal() throws Exception { for (String child : listStatus(partLoc)) { verifyPermission(child, 1); } + + ret = driver.run("DROP TABLE " + tableName); + Assert.assertEquals(0,ret.getResponseCode()); } @Test @@ -549,20 +597,20 @@ public void testLoad() throws Exception { } //case 2 is partitioned table. 
- tableName = "loadpartition"; + String tableName2 = "loadpartition"; - ret = driver.run("CREATE TABLE " + tableName + " (key string, value string) partitioned by (part1 int, part2 int)"); + ret = driver.run("CREATE TABLE " + tableName2 + " (key string, value string) partitioned by (part1 int, part2 int)"); Assert.assertEquals(0,ret.getResponseCode()); - tableLoc = warehouseDir + "/" + tableName; + tableLoc = warehouseDir + "/" + tableName2; assertExistence(tableLoc); //case 2A: load data into partitioned table. setPermission(tableLoc); fs.copyFromLocalFile(dataFilePath, new Path(location)); - ret = driver.run("LOAD DATA INPATH '" + location + "' INTO TABLE " + tableName + " PARTITION (part1='1',part2='1')"); + ret = driver.run("LOAD DATA INPATH '" + location + "' INTO TABLE " + tableName2 + " PARTITION (part1='1',part2='1')"); Assert.assertEquals(0,ret.getResponseCode()); - String partLoc = warehouseDir + "/" + tableName + "/part1=1/part2=1"; + String partLoc = warehouseDir + "/" + tableName2 + "/part1=1/part2=1"; Assert.assertTrue(listStatus(partLoc).size() > 0); for (String child : listStatus(partLoc)) { verifyPermission(child); @@ -578,13 +626,19 @@ public void testLoad() throws Exception { } fs.copyFromLocalFile(dataFilePath, new Path(location)); - ret = driver.run("LOAD DATA INPATH '" + location + "' OVERWRITE INTO TABLE " + tableName + " PARTITION (part1='1',part2='1')"); + ret = driver.run("LOAD DATA INPATH '" + location + "' OVERWRITE INTO TABLE " + tableName2 + " PARTITION (part1='1',part2='1')"); Assert.assertEquals(0,ret.getResponseCode()); Assert.assertTrue(listStatus(tableLoc).size() > 0); for (String child : listStatus(partLoc)) { verifyPermission(child, 1); } + + ret = driver.run("DROP TABLE " + tableName2); + Assert.assertEquals(0,ret.getResponseCode()); + + ret = driver.run("DROP TABLE " + tableName); + Assert.assertEquals(0,ret.getResponseCode()); } @Test @@ -612,8 +666,14 @@ public void testCtas() throws Exception { verifyPermission(child); } + ret = driver.run("DROP TABLE " + tableName); + Assert.assertEquals(0,ret.getResponseCode()); + ret = driver.run("USE default"); Assert.assertEquals(0,ret.getResponseCode()); + + ret = driver.run("DROP DATABASE " + testDb); + Assert.assertEquals(0,ret.getResponseCode()); } @Test @@ -683,6 +743,12 @@ public void testExim() throws Exception { for (String child : listStatus(myLocation + "/part1=2/part2=2")) { verifyPermission(child, 1); } + + ret = driver.run("USE default"); + Assert.assertEquals(0,ret.getResponseCode()); + + ret = driver.run("DROP DATABASE " + testDb + " CASCADE"); + Assert.assertEquals(0,ret.getResponseCode()); } /** @@ -727,6 +793,9 @@ public void testTruncateTable() throws Exception { assertExistence(partition); verifyPermission(partition); + + ret = driver.run("DROP TABLE " + tableName); + Assert.assertEquals(0,ret.getResponseCode()); } private void setSinglePartition(String tableLoc, int index) throws Exception { @@ -734,9 +803,11 @@ private void setSinglePartition(String tableLoc, int index) throws Exception { setPermission(tableLoc + "/part1=2", index); } - private void verifySinglePartition(String tableLoc, int index) throws Exception { - verifyPermission(tableLoc + "/part1=1", index); - verifyPermission(tableLoc + "/part1=2", index); + private void verifySinglePartition(String tableLoc, int index, boolean partitions) throws Exception { + if(partitions) { + verifyPermission(tableLoc + "/part1=1", index); + verifyPermission(tableLoc + "/part1=2", index); + } Assert.assertTrue(listStatus(tableLoc + 
"/part1=1").size() > 0); for (String child : listStatus(tableLoc + "/part1=1")) { @@ -759,7 +830,6 @@ private void setDualPartitionTable(String baseTablePath, int index) throws Excep } private void verifyDualPartitionTable(String baseTablePath, int index) throws Exception { - verifyPermission(baseTablePath, index); verifyPermission(baseTablePath + "/part1=1", index); verifyPermission(baseTablePath + "/part1=1/part2=1", index); diff --git ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java index e320dbf..2522b60 100644 --- ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java +++ ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java @@ -2863,7 +2863,7 @@ private static void copyFiles(final HiveConf conf, final FileSystem destFs, } if (inheritPerms) { - HdfsUtils.setFullFileStatus(conf, fullDestStatus, srcGroup, destFs, destPath, false); + HdfsUtils.setFullFileStatus(conf, fullDestStatus, srcGroup, destFs, destPath, false, false, false); } if (null != newFiles) { newFiles.add(destPath); @@ -3086,7 +3086,7 @@ public Void call() throws Exception { final String group = srcStatus.getGroup(); if(destFs.rename(srcStatus.getPath(), destFile)) { if (inheritPerms) { - HdfsUtils.setFullFileStatus(conf, desiredStatus, group, destFs, destFile, false); + HdfsUtils.setFullFileStatus(conf, desiredStatus, group, destFs, destFile, false, false, false); } } else { throw new IOException("rename for src path: " + srcStatus.getPath() + " to dest path:" diff --git shims/common/src/main/java/org/apache/hadoop/hive/io/HdfsUtils.java shims/common/src/main/java/org/apache/hadoop/hive/io/HdfsUtils.java index 7b6a9bd..3ddb615 100644 --- shims/common/src/main/java/org/apache/hadoop/hive/io/HdfsUtils.java +++ shims/common/src/main/java/org/apache/hadoop/hive/io/HdfsUtils.java @@ -60,26 +60,71 @@ public static Path getFileIdPath( public static void setFullFileStatus(Configuration conf, HdfsUtils.HadoopFileStatus sourceStatus, FileSystem fs, Path target, boolean recursion) throws IOException { - setFullFileStatus(conf, sourceStatus, null, fs, target, recursion); + setFullFileStatus(conf, sourceStatus, null, fs, target, recursion, true, false); } public static void setFullFileStatus(Configuration conf, HdfsUtils.HadoopFileStatus sourceStatus, - String targetGroup, FileSystem fs, Path target, boolean recursion) throws IOException { + String targetGroup, FileSystem fs, Path target, boolean recursion) throws IOException { + // Assume source is a directory for purposes of ACLs. + setFullFileStatus(conf, sourceStatus, targetGroup, fs, target, recursion, true, false); + } + + // This method should be called when dealing with ACLs on files, because default ACLs cannot + // be applied to files; an exception will be thrown. 
diff --git shims/common/src/main/java/org/apache/hadoop/hive/io/HdfsUtils.java shims/common/src/main/java/org/apache/hadoop/hive/io/HdfsUtils.java
index 7b6a9bd..3ddb615 100644
--- shims/common/src/main/java/org/apache/hadoop/hive/io/HdfsUtils.java
+++ shims/common/src/main/java/org/apache/hadoop/hive/io/HdfsUtils.java
@@ -60,26 +60,71 @@ public static Path getFileIdPath(
   public static void setFullFileStatus(Configuration conf, HdfsUtils.HadoopFileStatus sourceStatus,
       FileSystem fs, Path target, boolean recursion) throws IOException {
-    setFullFileStatus(conf, sourceStatus, null, fs, target, recursion);
+    setFullFileStatus(conf, sourceStatus, null, fs, target, recursion, true, false);
   }
 
   public static void setFullFileStatus(Configuration conf, HdfsUtils.HadoopFileStatus sourceStatus,
-    String targetGroup, FileSystem fs, Path target, boolean recursion) throws IOException {
+      String targetGroup, FileSystem fs, Path target, boolean recursion) throws IOException {
+    // Assume source is a directory for purposes of ACLs.
+    setFullFileStatus(conf, sourceStatus, targetGroup, fs, target, recursion, true, false);
+  }
+
+  // This method should be called when dealing with ACLs on files, because default ACLs cannot
+  // be applied to files; an exception will be thrown.
+  public static void setFullFileStatus(Configuration conf, HdfsUtils.HadoopFileStatus sourceStatus,
+      String targetGroup, FileSystem fs, Path target, boolean recursion, boolean isDir,
+      boolean rename) throws IOException {
     FileStatus fStatus = sourceStatus.getFileStatus();
     String group = fStatus.getGroup();
     boolean aclEnabled = Objects.equal(conf.get("dfs.namenode.acls.enabled"), "true");
     FsPermission sourcePerm = fStatus.getPermission();
     List<AclEntry> aclEntries = null;
+    List<AclEntry> defaultAclEntries = null;
     if (aclEnabled) {
       if (sourceStatus.getAclEntries() != null) {
         LOG.trace(sourceStatus.aclStatus.toString());
         aclEntries = new ArrayList<>(sourceStatus.getAclEntries());
-        removeBaseAclEntries(aclEntries);
+        defaultAclEntries = extractBaseDefaultAclEntries(aclEntries);
+        if (defaultAclEntries.size() > 0) {
+          removeBaseAclEntries(aclEntries);
+
+          // remove base acl entries if there is a default acl set for that named user|group
+          List<AclEntry> temp = new ArrayList<AclEntry>();
+          for (AclEntry entry : aclEntries) {
+            if (defaultAclEntries.contains(entry)) {
+              for (AclEntry deleteEntry : aclEntries) {
+                if (deleteEntry.getType().equals(entry.getType())
+                    && deleteEntry.getName().equals(entry.getName())) {
+                  temp.add(deleteEntry);
+                }
+              }
+            }
+          }
+          if (temp.size() > 0) {
+            aclEntries.removeAll(temp);
+          }
+
+          // set the directory's ACCESS ACL entries based on the parent's DEFAULT entries
+          for (AclEntry entry : defaultAclEntries) {
+            if (entry.getName() != null) {
+              aclEntries.add(newAclEntry(AclEntryScope.ACCESS, entry.getType(), entry.getName(),
+                  entry.getPermission()));
+            } else {
+              aclEntries.add(newAclEntry(AclEntryScope.ACCESS, entry.getType(),
+                  entry.getPermission()));
+            }
+          }
 
-        //the ACL api's also expect the tradition user/group/other permission in the form of ACL
-        aclEntries.add(newAclEntry(AclEntryScope.ACCESS, AclEntryType.USER, sourcePerm.getUserAction()));
-        aclEntries.add(newAclEntry(AclEntryScope.ACCESS, AclEntryType.GROUP, sourcePerm.getGroupAction()));
-        aclEntries.add(newAclEntry(AclEntryScope.ACCESS, AclEntryType.OTHER, sourcePerm.getOtherAction()));
+        } else {
+          if (aclEntries.size() == 0) {
+            aclEntries.add(newAclEntry(AclEntryScope.ACCESS, AclEntryType.GROUP,
+                sourcePerm.getGroupAction()));
+          }
+          aclEntries.add(newAclEntry(AclEntryScope.ACCESS, AclEntryType.USER,
+              sourcePerm.getUserAction()));
+          aclEntries.add(newAclEntry(AclEntryScope.ACCESS, AclEntryType.OTHER,
+              sourcePerm.getOtherAction()));
+        }
       }
     }
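To make the entry rewriting above concrete: each DEFAULT-scoped entry inherited from the parent directory is turned into an ACCESS-scoped entry on the target. A small, self-contained sketch using Hadoop's public AclEntry builder API (the class name, main method, and "alice" entry are illustrative only, not part of the patch):

    import java.util.ArrayList;
    import java.util.List;

    import org.apache.hadoop.fs.permission.AclEntry;
    import org.apache.hadoop.fs.permission.AclEntryScope;
    import org.apache.hadoop.fs.permission.AclEntryType;
    import org.apache.hadoop.fs.permission.FsAction;

    public class DefaultToAccessExample {
      public static void main(String[] args) {
        // The parent directory carries a named default entry: default:user:alice:rwx
        AclEntry parentDefault = new AclEntry.Builder()
            .setScope(AclEntryScope.DEFAULT)
            .setType(AclEntryType.USER)
            .setName("alice")
            .setPermission(FsAction.ALL)
            .build();

        // The patch maps each inherited DEFAULT entry to an ACCESS entry on the
        // child, e.g. default:user:alice:rwx -> user:alice:rwx.
        List<AclEntry> childAccess = new ArrayList<>();
        childAccess.add(new AclEntry.Builder()
            .setScope(AclEntryScope.ACCESS)
            .setType(parentDefault.getType())
            .setName(parentDefault.getName())
            .setPermission(parentDefault.getPermission())
            .build());

        System.out.println(childAccess); // e.g. prints [user:alice:rwx]
      }
    }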
"); LOG.debug("The details are: " + e, e); } + } else { + String permission = Integer.toString(sourceStatus.getFileStatus().getPermission().toShort(), 8); + run(fsShell, new String[]{"-chmod", "-R", permission, target.toString()}); } } else { String permission = Integer.toString(sourcePerm.toShort(), 8); @@ -148,6 +201,24 @@ private static AclEntry newAclEntry(AclEntryScope scope, AclEntryType type, .setPermission(permission).build(); } /** + * Create a new AclEntry with scope, type, name and permission. + * + * @param scope + * AclEntryScope scope of the ACL entry + * @param type + * AclEntryType ACL entry type + * @param name + * String optional ACL entry name + * @param permission + * FsAction set of permissions in the ACL entry + * @return AclEntry new AclEntry + */ + private static AclEntry newAclEntry(AclEntryScope scope, AclEntryType type, + String name, FsAction permission) { + return new AclEntry.Builder().setScope(scope).setType(type).setName(name) + .setPermission(permission).build(); + } + /** * Removes basic permission acls (unamed acls) from the list of acl entries * @param entries acl entries to remove from. */ @@ -162,40 +233,59 @@ public boolean apply(AclEntry input) { } }); } + /** + * Extracts the DEFAULT ACL entries from the list of acl entries + * @param entries acl entries to extract from + * @return default unnamed acl entries + */ + private static List extractBaseDefaultAclEntries(List entries) { + List defaultAclEntries = new ArrayList(entries); + Iterables.removeIf(defaultAclEntries, new Predicate() { + @Override + public boolean apply(AclEntry input) { + if (input.getScope().equals(AclEntryScope.ACCESS)) { + return true; + } + return false; + } + }); + return defaultAclEntries; + } private static void run(FsShell shell, String[] command) throws Exception { LOG.debug(ArrayUtils.toString(command)); int retval = shell.run(command); LOG.debug("Return value is :" + retval); } -public static class HadoopFileStatus { - private final FileStatus fileStatus; - private final AclStatus aclStatus; + public static class HadoopFileStatus { - public HadoopFileStatus(Configuration conf, FileSystem fs, Path file) throws IOException { + private final FileStatus fileStatus; + private final AclStatus aclStatus; - FileStatus fileStatus = fs.getFileStatus(file); - AclStatus aclStatus = null; - if (Objects.equal(conf.get("dfs.namenode.acls.enabled"), "true")) { - //Attempt extended Acl operations only if its enabled, but don't fail the operation regardless. - try { - aclStatus = fs.getAclStatus(file); - } catch (Exception e) { - LOG.info("Skipping ACL inheritance: File system for path " + file + " " + - "does not support ACLs but dfs.namenode.acls.enabled is set to true. "); - LOG.debug("The details are: " + e, e); - } - }this.fileStatus = fileStatus; - this.aclStatus = aclStatus; - } + public HadoopFileStatus(Configuration conf, FileSystem fs, Path file) throws IOException { - public FileStatus getFileStatus() { - return fileStatus; - } + FileStatus fileStatus = fs.getFileStatus(file); + AclStatus aclStatus = null; + if (Objects.equal(conf.get("dfs.namenode.acls.enabled"), "true")) { + //Attempt extended Acl operations only if its enabled, but don't fail the operation regardless. + try { + aclStatus = fs.getAclStatus(file); + } catch (Exception e) { + LOG.info("Skipping ACL inheritance: File system for path " + file + " " + + "does not support ACLs but dfs.namenode.acls.enabled is set to true. 
"); + LOG.debug("The details are: " + e, e); + } + }this.fileStatus = fileStatus; + this.aclStatus = aclStatus; + } + + public FileStatus getFileStatus() { + return fileStatus; + } - public List getAclEntries() { - return aclStatus == null ? null : Collections.unmodifiableList(aclStatus.getEntries()); + public List getAclEntries() { + return aclStatus == null ? null : Collections.unmodifiableList(aclStatus.getEntries()); + } } } -}