commit 1864b6dc2454e5a795ca7308d576b5b83ace2a8c Author: Chris Drome Date: Fri Aug 18 04:23:18 2017 +0000 HIVE-13989: Extended ACLs are not handled according to specification diff --git hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/FileOutputCommitterContainer.java hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/FileOutputCommitterContainer.java index 9056f11..4c11181 100644 --- hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/FileOutputCommitterContainer.java +++ hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/FileOutputCommitterContainer.java @@ -32,6 +32,11 @@ import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.permission.AclEntry; +import org.apache.hadoop.fs.permission.AclEntryScope; +import org.apache.hadoop.fs.permission.AclEntryType; +import org.apache.hadoop.fs.permission.AclStatus; +import org.apache.hadoop.fs.permission.FsAction; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hive.common.FileUtils; import org.apache.hadoop.hive.common.StatsSetupConst; @@ -64,6 +69,9 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import com.google.common.base.Predicate; +import com.google.common.collect.Iterables; + /** * Part of the FileOutput*Container classes * See {@link FileOutputFormatContainer} for more information @@ -334,7 +342,7 @@ private Partition constructPartition( String partLocnRoot, String dynPartPath, Map partKVs, HCatSchema outputSchema, Map params, Table table, FileSystem fs, - String grpName, FsPermission perms) throws IOException { + String grpName, FsPermission perms, List acls) throws IOException { Partition partition = new Partition(); partition.setDbName(table.getDbName()); @@ -371,7 +379,7 @@ private Partition constructPartition( for (FieldSchema partKey : table.getPartitionKeys()) { if (i++ != 0) { fs.mkdirs(partPath); // Attempt to make the path in 
case it does not exist before we check - applyGroupAndPerms(fs, partPath, perms, grpName, false); + applyGroupAndPerms(fs, partPath, perms, acls, grpName, false); } partPath = constructPartialPartPath(partPath, partKey.getName().toLowerCase(), partKVs); } @@ -381,7 +389,7 @@ private Partition constructPartition( // Need not bother in case of HDFS as permission is taken care of by setting UMask fs.mkdirs(partPath); // Attempt to make the path in case it does not exist before we check if (!ShimLoader.getHadoopShims().getHCatShim().isFileInHDFS(fs, partPath)) { - applyGroupAndPerms(fs, partPath, perms, grpName, true); + applyGroupAndPerms(fs, partPath, perms, acls, grpName, true); } // Set the location in the StorageDescriptor @@ -400,21 +408,29 @@ private Partition constructPartition( return partition; } - private void applyGroupAndPerms(FileSystem fs, Path dir, FsPermission permission, - String group, boolean recursive) + private void applyGroupAndPerms(FileSystem fs, Path path, FsPermission permission, + List acls, String group, boolean recursive) throws IOException { if(LOG.isDebugEnabled()) { - LOG.debug("applyGroupAndPerms : " + dir + - " perms: " + permission + + LOG.debug("applyGroupAndPerms : " + path + + " perms: " + permission + " acls: " + acls + " group: " + group + " recursive: " + recursive); } - fs.setPermission(dir, permission); + + if (acls != null && ! 
acls.isEmpty()) { + fs.setAcl(path, acls); + } else { + fs.setPermission(path, permission); + } + if (recursive) { - for (FileStatus fileStatus : fs.listStatus(dir)) { + List fileAcls = removeDefaultAcls(acls); + + for (FileStatus fileStatus : fs.listStatus(path)) { if (fileStatus.isDir()) { - applyGroupAndPerms(fs, fileStatus.getPath(), permission, group, true); + applyGroupAndPerms(fs, fileStatus.getPath(), permission, acls, group, true); } else { - fs.setPermission(fileStatus.getPath(), permission); + applyGroupAndPerms(fs, fileStatus.getPath(), permission, fileAcls, group, false); } } } @@ -774,6 +790,23 @@ private void registerPartitions(JobContext context) throws IOException{ try { HiveConf hiveConf = HCatUtil.getHiveConf(conf); client = HCatUtil.getHiveMetastoreClient(hiveConf); + + FileStatus tblStat = fs.getFileStatus(tblPath); + String grpName = tblStat.getGroup(); + FsPermission perms = tblStat.getPermission(); + List acls = null; + + if (conf.getBoolean("dfs.namenode.acls.enabled", false)) { + try { + AclStatus stat = fs.getAclStatus(tblPath); + if (stat != null && ! stat.getEntries().isEmpty()) { + acls = generateChildAcls(stat.getEntries(), perms); + } + } catch (UnsupportedOperationException e) { + LOG.debug("Skipping ACLs", e); + } + } + if (table.getPartitionKeys().size() == 0) { // Move data from temp directory the actual table directory // No metastore operation required. 
@@ -787,27 +820,26 @@ private void registerPartitions(JobContext context) throws IOException{ table.getParameters().remove(StatsSetupConst.COLUMN_STATS_ACCURATE); client.alter_table(table.getDbName(), table.getTableName(), table.getTTable()); } + + applyGroupAndPerms(fs, tblPath, tblStat.getPermission(), acls, grpName, true); + return; } StorerInfo storer = InternalUtil.extractStorerInfo(table.getTTable().getSd(), table.getParameters()); - FileStatus tblStat = fs.getFileStatus(tblPath); - String grpName = tblStat.getGroup(); - FsPermission perms = tblStat.getPermission(); - List partitionsToAdd = new ArrayList(); if (!dynamicPartitioningUsed) { partitionsToAdd.add(constructPartition(context, jobInfo, tblPath.toString(), null, jobInfo.getPartitionValues(), jobInfo.getOutputSchema(), getStorerParameterMap(storer), - table, fs, grpName, perms)); + table, fs, grpName, perms, acls)); } else { for (Entry> entry : partitionsDiscoveredByPath.entrySet()) { partitionsToAdd.add(constructPartition(context, jobInfo, getPartitionRootLocation(entry.getKey(), entry.getValue().size()), entry.getKey(), entry.getValue(), jobInfo.getOutputSchema(), getStorerParameterMap(storer), table, - fs, grpName, perms)); + fs, grpName, perms, acls)); } } @@ -943,7 +975,7 @@ private void registerPartitions(JobContext context) throws IOException{ // Set permissions appropriately for each of the partitions we just created // so as to have their permissions mimic the table permissions for (Partition p : partitionsAdded){ - applyGroupAndPerms(fs,new Path(p.getSd().getLocation()),tblStat.getPermission(),tblStat.getGroup(),true); + applyGroupAndPerms(fs,new Path(p.getSd().getLocation()), tblStat.getPermission(), acls, tblStat.getGroup(), true); } } @@ -1017,5 +1049,84 @@ private void cancelDelegationTokens(JobContext context) throws IOException{ } } + private AclEntry createAclEntry(AclEntryScope scope, AclEntryType type, String name, FsAction perm) { + return new 
AclEntry.Builder().setScope(scope).setType(type) + .setName(name).setPermission(perm).build(); + } + + private List generateChildAcls(List parentAcls, FsPermission parentPerms) { + List acls = null; + List defaults = extractDefaultAcls(parentAcls); + if (! defaults.isEmpty()) { + // Generate child ACLs based on parent DEFAULTs. + acls = new ArrayList(defaults.size() * 2); + + // All ACCESS ACLs are derived from the DEFAULT ACLs of the parent. + // All DEFAULT ACLs of the parent are inherited by the child. + // If DEFAULT ACLs exist, it should include DEFAULTs for USER, OTHER, and MASK. + for (AclEntry acl : defaults) { + // OTHER permissions are not inherited by the child. + if (acl.getType() == AclEntryType.OTHER) { + acls.add(createAclEntry(AclEntryScope.ACCESS, AclEntryType.OTHER, null, FsAction.NONE)); + } else { + acls.add(createAclEntry(AclEntryScope.ACCESS, acl.getType(), acl.getName(), acl.getPermission())); + } + } + + // Inherit all DEFAULT ACLs. + acls.addAll(defaults); + } else { + // Parent has no DEFAULTs, hence child inherits no ACLs. + // Set basic permissions only. + FsAction groupAction = null; + + for (AclEntry acl : parentAcls) { + if (acl.getType() == AclEntryType.GROUP) { + groupAction = acl.getPermission(); + break; + } + } + + acls = new ArrayList(3); + acls.add(createAclEntry(AclEntryScope.ACCESS, AclEntryType.USER, null, parentPerms.getUserAction())); + acls.add(createAclEntry(AclEntryScope.ACCESS, AclEntryType.GROUP, null, groupAction)); + acls.add(createAclEntry(AclEntryScope.ACCESS, AclEntryType.OTHER, null, FsAction.NONE)); + } + + return acls; + } + + private static List extractDefaultAcls(List acls) { + List defaults = new ArrayList(acls); + Iterables.removeIf(defaults, new Predicate() { + @Override + public boolean apply(AclEntry acl) { + if (! 
acl.getScope().equals(AclEntryScope.DEFAULT)) { + return true; + } + return false; + } + }); + + return defaults; + } + + private List removeDefaultAcls(List acls) { + List nonDefaults = null; + if (acls != null) { + nonDefaults = new ArrayList(acls); + Iterables.removeIf(nonDefaults, new Predicate() { + @Override + public boolean apply(AclEntry acl) { + if (acl.getScope().equals(AclEntryScope.DEFAULT)) { + return true; + } + return false; + } + }); + } + + return nonDefaults; + } } diff --git hcatalog/core/src/test/java/org/apache/hive/hcatalog/MiniCluster.java hcatalog/core/src/test/java/org/apache/hive/hcatalog/MiniCluster.java index d9d8251..0064bf6 100644 --- hcatalog/core/src/test/java/org/apache/hive/hcatalog/MiniCluster.java +++ hcatalog/core/src/test/java/org/apache/hive/hcatalog/MiniCluster.java @@ -49,17 +49,15 @@ private JobConf m_conf = null; private final static MiniCluster INSTANCE = new MiniCluster(); - private static boolean isSetup = true; + private static boolean isSetup = false; private MiniCluster() { - setupMiniDfsAndMrClusters(); } - private void setupMiniDfsAndMrClusters() { + private void setupMiniDfsAndMrClusters(Configuration config) { try { final int dataNodes = 1; // There will be 4 data nodes final int taskTrackers = 1; // There will be 4 task tracker nodes - Configuration config = new Configuration(); // Builds and starts the mini dfs and mapreduce clusters if(System.getProperty("hadoop.log.dir") == null) { @@ -97,8 +95,12 @@ private void setupMiniDfsAndMrClusters() { * mapreduce cluster. 
*/ public static MiniCluster buildCluster() { + return buildCluster(new Configuration()); + } + + public static MiniCluster buildCluster(Configuration config) { if (!isSetup) { - INSTANCE.setupMiniDfsAndMrClusters(); + INSTANCE.setupMiniDfsAndMrClusters(config); isSetup = true; } return INSTANCE; @@ -133,6 +135,10 @@ private void shutdownMiniDfsAndMrClusters() { m_mr = null; } + public Configuration getConfiguration() { + return new Configuration(m_conf); + } + public Properties getProperties() { errorIfNotSetup(); Properties properties = new Properties(); diff --git itests/hcatalog-unit/pom.xml itests/hcatalog-unit/pom.xml index e338c5a..b74935e 100644 --- itests/hcatalog-unit/pom.xml +++ itests/hcatalog-unit/pom.xml @@ -101,6 +101,12 @@ ${project.version} test + + org.apache.hive + hive-it-util + ${project.version} + test + commons-io diff --git itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/pig/TestExtendedAcls.java itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/pig/TestExtendedAcls.java new file mode 100644 index 0000000..156665d --- /dev/null +++ itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/pig/TestExtendedAcls.java @@ -0,0 +1,748 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.hive.hcatalog.pig; + +import static org.apache.hadoop.fs.permission.AclEntryScope.ACCESS; +import static org.apache.hadoop.fs.permission.AclEntryScope.DEFAULT; +import static org.apache.hadoop.fs.permission.AclEntryType.GROUP; +import static org.apache.hadoop.fs.permission.AclEntryType.MASK; +import static org.apache.hadoop.fs.permission.AclEntryType.OTHER; +import static org.apache.hadoop.fs.permission.AclEntryType.USER; + +import com.google.common.collect.ImmutableList; +import com.google.common.collect.Lists; +import com.google.common.collect.Sets; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.FileUtil; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.permission.AclEntry; +import org.apache.hadoop.fs.permission.AclEntryScope; +import org.apache.hadoop.fs.permission.AclEntryType; +import org.apache.hadoop.fs.permission.FsAction; +import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.hadoop.hive.cli.CliSessionState; +import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.ql.CommandNeedRetryException; +import org.apache.hadoop.hive.ql.Driver; +import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse; +import org.apache.hadoop.hive.ql.session.SessionState; +import org.apache.hive.hcatalog.HcatTestUtils; +import org.apache.hive.hcatalog.MiniCluster; +import org.apache.hive.hcatalog.common.HCatUtil; +import org.apache.pig.ExecType; +import org.apache.pig.PigServer; +import org.junit.Assert; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.AfterClass; +import org.junit.Test; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.File; +import java.io.FileNotFoundException; +import java.io.IOException; +import 
java.util.ArrayList; +import java.util.List; +import java.util.Properties; +import java.util.Random; +import java.util.Set; + +public class TestExtendedAcls { + private static final Logger LOG = LoggerFactory.getLogger(TestExtendedAcls.class); + private static final String TEST_DATA_DIR = HCatUtil.makePathASafeFileName(System.getProperty("java.io.tmpdir") + + File.separator + + TestExtendedAcls.class.getCanonicalName() + + "-" + System.currentTimeMillis()); + private static final String TEST_WAREHOUSE_DIR = TEST_DATA_DIR + "/warehouse"; + private static final String TEXT_DATA_FILE = TEST_DATA_DIR + "/basic.input.data"; + private static final String DEFAULT_DATABASE_NAME = "test_acls"; + private static final String TABLE_NAME_PREFIX = "test_acls_"; + + private static MiniCluster cluster = null; + private static FileSystem clusterFS = null; + private static Driver driver; + private static Random random = new Random(); + private static Set dbsCreated = Sets.newHashSet(); + + private static ImmutableList dirSpec = ImmutableList.of( + aclEntry(ACCESS, USER, FsAction.ALL), + aclEntry(ACCESS, GROUP, FsAction.READ_EXECUTE), + aclEntry(ACCESS, OTHER, FsAction.READ_EXECUTE), + aclEntry(ACCESS, MASK, FsAction.ALL), + aclEntry(ACCESS, USER, "foo", FsAction.READ_EXECUTE), + aclEntry(ACCESS, GROUP, "bar", FsAction.ALL), + aclEntry(DEFAULT, USER, FsAction.ALL), + aclEntry(DEFAULT, GROUP, FsAction.READ_EXECUTE), + aclEntry(DEFAULT, OTHER, FsAction.READ_EXECUTE), + aclEntry(DEFAULT, MASK, FsAction.ALL), + aclEntry(DEFAULT, USER, "foo", FsAction.READ_EXECUTE), + aclEntry(DEFAULT, GROUP, "bar", FsAction.ALL)); + + private static ImmutableList restrictedDirSpec = ImmutableList.of( + aclEntry(ACCESS, USER, FsAction.ALL), + aclEntry(ACCESS, GROUP, FsAction.NONE), + aclEntry(ACCESS, OTHER, FsAction.READ_EXECUTE), + aclEntry(ACCESS, MASK, FsAction.ALL), + aclEntry(ACCESS, USER, "foo", FsAction.READ_EXECUTE), + aclEntry(DEFAULT, USER, FsAction.ALL), + aclEntry(DEFAULT, GROUP, 
FsAction.NONE), + aclEntry(DEFAULT, OTHER, FsAction.READ_EXECUTE), + aclEntry(DEFAULT, MASK, FsAction.ALL), + aclEntry(DEFAULT, USER, "foo", FsAction.READ_EXECUTE)); + + private static ImmutableList dirSpec2 = ImmutableList.of( + aclEntry(ACCESS, USER, FsAction.ALL), + aclEntry(ACCESS, GROUP, FsAction.ALL), + aclEntry(ACCESS, OTHER, FsAction.READ_EXECUTE), + aclEntry(ACCESS, MASK, FsAction.ALL), + aclEntry(ACCESS, USER, "foo", FsAction.ALL), + aclEntry(ACCESS, USER, "foo2", FsAction.ALL), // No DEFAULT, so child should not inherit. + aclEntry(ACCESS, GROUP, "bar", FsAction.ALL), + aclEntry(ACCESS, GROUP, "bar2", FsAction.ALL), // No DEFAULT, so child should not inherit. + aclEntry(DEFAULT, USER, FsAction.ALL), + aclEntry(DEFAULT, GROUP, FsAction.READ_EXECUTE), + aclEntry(DEFAULT, OTHER, FsAction.READ_EXECUTE), + aclEntry(DEFAULT, MASK, FsAction.ALL), + aclEntry(DEFAULT, USER, "foo", FsAction.READ_EXECUTE), + aclEntry(DEFAULT, GROUP, "bar", FsAction.ALL)); + + private static ImmutableList expectedDirSpec = ImmutableList.of( + aclEntry(ACCESS, USER, FsAction.ALL), + aclEntry(ACCESS, GROUP, FsAction.READ_EXECUTE), + aclEntry(ACCESS, OTHER, FsAction.NONE), + aclEntry(ACCESS, MASK, FsAction.ALL), + aclEntry(ACCESS, USER, "foo", FsAction.READ_EXECUTE), + aclEntry(ACCESS, GROUP, "bar", FsAction.ALL), + aclEntry(DEFAULT, USER, FsAction.ALL), + aclEntry(DEFAULT, GROUP, FsAction.READ_EXECUTE), + aclEntry(DEFAULT, OTHER, FsAction.READ_EXECUTE), + aclEntry(DEFAULT, MASK, FsAction.ALL), + aclEntry(DEFAULT, USER, "foo", FsAction.READ_EXECUTE), + aclEntry(DEFAULT, GROUP, "bar", FsAction.ALL)); + + private static ImmutableList expectedRestrictedDirSpec = ImmutableList.of( + aclEntry(ACCESS, USER, FsAction.ALL), + aclEntry(ACCESS, GROUP, FsAction.NONE), + aclEntry(ACCESS, OTHER, FsAction.NONE), + aclEntry(ACCESS, MASK, FsAction.ALL), + aclEntry(ACCESS, USER, "foo", FsAction.READ_EXECUTE), + aclEntry(DEFAULT, USER, FsAction.ALL), + aclEntry(DEFAULT, GROUP, FsAction.NONE), + 
aclEntry(DEFAULT, OTHER, FsAction.READ_EXECUTE), + aclEntry(DEFAULT, MASK, FsAction.ALL), + aclEntry(DEFAULT, USER, "foo", FsAction.READ_EXECUTE)); + + private static ImmutableList expectedDirSpec2 = ImmutableList.of( + aclEntry(ACCESS, USER, FsAction.ALL), + aclEntry(ACCESS, GROUP, FsAction.ALL), + aclEntry(ACCESS, OTHER, FsAction.NONE), + aclEntry(ACCESS, MASK, FsAction.ALL), + aclEntry(ACCESS, USER, "foo", FsAction.ALL), + aclEntry(ACCESS, GROUP, "bar", FsAction.ALL), + aclEntry(DEFAULT, USER, FsAction.ALL), + aclEntry(DEFAULT, GROUP, FsAction.READ_EXECUTE), + aclEntry(DEFAULT, OTHER, FsAction.READ_EXECUTE), + aclEntry(DEFAULT, MASK, FsAction.ALL), + aclEntry(DEFAULT, USER, "foo", FsAction.READ_EXECUTE), + aclEntry(DEFAULT, GROUP, "bar", FsAction.ALL)); + + public TestExtendedAcls() { + } + + @BeforeClass + public static void setupAllTests() throws Exception { + setUpCluster(); + setUpLocalFileSystemDirectories(); + setUpClusterFileSystemDirectories(); + setUpHiveDriver(); + createTextData(); + } + + @Before + public void setupSingleTest() throws Exception { + } + + @AfterClass + public static void tearDownAllTests() throws Exception { + for (String db : dbsCreated) { + dropDatabase(db); + } + tearDownCluster(); + cleanUpLocalFileSystemDirectories(); + } + + @Test + public void testUnpartitionedTable() throws IOException, CommandNeedRetryException, FileNotFoundException { + String dbName = DEFAULT_DATABASE_NAME; + String tableName = TABLE_NAME_PREFIX + "1"; + + Path warehouseDir = new Path(TEST_WAREHOUSE_DIR); + Path dbDir = new Path(warehouseDir, dbName + ".db"); + Path tblDir = new Path(dbDir, tableName); + FileSystem fs = cluster.getFileSystem(); + + if (! dbsCreated.contains(dbName)) { + createDatabase(dbName); + dbsCreated.add(dbName); + } + + // No extended ACLs on the database directory. + fs.setPermission(dbDir, new FsPermission((short) 0750)); + + createUnpartitionedTable(dbName, tableName); + + // Extended ACLs on the table directory. 
+ fs.setAcl(tblDir, dirSpec); + + FileStatus fstat = fs.getFileStatus(tblDir); + List acls = fs.getAclStatus(tblDir).getEntries(); + + verify(tblDir, new FsPermission((short) 0775), fstat.getPermission(), dirSpec, acls, false); + + Properties props = new Properties(); + props.setProperty("dfs.namenode.acls.enabled", "true"); + + PigServer server = getPigServer(props); + server.setBatchOn(); + int i = 0; + server.registerQuery("A = LOAD '" + TEXT_DATA_FILE + "' AS (a:int, b:chararray, c:chararray, dt:chararray, ds:chararray);", ++i); + server.registerQuery("B = FOREACH A GENERATE a, b, c;", ++i); + server.registerQuery("STORE B INTO '" + dbName + "." + tableName + "' USING org.apache.hive.hcatalog.pig.HCatStorer();", ++i); + server.executeBatch(); + + FileStatus[] fstats = fs.listStatus(tblDir); + + // All files in the table directory should inherit table ACLs. + for (FileStatus fstat1 : fstats) { + acls = fs.getAclStatus(fstat1.getPath()).getEntries(); + verify(fstat1.getPath(), new FsPermission((short) 0770), fstat1.getPermission(), expectedDirSpec, acls, true); + } + + for (FileStatus fstat1 : fstats) { + fs.delete(fstat1.getPath(), true); + } + + fs.setAcl(tblDir, dirSpec2); + + fstat = fs.getFileStatus(tblDir); + acls = fs.getAclStatus(tblDir).getEntries(); + + verify(tblDir, new FsPermission((short) 0775), fstat.getPermission(), dirSpec2, acls, false); + + server.registerQuery("A = LOAD '" + TEXT_DATA_FILE + "' AS (a:int, b:chararray, c:chararray, dt:chararray, ds:chararray);", ++i); + server.registerQuery("B = FOREACH A GENERATE a, b, c;", ++i); + server.registerQuery("STORE B INTO '" + dbName + "." + tableName + "' USING org.apache.hive.hcatalog.pig.HCatStorer();", ++i); + server.executeBatch(); + + fstats = fs.listStatus(tblDir); + + // All files in the table directory should inherit table ACLs. 
+ for (FileStatus fstat1 : fstats) { + acls = fs.getAclStatus(fstat1.getPath()).getEntries(); + verify(fstat1.getPath(), new FsPermission((short) 0770), fstat1.getPermission(), expectedDirSpec, acls, true); + } + } + + @Test + public void testPartitionedTableStatic() throws IOException, CommandNeedRetryException, FileNotFoundException { + String dbName = DEFAULT_DATABASE_NAME; + String tableName = TABLE_NAME_PREFIX + "2"; + + Path warehouseDir = new Path(TEST_WAREHOUSE_DIR); + Path dbDir = new Path(warehouseDir, dbName + ".db"); + Path tblDir = new Path(dbDir, tableName); + FileSystem fs = cluster.getFileSystem(); + + if (! dbsCreated.contains(dbName)) { + createDatabase(dbName); + dbsCreated.add(dbName); + } + + // No extended ACLs on the database directory. + fs.setPermission(dbDir, new FsPermission((short) 0750)); + + createPartitionedTable(dbName, tableName); + + // Extended ACLs on the table directory. + fs.setAcl(tblDir, dirSpec); + + FileStatus fstat = fs.getFileStatus(tblDir); + List acls = fs.getAclStatus(tblDir).getEntries(); + + verify(tblDir, new FsPermission((short) 0775), fstat.getPermission(), dirSpec, acls, false); + + Properties props = new Properties(); + props.setProperty("dfs.namenode.acls.enabled", "true"); + + PigServer server = getPigServer(props); + server.setBatchOn(); + int i = 0; + server.registerQuery("A = LOAD '" + TEXT_DATA_FILE + "' AS (a:int, b:chararray, c:chararray, dt:chararray, ds:chararray);", ++i); + server.registerQuery("B = FOREACH A GENERATE a, b, c;", ++i); + server.registerQuery("STORE B INTO '" + dbName + "." + tableName + "' USING org.apache.hive.hcatalog.pig.HCatStorer('dt=1,ds=1');", ++i); + server.executeBatch(); + + // Partition directories (dt=1/ds=1) and all files in the table directory should inherit table ACLs. 
+ Path partDir = new Path(tblDir, "dt=1"); + fstat = fs.getFileStatus(partDir); + acls = fs.getAclStatus(partDir).getEntries(); + + verify(partDir, new FsPermission((short) 0770), fstat.getPermission(), expectedDirSpec, acls, false); + + partDir = new Path(partDir, "ds=1"); + fstat = fs.getFileStatus(partDir); + acls = fs.getAclStatus(partDir).getEntries(); + + verify(partDir, new FsPermission((short) 0770), fstat.getPermission(), expectedDirSpec, acls, false); + + FileStatus[] fstats = fs.listStatus(partDir); + + Assert.assertTrue("Expected to find files in " + partDir, fstats.length > 0); + + for (FileStatus fstat1 : fstats) { + acls = fs.getAclStatus(fstat1.getPath()).getEntries(); + verify(fstat1.getPath(), new FsPermission((short) 0770), fstat1.getPermission(), expectedDirSpec, acls, true); + } + + // Parent directory (dt=1) has restrictive ACLs compared to table. + // Child directory (dt=1/ds=2) should inherit from table instead of parent. + // NOTE: this behavior is different from hive and should be corrected to inherit from the parent instead. + partDir = new Path(tblDir, "dt=1"); + fs.setAcl(partDir, restrictedDirSpec); + + fstat = fs.getFileStatus(partDir); + acls = fs.getAclStatus(partDir).getEntries(); + + verify(partDir, new FsPermission((short) 0775), fstat.getPermission(), restrictedDirSpec, acls, false); + + server.registerQuery("A = LOAD '" + TEXT_DATA_FILE + "' AS (a:int, b:chararray, c:chararray, dt:chararray, ds:chararray);", ++i); + server.registerQuery("B = FOREACH A GENERATE a, b, c;", ++i); + server.registerQuery("STORE B INTO '" + dbName + "." 
+ tableName + "' USING org.apache.hive.hcatalog.pig.HCatStorer('dt=1,ds=2');", ++i); + server.executeBatch(); + + partDir = new Path(partDir, "ds=2"); + fstat = fs.getFileStatus(partDir); + acls = fs.getAclStatus(partDir).getEntries(); + + verify(partDir, new FsPermission((short) 0770), fstat.getPermission(), expectedDirSpec, acls, false); + + fstats = fs.listStatus(partDir); + + Assert.assertTrue("Expected to find files in " + partDir, fstats.length > 0); + + for (FileStatus fstat1 : fstats) { + acls = fs.getAclStatus(fstat1.getPath()).getEntries(); + verify(fstat1.getPath(), new FsPermission((short) 0770), fstat1.getPermission(), expectedDirSpec, acls, true); + } + + partDir = new Path(tblDir, "dt=1"); + fs.setAcl(partDir, dirSpec2); + + fstat = fs.getFileStatus(partDir); + acls = fs.getAclStatus(partDir).getEntries(); + + verify(partDir, new FsPermission((short) 0775), fstat.getPermission(), dirSpec2, acls, false); + + server.registerQuery("A = LOAD '" + TEXT_DATA_FILE + "' AS (a:int, b:chararray, c:chararray, dt:chararray, ds:chararray);", ++i); + server.registerQuery("B = FOREACH A GENERATE a, b, c;", ++i); + server.registerQuery("STORE B INTO '" + dbName + "." 
+ tableName + "' USING org.apache.hive.hcatalog.pig.HCatStorer('dt=1,ds=3');", ++i); + server.executeBatch(); + + partDir = new Path(partDir, "ds=3"); + fstat = fs.getFileStatus(partDir); + acls = fs.getAclStatus(partDir).getEntries(); + + verify(partDir, new FsPermission((short) 0770), fstat.getPermission(), expectedDirSpec, acls, false); + + fstats = fs.listStatus(partDir); + + Assert.assertTrue("Expected to find files in " + partDir, fstats.length > 0); + + for (FileStatus fstat1 : fstats) { + acls = fs.getAclStatus(fstat1.getPath()).getEntries(); + verify(fstat1.getPath(), new FsPermission((short) 0770), fstat1.getPermission(), expectedDirSpec, acls, true); + } + } + + @Test + public void testPartitionedTableDynamic() throws IOException, CommandNeedRetryException, FileNotFoundException { + String dbName = DEFAULT_DATABASE_NAME; + String tableName = TABLE_NAME_PREFIX + "3"; + + Path warehouseDir = new Path(TEST_WAREHOUSE_DIR); + Path dbDir = new Path(warehouseDir, dbName + ".db"); + Path tblDir = new Path(dbDir, tableName); + FileSystem fs = cluster.getFileSystem(); + + if (! dbsCreated.contains(dbName)) { + createDatabase(dbName); + dbsCreated.add(dbName); + } + + // No extended ACLs on the database directory. + fs.setPermission(dbDir, new FsPermission((short) 0750)); + + createPartitionedTable(dbName, tableName); + + // Extended ACLs on the table directory. 
+ fs.setAcl(tblDir, dirSpec); + + FileStatus fstat = fs.getFileStatus(tblDir); + List acls = fs.getAclStatus(tblDir).getEntries(); + + verify(tblDir, new FsPermission((short) 0775), fstat.getPermission(), dirSpec, acls, false); + + Properties props = new Properties(); + props.setProperty("hive.exec.dynamic.partition", "true"); + props.setProperty("hive.exec.dynamic.partition.mode", "nonstrict"); + props.setProperty("dfs.namenode.acls.enabled", "true"); + + PigServer server = getPigServer(props); + server.setBatchOn(); + int i = 0; + server.registerQuery("A = LOAD '" + TEXT_DATA_FILE + "' AS (a:int, b:chararray, c:chararray, dt:chararray, ds:chararray);", ++i); + server.registerQuery("B = FILTER A BY dt == '1' AND ds == '1';", ++i); + server.registerQuery("C = FOREACH B GENERATE a, b, c, dt, ds;", ++i); + server.registerQuery("STORE C INTO '" + dbName + "." + tableName + "' USING org.apache.hive.hcatalog.pig.HCatStorer();", ++i); + server.executeBatch(); + + + // Currently, partition directories created on the dynamic partitioned code path are incorrect. + // Partition directories (dt=1/ds=1) and all files in the table directory should inherit table ACLs. + Path partDir = new Path(tblDir, "dt=1"); + fstat = fs.getFileStatus(partDir); + acls = fs.getAclStatus(partDir).getEntries(); + + // TODO: Must check this separately because intermediate partition directories are not created properly. + Assert.assertEquals("Expected permission to be 0750", new FsPermission((short) 0755), fstat.getPermission()); + verifyAcls(partDir, dirSpec, acls, false); + //verify(partDir, new FsPermission((short) 0770), fstat.getPermission(), expectedDirSpec, acls, false); + + // This directory is moved from the temporary location, so should match dirSpec. 
+ partDir = new Path(partDir, "ds=1"); + fstat = fs.getFileStatus(partDir); + acls = fs.getAclStatus(partDir).getEntries(); + + verify(partDir, new FsPermission((short) 0770), fstat.getPermission(), expectedDirSpec, acls, false); + + FileStatus[] fstats = fs.listStatus(partDir); + + Assert.assertTrue("Expected to find files in " + partDir, fstats.length > 0); + + for (FileStatus fstat1 : fstats) { + acls = fs.getAclStatus(fstat1.getPath()).getEntries(); + verify(fstat1.getPath(), new FsPermission((short) 0770), fstat1.getPermission(), expectedDirSpec, acls, true); + } + + // Parent directory (dt=1) has restrictive ACLs compared to table. + // Child directory (dt=1/ds=2) should inherit from table instead of parent. + // NOTE: this behavior is different from hive and should be corrected to inherit from the parent instead. + partDir = new Path(tblDir, "dt=1"); + fs.setAcl(partDir, restrictedDirSpec); + + fstat = fs.getFileStatus(partDir); + acls = fs.getAclStatus(partDir).getEntries(); + + verify(partDir, new FsPermission((short) 0775), fstat.getPermission(), restrictedDirSpec, acls, false); + + server.registerQuery("A = LOAD '" + TEXT_DATA_FILE + "' AS (a:int, b:chararray, c:chararray, dt:chararray, ds:chararray);", ++i); + server.registerQuery("B = FILTER A BY dt == '1' AND ds == '2';", ++i); + server.registerQuery("C = FOREACH B GENERATE a, b, c, dt, ds;", ++i); + server.registerQuery("STORE C INTO '" + dbName + "." 
+ tableName + "' USING org.apache.hive.hcatalog.pig.HCatStorer();", ++i); + server.executeBatch(); + + partDir = new Path(partDir, "ds=2"); + fstat = fs.getFileStatus(partDir); + acls = fs.getAclStatus(partDir).getEntries(); + + verify(partDir, new FsPermission((short) 0770), fstat.getPermission(), expectedDirSpec, acls, false); + + fstats = fs.listStatus(partDir); + + Assert.assertTrue("Expected to find files in " + partDir, fstats.length > 0); + + for (FileStatus fstat1 : fstats) { + acls = fs.getAclStatus(fstat1.getPath()).getEntries(); + verify(fstat1.getPath(), new FsPermission((short) 0770), fstat1.getPermission(), expectedDirSpec, acls, true); + } + + partDir = new Path(tblDir, "dt=1"); + fs.setAcl(partDir, dirSpec2); + + fstat = fs.getFileStatus(partDir); + acls = fs.getAclStatus(partDir).getEntries(); + + verify(partDir, new FsPermission((short) 0775), fstat.getPermission(), dirSpec2, acls, false); + + server.registerQuery("A = LOAD '" + TEXT_DATA_FILE + "' AS (a:int, b:chararray, c:chararray, dt:chararray, ds:chararray);", ++i); + server.registerQuery("B = FILTER A BY dt == '1' AND ds == '3';", ++i); + server.registerQuery("C = FOREACH B GENERATE a, b, c, dt, ds;", ++i); + server.registerQuery("STORE C INTO '" + dbName + "." 
+ tableName + "' USING org.apache.hive.hcatalog.pig.HCatStorer();", ++i); + server.executeBatch(); + + partDir = new Path(partDir, "ds=3"); + fstat = fs.getFileStatus(partDir); + acls = fs.getAclStatus(partDir).getEntries(); + + verify(partDir, new FsPermission((short) 0770), fstat.getPermission(), expectedDirSpec, acls, false); + + fstats = fs.listStatus(partDir); + + Assert.assertTrue("Expected to find files in " + partDir, fstats.length > 0); + + for (FileStatus fstat1 : fstats) { + acls = fs.getAclStatus(fstat1.getPath()).getEntries(); + verify(fstat1.getPath(), new FsPermission((short) 0770), fstat1.getPermission(), expectedDirSpec, acls, true); + } + } + + private static void setUpCluster() throws Exception { + Configuration config = new Configuration(); + config.set("dfs.namenode.acls.enabled", "true"); + + cluster = MiniCluster.buildCluster(config); + clusterFS = cluster.getFileSystem(); + } + + private static void tearDownCluster() throws Exception { + cluster.shutDown(); + } + + private static void setUpLocalFileSystemDirectories() { + File f = new File(TEST_DATA_DIR); + if (f.exists()) { + FileUtil.fullyDelete(f); + } + if(!(new File(TEST_DATA_DIR).mkdirs())) { + throw new RuntimeException("Could not create test-directory " + TEST_DATA_DIR + " on local filesystem."); + } + } + + private static void cleanUpLocalFileSystemDirectories() { + File f = new File(TEST_DATA_DIR); + if (f.exists()) { + FileUtil.fullyDelete(f); + } + } + + private static void setUpClusterFileSystemDirectories() throws IOException { + FileSystem clusterFS = cluster.getFileSystem(); + Path warehouseDir = new Path(TEST_WAREHOUSE_DIR); + if (clusterFS.exists(warehouseDir)) { + clusterFS.delete(warehouseDir, true); + } + clusterFS.mkdirs(warehouseDir); + } + + private static void setUpHiveDriver() throws IOException { + HiveConf hiveConf = createHiveConf(); + driver = new Driver(hiveConf); + driver.setMaxRows(1000); + SessionState.start(new CliSessionState(hiveConf)); + } + + private 
static HiveConf createHiveConf() { + HiveConf hiveConf = new HiveConf(cluster.getConfiguration(), TestExtendedAcls.class); + hiveConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, ""); + hiveConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, ""); + hiveConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false"); + hiveConf.set(HiveConf.ConfVars.METASTOREWAREHOUSE.varname, TEST_WAREHOUSE_DIR); + return hiveConf; + } + + /** + * Create data with schema: + * number \t string \t filler_string + * @throws Exception + */ + private static void createTextData() throws Exception { + int LOOP_SIZE = 100; + ArrayList input = Lists.newArrayListWithExpectedSize((LOOP_SIZE+1) * LOOP_SIZE); + for (int i = 1; i <= LOOP_SIZE; i++) { + String si = i + ""; + String sk = i % 2 == 0 ? "1" : "2"; + for (int j = 1; j <= LOOP_SIZE; j++) { + String sj = "S" + j + "S"; + String sm = Integer.toString((j % 3) + 1); + input.add(si + "\t" + (i*j) + "\t" + sj + "\t" + sk + "\t" + sm); + } + } + + // Add nulls. 
+ for (int i=0; i> 1) + 1); + input.add("\t" + "\t" + "S" + "_null_" + i + "_S" + "\t" + sk + "\t" + sm); + } + + HcatTestUtils.createTestDataFile(TEXT_DATA_FILE, input.toArray(new String[input.size()])); + FileSystem fs = cluster.getFileSystem(); + fs.copyFromLocalFile(new Path(TEXT_DATA_FILE), new Path(TEXT_DATA_FILE)); + } + + private void createDatabase(String dbName) throws IOException, CommandNeedRetryException { + String cmd = "CREATE DATABASE IF NOT EXISTS " + dbName; + executeStatementOnDriver(cmd); + + cmd = "DESC DATABASE " + dbName; + executeStatementOnDriver(cmd); + } + + private void createUnpartitionedTable(String dbName, String tableName) throws IOException, CommandNeedRetryException { + createTable(dbName, tableName, "a INT, b STRING, c STRING", null); + } + + private void createPartitionedTable(String dbName, String tableName) throws IOException, CommandNeedRetryException { + createTable(dbName, tableName, "a INT, b STRING, c STRING", "dt STRING, ds STRING"); + } + + private void createTable(String dbName, String tableName, String schema, String partitionedBy) + throws IOException, CommandNeedRetryException { + String cmd = "CREATE TABLE " + dbName + "." + tableName + "(" + schema + ") "; + if ((partitionedBy != null) && (!partitionedBy.trim().isEmpty())) { + cmd = cmd + "PARTITIONED BY (" + partitionedBy + ") "; + } + executeStatementOnDriver(cmd); + + cmd = "DESC FORMATTED " + dbName + "." + tableName; + executeStatementOnDriver(cmd); + } + + /** + * Execute Hive CLI statement + * @param cmd arbitrary statement to execute + */ + private void executeStatementOnDriver(String cmd) throws IOException, CommandNeedRetryException { + LOG.debug("Executing: " + cmd); + CommandProcessorResponse cpr = driver.run(cmd); + if(cpr.getResponseCode() != 0) { + throw new IOException("Failed to execute \"" + cmd + "\". 
" + + "Driver returned " + cpr.getResponseCode() + + " Error: " + cpr.getErrorMessage()); + } + } + + private static void dropDatabase(String dbName) throws IOException, CommandNeedRetryException { + driver.run("DROP DATABASE IF EXISTS " + dbName + " CASCADE"); + } + + private PigServer getPigServer() throws IOException { + return getPigServer(null); + } + + private PigServer getPigServer(Properties props) throws IOException { + if (props != null) { + return new PigServer(ExecType.LOCAL, props); + } else { + return new PigServer(ExecType.LOCAL); + } + } + + /** + * Create a new AclEntry with scope, type and permission (no name). + * + * @param scope AclEntryScope scope of the ACL entry + * @param type AclEntryType ACL entry type + * @param permission FsAction set of permissions in the ACL entry + * @return new AclEntry + */ + private static AclEntry aclEntry(AclEntryScope scope, AclEntryType type, + FsAction permission) { + return new AclEntry.Builder().setScope(scope).setType(type) + .setPermission(permission).build(); + } + + /** + * Create a new AclEntry with scope, type, name and permission. 
+ * + * @param scope AclEntryScope scope of the ACL entry + * @param type AclEntryType ACL entry type + * @param name String optional ACL entry name + * @param permission FsAction set of permissions in the ACL entry + * @return new AclEntry + */ + private static AclEntry aclEntry(AclEntryScope scope, AclEntryType type, + String name, FsAction permission) { + return new AclEntry.Builder().setScope(scope).setType(type).setName(name) + .setPermission(permission).build(); + } + + private void verify(Path path, FsPermission expectedPerm, FsPermission actualPerm, + List expectedAcls, List actualAcls, boolean isFile) { + FsAction maskPerm = null; + + LOG.debug("Verify permissions on " + path + " expected=" + expectedPerm + " " + expectedAcls + " actual=" + actualPerm + " " + actualAcls); + + for (AclEntry expected : expectedAcls) { + if (expected.getType() == MASK && expected.getScope() == DEFAULT) { + maskPerm = expected.getPermission(); + break; + } + } + + Assert.assertTrue("Permissions on " + path + " differ: expected=" + expectedPerm + " actual=" + actualPerm, + expectedPerm.getUserAction() == actualPerm.getUserAction()); + Assert.assertTrue("Permissions on " + path + " differ: expected=" + expectedPerm + " actual=" + actualPerm, + expectedPerm.getOtherAction() == actualPerm.getOtherAction()); + Assert.assertTrue("Mask permissions on " + path + " differ: expected=" + maskPerm + " actual=" + actualPerm, + maskPerm == actualPerm.getGroupAction()); + verifyAcls(path, expectedAcls, actualAcls, isFile); + } + + private void verifyAcls(Path path, List expectedAcls, List actualAcls, boolean isFile) { + ArrayList acls = new ArrayList(actualAcls); + + for (AclEntry expected : expectedAcls) { + if (! 
isFile && expected.getScope() == DEFAULT) { + boolean found = false; + for (AclEntry acl : acls) { + if (acl.equals(expected)) { + acls.remove(acl); + found = true; + break; + } + } + + Assert.assertTrue("ACLs on " + path + " differ: " + expected + " expected=" + expectedAcls + " actual=" + actualAcls, found); + } else if (expected.getName() != null || expected.getType() == GROUP) { + // Check file permissions for non-named ACLs, except for GROUP. + if (isFile && expected.getScope() == DEFAULT) { + // Files should not have DEFAULT ACLs. + continue; + } + + boolean found = false; + for (AclEntry acl : acls) { + if (acl.equals(expected)) { + acls.remove(acl); + found = true; + break; + } + } + + Assert.assertTrue("ACLs on " + path + " differ: " + expected + " expected=" + expectedAcls + " actual=" + actualAcls, found); + } + } + + Assert.assertTrue("More ACLs on " + path + " than expected: actual=" + acls, acls.size() == 0); + } +} diff --git itests/hive-unit-hadoop2/src/test/java/org/apache/hadoop/hive/ql/security/TestExtendedAcls.java itests/hive-unit-hadoop2/src/test/java/org/apache/hadoop/hive/ql/security/TestExtendedAcls.java index b798379..108eb99 100644 --- itests/hive-unit-hadoop2/src/test/java/org/apache/hadoop/hive/ql/security/TestExtendedAcls.java +++ itests/hive-unit-hadoop2/src/test/java/org/apache/hadoop/hive/ql/security/TestExtendedAcls.java @@ -18,13 +18,17 @@ package org.apache.hadoop.hive.ql.security; import static org.apache.hadoop.fs.permission.AclEntryScope.ACCESS; +import static org.apache.hadoop.fs.permission.AclEntryScope.DEFAULT; import static org.apache.hadoop.fs.permission.AclEntryType.GROUP; +import static org.apache.hadoop.fs.permission.AclEntryType.MASK; import static org.apache.hadoop.fs.permission.AclEntryType.OTHER; import static org.apache.hadoop.fs.permission.AclEntryType.USER; +import java.util.ArrayList; import java.util.List; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.FileStatus; import 
org.apache.hadoop.fs.permission.AclEntry; import org.apache.hadoop.fs.permission.AclEntryScope; import org.apache.hadoop.fs.permission.AclEntryType; @@ -34,11 +38,14 @@ import org.apache.hadoop.hive.conf.HiveConf.ConfVars; import org.junit.Assert; import org.junit.BeforeClass; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import com.google.common.collect.ImmutableList; import com.google.common.collect.Lists; public class TestExtendedAcls extends FolderPermissionBase { + private static final Logger LOG = LoggerFactory.getLogger(TestExtendedAcls.class); @BeforeClass public static void setup() throws Exception { @@ -49,32 +56,152 @@ public static void setup() throws Exception { baseSetup(); } - private final ImmutableList aclSpec1 = ImmutableList.of( + // ACLs for setting up directories and files. + private final ImmutableList aclsForDirs1 = ImmutableList.of( aclEntry(ACCESS, USER, FsAction.ALL), aclEntry(ACCESS, GROUP, FsAction.ALL), aclEntry(ACCESS, OTHER, FsAction.ALL), + aclEntry(ACCESS, MASK, FsAction.ALL), + aclEntry(ACCESS, USER, "bar", FsAction.READ_WRITE), + aclEntry(ACCESS, USER, "foo", FsAction.READ_EXECUTE), + aclEntry(ACCESS, GROUP, "bar", FsAction.READ_WRITE), + aclEntry(ACCESS, GROUP, "foo", FsAction.READ_EXECUTE), + aclEntry(DEFAULT, USER, "bar", FsAction.READ_WRITE), + aclEntry(DEFAULT, USER, "foo", FsAction.READ_EXECUTE), + aclEntry(DEFAULT, GROUP, "bar", FsAction.READ_WRITE), + aclEntry(DEFAULT, GROUP, "foo", FsAction.READ_EXECUTE), + aclEntry(DEFAULT, MASK, FsAction.ALL)); + + private final ImmutableList aclsForDirs2 = ImmutableList.of( + aclEntry(ACCESS, USER, FsAction.ALL), + aclEntry(ACCESS, GROUP, FsAction.ALL), + aclEntry(ACCESS, OTHER, FsAction.READ_EXECUTE), + aclEntry(ACCESS, MASK, FsAction.ALL), + aclEntry(ACCESS, USER, "bar2", FsAction.READ_WRITE), + aclEntry(ACCESS, USER, "foo2", FsAction.READ_EXECUTE), + aclEntry(ACCESS, GROUP, "bar2", FsAction.READ), + aclEntry(ACCESS, GROUP, "foo2", FsAction.READ_EXECUTE), + 
aclEntry(DEFAULT, USER, "bar2", FsAction.READ_WRITE), + aclEntry(DEFAULT, USER, "foo2", FsAction.READ_EXECUTE), + aclEntry(DEFAULT, GROUP, "bar2", FsAction.READ_WRITE), + aclEntry(DEFAULT, GROUP, "foo2", FsAction.READ_EXECUTE), + aclEntry(DEFAULT, MASK, FsAction.ALL)); + + private final ImmutableList aclsForFiles1 = ImmutableList.of( + aclEntry(ACCESS, USER, FsAction.ALL), + aclEntry(ACCESS, GROUP, FsAction.ALL), + aclEntry(ACCESS, OTHER, FsAction.ALL), + aclEntry(ACCESS, MASK, FsAction.ALL), aclEntry(ACCESS, USER, "bar", FsAction.READ_WRITE), aclEntry(ACCESS, USER, "foo", FsAction.READ_EXECUTE), aclEntry(ACCESS, GROUP, "bar", FsAction.READ_WRITE), aclEntry(ACCESS, GROUP, "foo", FsAction.READ_EXECUTE)); - private final ImmutableList aclSpec2 = ImmutableList.of( + private final ImmutableList aclsForFiles2 = ImmutableList.of( aclEntry(ACCESS, USER, FsAction.ALL), aclEntry(ACCESS, GROUP, FsAction.ALL), aclEntry(ACCESS, OTHER, FsAction.READ_EXECUTE), + aclEntry(ACCESS, MASK, FsAction.ALL), + aclEntry(ACCESS, USER, "bar2", FsAction.READ_WRITE), + aclEntry(ACCESS, USER, "foo2", FsAction.READ_EXECUTE), + aclEntry(ACCESS, GROUP, "bar2", FsAction.READ_WRITE), + aclEntry(ACCESS, GROUP, "foo2", FsAction.READ_EXECUTE)); + + // ACLs for verifying directories and files. 
+ private final ImmutableList aclDirSpec1 = ImmutableList.of( + aclEntry(ACCESS, GROUP, FsAction.ALL), + aclEntry(ACCESS, USER, "bar", FsAction.READ_WRITE), + aclEntry(ACCESS, USER, "foo", FsAction.READ_EXECUTE), + aclEntry(ACCESS, GROUP, "bar", FsAction.READ_WRITE), + aclEntry(ACCESS, GROUP, "foo", FsAction.READ_EXECUTE), + aclEntry(DEFAULT, USER, FsAction.ALL), + aclEntry(DEFAULT, GROUP, FsAction.ALL), + aclEntry(DEFAULT, OTHER, FsAction.ALL), + aclEntry(DEFAULT, USER, "bar", FsAction.READ_WRITE), + aclEntry(DEFAULT, USER, "foo", FsAction.READ_EXECUTE), + aclEntry(DEFAULT, GROUP, "bar", FsAction.READ_WRITE), + aclEntry(DEFAULT, GROUP, "foo", FsAction.READ_EXECUTE), + aclEntry(DEFAULT, MASK, FsAction.ALL)); + + private final ImmutableList aclDirSpec2 = ImmutableList.of( + aclEntry(ACCESS, GROUP, FsAction.ALL), aclEntry(ACCESS, USER, "bar2", FsAction.READ_WRITE), aclEntry(ACCESS, USER, "foo2", FsAction.READ_EXECUTE), aclEntry(ACCESS, GROUP, "bar2", FsAction.READ), + aclEntry(ACCESS, GROUP, "foo2", FsAction.READ_EXECUTE), + aclEntry(DEFAULT, USER, FsAction.ALL), + aclEntry(DEFAULT, GROUP, FsAction.ALL), + aclEntry(DEFAULT, OTHER, FsAction.READ_EXECUTE), + aclEntry(DEFAULT, USER, "bar2", FsAction.READ_WRITE), + aclEntry(DEFAULT, USER, "foo2", FsAction.READ_EXECUTE), + aclEntry(DEFAULT, GROUP, "bar2", FsAction.READ_WRITE), + aclEntry(DEFAULT, GROUP, "foo2", FsAction.READ_EXECUTE), + aclEntry(DEFAULT, MASK, FsAction.ALL)); + + private final FsPermission perm1 = new FsPermission((short) 0777); + private final FsPermission perm2 = new FsPermission((short) 0775); + + private final ImmutableList aclFileSpec1 = ImmutableList.of( + aclEntry(ACCESS, GROUP, FsAction.ALL), + aclEntry(ACCESS, USER, "bar", FsAction.READ_WRITE), + aclEntry(ACCESS, USER, "foo", FsAction.READ_EXECUTE), + aclEntry(ACCESS, GROUP, "bar", FsAction.READ_WRITE), + aclEntry(ACCESS, GROUP, "foo", FsAction.READ_EXECUTE)); + + private final ImmutableList aclFileSpec2 = ImmutableList.of( + aclEntry(ACCESS, 
GROUP, FsAction.ALL), + aclEntry(ACCESS, USER, "bar2", FsAction.READ_WRITE), + aclEntry(ACCESS, USER, "foo2", FsAction.READ_EXECUTE), + aclEntry(ACCESS, GROUP, "bar2", FsAction.READ_WRITE), aclEntry(ACCESS, GROUP, "foo2", FsAction.READ_EXECUTE)); + private final FsPermission permFile1 = new FsPermission((short) 0770); + private final FsPermission permFile2 = new FsPermission((short) 0770); + + private final ImmutableList aclInheritedSpec1 = ImmutableList.of( + aclEntry(ACCESS, GROUP, FsAction.ALL), + aclEntry(ACCESS, USER, "bar", FsAction.READ_WRITE), + aclEntry(ACCESS, USER, "foo", FsAction.READ_EXECUTE), + aclEntry(ACCESS, GROUP, "bar", FsAction.READ_WRITE), + aclEntry(ACCESS, GROUP, "foo", FsAction.READ_EXECUTE), + aclEntry(DEFAULT, USER, FsAction.ALL), + aclEntry(DEFAULT, GROUP, FsAction.ALL), + aclEntry(DEFAULT, OTHER, FsAction.ALL), + aclEntry(DEFAULT, MASK, FsAction.ALL), + aclEntry(DEFAULT, USER, "bar", FsAction.READ_WRITE), + aclEntry(DEFAULT, USER, "foo", FsAction.READ_EXECUTE), + aclEntry(DEFAULT, GROUP, "bar", FsAction.READ_WRITE), + aclEntry(DEFAULT, GROUP, "foo", FsAction.READ_EXECUTE)); + + private final ImmutableList aclInheritedSpec2 = ImmutableList.of( + aclEntry(ACCESS, GROUP, FsAction.ALL), + aclEntry(ACCESS, USER, "bar2", FsAction.READ_WRITE), + aclEntry(ACCESS, USER, "foo2", FsAction.READ_EXECUTE), + aclEntry(ACCESS, GROUP, "bar2", FsAction.READ_WRITE), + aclEntry(ACCESS, GROUP, "foo2", FsAction.READ_EXECUTE), + aclEntry(DEFAULT, USER, "bar2", FsAction.READ_WRITE), + aclEntry(DEFAULT, USER, FsAction.ALL), + aclEntry(DEFAULT, GROUP, FsAction.ALL), + aclEntry(DEFAULT, OTHER, FsAction.READ_EXECUTE), + aclEntry(DEFAULT, MASK, FsAction.ALL), + aclEntry(DEFAULT, USER, "foo2", FsAction.READ_EXECUTE), + aclEntry(DEFAULT, GROUP, "bar2", FsAction.READ_WRITE), + aclEntry(DEFAULT, GROUP, "foo2", FsAction.READ_EXECUTE)); + + private final FsPermission permInherited1 = new FsPermission((short) 0770); + private final FsPermission permInherited2 = new 
FsPermission((short) 0770); + @Override public void setPermission(String locn, int permIndex) throws Exception { + Path path = new Path(locn); + FileStatus fstat = fs.getFileStatus(path); + switch (permIndex) { case 0: - setAcl(locn, aclSpec1); + fs.setAcl(path, fstat.isDir() ? aclsForDirs1 : aclsForFiles1); break; case 1: - setAcl(locn, aclSpec2); + fs.setAcl(path, fstat.isDir() ? aclsForDirs2 : aclsForFiles2); break; default: throw new RuntimeException("Only 2 permissions by this test"); @@ -83,20 +210,50 @@ public void setPermission(String locn, int permIndex) throws Exception { @Override public void verifyPermission(String locn, int permIndex) throws Exception { + Path path = new Path(locn); + FileStatus fstat = fs.getFileStatus(path); + FsPermission perm = fstat.getPermission(); + List actual = null; + + switch (permIndex) { + case 0: + actual = fs.getAclStatus(path).getEntries(); + verify(path, perm1, perm, aclDirSpec1, actual); + break; + case 1: + actual = fs.getAclStatus(path).getEntries(); + verify(path, perm2, perm, aclDirSpec2, actual); + break; + default: + throw new RuntimeException("Only 2 permissions by this test: " + permIndex); + } + } + + @Override + public void verifyInheritedPermission(String locn, int permIndex) throws Exception { + Path path = new Path(locn); + FileStatus fstat = fs.getFileStatus(path); + FsPermission perm = fstat.getPermission(); + List acls = fs.getAclStatus(path).getEntries(); + FsPermission expectedPerm = null; + List expectedAcls = null; + switch (permIndex) { case 0: - FsPermission perm = fs.getFileStatus(new Path(locn)).getPermission(); - Assert.assertEquals("Location: " + locn, "rwxrwxrwx", String.valueOf(perm)); + //Assert.assertEquals("Location: " + locn, "rwxrwxrwx", String.valueOf(perm)); + + expectedPerm = fstat.isFile() ? permFile1 : permInherited1; + expectedAcls = fstat.isFile() ? 
aclFileSpec1 : aclInheritedSpec1; - List actual = getAcl(locn); - verifyAcls(aclSpec1, actual); + verify(path, expectedPerm, perm, expectedAcls, acls); break; case 1: - perm = fs.getFileStatus(new Path(locn)).getPermission(); - Assert.assertEquals("Location: " + locn, "rwxrwxr-x", String.valueOf(perm)); + //Assert.assertEquals("Location: " + locn, "rwxrwxr-x", String.valueOf(perm)); - List acls = getAcl(locn); - verifyAcls(aclSpec2, acls); + expectedPerm = fstat.isFile() ? permFile2 : permInherited2; + expectedAcls = fstat.isFile() ? aclFileSpec2 : aclInheritedSpec2; + + verify(path, expectedPerm, perm, expectedAcls, acls); break; default: throw new RuntimeException("Only 2 permissions by this test: " + permIndex); @@ -139,25 +296,42 @@ private AclEntry aclEntry(AclEntryScope scope, AclEntryType type, .setPermission(permission).build(); } - private void verifyAcls(List expectedList, List actualList) { - for (AclEntry expected : expectedList) { - if (expected.getName() != null) { - //the non-named acl's are coming as regular permission, and not as aclEntries. 
- boolean found = false; - for (AclEntry actual : actualList) { - if (actual.equals(expected)) { - found = true; - } - } - if (!found) { - Assert.fail("Following Acl does not have a match: " + expected); + private void verify(Path path, FsPermission expectedPerm, FsPermission actualPerm, + List expectedAcls, List actualAcls) { + verifyPermissions(path, expectedPerm, actualPerm); + verifyAcls(path, expectedAcls, actualAcls); + } + + private void verifyPermissions(Path path, FsPermission expected, FsPermission actual) { + LOG.debug("Verify permissions on " + path + " expected=" + expected + " actual=" + actual); + + Assert.assertTrue("User permissions on " + path + " differ: expected=" + expected + " actual=" + actual, + expected.getUserAction() == actual.getUserAction()); + Assert.assertTrue("Group/Mask permissions on " + path + " differ: expected=" + expected + " actual=" + actual, + expected.getGroupAction() == actual.getGroupAction()); + Assert.assertTrue("Other permissions on " + path + " differ: expected=" + expected + " actual=" + actual, + expected.getOtherAction() == actual.getOtherAction()); + } + + private void verifyAcls(Path path, List expected, List actual) { + LOG.debug("Verify ACLs on " + path + " expected=" + expected + " actual=" + actual); + + ArrayList acls = new ArrayList(actual); + + for (AclEntry expectedAcl : expected) { + boolean found = false; + for (AclEntry acl : acls) { + if (acl.equals(expectedAcl)) { + acls.remove(acl); + found = true; + break; } } + + Assert.assertTrue("ACLs on " + path + " differ: " + expectedAcl + " expected=" + expected + " actual=" + actual, found); } - } - private void setAcl(String locn, List aclSpec) throws Exception { - fs.setAcl(new Path(locn), aclSpec); + Assert.assertTrue("ACLs on " + path + " are more than expected: expected=" + expected + " actual=" + actual, acls.size() == 0); } private List getAcl(String locn) throws Exception { diff --git 
itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/FolderPermissionBase.java itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/FolderPermissionBase.java index 2ae9cc0..bf6b7e1 100644 --- itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/FolderPermissionBase.java +++ itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/FolderPermissionBase.java @@ -60,11 +60,11 @@ public boolean accept(Path p){ } }; - public abstract void setPermission(String locn, int permIndex) throws Exception; public abstract void verifyPermission(String locn, int permIndex) throws Exception; + public abstract void verifyInheritedPermission(String locn, int permIndex) throws Exception; public void setPermission(String locn) throws Exception { setPermission(locn, 0); @@ -74,6 +74,9 @@ public void verifyPermission(String locn) throws Exception { verifyPermission(locn, 0); } + public void verifyInheritedPermission(String locn) throws Exception { + verifyInheritedPermission(locn, 0); + } public static void baseSetup() throws Exception { MiniDFSShim dfs = ShimLoader.getHadoopShims().getMiniDfs(conf, 4, true, null); @@ -138,7 +141,7 @@ public void testCreateDb() throws Exception { Assert.assertEquals(0,ret.getResponseCode()); assertExistence(warehouseDir + "/" + testDb + ".db"); - verifyPermission(warehouseDir + "/" + testDb + ".db"); + verifyInheritedPermission(warehouseDir + "/" + testDb + ".db"); ret = driver.run("USE " + testDb); Assert.assertEquals(0,ret.getResponseCode()); @@ -146,22 +149,28 @@ public void testCreateDb() throws Exception { ret = driver.run("CREATE TABLE " + tableName + " (key string, value string)"); Assert.assertEquals(0,ret.getResponseCode()); - verifyPermission(warehouseDir + "/" + testDb + ".db/" + tableName); + verifyInheritedPermission(warehouseDir + "/" + testDb + ".db/" + tableName); ret = driver.run("insert into table " + tableName + " select key,value from default.mysrc"); 
Assert.assertEquals(0,ret.getResponseCode()); assertExistence(warehouseDir + "/" + testDb + ".db/" + tableName); - verifyPermission(warehouseDir + "/" + testDb + ".db/" + tableName); + verifyInheritedPermission(warehouseDir + "/" + testDb + ".db/" + tableName); Assert.assertTrue(listStatus(warehouseDir + "/" + testDb + ".db/" + tableName).size() > 0); for (String child : listStatus(warehouseDir + "/" + testDb + ".db/" + tableName)) { - verifyPermission(child); + verifyInheritedPermission(child); } + ret = driver.run("DROP TABLE " + tableName); + Assert.assertEquals(0,ret.getResponseCode()); + ret = driver.run("USE default"); Assert.assertEquals(0,ret.getResponseCode()); + ret = driver.run("DROP DATABASE " + testDb); + Assert.assertEquals(0,ret.getResponseCode()); + //cleanup after the test. fs.delete(warehouseDir, true); fs.mkdirs(warehouseDir); @@ -186,21 +195,27 @@ public void testCreateTable() throws Exception { ret = driver.run("CREATE TABLE " + tableName + " (key string, value string)"); Assert.assertEquals(0,ret.getResponseCode()); - verifyPermission(warehouseDir + "/" + testDb + ".db/" + tableName); + verifyInheritedPermission(warehouseDir + "/" + testDb + ".db/" + tableName); ret = driver.run("insert into table " + tableName + " select key,value from default.mysrc"); Assert.assertEquals(0,ret.getResponseCode()); assertExistence(warehouseDir + "/" + testDb + ".db/" + tableName); - verifyPermission(warehouseDir + "/" + testDb + ".db/" + tableName); + verifyInheritedPermission(warehouseDir + "/" + testDb + ".db/" + tableName); Assert.assertTrue(listStatus(warehouseDir + "/" + testDb + ".db/" + tableName).size() > 0); for (String child : listStatus(warehouseDir + "/" + testDb + ".db/" + tableName)) { - verifyPermission(child); + verifyInheritedPermission(child); } + ret = driver.run("DROP TABLE " + tableName); + Assert.assertEquals(0,ret.getResponseCode()); + ret = driver.run("USE default"); Assert.assertEquals(0,ret.getResponseCode()); + + ret = 
driver.run("DROP DATABASE " + testDb); + Assert.assertEquals(0,ret.getResponseCode()); } @@ -223,7 +238,7 @@ public void testInsertNonPartTable() throws Exception { verifyPermission(warehouseDir + "/" + tableName); Assert.assertTrue(listStatus(tableLoc).size() > 0); for (String child : listStatus(tableLoc)) { - verifyPermission(child); + verifyInheritedPermission(child); } //case1B: insert overwrite non-partitioned-table @@ -234,8 +249,11 @@ public void testInsertNonPartTable() throws Exception { verifyPermission(warehouseDir + "/" + tableName, 1); Assert.assertTrue(listStatus(tableLoc).size() > 0); for (String child : listStatus(tableLoc)) { - verifyPermission(child, 1); + verifyInheritedPermission(child, 1); } + + ret = driver.run("DROP TABLE " + tableName); + Assert.assertEquals(0,ret.getResponseCode()); } @Test @@ -252,11 +270,11 @@ public void testInsertStaticSinglePartition() throws Exception { Assert.assertEquals(0, ret.getResponseCode()); verifyPermission(warehouseDir + "/" + tableName); - verifyPermission(warehouseDir + "/" + tableName + "/part1=1"); + verifyInheritedPermission(warehouseDir + "/" + tableName + "/part1=1"); Assert.assertTrue(listStatus(warehouseDir + "/" + tableName + "/part1=1").size() > 0); for (String child : listStatus(warehouseDir + "/" + tableName + "/part1=1")) { - verifyPermission(child); + verifyInheritedPermission(child); } //insert overwrite test @@ -270,8 +288,11 @@ public void testInsertStaticSinglePartition() throws Exception { Assert.assertTrue(listStatus(warehouseDir + "/" + tableName + "/part1=1").size() > 0); for (String child : listStatus(warehouseDir + "/" + tableName + "/part1=1")) { - verifyPermission(child, 1); + verifyInheritedPermission(child, 1); } + + ret = driver.run("DROP TABLE " + tableName); + Assert.assertEquals(0,ret.getResponseCode()); } @Test @@ -288,12 +309,12 @@ public void testInsertStaticDualPartition() throws Exception { Assert.assertEquals(0, ret.getResponseCode()); verifyPermission(warehouseDir + 
"/" + tableName); - verifyPermission(warehouseDir + "/" + tableName + "/part1=1"); - verifyPermission(warehouseDir + "/" + tableName + "/part1=1/part2=1"); + verifyInheritedPermission(warehouseDir + "/" + tableName + "/part1=1"); + verifyInheritedPermission(warehouseDir + "/" + tableName + "/part1=1/part2=1"); Assert.assertTrue(listStatus(warehouseDir + "/" + tableName + "/part1=1/part2=1").size() > 0); for (String child : listStatus(warehouseDir + "/" + tableName + "/part1=1/part2=1")) { - verifyPermission(child); + verifyInheritedPermission(child); } //insert overwrite test @@ -310,8 +331,11 @@ public void testInsertStaticDualPartition() throws Exception { Assert.assertTrue(listStatus(warehouseDir + "/" + tableName + "/part1=1/part2=1").size() > 0); for (String child : listStatus(warehouseDir + "/" + tableName + "/part1=1/part2=1")) { - verifyPermission(child, 1); + verifyInheritedPermission(child, 1); } + + ret = driver.run("DROP TABLE " + tableName); + Assert.assertEquals(0,ret.getResponseCode()); } @Test @@ -327,7 +351,7 @@ public void testInsertDualDynamicPartitions() throws Exception { ret = driver.run("insert into table " + tableName + " partition (part1,part2) select key,value,part1,part2 from mysrc"); Assert.assertEquals(0, ret.getResponseCode()); - verifyDualPartitionTable(warehouseDir + "/" + tableName, 0); + verifyDualPartitionTable(warehouseDir + "/" + tableName, 0, true); //Insert overwrite test, with permission set 1. 
We need reset existing partitions to 1 since the permissions //should be inherited from existing partition @@ -335,7 +359,10 @@ public void testInsertDualDynamicPartitions() throws Exception { ret = driver.run("insert overwrite table " + tableName + " partition (part1,part2) select key,value,part1,part2 from mysrc"); Assert.assertEquals(0, ret.getResponseCode()); - verifyDualPartitionTable(warehouseDir + "/" + tableName, 1); + verifyDualPartitionTable(warehouseDir + "/" + tableName, 1, false); + + ret = driver.run("DROP TABLE " + tableName); + Assert.assertEquals(0,ret.getResponseCode()); } @Test @@ -358,6 +385,7 @@ public void testInsertSingleDynamicPartition() throws Exception { setSinglePartition(tableLoc, 1); ret = driver.run("insert overwrite table " + tableName + " partition (part1) select key,value,part1 from mysrc"); Assert.assertEquals(0,ret.getResponseCode()); + // NOTE: OVERWRITE deletes partition directory, thus creating a new partition inheriting the previously set permissions. verifySinglePartition(tableLoc, 1); //delete and re-insert using insert overwrite. There's different code paths insert vs insert overwrite for new tables. 
@@ -373,6 +401,9 @@ public void testInsertSingleDynamicPartition() throws Exception { Assert.assertEquals(0, ret.getResponseCode()); verifySinglePartition(tableLoc, 0); + + ret = driver.run("DROP TABLE " + tableName); + Assert.assertEquals(0,ret.getResponseCode()); } @Test @@ -394,13 +425,13 @@ public void testPartition() throws Exception { ret = driver.run("alter table " + tableName + " partition (part1='1',part2='1',part3='1') rename to partition (part1='2',part2='2',part3='2')"); Assert.assertEquals(0,ret.getResponseCode()); - verifyPermission(warehouseDir + "/" + tableName + "/part1=2", 1); - verifyPermission(warehouseDir + "/" + tableName + "/part1=2/part2=2", 1); - verifyPermission(warehouseDir + "/" + tableName + "/part1=2/part2=2/part3=2", 1); + verifyInheritedPermission(warehouseDir + "/" + tableName + "/part1=2", 1); + verifyInheritedPermission(warehouseDir + "/" + tableName + "/part1=2/part2=2", 1); + verifyInheritedPermission(warehouseDir + "/" + tableName + "/part1=2/part2=2/part3=2", 1); Assert.assertTrue(listStatus(warehouseDir + "/" + tableName + "/part1=2/part2=2/part3=2").size() > 0); for (String child : listStatus(warehouseDir + "/" + tableName + "/part1=2/part2=2/part3=2")) { - verifyPermission(child, 1); + verifyInheritedPermission(child, 1); } String tableName2 = "alterpart2"; @@ -414,9 +445,15 @@ public void testPartition() throws Exception { //alter exchange can not change base table's permission //alter exchange can only control final partition folder's permission - verifyPermission(warehouseDir + "/" + tableName2 + "/part1=2", 0); - verifyPermission(warehouseDir + "/" + tableName2 + "/part1=2/part2=2", 0); - verifyPermission(warehouseDir + "/" + tableName2 + "/part1=2/part2=2/part3=2", 1); + verifyInheritedPermission(warehouseDir + "/" + tableName2 + "/part1=2", 0); + verifyInheritedPermission(warehouseDir + "/" + tableName2 + "/part1=2/part2=2", 0); + verifyInheritedPermission(warehouseDir + "/" + tableName2 + "/part1=2/part2=2/part3=2", 
1); + + ret = driver.run("DROP TABLE " + tableName2); + Assert.assertEquals(0,ret.getResponseCode()); + + ret = driver.run("DROP TABLE " + tableName); + Assert.assertEquals(0,ret.getResponseCode()); } @Test @@ -436,8 +473,11 @@ public void testExternalTable() throws Exception { Assert.assertTrue(listStatus(myLocation).size() > 0); for (String child : listStatus(myLocation)) { - verifyPermission(child); + verifyInheritedPermission(child); } + + ret = driver.run("DROP TABLE " + tableName); + Assert.assertEquals(0,ret.getResponseCode()); } @Test @@ -459,7 +499,7 @@ public void testLoadLocal() throws Exception { Assert.assertTrue(listStatus(tableLoc).size() > 0); for (String child : listStatus(tableLoc)) { - verifyPermission(child); + verifyInheritedPermission(child); } //case1B: load data local into overwrite non-partitioned-table @@ -472,9 +512,12 @@ public void testLoadLocal() throws Exception { Assert.assertTrue(listStatus(tableLoc).size() > 0); for (String child : listStatus(tableLoc)) { - verifyPermission(child, 1); + verifyInheritedPermission(child, 1); } + ret = driver.run("DROP TABLE " + tableName); + Assert.assertEquals(0,ret.getResponseCode()); + //case 2 is partitioned table. tableName = "loadlocalpartition"; @@ -491,7 +534,7 @@ public void testLoadLocal() throws Exception { String partLoc = warehouseDir + "/" + tableName + "/part1=1/part2=1"; Assert.assertTrue(listStatus(partLoc).size() > 0); for (String child : listStatus(partLoc)) { - verifyPermission(child); + verifyInheritedPermission(child); } //case 2B: insert data overwrite into partitioned table. 
set testing table/partition folder hierarchy 1 @@ -506,8 +549,11 @@ public void testLoadLocal() throws Exception { Assert.assertTrue(listStatus(tableLoc).size() > 0); for (String child : listStatus(partLoc)) { - verifyPermission(child, 1); + verifyInheritedPermission(child, 1); } + + ret = driver.run("DROP TABLE " + tableName); + Assert.assertEquals(0,ret.getResponseCode()); } @Test @@ -530,7 +576,7 @@ public void testLoad() throws Exception { Assert.assertTrue(listStatus(tableLoc).size() > 0); for (String child : listStatus(tableLoc)) { - verifyPermission(child); + verifyInheritedPermission(child); } //case1B: load data into overwrite non-partitioned-table @@ -545,9 +591,12 @@ public void testLoad() throws Exception { Assert.assertTrue(listStatus(tableLoc).size() > 0); for (String child : listStatus(tableLoc)) { - verifyPermission(child, 1); + verifyInheritedPermission(child, 1); } + ret = driver.run("DROP TABLE " + tableName); + Assert.assertEquals(0,ret.getResponseCode()); + //case 2 is partitioned table. tableName = "loadpartition"; @@ -565,7 +614,7 @@ public void testLoad() throws Exception { String partLoc = warehouseDir + "/" + tableName + "/part1=1/part2=1"; Assert.assertTrue(listStatus(partLoc).size() > 0); for (String child : listStatus(partLoc)) { - verifyPermission(child); + verifyInheritedPermission(child); } //case 2B: insert data overwrite into partitioned table. 
set testing table/partition folder hierarchy 1 @@ -583,8 +632,11 @@ public void testLoad() throws Exception { Assert.assertTrue(listStatus(tableLoc).size() > 0); for (String child : listStatus(partLoc)) { - verifyPermission(child, 1); + verifyInheritedPermission(child, 1); } + + ret = driver.run("DROP TABLE " + tableName); + Assert.assertEquals(0,ret.getResponseCode()); } @Test @@ -605,15 +657,21 @@ public void testCtas() throws Exception { Assert.assertEquals(0,ret.getResponseCode()); assertExistence(warehouseDir + "/" + testDb + ".db/" + tableName); - verifyPermission(warehouseDir + "/" + testDb + ".db/" + tableName); + verifyInheritedPermission(warehouseDir + "/" + testDb + ".db/" + tableName); Assert.assertTrue(listStatus(warehouseDir + "/" + testDb + ".db/" + tableName).size() > 0); for (String child : listStatus(warehouseDir + "/" + testDb + ".db/" + tableName)) { - verifyPermission(child); + verifyInheritedPermission(child); } + ret = driver.run("DROP TABLE " + tableName); + Assert.assertEquals(0,ret.getResponseCode()); + ret = driver.run("USE default"); Assert.assertEquals(0,ret.getResponseCode()); + + ret = driver.run("DROP DATABASE " + testDb); + Assert.assertEquals(0,ret.getResponseCode()); } @Test @@ -631,20 +689,20 @@ public void testExim() throws Exception { //check if exported data has inherited the permissions. 
assertExistence(myLocation); - verifyPermission(myLocation); + verifyInheritedPermission(myLocation); assertExistence(myLocation + "/part1=1/part2=1"); - verifyPermission(myLocation + "/part1=1/part2=1"); + verifyInheritedPermission(myLocation + "/part1=1/part2=1"); Assert.assertTrue(listStatus(myLocation + "/part1=1/part2=1").size() > 0); for (String child : listStatus(myLocation + "/part1=1/part2=1")) { - verifyPermission(child); + verifyInheritedPermission(child); } assertExistence(myLocation + "/part1=2/part2=2"); - verifyPermission(myLocation + "/part1=2/part2=2"); + verifyInheritedPermission(myLocation + "/part1=2/part2=2"); Assert.assertTrue(listStatus(myLocation + "/part1=2/part2=2").size() > 0); for (String child : listStatus(myLocation + "/part1=2/part2=2")) { - verifyPermission(child); + verifyInheritedPermission(child); } //import the table back into another database @@ -664,25 +722,31 @@ public void testExim() throws Exception { //check permissions of imported, from the exported table assertExistence(warehouseDir + "/" + testDb + ".db/mysrc"); - verifyPermission(warehouseDir + "/" + testDb + ".db/mysrc", 1); + verifyInheritedPermission(warehouseDir + "/" + testDb + ".db/mysrc", 1); myLocation = warehouseDir + "/" + testDb + ".db/mysrc"; assertExistence(myLocation); - verifyPermission(myLocation, 1); + verifyInheritedPermission(myLocation, 1); assertExistence(myLocation + "/part1=1/part2=1"); - verifyPermission(myLocation + "/part1=1/part2=1", 1); + verifyInheritedPermission(myLocation + "/part1=1/part2=1", 1); Assert.assertTrue(listStatus(myLocation + "/part1=1/part2=1").size() > 0); for (String child : listStatus(myLocation + "/part1=1/part2=1")) { - verifyPermission(child, 1); + verifyInheritedPermission(child, 1); } assertExistence(myLocation + "/part1=2/part2=2"); - verifyPermission(myLocation + "/part1=2/part2=2", 1); + verifyInheritedPermission(myLocation + "/part1=2/part2=2", 1); Assert.assertTrue(listStatus(myLocation + 
"/part1=2/part2=2").size() > 0); for (String child : listStatus(myLocation + "/part1=2/part2=2")) { - verifyPermission(child, 1); + verifyInheritedPermission(child, 1); } + + ret = driver.run("USE default"); + Assert.assertEquals(0,ret.getResponseCode()); + + ret = driver.run("DROP DATABASE " + testDb + " CASCADE"); + Assert.assertEquals(0,ret.getResponseCode()); } /** @@ -705,13 +769,16 @@ public void testTruncateTable() throws Exception { assertExistence(warehouseDir + "/" + tableName); verifyPermission(warehouseDir + "/" + tableName); - verifyPermission(partition); + verifyInheritedPermission(partition); ret = driver.run("TRUNCATE TABLE " + tableName); Assert.assertEquals(0, ret.getResponseCode()); + // NOTE: TRUNCATE will delete partition directories and recreates them; restores previous permissions. assertExistence(warehouseDir + "/" + tableName); + assertExistence(partition); verifyPermission(warehouseDir + "/" + tableName); + verifyInheritedPermission(partition); ret = driver.run("insert into table " + tableName + " partition(part1='1') select key,value from mysrc where part1='1' and part2='1'"); Assert.assertEquals(0, ret.getResponseCode()); @@ -719,14 +786,17 @@ public void testTruncateTable() throws Exception { verifyPermission(warehouseDir + "/" + tableName); assertExistence(partition); - verifyPermission(partition); + verifyInheritedPermission(partition); // Also test the partition folder if the partition is truncated ret = driver.run("TRUNCATE TABLE " + tableName + " partition(part1='1')"); Assert.assertEquals(0, ret.getResponseCode()); assertExistence(partition); - verifyPermission(partition); + verifyInheritedPermission(partition); + + ret = driver.run("DROP TABLE " + tableName); + Assert.assertEquals(0,ret.getResponseCode()); } private void setSinglePartition(String tableLoc, int index) throws Exception { @@ -735,17 +805,17 @@ private void setSinglePartition(String tableLoc, int index) throws Exception { } private void verifySinglePartition(String 
tableLoc, int index) throws Exception { - verifyPermission(tableLoc + "/part1=1", index); - verifyPermission(tableLoc + "/part1=2", index); + verifyInheritedPermission(tableLoc + "/part1=1", index); + verifyInheritedPermission(tableLoc + "/part1=2", index); Assert.assertTrue(listStatus(tableLoc + "/part1=1").size() > 0); for (String child : listStatus(tableLoc + "/part1=1")) { - verifyPermission(child, index); + verifyInheritedPermission(child, index); } Assert.assertTrue(listStatus(tableLoc + "/part1=2").size() > 0); for (String child : listStatus(tableLoc + "/part1=2")) { - verifyPermission(child, index); + verifyInheritedPermission(child, index); } } @@ -758,22 +828,27 @@ private void setDualPartitionTable(String baseTablePath, int index) throws Excep setPermission(baseTablePath + "/part1=2/part2=2", index); } - private void verifyDualPartitionTable(String baseTablePath, int index) throws Exception { + private void verifyDualPartitionTable(String baseTablePath, int index, boolean inherit) throws Exception { verifyPermission(baseTablePath, index); - verifyPermission(baseTablePath + "/part1=1", index); - verifyPermission(baseTablePath + "/part1=1/part2=1", index); + if (inherit) { + verifyInheritedPermission(baseTablePath + "/part1=1", index); + verifyInheritedPermission(baseTablePath + "/part1=2", index); + } else { + verifyPermission(baseTablePath + "/part1=1", index); + verifyPermission(baseTablePath + "/part1=2", index); + } - verifyPermission(baseTablePath + "/part1=2", index); - verifyPermission(baseTablePath + "/part1=2/part2=2", index); + verifyInheritedPermission(baseTablePath + "/part1=1/part2=1", index); + verifyInheritedPermission(baseTablePath + "/part1=2/part2=2", index); Assert.assertTrue(listStatus(baseTablePath + "/part1=1/part2=1").size() > 0); for (String child : listStatus(baseTablePath + "/part1=1/part2=1")) { - verifyPermission(child, index); + verifyInheritedPermission(child, index); } Assert.assertTrue(listStatus(baseTablePath + 
"/part1=2/part2=2").size() > 0); for (String child : listStatus(baseTablePath + "/part1=2/part2=2")) { - verifyPermission(child, index); + verifyInheritedPermission(child, index); } } diff --git itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestFolderPermissions.java itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestFolderPermissions.java index 6cc2d18..16b75a2 100644 --- itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestFolderPermissions.java +++ itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestFolderPermissions.java @@ -49,4 +49,9 @@ public void verifyPermission(String locn, int permIndex) throws Exception { FsPermission actual = fs.getFileStatus(new Path(locn)).getPermission(); Assert.assertEquals(expected[permIndex], actual); } + + @Override + public void verifyInheritedPermission(String locn, int permIndex) throws Exception { + verifyPermission(locn, permIndex); + } } diff --git ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java index e320dbf..15f24fb 100644 --- ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java +++ ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java @@ -2863,7 +2863,7 @@ private static void copyFiles(final HiveConf conf, final FileSystem destFs, } if (inheritPerms) { - HdfsUtils.setFullFileStatus(conf, fullDestStatus, srcGroup, destFs, destPath, false); + HdfsUtils.setFullFileStatus(conf, fullDestStatus, srcGroup, destFs, destPath, false, false); } if (null != newFiles) { newFiles.add(destPath); @@ -3086,7 +3086,7 @@ public Void call() throws Exception { final String group = srcStatus.getGroup(); if(destFs.rename(srcStatus.getPath(), destFile)) { if (inheritPerms) { - HdfsUtils.setFullFileStatus(conf, desiredStatus, group, destFs, destFile, false); + HdfsUtils.setFullFileStatus(conf, desiredStatus, group, destFs, destFile, false, false); } } else { throw new IOException("rename for src 
path: " + srcStatus.getPath() + " to dest path:" diff --git shims/common/src/main/java/org/apache/hadoop/hive/io/HdfsUtils.java shims/common/src/main/java/org/apache/hadoop/hive/io/HdfsUtils.java index 7b6a9bd..9613cfb 100644 --- shims/common/src/main/java/org/apache/hadoop/hive/io/HdfsUtils.java +++ shims/common/src/main/java/org/apache/hadoop/hive/io/HdfsUtils.java @@ -60,26 +60,63 @@ public static Path getFileIdPath( public static void setFullFileStatus(Configuration conf, HdfsUtils.HadoopFileStatus sourceStatus, FileSystem fs, Path target, boolean recursion) throws IOException { - setFullFileStatus(conf, sourceStatus, null, fs, target, recursion); + setFullFileStatus(conf, sourceStatus, null, fs, target, recursion, true); } public static void setFullFileStatus(Configuration conf, HdfsUtils.HadoopFileStatus sourceStatus, String targetGroup, FileSystem fs, Path target, boolean recursion) throws IOException { + setFullFileStatus(conf, sourceStatus, targetGroup, fs, target, recursion, true); + } + + public static void setFullFileStatus(Configuration conf, HdfsUtils.HadoopFileStatus sourceStatus, + String targetGroup, FileSystem fs, Path target, boolean recursion, boolean isDir) throws IOException { FileStatus fStatus= sourceStatus.getFileStatus(); String group = fStatus.getGroup(); boolean aclEnabled = Objects.equal(conf.get("dfs.namenode.acls.enabled"), "true"); FsPermission sourcePerm = fStatus.getPermission(); List aclEntries = null; if (aclEnabled) { - if (sourceStatus.getAclEntries() != null) { + if (sourceStatus.getAclEntries() != null && ! 
sourceStatus.getAclEntries().isEmpty()) { LOG.trace(sourceStatus.aclStatus.toString()); - aclEntries = new ArrayList<>(sourceStatus.getAclEntries()); - removeBaseAclEntries(aclEntries); - //the ACL api's also expect the tradition user/group/other permission in the form of ACL - aclEntries.add(newAclEntry(AclEntryScope.ACCESS, AclEntryType.USER, sourcePerm.getUserAction())); - aclEntries.add(newAclEntry(AclEntryScope.ACCESS, AclEntryType.GROUP, sourcePerm.getGroupAction())); - aclEntries.add(newAclEntry(AclEntryScope.ACCESS, AclEntryType.OTHER, sourcePerm.getOtherAction())); + List defaults = extractDefaultAcls(sourceStatus.getAclEntries()); + if (! defaults.isEmpty()) { + // Generate child ACLs based on parent DEFAULTs. + aclEntries = new ArrayList(defaults.size() * 2); + + // All ACCESS ACLs are derived from the DEFAULT ACLs of the parent. + // All DEFAULT ACLs of the parent are inherited by the child. + // If DEFAULT ACLs exist, it should include DEFAULTs for USER, OTHER, and MASK. + for (AclEntry acl : defaults) { + // OTHER permissions are not inherited by the child. + if (acl.getType() == AclEntryType.OTHER) { + aclEntries.add(newAclEntry(AclEntryScope.ACCESS, AclEntryType.OTHER, FsAction.NONE)); + } else { + aclEntries.add(newAclEntry(AclEntryScope.ACCESS, acl.getType(), acl.getName(), acl.getPermission())); + } + } + + // Add DEFAULTs for directories only; adding DEFAULTs for files throws an exception. + if (isDir) { + aclEntries.addAll(defaults); + } + } else { + // Parent has no DEFAULTs, hence child inherits no ACLs. + // Set basic permissions only. 
+ FsAction groupAction = null; + + for (AclEntry acl : sourceStatus.getAclEntries()) { + if (acl.getType() == AclEntryType.GROUP) { + groupAction = acl.getPermission(); + break; + } + } + + aclEntries = new ArrayList(3); + aclEntries.add(newAclEntry(AclEntryScope.ACCESS, AclEntryType.USER, sourcePerm.getUserAction())); + aclEntries.add(newAclEntry(AclEntryScope.ACCESS, AclEntryType.GROUP, groupAction)); + aclEntries.add(newAclEntry(AclEntryScope.ACCESS, AclEntryType.OTHER, FsAction.NONE)); + } } } @@ -93,19 +130,16 @@ public static void setFullFileStatus(Configuration conf, HdfsUtils.HadoopFileSta if (group != null && !group.isEmpty()) { run(fsShell, new String[]{"-chgrp", "-R", group, target.toString()}); } - if (aclEnabled) { - if (null != aclEntries) { - //Attempt extended Acl operations only if its enabled, but don't fail the operation regardless. - try { - //construct the -setfacl command - String aclEntry = Joiner.on(",").join(aclEntries); - run(fsShell, new String[]{"-setfacl", "-R", "--set", aclEntry, target.toString()}); - - } catch (Exception e) { - LOG.info("Skipping ACL inheritance: File system for path " + target + " " + - "does not support ACLs but dfs.namenode.acls.enabled is set to true. "); - LOG.debug("The details are: " + e, e); - } + + if (aclEntries != null) { + try { + //construct the -setfacl command + String aclEntry = Joiner.on(",").join(aclEntries); + run(fsShell, new String[]{"-setfacl", "-R", "--set", aclEntry, target.toString()}); + } catch (Exception e) { + LOG.info("Skipping ACL inheritance: File system for path " + target + " " + + "does not support ACLs but dfs.namenode.acls.enabled is set to true. 
"); + LOG.debug("The details are: " + e, e); } } else { String permission = Integer.toString(sourcePerm.toShort(), 8); @@ -116,15 +150,13 @@ public static void setFullFileStatus(Configuration conf, HdfsUtils.HadoopFileSta } } else { if (group != null && !group.isEmpty()) { - if (targetGroup == null || - !group.equals(targetGroup)) { + if (targetGroup == null || !group.equals(targetGroup)) { fs.setOwner(target, null, group); } } - if (aclEnabled) { - if (null != aclEntries) { - fs.setAcl(target, aclEntries); - } + + if (aclEntries != null) { + fs.setAcl(target, aclEntries); } else { fs.setPermission(target, sourcePerm); } @@ -133,34 +165,48 @@ public static void setFullFileStatus(Configuration conf, HdfsUtils.HadoopFileSta /** * Create a new AclEntry with scope, type and permission (no name). - * - * @param scope - * AclEntryScope scope of the ACL entry - * @param type - * AclEntryType ACL entry type - * @param permission - * FsAction set of permissions in the ACL entry + * @param scope AclEntryScope scope of the ACL entry + * @param type AclEntryType ACL entry type + * @param permission FsAction set of permissions in the ACL entry * @return AclEntry new AclEntry */ private static AclEntry newAclEntry(AclEntryScope scope, AclEntryType type, FsAction permission) { - return new AclEntry.Builder().setScope(scope).setType(type) + return newAclEntry(scope, type, null, permission); + } + + /** + * Create a new AclEntry with scope, type and permission (no name). 
+ * @param scope AclEntryScope scope of the ACL entry + * @param type AclEntryType ACL entry type + * @param name AclEntry name + * @param permission FsAction set of permissions in the ACL entry + * @return AclEntry new AclEntry + */ + private static AclEntry newAclEntry(AclEntryScope scope, AclEntryType type, + String name, FsAction permission) { + return new AclEntry.Builder().setScope(scope).setType(type).setName(name) .setPermission(permission).build(); } + /** - * Removes basic permission acls (unamed acls) from the list of acl entries - * @param entries acl entries to remove from. + * Extracts the DEFAULT ACL entries from the list of acl entries + * @param acls acl entries to extract from + * @return default unnamed acl entries */ - private static void removeBaseAclEntries(List entries) { - Iterables.removeIf(entries, new Predicate() { + private static List extractDefaultAcls(List acls) { + List defaults = new ArrayList(acls); + Iterables.removeIf(defaults, new Predicate() { @Override - public boolean apply(AclEntry input) { - if (input.getName() == null) { + public boolean apply(AclEntry acl) { + if (! 
acl.getScope().equals(AclEntryScope.DEFAULT)) { return true; } return false; } }); + + return defaults; } private static void run(FsShell shell, String[] command) throws Exception { @@ -168,34 +214,36 @@ private static void run(FsShell shell, String[] command) throws Exception { int retval = shell.run(command); LOG.debug("Return value is :" + retval); } -public static class HadoopFileStatus { - private final FileStatus fileStatus; - private final AclStatus aclStatus; + public static class HadoopFileStatus { - public HadoopFileStatus(Configuration conf, FileSystem fs, Path file) throws IOException { + private final FileStatus fileStatus; + private final AclStatus aclStatus; - FileStatus fileStatus = fs.getFileStatus(file); - AclStatus aclStatus = null; - if (Objects.equal(conf.get("dfs.namenode.acls.enabled"), "true")) { - //Attempt extended Acl operations only if its enabled, but don't fail the operation regardless. - try { - aclStatus = fs.getAclStatus(file); - } catch (Exception e) { - LOG.info("Skipping ACL inheritance: File system for path " + file + " " + - "does not support ACLs but dfs.namenode.acls.enabled is set to true. "); - LOG.debug("The details are: " + e, e); + public HadoopFileStatus(Configuration conf, FileSystem fs, Path file) throws IOException { + + FileStatus fileStatus = fs.getFileStatus(file); + AclStatus aclStatus = null; + if (Objects.equal(conf.get("dfs.namenode.acls.enabled"), "true")) { + //Attempt extended Acl operations only if its enabled, but don't fail the operation regardless. + try { + aclStatus = fs.getAclStatus(file); + } catch (Exception e) { + LOG.info("Skipping ACL inheritance: File system for path " + file + " " + + "does not support ACLs but dfs.namenode.acls.enabled is set to true. 
"); + LOG.debug("The details are: " + e, e); + } } - }this.fileStatus = fileStatus; - this.aclStatus = aclStatus; - } + this.fileStatus = fileStatus; + this.aclStatus = aclStatus; + } - public FileStatus getFileStatus() { - return fileStatus; - } + public FileStatus getFileStatus() { + return fileStatus; + } - public List getAclEntries() { - return aclStatus == null ? null : Collections.unmodifiableList(aclStatus.getEntries()); + public List getAclEntries() { + return aclStatus == null ? null : Collections.unmodifiableList(aclStatus.getEntries()); + } } } -}