diff --git common/src/java/org/apache/hadoop/hive/common/FileUtils.java common/src/java/org/apache/hadoop/hive/common/FileUtils.java index ee61350..4591678 100644 --- common/src/java/org/apache/hadoop/hive/common/FileUtils.java +++ common/src/java/org/apache/hadoop/hive/common/FileUtils.java @@ -40,6 +40,7 @@ import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.shims.HadoopShims; +import org.apache.hadoop.hive.shims.HadoopShims.HdfsFileStatus; import org.apache.hadoop.hive.shims.ShimLoader; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.util.Shell; @@ -483,29 +484,23 @@ public static boolean mkdir(FileSystem fs, Path f, boolean inheritPerms, Configu } catch (FileNotFoundException ignore) { } //inherit perms: need to find the last existing parent path, and apply its permission to the entire new subtree. - Path path = f; - List<Path> pathsToSet = new ArrayList<Path>(); - while (!fs.exists(path)) { - pathsToSet.add(path); - path = path.getParent(); + Path lastExistingParent = f; + Path firstNonExistentParent = null; + while (!fs.exists(lastExistingParent)) { + firstNonExistentParent = lastExistingParent; + lastExistingParent = lastExistingParent.getParent(); } - //at the end of this loop, path is the last-existing parent path. boolean success = fs.mkdirs(f); if (!success) { return false; } else { - FsPermission parentPerm = fs.getFileStatus(path).getPermission(); - String permString = Integer.toString(parentPerm.toShort(), 8); - for (Path pathToSet : pathsToSet) { - LOG.info("Setting permission of parent directory: " + path.toString() + - " on new directory: " + pathToSet.toString()); - try { - FsShell fshell = new FsShell(); - fshell.setConf(conf); - fshell.run(new String[]{"-chmod", "-R", permString, pathToSet.toString()}); - } catch (Exception e) { - LOG.warn("Error setting permissions of " + pathToSet, e); - } + HadoopShims shim = ShimLoader.getHadoopShims(); + HdfsFileStatus fullFileStatus = shim.getFullFileStatus(conf, fs, lastExistingParent); + try { + //set on the entire subtree + shim.setFullFileStatus(conf, fullFileStatus, fs, firstNonExistentParent); + } catch (Exception e) { + LOG.warn("Error setting permissions of " + firstNonExistentParent, e); } return true; } @@ -523,16 +518,10 @@ public static boolean copy(FileSystem srcFS, Path src, boolean copied = FileUtil.copy(srcFS, src, dstFS, dst, deleteSource, overwrite, conf); boolean inheritPerms = conf.getBoolVar(HiveConf.ConfVars.HIVE_WAREHOUSE_SUBDIR_INHERIT_PERMS); if (copied && inheritPerms) { - FileStatus destFileStatus = dstFS.getFileStatus(dst); - FsPermission perm = destFileStatus.getPermission(); - String permString = Integer.toString(perm.toShort(), 8); - String group = destFileStatus.getGroup(); - //use FsShell to change group and permissions recursively + HadoopShims shims = ShimLoader.getHadoopShims(); + HdfsFileStatus fullFileStatus = shims.getFullFileStatus(conf, dstFS, dst); try { - FsShell fshell = new FsShell(); - fshell.setConf(conf); - fshell.run(new String[]{"-chgrp", "-R", group, dst.toString()}); - fshell.run(new String[]{"-chmod", "-R", permString, dst.toString()}); + shims.setFullFileStatus(conf, fullFileStatus, dstFS, dst); } catch (Exception e) { LOG.warn("Error setting permissions or group of " + dst, e); } @@ -587,5 +576,4 @@ public static boolean moveToTrash(FileSystem fs, Path f, Configuration conf) thr } return result; } - }
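[Editor's note, not part of the patch: a minimal usage sketch of the reworked mkdir() above. It assumes a live FileSystem fs and a HiveConf conf with hive.warehouse.subdir.inherit.perms=true; the warehouse path is made up.]

// Create a nested partition directory under an existing, permissioned ancestor.
Path target = new Path("/warehouse/mydb.db/tbl/part1=1/part2=1");
// mkdir() walks up from `target` to the deepest existing ancestor (say .../tbl),
// creates the whole missing chain with fs.mkdirs(), and then makes a single
// shim.setFullFileStatus() call on the topmost newly created directory
// (part1=1). That call recursively copies the ancestor's group, permission
// bits, and (on ACL-enabled Hadoop versions) extended ACLs onto the new
// subtree, replacing the old per-path FsShell "-chmod -R" loop.
boolean ok = FileUtils.mkdir(fs, target, true /* inheritPerms */, conf);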
diff --git itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/FolderPermissionBase.java itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/FolderPermissionBase.java new file mode 100644 index 0000000..57fe4cf --- /dev/null +++ itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/FolderPermissionBase.java @@ -0,0 +1,556 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.security; + +import java.net.URI; +import java.util.ArrayList; +import java.util.List; + +import junit.framework.Assert; + +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.PathFilter; +import org.apache.hadoop.fs.permission.AclStatus; +import org.apache.hadoop.hive.cli.CliSessionState; +import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.conf.HiveConf.ConfVars; +import org.apache.hadoop.hive.metastore.MetaStoreUtils; +import org.apache.hadoop.hive.ql.Driver; +import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse; +import org.apache.hadoop.hive.ql.session.SessionState; +import org.apache.hadoop.hive.shims.HadoopShims.MiniDFSShim; +import org.apache.hadoop.hive.shims.ShimLoader; +import org.junit.BeforeClass; +import org.junit.Test; + +/** + * This tests the flag 'hive.warehouse.subdir.inherit.perms'.
+ */ +public abstract class FolderPermissionBase { + protected static HiveConf conf; + protected static Driver driver; + protected static String dataFileDir; + protected static Path dataFilePath; + protected static FileSystem fs; + + protected static Path warehouseDir; + protected static Path baseDfsDir; + + public static final PathFilter hiddenFileFilter = new PathFilter(){ + public boolean accept(Path p){ + String name = p.getName(); + return !name.startsWith("_") && !name.startsWith("."); + } + }; + + + public abstract void setPermission(String locn, int permIndex) throws Exception; + + public abstract void verifyPermission(String locn, int permIndex) throws Exception; + + + public void setPermission(String locn) throws Exception { + setPermission(locn, 0); + } + + public void verifyPermission(String locn) throws Exception { + verifyPermission(locn, 0); + } + + + public static void baseSetup() throws Exception { + MiniDFSShim dfs = ShimLoader.getHadoopShims().getMiniDfs(conf, 4, true, null); + fs = dfs.getFileSystem(); + baseDfsDir = new Path(new Path(fs.getUri()), "/base"); + fs.mkdirs(baseDfsDir); + warehouseDir = new Path(baseDfsDir, "warehouse"); + fs.mkdirs(warehouseDir); + conf.setVar(ConfVars.METASTOREWAREHOUSE, warehouseDir.toString()); + + dataFileDir = conf.get("test.data.files").replace('\\', '/') + .replace("c:", ""); + dataFilePath = new Path(dataFileDir, "kv1.txt"); + + //set hive conf vars + conf.setBoolVar(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY, false); + conf.setBoolVar(HiveConf.ConfVars.HIVE_WAREHOUSE_SUBDIR_INHERIT_PERMS, true); + conf.setVar(HiveConf.ConfVars.DYNAMICPARTITIONINGMODE, "nonstrict"); + int port = MetaStoreUtils.findFreePort(); + MetaStoreUtils.startMetaStore(port, ShimLoader.getHadoopThriftAuthBridge()); + + SessionState.start(new CliSessionState(conf)); + driver = new Driver(conf); + + setupDataTable(); + } + + + private static void setupDataTable() throws Exception { + CommandProcessorResponse ret = driver.run("DROP TABLE IF EXISTS mysrc"); + Assert.assertEquals(0,ret.getResponseCode()); + + ret = driver.run("CREATE TABLE mysrc (key STRING, value STRING) PARTITIONED BY (part1 string, part2 string) STORED AS TEXTFILE"); + Assert.assertEquals(0,ret.getResponseCode()); + + ret = driver.run("LOAD DATA LOCAL INPATH '" + dataFilePath + "' INTO TABLE mysrc PARTITION (part1='1',part2='1')"); + Assert.assertEquals(0,ret.getResponseCode()); + + ret = driver.run("LOAD DATA LOCAL INPATH '" + dataFilePath + "' INTO TABLE mysrc PARTITION (part1='2',part2='2')"); + Assert.assertEquals(0,ret.getResponseCode()); + } + + @Test + public void testCreateTable() throws Exception { + String testDb = "mydb"; + String tableName = "createtable"; + CommandProcessorResponse ret = driver.run("CREATE DATABASE " + testDb); + Assert.assertEquals(0,ret.getResponseCode()); + + assertExistence(warehouseDir + "/" + testDb + ".db"); + setPermission(warehouseDir + "/" + testDb + ".db"); + verifyPermission(warehouseDir + "/" + testDb + ".db"); + + ret = driver.run("USE " + testDb); + Assert.assertEquals(0,ret.getResponseCode()); + + ret = driver.run("CREATE TABLE " + tableName + " (key string, value string)"); + Assert.assertEquals(0,ret.getResponseCode()); + + verifyPermission(warehouseDir + "/" + testDb + ".db/" + tableName); + + ret = driver.run("insert into table " + tableName + " select key,value from default.mysrc"); + Assert.assertEquals(0,ret.getResponseCode()); + + assertExistence(warehouseDir + "/" + testDb + ".db/" + tableName); + verifyPermission(warehouseDir + "/" + testDb 
+ ".db/" + tableName); + + Assert.assertTrue(listStatus(warehouseDir + "/" + testDb + ".db/" + tableName).size() > 0); + for (String child : listStatus(warehouseDir + "/" + testDb + ".db/" + tableName)) { + verifyPermission(child); + } + + ret = driver.run("USE default"); + Assert.assertEquals(0,ret.getResponseCode()); + } + + + @Test + public void testStaticPartition() throws Exception { + String tableName = "staticpart"; + CommandProcessorResponse ret = driver.run("CREATE TABLE " + tableName + " (key string, value string) partitioned by (part1 string, part2 string)"); + Assert.assertEquals(0,ret.getResponseCode()); + + assertExistence(warehouseDir + "/" + tableName); + setPermission(warehouseDir + "/" + tableName); + + ret = driver.run("insert into table " + tableName + " partition(part1='1', part2='1') select key,value from mysrc where part1='1' and part2='1'"); + Assert.assertEquals(0,ret.getResponseCode()); + + verifyPermission(warehouseDir + "/" + tableName + "/part1=1"); + verifyPermission(warehouseDir + "/" + tableName + "/part1=1/part2=1"); + + Assert.assertTrue(listStatus(warehouseDir + "/" + tableName + "/part1=1/part2=1").size() > 0); + for (String child : listStatus(warehouseDir + "/" + tableName + "/part1=1/part2=1")) { + verifyPermission(child); + } + } + + @Test + public void testAlterPartition() throws Exception { + String tableName = "alterpart"; + CommandProcessorResponse ret = driver.run("CREATE TABLE " + tableName + " (key string, value string) partitioned by (part1 int, part2 int, part3 int)"); + Assert.assertEquals(0,ret.getResponseCode()); + + assertExistence(warehouseDir + "/" + tableName); + setPermission(warehouseDir + "/" + tableName); + + ret = driver.run("insert into table " + tableName + " partition(part1='1',part2='1',part3='1') select key,value from mysrc"); + Assert.assertEquals(0,ret.getResponseCode()); + + //alter partition + ret = driver.run("alter table " + tableName + " partition (part1='1',part2='1',part3='1') rename to partition (part1='2',part2='2',part3='2')"); + Assert.assertEquals(0,ret.getResponseCode()); + + verifyPermission(warehouseDir + "/" + tableName + "/part1=2"); + verifyPermission(warehouseDir + "/" + tableName + "/part1=2/part2=2"); + verifyPermission(warehouseDir + "/" + tableName + "/part1=2/part2=2/part3=2"); + + Assert.assertTrue(listStatus(warehouseDir + "/" + tableName + "/part1=2/part2=2/part3=2").size() > 0); + for (String child : listStatus(warehouseDir + "/" + tableName + "/part1=2/part2=2/part3=2")) { + verifyPermission(child); + } + } + + + @Test + public void testDynamicPartitions() throws Exception { + String tableName = "dynamicpart"; + + CommandProcessorResponse ret = driver.run("CREATE TABLE " + tableName + " (key string, value string) partitioned by (part1 string, part2 string)"); + Assert.assertEquals(0,ret.getResponseCode()); + + assertExistence(warehouseDir + "/" + tableName); + setPermission(warehouseDir + "/" + tableName); + + ret = driver.run("insert into table " + tableName + " partition (part1,part2) select key,value,part1,part2 from mysrc"); + Assert.assertEquals(0,ret.getResponseCode()); + + verifyPermission(warehouseDir + "/" + tableName + "/part1=1"); + verifyPermission(warehouseDir + "/" + tableName + "/part1=1/part2=1"); + + verifyPermission(warehouseDir + "/" + tableName + "/part1=2"); + verifyPermission(warehouseDir + "/" + tableName + "/part1=2/part2=2"); + + Assert.assertTrue(listStatus(warehouseDir + "/" + tableName + "/part1=1/part2=1").size() > 0); + for (String child : listStatus(warehouseDir + 
"/" + tableName + "/part1=1/part2=1")) { + verifyPermission(child); + } + + Assert.assertTrue(listStatus(warehouseDir + "/" + tableName + "/part1=2/part2=2").size() > 0); + for (String child : listStatus(warehouseDir + "/" + tableName + "/part1=2/part2=2")) { + verifyPermission(child); + } + } + + @Test + public void testExternalTable() throws Exception { + String tableName = "externaltable"; + + String myLocation = warehouseDir + "/myfolder"; + FileSystem fs = FileSystem.get(new URI(myLocation), conf); + fs.mkdirs(new Path(myLocation)); + setPermission(myLocation); + + CommandProcessorResponse ret = driver.run("CREATE TABLE " + tableName + " (key string, value string) LOCATION '" + myLocation + "'"); + Assert.assertEquals(0,ret.getResponseCode()); + + ret = driver.run("insert into table " + tableName + " select key,value from mysrc"); + Assert.assertEquals(0,ret.getResponseCode()); + + Assert.assertTrue(listStatus(myLocation).size() > 0); + for (String child : listStatus(myLocation)) { + verifyPermission(child); + } + } + + @Test + public void testInsert() throws Exception { + //case 1 is non-partitioned table. + String tableName = "insert"; + + CommandProcessorResponse ret = driver.run("CREATE TABLE " + tableName + " (key string, value string)"); + Assert.assertEquals(0,ret.getResponseCode()); + + String tableLoc = warehouseDir + "/" + tableName; + assertExistence(warehouseDir + "/" + tableName); + + //case1A: insert into non-partitioned table. + setPermission(warehouseDir + "/" + tableName); + ret = driver.run("insert into table " + tableName + " select key,value from mysrc"); + Assert.assertEquals(0,ret.getResponseCode()); + + Assert.assertTrue(listStatus(tableLoc).size() > 0); + for (String child : listStatus(tableLoc)) { + verifyPermission(child); + } + + //case1B: insert overwrite non-partitioned-table + setPermission(warehouseDir + "/" + tableName, 1); + ret = driver.run("insert overwrite table " + tableName + " select key,value from mysrc"); + Assert.assertEquals(0,ret.getResponseCode()); + + Assert.assertTrue(listStatus(tableLoc).size() > 0); + for (String child : listStatus(tableLoc)) { + verifyPermission(child, 1); + } + + //case 2 is partitioned table. + tableName = "insertpartition"; + + ret = driver.run("CREATE TABLE " + tableName + " (key string, value string) partitioned by (part1 int, part2 int)"); + Assert.assertEquals(0,ret.getResponseCode()); + + ret = driver.run("insert overwrite table " + tableName + " partition(part1='1',part2='1') select key,value from mysrc"); + Assert.assertEquals(0,ret.getResponseCode()); + + String partLoc = warehouseDir + "/" + tableName + "/part1=1/part2=1"; + assertExistence(partLoc); + + //case 2A: insert into partitioned table. + setPermission(partLoc); + ret = driver.run("insert overwrite table " + tableName + " partition(part1='1',part2='1') select key,value from mysrc"); + Assert.assertEquals(0,ret.getResponseCode()); + + Assert.assertTrue(listStatus(partLoc).size() > 0); + for (String child : listStatus(partLoc)) { + verifyPermission(child); + } + + //case 2B: insert into non-partitioned table. + setPermission(partLoc, 1); + ret = driver.run("insert overwrite table " + tableName + " partition(part1='1',part2='1') select key,value from mysrc"); + Assert.assertEquals(0,ret.getResponseCode()); + + Assert.assertTrue(listStatus(tableLoc).size() > 0); + for (String child : listStatus(partLoc)) { + verifyPermission(child, 1); + } + } + + @Test + public void testLoadLocal() throws Exception { + //case 1 is non-partitioned table. 
+ String tableName = "loadlocal"; + + CommandProcessorResponse ret = driver.run("CREATE TABLE " + tableName + " (key string, value string)"); + Assert.assertEquals(0,ret.getResponseCode()); + + String tableLoc = warehouseDir + "/" + tableName; + assertExistence(warehouseDir + "/" + tableName); + + //case 1A: load data local into non-partitioned table. + setPermission(warehouseDir + "/" + tableName); + + ret = driver.run("load data local inpath '" + dataFilePath + "' into table " + tableName); + Assert.assertEquals(0,ret.getResponseCode()); + + Assert.assertTrue(listStatus(tableLoc).size() > 0); + for (String child : listStatus(tableLoc)) { + verifyPermission(child); + } + + //case 1B: load data local overwrite into non-partitioned table. + setPermission(warehouseDir + "/" + tableName, 1); + ret = driver.run("load data local inpath '" + dataFilePath + "' overwrite into table " + tableName); + Assert.assertEquals(0,ret.getResponseCode()); + + Assert.assertTrue(listStatus(tableLoc).size() > 0); + for (String child : listStatus(tableLoc)) { + verifyPermission(child, 1); + } + + //case 2 is partitioned table. + tableName = "loadlocalpartition"; + + ret = driver.run("CREATE TABLE " + tableName + " (key string, value string) partitioned by (part1 int, part2 int)"); + Assert.assertEquals(0,ret.getResponseCode()); + tableLoc = warehouseDir + "/" + tableName; + assertExistence(tableLoc); + + //case 2A: load data local into partitioned table. + setPermission(tableLoc); + ret = driver.run("LOAD DATA LOCAL INPATH '" + dataFilePath + "' INTO TABLE " + tableName + " PARTITION (part1='1',part2='1')"); + Assert.assertEquals(0,ret.getResponseCode()); + + String partLoc = warehouseDir + "/" + tableName + "/part1=1/part2=1"; + Assert.assertTrue(listStatus(partLoc).size() > 0); + for (String child : listStatus(partLoc)) { + verifyPermission(child); + } + + //case 2B: load data local overwrite into partitioned table with a different permission. + setPermission(partLoc, 1); + ret = driver.run("LOAD DATA LOCAL INPATH '" + dataFilePath + "' OVERWRITE INTO TABLE " + tableName + " PARTITION (part1='1',part2='1')"); + Assert.assertEquals(0,ret.getResponseCode()); + + Assert.assertTrue(listStatus(partLoc).size() > 0); + for (String child : listStatus(partLoc)) { + verifyPermission(child, 1); + } + } + + @Test + public void testLoad() throws Exception { + String tableName = "load"; + String location = "/hdfsPath"; + fs.copyFromLocalFile(dataFilePath, new Path(location)); + + //case 1: load data + CommandProcessorResponse ret = driver.run("CREATE TABLE " + tableName + " (key string, value string)"); + Assert.assertEquals(0,ret.getResponseCode()); + String tableLoc = warehouseDir + "/" + tableName; + assertExistence(warehouseDir + "/" + tableName); + + //case 1A: load data into non-partitioned table.
+ setPermission(warehouseDir + "/" + tableName); + + ret = driver.run("load data inpath '" + location + "' into table " + tableName); + Assert.assertEquals(0,ret.getResponseCode()); + + Assert.assertTrue(listStatus(tableLoc).size() > 0); + for (String child : listStatus(tableLoc)) { + verifyPermission(child); + } + + //case 1B: load data overwrite into non-partitioned table. + setPermission(warehouseDir + "/" + tableName, 1); + fs.copyFromLocalFile(dataFilePath, new Path(location)); + ret = driver.run("load data inpath '" + location + "' overwrite into table " + tableName); + Assert.assertEquals(0,ret.getResponseCode()); + + Assert.assertTrue(listStatus(tableLoc).size() > 0); + for (String child : listStatus(tableLoc)) { + verifyPermission(child, 1); + } + + //case 2 is partitioned table. + tableName = "loadpartition"; + + ret = driver.run("CREATE TABLE " + tableName + " (key string, value string) partitioned by (part1 int, part2 int)"); + Assert.assertEquals(0,ret.getResponseCode()); + tableLoc = warehouseDir + "/" + tableName; + assertExistence(tableLoc); + + //case 2A: load data into partitioned table. + setPermission(tableLoc); + fs.copyFromLocalFile(dataFilePath, new Path(location)); + ret = driver.run("LOAD DATA INPATH '" + location + "' INTO TABLE " + tableName + " PARTITION (part1='1',part2='1')"); + Assert.assertEquals(0,ret.getResponseCode()); + + String partLoc = warehouseDir + "/" + tableName + "/part1=1/part2=1"; + Assert.assertTrue(listStatus(partLoc).size() > 0); + for (String child : listStatus(partLoc)) { + verifyPermission(child); + } + + //case 2B: load data overwrite into partitioned table with a different permission. + setPermission(partLoc, 1); + fs.copyFromLocalFile(dataFilePath, new Path(location)); + ret = driver.run("LOAD DATA INPATH '" + location + "' OVERWRITE INTO TABLE " + tableName + " PARTITION (part1='1',part2='1')"); + Assert.assertEquals(0,ret.getResponseCode()); + + Assert.assertTrue(listStatus(partLoc).size() > 0); + for (String child : listStatus(partLoc)) { + verifyPermission(child, 1); + } + } + + @Test + public void testCtas() throws Exception { + String testDb = "ctasdb"; + String tableName = "createtable"; + CommandProcessorResponse ret = driver.run("CREATE DATABASE " + testDb); + Assert.assertEquals(0,ret.getResponseCode()); + + assertExistence(warehouseDir + "/" + testDb + ".db"); + setPermission(warehouseDir + "/" + testDb + ".db"); + verifyPermission(warehouseDir + "/" + testDb + ".db"); + + ret = driver.run("USE " + testDb); + Assert.assertEquals(0,ret.getResponseCode()); + + ret = driver.run("create table " + tableName + " as select key,value from default.mysrc"); + Assert.assertEquals(0,ret.getResponseCode()); + + assertExistence(warehouseDir + "/" + testDb + ".db/" + tableName); + verifyPermission(warehouseDir + "/" + testDb + ".db/" + tableName); + + Assert.assertTrue(listStatus(warehouseDir + "/" + testDb + ".db/" + tableName).size() > 0); + for (String child : listStatus(warehouseDir + "/" + testDb + ".db/" + tableName)) { + verifyPermission(child); + } + + ret = driver.run("USE default"); + Assert.assertEquals(0,ret.getResponseCode()); + } + + @Test + public void testExim() throws Exception { + + //export the table to an external location.
+ String myLocation = warehouseDir + "/exim"; + FileSystem fs = FileSystem.get(new URI(myLocation), conf); + fs.mkdirs(new Path(myLocation)); + setPermission(myLocation); + myLocation = myLocation + "/temp"; + + CommandProcessorResponse ret = driver.run("export table mysrc to '" + myLocation + "'"); + Assert.assertEquals(0,ret.getResponseCode()); + + //check if exported data has inherited the permissions. + assertExistence(myLocation); + verifyPermission(myLocation); + + assertExistence(myLocation + "/part1=1/part2=1"); + verifyPermission(myLocation + "/part1=1/part2=1"); + Assert.assertTrue(listStatus(myLocation + "/part1=1/part2=1").size() > 0); + for (String child : listStatus(myLocation + "/part1=1/part2=1")) { + verifyPermission(child); + } + + assertExistence(myLocation + "/part1=2/part2=2"); + verifyPermission(myLocation + "/part1=2/part2=2"); + Assert.assertTrue(listStatus(myLocation + "/part1=2/part2=2").size() > 0); + for (String child : listStatus(myLocation + "/part1=2/part2=2")) { + verifyPermission(child); + } + + //import the table back into another database + String testDb = "eximdb"; + ret = driver.run("CREATE DATABASE " + testDb); + Assert.assertEquals(0,ret.getResponseCode()); + + //use another permission for this import location, to verify that it is really set (permIndex=1) + assertExistence(warehouseDir + "/" + testDb + ".db"); + setPermission(warehouseDir + "/" + testDb + ".db", 1); + + ret = driver.run("USE " + testDb); + Assert.assertEquals(0,ret.getResponseCode()); + + ret = driver.run("import from '" + myLocation + "'"); + Assert.assertEquals(0,ret.getResponseCode()); + + //check that the imported table inherited the permissions of the import destination + assertExistence(warehouseDir + "/" + testDb + ".db/mysrc"); + verifyPermission(warehouseDir + "/" + testDb + ".db/mysrc", 1); + + myLocation = warehouseDir + "/" + testDb + ".db/mysrc"; + assertExistence(myLocation); + verifyPermission(myLocation, 1); + + assertExistence(myLocation + "/part1=1/part2=1"); + verifyPermission(myLocation + "/part1=1/part2=1", 1); + Assert.assertTrue(listStatus(myLocation + "/part1=1/part2=1").size() > 0); + for (String child : listStatus(myLocation + "/part1=1/part2=1")) { + verifyPermission(child, 1); + } + + assertExistence(myLocation + "/part1=2/part2=2"); + verifyPermission(myLocation + "/part1=2/part2=2", 1); + Assert.assertTrue(listStatus(myLocation + "/part1=2/part2=2").size() > 0); + for (String child : listStatus(myLocation + "/part1=2/part2=2")) { + verifyPermission(child, 1); + } + } + + private void assertExistence(String locn) throws Exception { + Assert.assertTrue(fs.exists(new Path(locn))); + } + + private List<String> listStatus(String locn) throws Exception { + List<String> results = new ArrayList<String>(); + FileStatus[] listStatus = fs.listStatus(new Path(locn)); + for (FileStatus status : listStatus) { + results.add(status.getPath().toString()); + } + return results; + } +} diff --git itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestExtendedAcls.java itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestExtendedAcls.java new file mode 100644 index 0000000..44a33d8 --- /dev/null +++ itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestExtendedAcls.java @@ -0,0 +1,165 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership.
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.ql.security; + +import static org.apache.hadoop.fs.permission.AclEntryScope.ACCESS; +import static org.apache.hadoop.fs.permission.AclEntryType.GROUP; +import static org.apache.hadoop.fs.permission.AclEntryType.OTHER; +import static org.apache.hadoop.fs.permission.AclEntryType.USER; + +import java.util.List; + +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.permission.AclEntry; +import org.apache.hadoop.fs.permission.AclEntryScope; +import org.apache.hadoop.fs.permission.AclEntryType; +import org.apache.hadoop.fs.permission.FsAction; +import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.hadoop.hive.conf.HiveConf; +import org.junit.Assert; +import org.junit.BeforeClass; +import org.junit.Test; + +import com.google.common.collect.Lists; + +public class TestExtendedAcls extends FolderPermissionBase { + + @BeforeClass + public static void setup() throws Exception { + conf = new HiveConf(TestExtendedAcls.class); + //set up the mini DFS with ACLs enabled. + conf.set("dfs.namenode.acls.enabled", "true"); + baseSetup(); + } + + List<AclEntry> aclSpec1 = Lists.newArrayList( + aclEntry(ACCESS, USER, FsAction.ALL), + aclEntry(ACCESS, GROUP, FsAction.ALL), + aclEntry(ACCESS, OTHER, FsAction.ALL), + aclEntry(ACCESS, USER, "bar", FsAction.READ_WRITE), + aclEntry(ACCESS, USER, "foo", FsAction.READ_EXECUTE), + aclEntry(ACCESS, GROUP, "bar", FsAction.READ_WRITE), + aclEntry(ACCESS, GROUP, "foo", FsAction.READ_EXECUTE)); + + List<AclEntry> aclSpec2 = Lists.newArrayList( + aclEntry(ACCESS, USER, FsAction.ALL), + aclEntry(ACCESS, GROUP, FsAction.ALL), + aclEntry(ACCESS, OTHER, FsAction.READ_EXECUTE), + aclEntry(ACCESS, USER, "bar2", FsAction.READ_WRITE), + aclEntry(ACCESS, USER, "foo2", FsAction.READ_EXECUTE), + aclEntry(ACCESS, GROUP, "bar2", FsAction.READ), + aclEntry(ACCESS, GROUP, "foo2", FsAction.READ_EXECUTE)); + + @Override + public void setPermission(String locn, int permIndex) throws Exception { + switch (permIndex) { + case 0: + setAcl(locn, aclSpec1); + break; + case 1: + setAcl(locn, aclSpec2); + break; + default: + throw new RuntimeException("Only 2 permission sets are supported by this test"); + } + } + + @Override + public void verifyPermission(String locn, int permIndex) throws Exception { + switch (permIndex) { + case 0: + FsPermission perm = fs.getFileStatus(new Path(locn)).getPermission(); + Assert.assertEquals(perm.toString(), "rwxrwxrwx"); + + List<AclEntry> actual = getAcl(locn); + verifyAcls(aclSpec1, actual); + break; + case 1: + perm = fs.getFileStatus(new Path(locn)).getPermission(); + Assert.assertEquals(perm.toString(), "rwxrwxr-x"); + + List<AclEntry> acls = getAcl(locn); + verifyAcls(aclSpec2, acls); + break; + default: + throw new RuntimeException("Only 2 permission sets are supported by this test"); + } + } + + /** + * Create a new AclEntry with scope, type and permission (no name).
+ * + * @param scope + * AclEntryScope scope of the ACL entry + * @param type + * AclEntryType ACL entry type + * @param permission + * FsAction set of permissions in the ACL entry + * @return AclEntry new AclEntry + */ + private AclEntry aclEntry(AclEntryScope scope, AclEntryType type, + FsAction permission) { + return new AclEntry.Builder().setScope(scope).setType(type) + .setPermission(permission).build(); + } + + /** + * Create a new AclEntry with scope, type, name and permission. + * + * @param scope + * AclEntryScope scope of the ACL entry + * @param type + * AclEntryType ACL entry type + * @param name + * String optional ACL entry name + * @param permission + * FsAction set of permissions in the ACL entry + * @return AclEntry new AclEntry + */ + private AclEntry aclEntry(AclEntryScope scope, AclEntryType type, + String name, FsAction permission) { + return new AclEntry.Builder().setScope(scope).setType(type).setName(name) + .setPermission(permission).build(); + } + + private void verifyAcls(List<AclEntry> expectedList, List<AclEntry> actualList) { + for (AclEntry expected : expectedList) { + if (expected.getName() != null) { + //the unnamed ACLs come back as regular permission bits, not as ACL entries. + boolean found = false; + for (AclEntry actual : actualList) { + if (actual.equals(expected)) { + found = true; + } + } + if (!found) { + Assert.fail("Following ACL entry does not have a match: " + expected); + } + } + } + } + + private void setAcl(String locn, List<AclEntry> aclSpec) throws Exception { + fs.setAcl(new Path(locn), aclSpec); + } + + private List<AclEntry> getAcl(String locn) throws Exception { + return fs.getAclStatus(new Path(locn)).getEntries(); + } +}
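[Editor's note, not part of the patch: the HDFS ACL round-trip that TestExtendedAcls relies on, sketched with the Hadoop 2.4+ FileSystem API. fs and path are assumed; aclEntry() is the test helper defined above. setAcl() fully replaces the existing ACL and requires the unnamed user/group/other entries alongside any named ones.]

List<AclEntry> spec = Lists.newArrayList(
    aclEntry(ACCESS, USER, FsAction.ALL),               // unnamed base entries are mandatory
    aclEntry(ACCESS, GROUP, FsAction.ALL),
    aclEntry(ACCESS, OTHER, FsAction.ALL),
    aclEntry(ACCESS, USER, "bar", FsAction.READ_WRITE)); // named (extended) entry
fs.setAcl(path, spec);
// getAclStatus() reports only the named entries; the unnamed user/group/other
// entries are folded into the ordinary permission bits. This is why
// verifyAcls() above matches only the entries that carry a name.
List<AclEntry> entries = fs.getAclStatus(path).getEntries();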
diff --git itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestFolderPermissions.java itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestFolderPermissions.java index 4f566d2..5ba9e56 100644 --- itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestFolderPermissions.java +++ itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestFolderPermissions.java @@ -15,345 +15,37 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hive.ql.security; -import java.net.URI; -import java.util.ArrayList; -import java.util.List; - import junit.framework.Assert; -import org.apache.hadoop.fs.FileStatus; -import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.fs.PathFilter; import org.apache.hadoop.fs.permission.FsPermission; -import org.apache.hadoop.hive.cli.CliSessionState; import org.apache.hadoop.hive.conf.HiveConf; -import org.apache.hadoop.hive.metastore.MetaStoreUtils; -import org.apache.hadoop.hive.ql.Driver; -import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse; -import org.apache.hadoop.hive.ql.session.SessionState; -import org.apache.hadoop.hive.shims.HadoopShims; -import org.apache.hadoop.hive.shims.ShimLoader; import org.junit.BeforeClass; import org.junit.Test; -/** - * This test the flag 'hive.warehouse.subdir.inherit.perms'. - */ -public class TestFolderPermissions { - protected static HiveConf conf; - protected static Driver driver; - protected static String dataFileDir; - protected static Path dataFilePath; - protected static String testDir; - protected static FileSystem fs; - - public static final PathFilter hiddenFileFilter = new PathFilter(){ - public boolean accept(Path p){ - String name = p.getName(); - return !name.startsWith("_") && !name.startsWith("."); - } - }; - - +public class TestFolderPermissions extends FolderPermissionBase { @BeforeClass - public static void setUp() throws Exception { - testDir = System.getProperty("test.warehouse.dir"); - + public static void setup() throws Exception { conf = new HiveConf(TestFolderPermissions.class); - fs = FileSystem.get(new URI(testDir), conf); - dataFileDir = conf.get("test.data.files").replace('\\', '/') - .replace("c:", ""); - dataFilePath = new Path(dataFileDir, "kv1.txt"); - - int port = MetaStoreUtils.findFreePort(); - conf.setBoolVar(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY, false); - conf.setBoolVar(HiveConf.ConfVars.HIVE_WAREHOUSE_SUBDIR_INHERIT_PERMS, true); - conf.setVar(HiveConf.ConfVars.DYNAMICPARTITIONINGMODE, "nonstrict"); - - MetaStoreUtils.startMetaStore(port, ShimLoader.getHadoopThriftAuthBridge()); - - SessionState.start(new CliSessionState(conf)); - driver = new Driver(conf); - - setupDataTable(); - } - - - private static void setupDataTable() throws Exception { - CommandProcessorResponse ret = driver.run("DROP TABLE IF EXISTS mysrc"); - Assert.assertEquals(0,ret.getResponseCode()); - - ret = driver.run("CREATE TABLE mysrc (key STRING, value STRING) PARTITIONED BY (part1 string, part2 string) STORED AS TEXTFILE"); - Assert.assertEquals(0,ret.getResponseCode()); - - ret = driver.run("LOAD DATA LOCAL INPATH '" + dataFilePath + "' INTO TABLE mysrc PARTITION (part1='1',part2='1')"); - Assert.assertEquals(0,ret.getResponseCode()); - - ret = driver.run("LOAD DATA LOCAL INPATH '" + dataFilePath + "' INTO TABLE mysrc PARTITION (part1='2',part2='2')"); - Assert.assertEquals(0,ret.getResponseCode()); - } - - @Test - public void testCreateTablePerms() throws Exception { - String testDb = "mydb"; - String tableName = "createtable"; - CommandProcessorResponse ret = driver.run("CREATE DATABASE " + testDb); - Assert.assertEquals(0,ret.getResponseCode()); - - assertExistence(testDir + "/" + testDb + ".db"); - setPermissions(testDir + "/" + testDb + ".db", FsPermission.createImmutable((short) 0777)); - - ret = driver.run("USE " + testDb); - Assert.assertEquals(0,ret.getResponseCode()); - - ret = driver.run("CREATE TABLE " + tableName + " (key string, value string)"); - Assert.assertEquals(0,ret.getResponseCode()); - - ret = driver.run("insert into table " + tableName + " select key,value from default.mysrc"); - - assertExistence(testDir + "/" + testDb + ".db/" + tableName); - Assert.assertEquals("rwxrwxrwx", getPermissions(testDir + "/" + testDb + ".db/" + tableName).toString()); - - ret = driver.run("USE default"); - Assert.assertEquals(0,ret.getResponseCode()); - } - - - @Test - public void testStaticPartitionPerms() throws Exception { - String tableName = "staticpart"; - CommandProcessorResponse ret = driver.run("CREATE TABLE " + tableName + " (key string, value string) partitioned by (part1 string, part2 string)"); - Assert.assertEquals(0,ret.getResponseCode()); - - assertExistence(testDir + "/" + tableName); - setPermissions(testDir + "/" + tableName, FsPermission.createImmutable((short) 0777)); - - - ret = driver.run("insert into table " +
tableName + " partition(part1='1', part2='1') select key,value from mysrc where part1='1' and part2='1'"); - Assert.assertEquals(0,ret.getResponseCode()); - - Assert.assertEquals("rwxrwxrwx", getPermissions(testDir + "/" + tableName + "/part1=1").toString()); - Assert.assertEquals("rwxrwxrwx", getPermissions(testDir + "/" + tableName + "/part1=1/part2=1").toString()); - - Assert.assertTrue(listChildrenPerms(testDir + "/" + tableName + "/part1=1/part2=1").size() > 0); - for (FsPermission perm : listChildrenPerms(testDir + "/" + tableName + "/part1=1/part2=1")) { - Assert.assertEquals("rwxrwxrwx", perm.toString()); - } + baseSetup(); } - @Test - public void testAlterPartitionPerms() throws Exception { - String tableName = "alterpart"; - CommandProcessorResponse ret = driver.run("CREATE TABLE " + tableName + " (key string, value string) partitioned by (part1 int, part2 int, part3 int)"); - Assert.assertEquals(0,ret.getResponseCode()); - - assertExistence(testDir + "/" + tableName); - setPermissions(testDir + "/" + tableName, FsPermission.createImmutable((short) 0777)); - - ret = driver.run("insert into table " + tableName + " partition(part1='1',part2='1',part3='1') select key,value from mysrc"); - Assert.assertEquals(0,ret.getResponseCode()); - - //alter partition - ret = driver.run("alter table " + tableName + " partition (part1='1',part2='1',part3='1') rename to partition (part1='2',part2='2',part3='2')"); - Assert.assertEquals(0,ret.getResponseCode()); - - Assert.assertEquals("rwxrwxrwx", getPermissions(testDir + "/" + tableName + "/part1=2").toString()); - Assert.assertEquals("rwxrwxrwx", getPermissions(testDir + "/" + tableName + "/part1=2/part2=2").toString()); - Assert.assertEquals("rwxrwxrwx", getPermissions(testDir + "/" + tableName + "/part1=2/part2=2/part3=2").toString()); - - Assert.assertTrue(listChildrenPerms(testDir + "/" + tableName + "/part1=2/part2=2/part3=2").size() > 0); - for (FsPermission perm : listChildrenPerms(testDir + "/" + tableName + "/part1=2/part2=2/part3=2")) { - Assert.assertEquals("rwxrwxrwx", perm.toString()); - } - } - - - @Test - public void testDynamicPartitions() throws Exception { - String tableName = "dynamicpart"; - - CommandProcessorResponse ret = driver.run("CREATE TABLE " + tableName + " (key string, value string) partitioned by (part1 string, part2 string)"); - Assert.assertEquals(0,ret.getResponseCode()); - - assertExistence(testDir + "/" + tableName); - setPermissions(testDir + "/" + tableName, FsPermission.createImmutable((short) 0777)); - - ret = driver.run("insert into table " + tableName + " partition (part1,part2) select key,value,part1,part2 from mysrc"); - Assert.assertEquals(0,ret.getResponseCode()); - - Assert.assertEquals("rwxrwxrwx", getPermissions(testDir + "/" + tableName + "/part1=1").toString()); - Assert.assertEquals("rwxrwxrwx", getPermissions(testDir + "/" + tableName + "/part1=1/part2=1").toString()); - - Assert.assertEquals("rwxrwxrwx", getPermissions(testDir + "/" + tableName + "/part1=2").toString()); - Assert.assertEquals("rwxrwxrwx", getPermissions(testDir + "/" + tableName + "/part1=2/part2=2").toString()); - - Assert.assertTrue(listChildrenPerms(testDir + "/" + tableName + "/part1=1/part2=1").size() > 0); - for (FsPermission perm : listChildrenPerms(testDir + "/" + tableName + "/part1=1/part2=1")) { - Assert.assertEquals("rwxrwxrwx", perm.toString()); - } - - Assert.assertTrue(listChildrenPerms(testDir + "/" + tableName + "/part1=2/part2=2").size() > 0); - for (FsPermission perm : listChildrenPerms(testDir + "/" + 
tableName + "/part1=2/part2=2")) { - Assert.assertEquals("rwxrwxrwx", perm.toString()); - } - } - - @Test - public void testExternalTable() throws Exception { - String tableName = "externaltable"; - - String myLocation = testDir + "/myfolder"; - FileSystem fs = FileSystem.get(new URI(myLocation), conf); - fs.mkdirs(new Path(myLocation), FsPermission.createImmutable((short) 0777)); - - CommandProcessorResponse ret = driver.run("CREATE TABLE " + tableName + " (key string, value string) LOCATION '" + myLocation + "'"); - Assert.assertEquals(0,ret.getResponseCode()); - - ret = driver.run("insert into table " + tableName + " select key,value from mysrc"); - Assert.assertEquals(0,ret.getResponseCode()); - - Assert.assertTrue(listChildrenPerms(myLocation).size() > 0); - for (FsPermission perm : listChildrenPerms(myLocation)) { - Assert.assertEquals("rwxrwxrwx", perm.toString()); - } - } - - @Test - public void testInsertOverwrite() throws Exception { - //case 1 is non-partitioned table. - String tableName = "insertoverwrite"; - - CommandProcessorResponse ret = driver.run("CREATE TABLE " + tableName + " (key string, value string)"); - Assert.assertEquals(0,ret.getResponseCode()); - - String tableLoc = testDir + "/" + tableName; - assertExistence(testDir + "/" + tableName); - setPermissions(testDir + "/" + tableName, FsPermission.createImmutable((short) 0777)); - - ret = driver.run("insert overwrite table " + tableName + " select key,value from mysrc"); - Assert.assertEquals(0,ret.getResponseCode()); - - Assert.assertTrue(listChildrenPerms(tableLoc).size() > 0); - for (FsPermission perm : listChildrenPerms(tableLoc)) { - Assert.assertEquals("rwxrwxrwx", perm.toString()); - } - - //case 2 is partitioned table. - tableName = "insertoverwritepartition"; - - ret = driver.run("CREATE TABLE " + tableName + " (key string, value string) partitioned by (part1 int, part2 int)"); - Assert.assertEquals(0,ret.getResponseCode()); - - ret = driver.run("insert overwrite table " + tableName + " partition(part1='1',part2='1') select key,value from mysrc"); - Assert.assertEquals(0,ret.getResponseCode()); - - String partLoc = testDir + "/" + tableName + "/part1=1/part2=1"; - assertExistence(partLoc); - setPermissions(partLoc, FsPermission.createImmutable((short) 0777)); - - ret = driver.run("insert overwrite table " + tableName + " partition(part1='1',part2='1') select key,value from mysrc"); - Assert.assertEquals(0,ret.getResponseCode()); - - Assert.assertTrue(listChildrenPerms(tableLoc).size() > 0); - for (FsPermission perm : listChildrenPerms(tableLoc)) { - Assert.assertEquals("rwxrwxrwx", perm.toString()); - } - } - - @Test - public void testEximPermissionInheritance() throws Exception { - - //export the table to external file. - String myLocation = testDir + "/exim"; - FileSystem fs = FileSystem.get(new URI(myLocation), conf); - fs.mkdirs(new Path(myLocation), FsPermission.createImmutable((short) 0777)); - - myLocation = myLocation + "/temp"; - - CommandProcessorResponse ret = driver.run("export table mysrc to '" + myLocation + "'"); - Assert.assertEquals(0,ret.getResponseCode()); - - //check if exported data has inherited the permissions. 
- assertExistence(myLocation); - Assert.assertEquals(getPermissions(myLocation).toString(), "rwxrwxrwx"); - - assertExistence(myLocation + "/part1=1/part2=1"); - Assert.assertEquals(getPermissions(myLocation + "/part1=1/part2=1").toString(), "rwxrwxrwx"); - Assert.assertTrue(listChildrenPerms(myLocation + "/part1=1/part2=1").size() > 0); - for (FsPermission perm : listChildrenPerms(myLocation + "/part1=1/part2=1")) { - Assert.assertEquals("rwxrwxrwx", perm.toString()); - } - - assertExistence(myLocation + "/part1=2/part2=2"); - Assert.assertEquals(getPermissions(myLocation + "/part1=2/part2=2").toString(), "rwxrwxrwx"); - Assert.assertTrue(listChildrenPerms(myLocation + "/part1=2/part2=2").size() > 0); - for (FsPermission perm : listChildrenPerms(myLocation + "/part1=2/part2=2")) { - Assert.assertEquals("rwxrwxrwx", perm.toString()); - } - - //import the table back into another database - String testDb = "eximdb"; - ret = driver.run("CREATE DATABASE " + testDb); - Assert.assertEquals(0,ret.getResponseCode()); - - //use another permission for this import location, to verify that it is really set. - assertExistence(testDir + "/" + testDb + ".db"); - setPermissions(testDir + "/" + testDb + ".db", FsPermission.createImmutable((short) 0766)); - - ret = driver.run("USE " + testDb); - Assert.assertEquals(0,ret.getResponseCode()); - - ret = driver.run("import from '" + myLocation + "'"); - Assert.assertEquals(0,ret.getResponseCode()); - - //check permissions of imported, from the exported table - assertExistence(testDir + "/" + testDb + ".db/mysrc"); - Assert.assertEquals("rwxrw-rw-", getPermissions(testDir + "/" + testDb + ".db/mysrc").toString()); - - myLocation = testDir + "/" + testDb + ".db/mysrc"; - assertExistence(myLocation); - Assert.assertEquals(getPermissions(myLocation).toString(), "rwxrw-rw-"); - - assertExistence(myLocation + "/part1=1/part2=1"); - Assert.assertEquals(getPermissions(myLocation + "/part1=1/part2=1").toString(), "rwxrw-rw-"); - Assert.assertTrue(listChildrenPerms(myLocation + "/part1=1/part2=1").size() > 0); - for (FsPermission perm : listChildrenPerms(myLocation + "/part1=1/part2=1")) { - Assert.assertEquals("rwxrw-rw-", perm.toString()); - } - - assertExistence(myLocation + "/part1=2/part2=2"); - Assert.assertEquals(getPermissions(myLocation + "/part1=2/part2=2").toString(), "rwxrw-rw-"); - Assert.assertTrue(listChildrenPerms(myLocation + "/part1=2/part2=2").size() > 0); - for (FsPermission perm : listChildrenPerms(myLocation + "/part1=2/part2=2")) { - Assert.assertEquals("rwxrw-rw-", perm.toString()); - } - } - - - private void setPermissions(String locn, FsPermission permissions) throws Exception { - fs.setPermission(new Path(locn), permissions); - } - - private FsPermission getPermissions(String locn) throws Exception { - return fs.getFileStatus(new Path(locn)).getPermission(); - } + public FsPermission[] expected = new FsPermission[] { + FsPermission.createImmutable((short) 0777), + FsPermission.createImmutable((short) 0766) + }; - private void assertExistence(String locn) throws Exception { - Assert.assertTrue(fs.exists(new Path(locn))); + @Override + public void setPermission(String locn, int permIndex) throws Exception { + fs.setPermission(new Path(locn), expected[permIndex]); } - private List<FsPermission> listChildrenPerms(String locn) throws Exception { - HadoopShims hadoopShims = ShimLoader.getHadoopShims(); - List<FsPermission> result = new ArrayList<FsPermission>(); - List<FileStatus> fileStatuses = hadoopShims.listLocatedStatus(fs, new Path(locn), hiddenFileFilter); - for (FileStatus status : fileStatuses)
{ - result.add(status.getPermission()); - } - return result; + @Override + public void verifyPermission(String locn, int permIndex) throws Exception { + FsPermission actual = fs.getFileStatus(new Path(locn)).getPermission(); + Assert.assertEquals(expected[permIndex], actual); } } diff --git ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java index 3417474..a988b44 100644 --- ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java +++ ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java @@ -45,10 +45,8 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.FsShell; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.PathFilter; -import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hive.common.FileUtils; import org.apache.hadoop.hive.common.HiveStatsUtils; import org.apache.hadoop.hive.common.ObjectPair; @@ -105,6 +103,8 @@ import org.apache.hadoop.hive.serde2.Deserializer; import org.apache.hadoop.hive.serde2.SerDeException; import org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe; +import org.apache.hadoop.hive.shims.HadoopShims; +import org.apache.hadoop.hive.shims.HadoopShims.HdfsFileStatus; import org.apache.hadoop.hive.shims.ShimLoader; import org.apache.hadoop.mapred.InputFormat; import org.apache.hadoop.util.StringUtils; @@ -2244,33 +2244,29 @@ private static boolean destExists(List> result, Path proposed) { public static boolean renameFile(HiveConf conf, Path srcf, Path destf, FileSystem fs, boolean replace, boolean isSrcLocal) throws HiveException { boolean success = false; + + //needed for perm inheritance. boolean inheritPerms = HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_WAREHOUSE_SUBDIR_INHERIT_PERMS); - String group = null; - String permission = null; + HadoopShims shims = ShimLoader.getHadoopShims(); + HadoopShims.HdfsFileStatus destStatus = null; try { if (inheritPerms || replace) { try{ - FileStatus deststatus = fs.getFileStatus(destf); - if (inheritPerms) { - group = deststatus.getGroup(); - permission= Integer.toString(deststatus.getPermission().toShort(), 8); - } + destStatus = shims.getFullFileStatus(conf, fs, destf); //if destf is an existing directory: //if replace is true, delete followed by rename(mv) is equivalent to replace //if replace is false, rename (mv) actually move the src under dest dir //if destf is an existing file, rename is actually a replace, and do not need // to delete the file first - if (replace && deststatus.isDir()) { + if (replace && destStatus.getFileStatus().isDir()) { fs.delete(destf, true); } } catch (FileNotFoundException ignore) { //if dest dir does not exist, any re if (inheritPerms) { - FileStatus deststatus = fs.getFileStatus(destf.getParent()); - group = deststatus.getGroup(); - permission= Integer.toString(deststatus.getPermission().toShort(), 8); + destStatus = shims.getFullFileStatus(conf, fs, destf.getParent()); } } } @@ -2289,14 +2285,10 @@ public static boolean renameFile(HiveConf conf, Path srcf, Path destf, } if (success && inheritPerms) { - //use FsShell to change group and permissions recursively try { - FsShell fshell = new FsShell(); - fshell.setConf(conf); - fshell.run(new String[]{"-chgrp", "-R", group, destf.toString()}); - fshell.run(new String[]{"-chmod", "-R", permission, destf.toString()}); - } catch (Exception e) { - throw new HiveException("Unable to set permissions of " + destf, e); + 
ShimLoader.getHadoopShims().setFullFileStatus(conf, destStatus, fs, destf); + } catch (IOException e) { + LOG.warn("Error setting permission of file " + destf + ": "+ StringUtils.stringifyException(e)); } } return success; diff --git shims/0.20/src/main/java/org/apache/hadoop/hive/shims/Hadoop20Shims.java shims/0.20/src/main/java/org/apache/hadoop/hive/shims/Hadoop20Shims.java index 5c19ee5..db15a4a 100644 --- shims/0.20/src/main/java/org/apache/hadoop/hive/shims/Hadoop20Shims.java +++ shims/0.20/src/main/java/org/apache/hadoop/hive/shims/Hadoop20Shims.java @@ -40,12 +40,14 @@ import javax.security.auth.Subject; import javax.security.auth.login.LoginException; +import org.apache.commons.lang.ArrayUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.BlockLocation; import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.FsShell; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.PathFilter; import org.apache.hadoop.fs.ProxyFileSystem; @@ -78,9 +80,6 @@ import org.apache.hadoop.util.ToolRunner; import org.apache.hadoop.util.VersionInfo; -/** - * Implemention of shims against Hadoop 0.20.0. - */ public class Hadoop20Shims implements HadoopShims { /** @@ -636,6 +635,51 @@ public void hflush(FSDataOutputStream stream) throws IOException { } @Override + public HdfsFileStatus getFullFileStatus(Configuration conf, FileSystem fs, Path file) + throws IOException { + return new Hadoop20FileStatus(fs.getFileStatus(file)); + } + + @Override + public void setFullFileStatus(Configuration conf, HdfsFileStatus sourceStatus, + FileSystem fs, Path target) throws IOException { + String group = sourceStatus.getFileStatus().getGroup(); + String permission = Integer.toString(sourceStatus.getFileStatus().getPermission().toShort(), 8); + //use FsShell to change group and permissions recursively + try { + FsShell fshell = new FsShell(); + fshell.setConf(conf); + run(fshell, new String[]{"-chgrp", "-R", group, target.toString()}); + run(fshell, new String[]{"-chmod", "-R", permission, target.toString()}); + } catch (Exception e) { + throw new IOException("Unable to set permissions of " + target, e); + } + try { + if (LOG.isDebugEnabled()) { //some trace logging + getFullFileStatus(conf, fs, target).debugLog(); + } + } catch (Exception e) { + //ignore. + } + } + + public class Hadoop20FileStatus implements HdfsFileStatus { + private FileStatus fileStatus; + public Hadoop20FileStatus(FileStatus fileStatus) { + this.fileStatus = fileStatus; + } + @Override + public FileStatus getFileStatus() { + return fileStatus; + } + public void debugLog() { + if (fileStatus != null) { + LOG.debug(fileStatus.toString()); + } + } + } + + @Override public void authorizeProxyAccess(String proxyUser, UserGroupInformation realUserUgi, String ipAddress, Configuration conf) throws IOException { // This hadoop version doesn't have proxy verification @@ -808,4 +852,9 @@ public FileSystem getNonCachedFileSystem(URI uri, Configuration conf) throws IOE conf.setBoolean("fs." 
+ uri.getScheme() + ".impl.disable.cache", origDisableHDFSCache); return fs; } -} \ No newline at end of file + + protected void run(FsShell shell, String[] command) throws Exception { + LOG.debug(ArrayUtils.toString(command)); + shell.run(command); + } +} diff --git shims/0.20S/src/main/java/org/apache/hadoop/hive/shims/Hadoop20SShims.java shims/0.20S/src/main/java/org/apache/hadoop/hive/shims/Hadoop20SShims.java index 4a0e72d..cf1abad 100644 --- shims/0.20S/src/main/java/org/apache/hadoop/hive/shims/Hadoop20SShims.java +++ shims/0.20S/src/main/java/org/apache/hadoop/hive/shims/Hadoop20SShims.java @@ -35,6 +35,7 @@ import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.FsShell; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.PathFilter; import org.apache.hadoop.fs.ProxyFileSystem; @@ -402,6 +403,52 @@ public void hflush(FSDataOutputStream stream) throws IOException { } @Override + public HdfsFileStatus getFullFileStatus(Configuration conf, FileSystem fs, Path file) + throws IOException { + return new Hadoop20SFileStatus(fs.getFileStatus(file)); + } + + @Override + public void setFullFileStatus(Configuration conf, HdfsFileStatus sourceStatus, + FileSystem fs, Path target) throws IOException { + String group = sourceStatus.getFileStatus().getGroup(); + String permission = Integer.toString(sourceStatus.getFileStatus().getPermission().toShort(), 8); + //use FsShell to change group and permissions recursively + try { + FsShell fshell = new FsShell(); + fshell.setConf(conf); + run(fshell, new String[]{"-chgrp", "-R", group, target.toString()}); + run(fshell, new String[]{"-chmod", "-R", permission, target.toString()}); + } catch (Exception e) { + throw new IOException("Unable to set permissions of " + target, e); + } + try { + if (LOG.isDebugEnabled()) { //some trace logging + getFullFileStatus(conf, fs, target).debugLog(); + } + } catch (Exception e) { + //ignore. 
+ } + } + + public class Hadoop20SFileStatus implements HdfsFileStatus { + private FileStatus fileStatus; + public Hadoop20SFileStatus(FileStatus fileStatus) { + this.fileStatus = fileStatus; + } + @Override + public FileStatus getFileStatus() { + return fileStatus; + } + @Override + public void debugLog() { + if (fileStatus != null) { + LOG.debug(fileStatus.toString()); + } + } + } + + @Override public FileSystem createProxyFileSystem(FileSystem fs, URI uri) { return new ProxyFileSystem(fs, uri); } diff --git shims/0.23/src/main/java/org/apache/hadoop/hive/shims/Hadoop23Shims.java shims/0.23/src/main/java/org/apache/hadoop/hive/shims/Hadoop23Shims.java index d6336e2..362c51b 100644 --- shims/0.23/src/main/java/org/apache/hadoop/hive/shims/Hadoop23Shims.java +++ shims/0.23/src/main/java/org/apache/hadoop/hive/shims/Hadoop23Shims.java @@ -28,6 +28,7 @@ import java.util.List; import java.util.Map; +import org.apache.commons.lang.ArrayUtils; import org.apache.commons.lang.StringUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.BlockLocation; @@ -35,12 +36,19 @@ import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.FsShell; import org.apache.hadoop.fs.LocatedFileStatus; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.PathFilter; import org.apache.hadoop.fs.ProxyFileSystem; import org.apache.hadoop.fs.RemoteIterator; import org.apache.hadoop.fs.Trash; +import org.apache.hadoop.fs.permission.AclEntry; +import org.apache.hadoop.fs.permission.AclEntryScope; +import org.apache.hadoop.fs.permission.AclEntryType; +import org.apache.hadoop.fs.permission.AclStatus; +import org.apache.hadoop.fs.permission.FsAction; +import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.io.LongWritable; import org.apache.hadoop.mapred.ClusterStatus; @@ -64,6 +72,11 @@ import org.apache.hadoop.util.Progressable; import org.apache.tez.test.MiniTezCluster; +import com.google.common.base.Joiner; +import com.google.common.base.Objects; +import com.google.common.base.Predicate; +import com.google.common.collect.Iterables; + /** * Implementation of shims against Hadoop 0.23.0.
*/ @@ -167,6 +180,10 @@ public String getJobLauncherHttpAddress(Configuration conf) { return conf.get("yarn.resourcemanager.webapp.address"); } + protected boolean isExtendedAclEnabled(Configuration conf) { + return Objects.equal(conf.get("dfs.namenode.acls.enabled"), "true"); + } + @Override public long getDefaultBlockSize(FileSystem fs, Path path) { return fs.getDefaultBlockSize(path); @@ -490,6 +507,115 @@ public void hflush(FSDataOutputStream stream) throws IOException { stream.hflush(); } + @Override + public HdfsFileStatus getFullFileStatus(Configuration conf, FileSystem fs, + Path file) throws IOException { + FileStatus fileStatus = fs.getFileStatus(file); + AclStatus aclStatus = null; + if (isExtendedAclEnabled(conf)) { + aclStatus = fs.getAclStatus(file); + } + return new Hadoop23FileStatus(fileStatus, aclStatus); + } + + @Override + public void setFullFileStatus(Configuration conf, HdfsFileStatus sourceStatus, + FileSystem fs, Path target) throws IOException { + String group = sourceStatus.getFileStatus().getGroup(); + //use FsShell to change group, permissions, and extended ACLs recursively + try { + FsShell fsShell = new FsShell(); + fsShell.setConf(conf); + run(fsShell, new String[]{"-chgrp", "-R", group, target.toString()}); + + if (isExtendedAclEnabled(conf)) { + AclStatus aclStatus = ((Hadoop23FileStatus) sourceStatus).getAclStatus(); + List<AclEntry> aclEntries = aclStatus.getEntries(); + removeBaseAclEntries(aclEntries); + + //the ACL APIs also expect the traditional user/group/other permissions in the form of ACL entries + FsPermission sourcePerm = sourceStatus.getFileStatus().getPermission(); + aclEntries.add(newAclEntry(AclEntryScope.ACCESS, AclEntryType.USER, sourcePerm.getUserAction())); + aclEntries.add(newAclEntry(AclEntryScope.ACCESS, AclEntryType.GROUP, sourcePerm.getGroupAction())); + aclEntries.add(newAclEntry(AclEntryScope.ACCESS, AclEntryType.OTHER, sourcePerm.getOtherAction())); + + //construct the -setfacl command + String aclEntry = Joiner.on(",").join(aclEntries); + run(fsShell, new String[]{"-setfacl", "-R", "--set", aclEntry, target.toString()}); + } else { + String permission = Integer.toString(sourceStatus.getFileStatus().getPermission().toShort(), 8); + run(fsShell, new String[]{"-chmod", "-R", permission, target.toString()}); + } + } catch (Exception e) { + throw new IOException("Unable to set permissions of " + target, e); + } + try { + if (LOG.isDebugEnabled()) { //some trace logging + getFullFileStatus(conf, fs, target).debugLog(); + } + } catch (Exception e) { + //ignore. + } + } + + public class Hadoop23FileStatus implements HdfsFileStatus { + private FileStatus fileStatus; + private AclStatus aclStatus; + public Hadoop23FileStatus(FileStatus fileStatus, AclStatus aclStatus) { + this.fileStatus = fileStatus; + this.aclStatus = aclStatus; + } + @Override + public FileStatus getFileStatus() { + return fileStatus; + } + public AclStatus getAclStatus() { + return aclStatus; + } + @Override + public void debugLog() { + if (fileStatus != null) { + LOG.debug(fileStatus.toString()); + } + if (aclStatus != null) { + LOG.debug(aclStatus.toString()); + } + } + }
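// [Editor's note, not part of the patch: an illustration, with assumed example
// values, of the -setfacl invocation that setFullFileStatus() above builds.
// AclEntry.toString() yields the standard ACL spec form, so the Joiner output
// can be passed straight to FsShell. Suppose the source directory has
// permission rwxr-x--- and one named entry user:bob:r-x; after
// removeBaseAclEntries() plus the three entries rebuilt from the permission
// bits, the joined spec and the effective shell command are roughly:
//   user::rwx,group::r-x,other::---,user:bob:r-x
//   hdfs dfs -setfacl -R --set user::rwx,group::r-x,other::---,user:bob:r-x <target>]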
+ + /** + * Create a new AclEntry with scope, type and permission (no name). + * + * @param scope + * AclEntryScope scope of the ACL entry + * @param type + * AclEntryType ACL entry type + * @param permission + * FsAction set of permissions in the ACL entry + * @return AclEntry new AclEntry + */ + private AclEntry newAclEntry(AclEntryScope scope, AclEntryType type, + FsAction permission) { + return new AclEntry.Builder().setScope(scope).setType(type) + .setPermission(permission).build(); + } + + /** + * Removes the base permission ACLs (unnamed ACL entries) from the list of ACL entries. + * @param entries ACL entries to filter. + */ + private void removeBaseAclEntries(List<AclEntry> entries) { + Iterables.removeIf(entries, new Predicate<AclEntry>() { + @Override + public boolean apply(AclEntry input) { + return input.getName() == null; + } + }); + } + class ProxyFileSystem23 extends ProxyFileSystem { public ProxyFileSystem23(FileSystem fs) { super(fs); diff --git shims/common-secure/src/main/java/org/apache/hadoop/hive/shims/HadoopShimsSecure.java shims/common-secure/src/main/java/org/apache/hadoop/hive/shims/HadoopShimsSecure.java index 39dbc3b..2cf14bd 100644 --- shims/common-secure/src/main/java/org/apache/hadoop/hive/shims/HadoopShimsSecure.java +++ shims/common-secure/src/main/java/org/apache/hadoop/hive/shims/HadoopShimsSecure.java @@ -31,10 +31,12 @@ import java.util.List; import java.util.Set; +import org.apache.commons.lang.ArrayUtils; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.FsShell; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.PathFilter; import org.apache.hadoop.hive.io.HiveIOExceptionHandlerUtil; @@ -615,4 +617,10 @@ abstract public boolean moveToAppropriateTrash(FileSystem fs, Path path, Configu @Override abstract public FileSystem getNonCachedFileSystem(URI uri, Configuration conf) throws IOException; + + protected void run(FsShell shell, String[] command) throws Exception { + LOG.debug(ArrayUtils.toString(command)); + int retval = shell.run(command); + LOG.debug("Return value is: " + retval); + } } diff --git shims/common/src/main/java/org/apache/hadoop/hive/shims/HadoopShims.java shims/common/src/main/java/org/apache/hadoop/hive/shims/HadoopShims.java index b8fdd85..841443e 100644 --- shims/common/src/main/java/org/apache/hadoop/hive/shims/HadoopShims.java +++ shims/common/src/main/java/org/apache/hadoop/hive/shims/HadoopShims.java @@ -480,6 +480,35 @@ RecordReader getRecordReader(JobConf job, InputSplitShim split, Reporter reporte */ public void hflush(FSDataOutputStream stream) throws IOException; + /** + * For a given file, return its full file status. + * @param conf the configuration + * @param fs the filesystem that holds the file + * @param file the path to query + * @return the full file status, including ACL information where the Hadoop version supports it + * @throws IOException + */ + public HdfsFileStatus getFullFileStatus(Configuration conf, FileSystem fs, Path file) throws IOException; + + /** + * For a given target path, recursively apply a previously captured file status. + * @param conf the configuration + * @param sourceStatus the group, permission, and ACL information to apply + * @param fs the filesystem that holds the target + * @param target the path to apply the status to + * @throws IOException + */ + public void setFullFileStatus(Configuration conf, HdfsFileStatus sourceStatus, + FileSystem fs, Path target) throws IOException; + + /** + * Includes the vanilla FileStatus, and AclStatus if it applies to this version of Hadoop. + */ + public interface HdfsFileStatus { + public FileStatus getFileStatus(); + public void debugLog(); + } + public HCatHadoopShims getHCatShim(); public interface HCatHadoopShims {