Index: conf/hive-default.xml.template
===================================================================
--- conf/hive-default.xml.template (revision 1311353)
+++ conf/hive-default.xml.template (working copy)
@@ -1228,9 +1228,11 @@
- hive.files.umask.value
- 0002
- The dfs.umask value for the hive created folders
+ hive.warehouse.subdir.inherit.perms
+ false
+ Set this to true if the table directories should inherit the
+ permission of the warehouse or database directory instead of being created
+ with the permissions derived from dfs umask
Index: metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java
===================================================================
--- metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java (revision 1311353)
+++ metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java (working copy)
@@ -132,6 +132,17 @@
Database db = new Database();
db.setName(dbName);
client.createDatabase(db);
+ db = client.getDatabase(dbName);
+ Path dbPath = new Path(db.getLocationUri());
+ FileSystem fs = FileSystem.get(dbPath.toUri(), hiveConf);
+ boolean inheritPerms = hiveConf.getBoolVar(
+ HiveConf.ConfVars.HIVE_WAREHOUSE_SUBDIR_INHERIT_PERMS);
+ FsPermission dbPermission = fs.getFileStatus(dbPath).getPermission();
+ if (inheritPerms) {
+ //Set different perms for the database dir for further tests
+ dbPermission = new FsPermission((short)488);
+ fs.setPermission(dbPath, dbPermission);
+ }
client.dropType(typeName);
Type typ1 = new Type();
@@ -178,6 +189,9 @@
tbl = client.getTable(dbName, tblName);
}
+ assertEquals(dbPermission, fs.getFileStatus(new Path(tbl.getSd().getLocation()))
+ .getPermission());
+
Partition part = makePartitionObject(dbName, tblName, vals, tbl, "/part1");
Partition part2 = makePartitionObject(dbName, tblName, vals2, tbl, "/part2");
Partition part3 = makePartitionObject(dbName, tblName, vals3, tbl, "/part3");
@@ -195,15 +209,21 @@
assertTrue("getPartition() should have thrown NoSuchObjectException", exceptionThrown);
Partition retp = client.add_partition(part);
assertNotNull("Unable to create partition " + part, retp);
+ assertEquals(dbPermission, fs.getFileStatus(new Path(retp.getSd().getLocation()))
+ .getPermission());
Partition retp2 = client.add_partition(part2);
assertNotNull("Unable to create partition " + part2, retp2);
+ assertEquals(dbPermission, fs.getFileStatus(new Path(retp2.getSd().getLocation()))
+ .getPermission());
Partition retp3 = client.add_partition(part3);
assertNotNull("Unable to create partition " + part3, retp3);
+ assertEquals(dbPermission, fs.getFileStatus(new Path(retp3.getSd().getLocation()))
+ .getPermission());
Partition retp4 = client.add_partition(part4);
assertNotNull("Unable to create partition " + part4, retp4);
+ assertEquals(dbPermission, fs.getFileStatus(new Path(retp4.getSd().getLocation()))
+ .getPermission());
-
-
Partition part_get = client.getPartition(dbName, tblName, part.getValues());
if(isThriftClient) {
// since we are using thrift, 'part' will not have the create time and
@@ -284,7 +304,6 @@
assertTrue("Bad partition spec should have thrown an exception", exceptionThrown);
Path partPath = new Path(part.getSd().getLocation());
- FileSystem fs = FileSystem.get(partPath.toUri(), hiveConf);
assertTrue(fs.exists(partPath));
@@ -307,6 +326,8 @@
// tested
retp = client.add_partition(part);
assertNotNull("Unable to create partition " + part, retp);
+ assertEquals(dbPermission, fs.getFileStatus(new Path(retp.getSd().getLocation()))
+ .getPermission());
// test add_partitions
@@ -344,6 +365,7 @@
Path mp5Path = new Path(mpart5.getSd().getLocation());
warehouse.mkdirs(mp5Path);
assertTrue(fs.exists(mp5Path));
+ assertEquals(dbPermission, fs.getFileStatus(mp5Path).getPermission());
// add_partitions(5,4) : err = duplicate keyvals on mpart4
savedException = null;
Index: metastore/src/test/org/apache/hadoop/hive/metastore/TestEmbeddedHiveMetaStore.java
===================================================================
--- metastore/src/test/org/apache/hadoop/hive/metastore/TestEmbeddedHiveMetaStore.java (revision 1311353)
+++ metastore/src/test/org/apache/hadoop/hive/metastore/TestEmbeddedHiveMetaStore.java (working copy)
@@ -18,6 +18,7 @@
package org.apache.hadoop.hive.metastore;
+import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.util.StringUtils;
public class TestEmbeddedHiveMetaStore extends TestHiveMetaStore {
@@ -25,7 +26,9 @@
@Override
protected void setUp() throws Exception {
super.setUp();
-
+ hiveConf.setBoolean(
+ HiveConf.ConfVars.HIVE_WAREHOUSE_SUBDIR_INHERIT_PERMS.varname, true);
+ warehouse = new Warehouse(hiveConf);
try {
client = new HiveMetaStoreClient(hiveConf, null);
} catch (Throwable e) {
Index: metastore/src/java/org/apache/hadoop/hive/metastore/Warehouse.java
===================================================================
--- metastore/src/java/org/apache/hadoop/hive/metastore/Warehouse.java (revision 1311353)
+++ metastore/src/java/org/apache/hadoop/hive/metastore/Warehouse.java (working copy)
@@ -41,7 +41,6 @@
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsAction;
-import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hive.common.FileUtils;
import org.apache.hadoop.hive.common.JavaUtils;
import org.apache.hadoop.hive.conf.HiveConf;
@@ -64,6 +63,7 @@
private MetaStoreFS fsHandler = null;
private boolean storageAuthCheck = false;
+ private boolean inheritPerms = false;
public Warehouse(Configuration conf) throws MetaException {
this.conf = conf;
@@ -75,6 +75,8 @@
fsHandler = getMetaStoreFsHandler(conf);
storageAuthCheck = HiveConf.getBoolVar(conf,
HiveConf.ConfVars.METASTORE_AUTHORIZATION_STORAGE_AUTH_CHECKS);
+ inheritPerms = HiveConf.getBoolVar(conf,
+ HiveConf.ConfVars.HIVE_WAREHOUSE_SUBDIR_INHERIT_PERMS);
}
private MetaStoreFS getMetaStoreFsHandler(Configuration conf)
@@ -172,10 +174,17 @@
try {
fs = getFs(f);
LOG.debug("Creating directory if it doesn't exist: " + f);
- short umaskVal = (short) conf.getInt(HiveConf.ConfVars.HIVE_FILES_UMASK_VALUE.name(), 0002);
- FsPermission fsPermission = new FsPermission(umaskVal);
- FsPermission.setUMask(conf, fsPermission);
- return (fs.mkdirs(f) || fs.getFileStatus(f).isDir());
+ if (fs.exists(f)) {
+ return fs.getFileStatus(f).isDir();
+ }
+ boolean parentExists = fs.exists(f.getParent());
+ boolean success = fs.mkdirs(f);
+ if (this.inheritPerms && success && parentExists) {
+ // Set the permission of parent directory if it exists. Group is
+ // automatically inherited.
+ fs.setPermission(f, fs.getFileStatus(f.getParent()).getPermission());
+ }
+ return success;
} catch (IOException e) {
closeFs(fs);
MetaStoreUtils.logAndThrowMetaException(e);
Index: common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
===================================================================
--- common/src/java/org/apache/hadoop/hive/conf/HiveConf.java (revision 1311353)
+++ common/src/java/org/apache/hadoop/hive/conf/HiveConf.java (working copy)
@@ -570,7 +570,7 @@
// Whether to delete the scratchdir while startup
HIVE_START_CLEANUP_SCRATCHDIR("hive.start.cleanup.scratchdir", false),
HIVE_INSERT_INTO_MULTILEVEL_DIRS("hive.insert.into.multilevel.dirs", false),
- HIVE_FILES_UMASK_VALUE("hive.files.umask.value", 0002),
+ HIVE_WAREHOUSE_SUBDIR_INHERIT_PERMS("hive.warehouse.subdir.inherit.perms", false),
// parameters for using multiple clusters in a hive instance
HIVE_USE_INPUT_PRIMARY_REGION("hive.use.input.primary.region", true),