Index: conf/hive-default.xml.template
===================================================================
--- conf/hive-default.xml.template (revision 1309184)
+++ conf/hive-default.xml.template (working copy)
@@ -1228,12 +1228,6 @@
<property>
-  <name>hive.files.umask.value</name>
-  <value>0002</value>
-  <description>The dfs.umask value for the hive created folders</description>
-</property>
-
-<property>
  <name>hive.use.input.primary.region</name>
  <value>true</value>
  <description>When creating a table from an input table, create the table in the input table's primary region.</description>
</property>

Index: metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java
===================================================================
--- metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java (revision 1309184)
+++ metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java (working copy)
@@ -132,6 +132,11 @@
Database db = new Database();
db.setName(dbName);
client.createDatabase(db);
+ db = client.getDatabase(dbName);
+ Path dbPath = new Path(db.getLocationUri());
+ FileSystem fs = FileSystem.get(dbPath.toUri(), hiveConf);
+ FsPermission dbPermission = new FsPermission((short)488); // decimal 488 == octal 0750 (rwxr-x---)
+ fs.setPermission(dbPath, dbPermission);
client.dropType(typeName);
Type typ1 = new Type();
@@ -178,6 +183,9 @@
tbl = client.getTable(dbName, tblName);
}
+ assertEquals(dbPermission, fs.getFileStatus(new Path(tbl.getSd().getLocation()))
+ .getPermission());
+
Partition part = makePartitionObject(dbName, tblName, vals, tbl, "/part1");
Partition part2 = makePartitionObject(dbName, tblName, vals2, tbl, "/part2");
Partition part3 = makePartitionObject(dbName, tblName, vals3, tbl, "/part3");
@@ -195,15 +203,21 @@
assertTrue("getPartition() should have thrown NoSuchObjectException", exceptionThrown);
Partition retp = client.add_partition(part);
assertNotNull("Unable to create partition " + part, retp);
+ assertEquals(dbPermission, fs.getFileStatus(new Path(retp.getSd().getLocation()))
+ .getPermission());
Partition retp2 = client.add_partition(part2);
assertNotNull("Unable to create partition " + part2, retp2);
+ assertEquals(dbPermission, fs.getFileStatus(new Path(retp2.getSd().getLocation()))
+ .getPermission());
Partition retp3 = client.add_partition(part3);
assertNotNull("Unable to create partition " + part3, retp3);
+ assertEquals(dbPermission, fs.getFileStatus(new Path(retp3.getSd().getLocation()))
+ .getPermission());
Partition retp4 = client.add_partition(part4);
assertNotNull("Unable to create partition " + part4, retp4);
+ assertEquals(dbPermission, fs.getFileStatus(new Path(retp4.getSd().getLocation()))
+ .getPermission());
-
-
Partition part_get = client.getPartition(dbName, tblName, part.getValues());
if(isThriftClient) {
// since we are using thrift, 'part' will not have the create time and
@@ -284,7 +298,6 @@
assertTrue("Bad partition spec should have thrown an exception", exceptionThrown);
Path partPath = new Path(part.getSd().getLocation());
- FileSystem fs = FileSystem.get(partPath.toUri(), hiveConf);
assertTrue(fs.exists(partPath));
@@ -307,6 +320,8 @@
// tested
retp = client.add_partition(part);
assertNotNull("Unable to create partition " + part, retp);
+ assertEquals(dbPermission, fs.getFileStatus(new Path(retp.getSd().getLocation()))
+ .getPermission());
// test add_partitions
@@ -344,6 +359,7 @@
Path mp5Path = new Path(mpart5.getSd().getLocation());
warehouse.mkdirs(mp5Path);
assertTrue(fs.exists(mp5Path));
+ assertEquals(dbPermission, fs.getFileStatus(mp5Path).getPermission());
// add_partitions(5,4) : err = duplicate keyvals on mpart4
savedException = null;
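
A note on the magic number above: (short)488 is the decimal form of octal 0750 (rwxr-x---), so the test sets the database directory to 0750 and then asserts that every table and partition directory created beneath it reports the same permission. Below is a minimal, self-contained sketch of that assertion pattern; the class name PermissionAssertSketch and the /tmp path are hypothetical stand-ins, not part of the patch, and it runs against the local filesystem rather than the metastore warehouse.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;

public class PermissionAssertSketch {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.getLocal(new Configuration());
    Path dbPath = new Path("/tmp/perm_sketch_db"); // hypothetical stand-in for the db location
    fs.mkdirs(dbPath);
    FsPermission dbPermission = new FsPermission((short)488); // decimal 488 == octal 0750
    fs.setPermission(dbPath, dbPermission);
    // The pattern the test repeats for each table and partition directory:
    // read the permission back and compare it to the one set on the db dir.
    FsPermission actual = fs.getFileStatus(dbPath).getPermission();
    if (!dbPermission.equals(actual)) {
      throw new AssertionError("expected " + dbPermission + " but got " + actual);
    }
  }
}
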
Index: metastore/src/java/org/apache/hadoop/hive/metastore/Warehouse.java
===================================================================
--- metastore/src/java/org/apache/hadoop/hive/metastore/Warehouse.java (revision 1309184)
+++ metastore/src/java/org/apache/hadoop/hive/metastore/Warehouse.java (working copy)
@@ -41,10 +41,11 @@
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsAction;
-import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hive.common.FileUtils;
import org.apache.hadoop.hive.common.JavaUtils;
import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.metastore.MetaStoreFS;
+import org.apache.hadoop.hive.metastore.MetaStoreUtils;
import org.apache.hadoop.hive.metastore.api.Database;
import org.apache.hadoop.hive.metastore.api.FieldSchema;
import org.apache.hadoop.hive.metastore.api.MetaException;
@@ -172,10 +173,17 @@
try {
fs = getFs(f);
LOG.debug("Creating directory if it doesn't exist: " + f);
- short umaskVal = (short) conf.getInt(HiveConf.ConfVars.HIVE_FILES_UMASK_VALUE.name(), 0002);
- FsPermission fsPermission = new FsPermission(umaskVal);
- FsPermission.setUMask(conf, fsPermission);
- return (fs.mkdirs(f) || fs.getFileStatus(f).isDir());
+ if (fs.exists(f)) {
+ return fs.getFileStatus(f).isDir();
+ }
+ boolean parentExists = fs.exists(f.getParent());
+ boolean success = fs.mkdirs(f);
+ if (success && parentExists) {
+ // The parent directory already existed, so make the new directory
+ // inherit its permission; the group is inherited automatically by HDFS.
+ fs.setPermission(f, fs.getFileStatus(f.getParent()).getPermission());
+ }
+ return success;
} catch (IOException e) {
closeFs(fs);
MetaStoreUtils.logAndThrowMetaException(e);
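
The replacement logic above inherits permissions only when the immediate parent already existed before the mkdirs() call; any intermediate directories that fs.mkdirs() creates keep the filesystem default. A standalone sketch of the same rule follows, assuming a local FileSystem and hypothetical paths; mkdirsInheriting is an illustrative name for this rendering, not Hive's actual method.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;

public class InheritPermissionSketch {
  // Same rule as the patched Warehouse.mkdirs(): if the parent directory
  // already exists, copy its permission onto the newly created child.
  static boolean mkdirsInheriting(FileSystem fs, Path f) throws Exception {
    if (fs.exists(f)) {
      return fs.getFileStatus(f).isDir();
    }
    boolean parentExists = fs.exists(f.getParent());
    boolean success = fs.mkdirs(f);
    if (success && parentExists) {
      fs.setPermission(f, fs.getFileStatus(f.getParent()).getPermission());
    }
    return success;
  }

  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.getLocal(new Configuration());
    Path parent = new Path("/tmp/warehouse_sketch"); // hypothetical
    fs.mkdirs(parent);
    fs.setPermission(parent, new FsPermission((short)488)); // octal 0750
    mkdirsInheriting(fs, new Path(parent, "tbl"));
    // Expected: 0750, copied from the parent rather than the fs default.
    System.out.println(fs.getFileStatus(new Path(parent, "tbl")).getPermission());
  }
}
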
Index: common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
===================================================================
--- common/src/java/org/apache/hadoop/hive/conf/HiveConf.java (revision 1309184)
+++ common/src/java/org/apache/hadoop/hive/conf/HiveConf.java (working copy)
@@ -569,7 +569,6 @@
// Whether to delete the scratchdir while startup
HIVE_START_CLEANUP_SCRATCHDIR("hive.start.cleanup.scratchdir", false),
HIVE_INSERT_INTO_MULTILEVEL_DIRS("hive.insert.into.multilevel.dirs", false),
- HIVE_FILES_UMASK_VALUE("hive.files.umask.value", 0002),
// parameters for using multiple clusters in a hive instance
HIVE_USE_INPUT_PRIMARY_REGION("hive.use.input.primary.region", true),
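
For contrast, the deleted code configured a single process-wide umask instead of inheriting per directory, roughly as reconstructed below. Note that the deleted line read the key via ConfVars.HIVE_FILES_UMASK_VALUE.name(), which is the enum constant name ("HIVE_FILES_UMASK_VALUE") rather than the hive.files.umask.value varname, so the lookup would always fall back to the 0002 default. OldUmaskSketch is a hypothetical wrapper for illustration; the key string comes from the ConfVars entry removed above.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;

public class OldUmaskSketch {
  // Reconstruction of the removed behavior (not part of the patch): one
  // configured umask applied globally, so a new directory got the filesystem
  // default permission masked by the umask, not its parent's permission.
  static boolean mkdirsWithUmask(FileSystem fs, Path f, Configuration conf) throws Exception {
    short umaskVal = (short) conf.getInt("hive.files.umask.value", 0002); // 0002 is an octal literal
    FsPermission.setUMask(conf, new FsPermission(umaskVal));
    return fs.mkdirs(f) || fs.getFileStatus(f).isDir();
  }
}
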