diff --git hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/FileOutputCommitterContainer.java hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/FileOutputCommitterContainer.java
index 3f5deaf..76d4f18 100644
--- hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/FileOutputCommitterContainer.java
+++ hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/FileOutputCommitterContainer.java
@@ -371,7 +371,8 @@ private Partition constructPartition(
       for (FieldSchema partKey : table.getPartitionKeys()) {
         if (i++ != 0) {
           fs.mkdirs(partPath); // Attempt to make the path in case it does not exist before we check
-          applyGroupAndPerms(fs, partPath, perms, grpName, false);
+          FileStatus currentStatus = fs.getFileStatus(partPath);
+          applyGroupAndPerms(fs, currentStatus, perms, false);
         }
         partPath = constructPartialPartPath(partPath, partKey.getName().toLowerCase(), partKVs);
       }
@@ -381,7 +382,8 @@ private Partition constructPartition(
     // Need not bother in case of HDFS as permission is taken care of by setting UMask
     fs.mkdirs(partPath); // Attempt to make the path in case it does not exist before we check
     if (!ShimLoader.getHadoopShims().getHCatShim().isFileInHDFS(fs, partPath)) {
-      applyGroupAndPerms(fs, partPath, perms, grpName, true);
+      FileStatus currentStatus = fs.getFileStatus(partPath);
+      applyGroupAndPerms(fs, currentStatus, perms, true);
     }
 
     // Set the location in the StorageDescriptor
@@ -400,26 +402,29 @@ private Partition constructPartition(
     return partition;
   }
 
-  private void applyGroupAndPerms(FileSystem fs, Path dir, FsPermission permission,
-                  String group, boolean recursive)
-    throws IOException {
+  private void applyGroupAndPerms(FileSystem fs, FileStatus currentStatus, FsPermission permission,
+      boolean recursive) throws IOException {
     if(LOG.isDebugEnabled()) {
-      LOG.debug("applyGroupAndPerms : " + dir +
-          " perms: " + permission +
-          " group: " + group + " recursive: " + recursive);
+      LOG.debug("applyGroupAndPerms : " + currentStatus.getPath() +
+          " perms: " + permission + " recursive: " + recursive);
+    }
+    if (!currentStatus.getPermission().equals(permission)) {
+      fs.setPermission(currentStatus.getPath(), permission);
     }
-    fs.setPermission(dir, permission);
     if (recursive) {
-      for (FileStatus fileStatus : fs.listStatus(dir)) {
+      for (FileStatus fileStatus : fs.listStatus(currentStatus.getPath())) {
         if (fileStatus.isDir()) {
-          applyGroupAndPerms(fs, fileStatus.getPath(), permission, group, true);
+          applyGroupAndPerms(fs, fileStatus, permission, true);
         } else {
-          fs.setPermission(fileStatus.getPath(), permission);
+          if (!fileStatus.getPermission().equals(permission)) {
+            fs.setPermission(fileStatus.getPath(), permission);
+          }
         }
       }
     }
   }
+
   private String getFinalDynamicPartitionDestination(Table table, Map<String, String> partKVs,
       OutputJobInfo jobInfo) {
     Path partPath = new Path(table.getTTable().getSd().getLocation());
@@ -950,7 +955,8 @@ private void registerPartitions(JobContext context) throws IOException{
       // Set permissions appropriately for each of the partitions we just created
      // so as to have their permissions mimic the table permissions
       for (Partition p : partitionsAdded){
-        applyGroupAndPerms(fs,new Path(p.getSd().getLocation()),tblStat.getPermission(),tblStat.getGroup(),true);
+        FileStatus currentStatus = fs.getFileStatus(new Path(p.getSd().getLocation()));
+        applyGroupAndPerms(fs, currentStatus, tblStat.getPermission(), true);
       }
     }