diff --git a/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/FileOutputCommitterContainer.java b/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/FileOutputCommitterContainer.java
index b85ef0fe71..4f04d5fd18 100644
--- a/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/FileOutputCommitterContainer.java
+++ b/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/FileOutputCommitterContainer.java
@@ -33,6 +33,7 @@
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.RemoteIterator;
 import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.AclEntryScope;
 import org.apache.hadoop.fs.permission.AclEntryType;
@@ -58,6 +59,7 @@
 import org.apache.hadoop.mapreduce.JobContext;
 import org.apache.hadoop.mapreduce.JobStatus.State;
 import org.apache.hadoop.mapreduce.TaskAttemptContext;
+import org.apache.hadoop.security.AccessControlException;
 import org.apache.hive.hcatalog.common.ErrorType;
 import org.apache.hive.hcatalog.common.HCatConstants;
 import org.apache.hive.hcatalog.common.HCatException;
@@ -322,11 +324,10 @@ private String getPartitionRootLocation(String ptnLocn, int numPtnKeys) {

   /**
    * Generate partition metadata object to be used to add to metadata.
-   * @param context The job context.
+   * This method is called for static partitions only.
    * @param jobInfo The OutputJobInfo.
    * @param partLocnRoot The table-equivalent location root of the partition
    *                     (temporary dir if dynamic partition, table dir if static)
-   * @param dynPartPath The path of dynamic partition which is created
    * @param partKVs The keyvalue pairs that form the partition
    * @param outputSchema The output schema for the partition
    * @param params The parameters to store inside the partition
@@ -339,8 +340,8 @@ private String getPartitionRootLocation(String ptnLocn, int numPtnKeys) {
    */
   private Partition constructPartition(
-    JobContext context, OutputJobInfo jobInfo,
-    String partLocnRoot, String dynPartPath, Map<String, String> partKVs,
+    OutputJobInfo jobInfo,
+    String partLocnRoot, Map<String, String> partKVs,
     HCatSchema outputSchema, Map<String, String> params,
     Table table, FileSystem fs,
     String grpName, FsPermission perms, List<AclEntry> acls) throws IOException {
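Reviewer note: after this change, constructPartition() covers only the static-partition case; all dynamic-partition cases go through the new constructPartitions() added below. The resulting dispatch, condensed from the registerPartitions() hunk later in this patch (variable names as renamed there):

    if (!dynamicPartitioningUsed) {
      // Static: exactly one partition, rooted at the table path.
      partitionsToAdd.add(constructPartition(jobInfo, tblPath.toString(),
        jobInfo.getPartitionValues(), jobInfo.getOutputSchema(),
        getStorerParameterMap(storer), table, fs, tblGroup, tblPerms, tblAcls));
    } else {
      // Dynamic: one partition per discovered leaf directory.
      partitionsToAdd = constructPartitions(context, jobInfo, tblPath.toString(),
        partitionsDiscoveredByPath, jobInfo.getOutputSchema(),
        getStorerParameterMap(storer), table, fs, tblGroup, tblPerms, tblAcls);
    }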
@@ -363,12 +364,14 @@ private Partition constructPartition(

     // Sets permissions and group name on partition dirs and files.
-    Path partPath;
-    if (customDynamicLocationUsed) {
-      partPath = new Path(dynPartPath);
-    } else if (!dynamicPartitioningUsed
-      && Boolean.parseBoolean((String)table.getProperty("EXTERNAL"))
-      && jobInfo.getLocation() != null && jobInfo.getLocation().length() > 0) {
+    Path partPath = null;
+
+    if (Boolean.valueOf((String)table.getProperty("EXTERNAL"))
+      && jobInfo.getLocation() != null && jobInfo.getLocation().length() > 0) {
+      // NOTE: This condition is true when initializing HCatStorer with two parameters:
+      // HCatStorer(<...>, <...>).
+      // See TestHCatStorerWrapper.testStoreExternalTableWithExternalDir.
+      //
       // Now, we need to de-scratchify this location - i.e., get rid of any
       // _SCRATCH[\d].?[\d]+ from the location.
       String jobLocation = jobInfo.getLocation();
@@ -376,11 +379,29 @@ private Partition constructPartition(
       partPath = new Path(finalLocn);
     } else {
       partPath = new Path(partLocnRoot);
+
+      boolean checkGroupPerms = true;
+      String newGroup = null;
+      FsPermission newPerms = null;
+
       int i = 0;
       for (FieldSchema partKey : table.getPartitionKeys()) {
         if (i++ != 0) {
-          fs.mkdirs(partPath); // Attempt to make the path in case it does not exist before we check
-          applyGroupAndPerms(fs, partPath, perms, acls, grpName, false);
+          if (! fs.exists(partPath)) {
+            fs.mkdirs(partPath);
+
+            if (checkGroupPerms) {
+              FileStatus fstat = fs.getFileStatus(partPath);
+              newGroup = compareGroups(fs, fstat, grpName);
+              newPerms = compareDirPermissions(fstat, perms);
+              checkGroupPerms = false;
+            }
+
+            if (newGroup != null || newPerms != null || acls != null) {
+              applyGroupAndPerms(fs, partPath, newPerms, acls, newGroup, false);
+              newGroup = null;
+            }
+          }
         }
         partPath = constructPartialPartPath(partPath, partKey.getName().toLowerCase(), partKVs);
       }
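Reviewer note: the loop above probes group and permissions once, on the first directory it actually creates, and reuses the verdict for the remaining levels. The contract of that probe, as a condensed, illustrative restatement of compareGroups() (added at the end of this file; assumes the usual org.apache.hadoop.fs imports):

    // Returns the group to set, or null when no change is needed or none is possible.
    static String probeGroup(FileSystem fs, Path dir, String wantedGroup) throws IOException {
      FileStatus st = fs.getFileStatus(dir);
      if (wantedGroup.equals(st.getGroup())) {
        return null; // already correct; applyGroupAndPerms can be skipped
      }
      try {
        fs.setOwner(dir, null, wantedGroup); // fixes this dir as a side effect
        return wantedGroup;                  // caller reuses this for later levels
      } catch (org.apache.hadoop.security.AccessControlException e) {
        return null; // caller is not in the group; do not retry per level
      }
    }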
@@ -388,25 +409,157 @@ private Partition constructPartition(

     // Apply the group and permissions to the leaf partition and files.
     // Need not bother in case of HDFS as permission is taken care of by setting UMask
-    fs.mkdirs(partPath); // Attempt to make the path in case it does not exist before we check
-    if (!ShimLoader.getHadoopShims().getHCatShim().isFileInHDFS(fs, partPath)) {
-      applyGroupAndPerms(fs, partPath, perms, acls, grpName, true);
+    if (! fs.exists(partPath)) {
+      fs.mkdirs(partPath);
+      // NOTE: applyGroupAndPerms is called by registerPartitions on the leaf node.
     }

-    // Set the location in the StorageDescriptor
-    if (dynamicPartitioningUsed) {
-      String dynamicPartitionDestination = getFinalDynamicPartitionDestination(table, partKVs, jobInfo);
-      if (harProcessor.isEnabled()) {
-        harProcessor.exec(context, partition, partPath);
-        partition.getSd().setLocation(
-          harProcessor.getProcessedLocation(new Path(dynamicPartitionDestination)));
-      } else {
-        partition.getSd().setLocation(dynamicPartitionDestination);
+    partition.getSd().setLocation(partPath.toString());
+    return partition;
+  }
+
+  /**
+   * Generate a list of partition metadata objects to be used to add to metadata.
+   * This method is called only for dynamic partitions, but accounts for default
+   * partition locations and custom partition locations (patterns).
+   * @param context The job context.
+   * @param jobInfo The OutputJobInfo.
+   * @param destRoot The table-equivalent location root of the partition
+   * @param discoveredPartitions a map of the discovered partitions
+   * @param outputSchema The output schema for the partition
+   * @param params The parameters to store inside the partition
+   * @param table The Table metadata object under which this Partition will reside
+   * @param fs FileSystem object to operate on the underlying filesystem
+   * @param grpName Group name that owns the table dir
+   * @param perms FsPermission that's the default permission of the table dir.
+   * @param acls a list of extended ACLs associated with the table.
+   * @return a list of Partition objects associated with the discovered partitions.
+   * @throws java.io.IOException
+   */
+  private List<Partition> constructPartitions(JobContext context, OutputJobInfo jobInfo, String destRoot,
+    Map<String, Map<String, String>> discoveredPartitions, HCatSchema outputSchema, Map<String, String> params,
+    Table table, FileSystem fs, String grpName, FsPermission perms, List<AclEntry> acls) throws IOException {
+
+    ArrayList<Partition> partitions = new ArrayList<Partition>();
+    LinkedHashMap<Path, Boolean> paths = new LinkedHashMap<Path, Boolean>();
+
+    List<FieldSchema> fields = new ArrayList<FieldSchema>();
+    for (HCatFieldSchema field : outputSchema.getFields()) {
+      fields.add(HCatSchemaUtils.getFieldSchema(field));
+    }
+
+    Path tableRoot = new Path(table.getTTable().getSd().getLocation());
+
+    if (customDynamicLocationUsed) {
+      // NOTE: There is no guarantee that the group/perms will be equivalent to that of the
+      // table in the case of custom partition locations. Hence, we will just add the leaf
+      // directories to paths.
+      for (Entry<String, Map<String, String>> entry : discoveredPartitions.entrySet()) {
+        String dest = getFinalDynamicPartitionDestination(table, entry.getValue(), jobInfo);
+        Path path = new Path(dest);
+        Path harPath = new Path(entry.getKey());
+
+        paths.put(path.getParent(), Boolean.TRUE);
+
+        Partition partition = new Partition();
+        partition.setDbName(table.getDbName());
+        partition.setTableName(table.getTableName());
+        partition.setSd(new StorageDescriptor(table.getTTable().getSd()));
+
+        partition.getSd().setCols(fields);
+        partition.setValues(FileOutputFormatContainer.getPartitionValueList(table, entry.getValue()));
+        partition.setParameters(params);
+
+        if (harProcessor.isEnabled()) {
+          harProcessor.exec(context, partition, harPath);
+          partition.getSd().setLocation(harProcessor.getProcessedLocation(new Path(dest)));
+        } else {
+          partition.getSd().setLocation(dest);
+        }
+
+        partitions.add(partition);
       }
     } else {
-      partition.getSd().setLocation(partPath.toString());
+      // Generate managed partition locations.
+      // NOTE: We do a best-effort attempt to ensure that non-custom dynamic partitions
+      // have group/perms that are equivalent to the table. Hence, we will add all of the
+      // parent directories of the leaf directories to paths.
+      int leaf = table.getPartitionKeys().size() - 1;
+
+      for (Entry<String, Map<String, String>> entry : discoveredPartitions.entrySet()) {
+        Path path = new Path(destRoot);
+        Path harPath = new Path(ptnRootLocation);
+
+        // Add all partition directories except for the leaf directories.
+        // Leaf directories will be renamed from _DYN to their destination.
+        int index = 0;
+        for (FieldSchema part : table.getPartitionKeys()) {
+          path = constructPartialPartPath(path, part.getName().toLowerCase(), entry.getValue());
+          harPath = constructPartialPartPath(harPath, part.getName().toLowerCase(), entry.getValue());
+
+          if (index++ < leaf) {
+            paths.put(path, Boolean.FALSE);
+          }
+        }
+
+        Partition partition = new Partition();
+        partition.setDbName(table.getDbName());
+        partition.setTableName(table.getTableName());
+        partition.setSd(new StorageDescriptor(table.getTTable().getSd()));
+
+        partition.getSd().setCols(fields);
+        partition.setValues(FileOutputFormatContainer.getPartitionValueList(table, entry.getValue()));
+        partition.setParameters(params);
+
+        String dest = getFinalDynamicPartitionDestination(table, entry.getValue(), jobInfo);
+
+        if (harProcessor.isEnabled()) {
+          harProcessor.exec(context, partition, harPath);
+          partition.getSd().setLocation(harProcessor.getProcessedLocation(new Path(dest)));
+        } else {
+          partition.getSd().setLocation(dest);
+        }
+
+        partitions.add(partition);
+      }
     }
-    return partition;
+
+    // Set up the parent directories of all leaf directories.
+    String newGroup = null;
+    FsPermission newPerms = null;
+
+    for (Map.Entry<Path, Boolean> entry : paths.entrySet()) {
+      Path path = entry.getKey();
+
+      if (! fs.exists(path)) {
+        fs.mkdirs(path);
+
+        // NOTE: applyGroupAndPerms is called on all directories regardless of whether
+        // the table is MANAGED or EXTERNAL or if customDynamicLocationUsed is true, which
+        // could mean the data is stored outside of the table directory.
+        Path parent = path.getParent();
+        boolean isTopDirectory = tableRoot.equals(parent);
+
+        // If the path is directly under the table root or a previously created directory,
+        // the group will be correct, so just verify the perms.
+        if (isTopDirectory || (paths.containsKey(parent) && paths.get(parent))) {
+          FileStatus fstat = fs.getFileStatus(path);
+          newGroup = null;
+          newPerms = compareDirPermissions(fstat, perms);
+        } else {
+          FileStatus fstat = fs.getFileStatus(path);
+          newGroup = compareGroups(fs, fstat, grpName);
+          newPerms = compareDirPermissions(fstat, perms);
+        }
+
+        if (newGroup != null || newPerms != null || acls != null) {
+          applyGroupAndPerms(fs, path, newPerms, acls, newGroup, false);
+        }
+
+        entry.setValue(Boolean.TRUE);
+      }
+    }
+
+    return partitions;
   }

   private void applyGroupAndPerms(FileSystem fs, Path path, FsPermission permission,
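Reviewer note: the LinkedHashMap in constructPartitions() does double duty: insertion order guarantees a parent level is visited before any of its children, and the Boolean value records whether that directory's group has already been verified, so descendants can skip the group probe. A self-contained illustration of the idea (paths are plain strings here purely for the demo):

    import java.util.LinkedHashMap;
    import java.util.Map;

    public class ParentPathDemo {
      public static void main(String[] args) {
        LinkedHashMap<String, Boolean> paths = new LinkedHashMap<String, Boolean>();
        paths.put("tbl/p1=a", Boolean.FALSE); // parent of the leaf dirs, not yet verified
        paths.put("tbl/p1=b", Boolean.FALSE);
        for (Map.Entry<String, Boolean> e : paths.entrySet()) {
          // mkdirs plus the group/permission check would happen here ...
          e.setValue(Boolean.TRUE); // children under this path may skip the group probe
        }
        System.out.println(paths); // {tbl/p1=a=true, tbl/p1=b=true}
      }
    }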
@@ -418,20 +571,32 @@ private void applyGroupAndPerms(FileSystem fs, Path path, FsPermissio
         " group: " + group + " recursive: " + recursive);
     }

+    // Apply group.
+    if (group != null) {
+      try {
+        fs.setOwner(path, null, group);
+      } catch (AccessControlException e) {
+        LOG.warn("Unable to change group: " + path, e);
+      }
+    }
+
+    // Apply permissions.
+    if (permission != null) {
+      fs.setPermission(path, permission);
+    }
+
     if (acls != null && ! acls.isEmpty()) {
       fs.setAcl(path, acls);
-    } else {
-      fs.setPermission(path, permission);
     }

     if (recursive) {
-      List<AclEntry> fileAcls = removeDefaultAcls(acls);
+      List<AclEntry> fileAcls = getFileAclEntries(acls);

       for (FileStatus fileStatus : fs.listStatus(path)) {
         if (fileStatus.isDir()) {
           applyGroupAndPerms(fs, fileStatus.getPath(), permission, acls, group, true);
         } else {
           applyGroupAndPerms(fs, fileStatus.getPath(), permission, fileAcls, group, false);
         }
       }
     }
@@ -597,13 +762,29 @@ private void moveTaskOutputs(FileSystem fs, Path file, Path srcDir,
       // partitioning is currently incompatible with append on mutable tables.
       final Path parentDir = finalOutputPath.getParent();
-      // Create the directory
-      Path placeholder = new Path(parentDir, "_placeholder");
+
+      // NOTE: The following is very ugly code which masks broken behavior and works around bad
+      // behavior in the FileSystem implementation.
+      //
+      // Creating the following placeholder file has the side effect of creating all the parent
+      // directories required to move the leaf directories. This side effect masked the bug
+      // addressed by this patch.
+      //
+      // The placeholder file also addresses bad behavior in the FileSystem implementation.
+      // Calling rename(tbl/_DYN/a/b/c, tbl/a/b) will move the contents of the source directory
+      // into the destination directory if the destination directory is empty. A workaround is
+      // to call rename(tbl/_DYN/a/b/c, tbl/a/b/c) to achieve the expected behavior, but there
+      // is no guarantee that tbl/a/b/c will not be created between the check for the existence
+      // of that directory and the rename, which would result in undefined behavior: if the
+      // directory is created and is still empty, the contents are moved as expected, but a
+      // concurrent process might also try to move data there; otherwise tbl/a/b/c/c is created.
+      Path placeholder = new Path(parentDir, "_placeholder" + String.valueOf(Math.random()));
       if (fs.mkdirs(parentDir)) {
-        // It is weired but we need a placeholder,
+        // It is weird but we need a placeholder,
         // otherwise rename cannot move file to the right place
         fs.create(placeholder).close();
       }
+
       if (LOG.isDebugEnabled()) {
         LOG.debug("Moving directory: " + file + " to " + parentDir);
       }
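Reviewer note: the rename pitfall described in the NOTE above is easy to state as code. A sketch of the two calls it contrasts; paths are illustrative, and the outcome of the first rename depends on the FileSystem implementation, which is exactly the problem the placeholder works around:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class RenameSketch {
      public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());
        Path src = new Path("/tbl/_DYN/a/b/c");
        Path dstParent = new Path("/tbl/a/b");
        fs.mkdirs(src);

        // Variant 1: rename onto the existing parent. Depending on the
        // implementation, src (or its contents) is moved INTO dstParent.
        fs.mkdirs(dstParent);
        fs.rename(src, dstParent);

        // Variant 2: rename onto the full leaf path. This yields the intended
        // layout, but only if /tbl/a/b already exists (hence the placeholder's
        // mkdirs) and nothing creates /tbl/a/b/c concurrently.
        // fs.rename(src, new Path("/tbl/a/b/c"));
      }
    }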
@@ -793,15 +974,15 @@ private void registerPartitions(JobContext context) throws IOException{
       client = HCatUtil.getHiveMetastoreClient(hiveConf);

       FileStatus tblStat = fs.getFileStatus(tblPath);
-      String grpName = tblStat.getGroup();
-      FsPermission perms = tblStat.getPermission();
-      List<AclEntry> acls = null;
+      String tblGroup = tblStat.getGroup();
+      FsPermission tblPerms = tblStat.getPermission();
+      List<AclEntry> tblAcls = null;

       if (conf.getBoolean("dfs.namenode.acls.enabled", false)) {
         try {
           AclStatus stat = fs.getAclStatus(tblPath);
           if (stat != null && ! stat.getEntries().isEmpty()) {
-            acls = generateChildAcls(stat.getEntries(), perms);
+            tblAcls = generateChildAcls(stat.getEntries(), tblPerms);
           }
         } catch (UnsupportedOperationException e) {
           LOG.debug("Skipping ACLs", e);
@@ -822,7 +1003,7 @@ private void registerPartitions(JobContext context) throws IOException{
           client.alter_table(table.getDbName(), table.getTableName(), table.getTTable());
         }

-        applyGroupAndPerms(fs, tblPath, tblStat.getPermission(), acls, grpName, true);
+        applyGroupAndPerms(fs, tblPath, tblStat.getPermission(), tblAcls, tblGroup, true);
         return;
       }

@@ -832,16 +1013,17 @@ private void registerPartitions(JobContext context) throws IOException{

       List<Partition> partitionsToAdd = new ArrayList<Partition>();
       if (!dynamicPartitioningUsed) {
-        partitionsToAdd.add(constructPartition(context, jobInfo, tblPath.toString(), null,
+        partitionsToAdd.add(constructPartition(jobInfo, tblPath.toString(),
           jobInfo.getPartitionValues(), jobInfo.getOutputSchema(), getStorerParameterMap(storer),
-          table, fs, grpName, perms, acls));
+          table, fs, tblGroup, tblPerms, tblAcls));
       } else {
-        for (Entry<String, Map<String, String>> entry : partitionsDiscoveredByPath.entrySet()) {
-          partitionsToAdd.add(constructPartition(context, jobInfo,
-            getPartitionRootLocation(entry.getKey(), entry.getValue().size()), entry.getKey(),
-            entry.getValue(), jobInfo.getOutputSchema(), getStorerParameterMap(storer), table,
-            fs, grpName, perms, acls));
-        }
+        // NOTE: Must initialize ptnRootLocation before it is referenced below.
+        // Call it the same way abortJob would.
+        getPartitionRootLocation(jobInfo.getLocation(), jobInfo.getTableInfo().getTable().getPartitionKeysSize());
+
+        partitionsToAdd = constructPartitions(context, jobInfo, tblPath.toString(),
+          partitionsDiscoveredByPath, jobInfo.getOutputSchema(), getStorerParameterMap(storer),
+          table, fs, tblGroup, tblPerms, tblAcls);
       }

       ArrayList<Map<String, String>> ptnInfos = new ArrayList<Map<String, String>>();
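Reviewer note: the bare call to getPartitionRootLocation() above is made only for its side effect of populating the ptnRootLocation field that constructPartitions() and the HAR path both read. A schematic of that lazy-init pattern (not the exact Hive method body):

    private String ptnRootLocation; // cached; the same for every partition in the job

    private String getPartitionRootLocation(String ptnLocn, int numPtnKeys) {
      if (ptnRootLocation == null) {
        Path root = new Path(ptnLocn);
        for (int i = 0; i < numPtnKeys; i++) {
          root = root.getParent(); // strip one path level per partition key
        }
        ptnRootLocation = root.toString();
      }
      return ptnRootLocation;
    }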
@@ -869,38 +1051,40 @@ private void registerPartitions(JobContext context) throws IOException{
        */

       //Publish the new partition(s)
-      if (dynamicPartitioningUsed && harProcessor.isEnabled() && (!partitionsToAdd.isEmpty())){
+      if (harProcessor.isEnabled() && dynamicPartitioningUsed) {
+        if (!partitionsToAdd.isEmpty()) {

-        if (!customDynamicLocationUsed) {
-          Path src = new Path(ptnRootLocation);
-          // check here for each dir we're copying out, to see if it
-          // already exists, error out if so.
-          // Also, treat dyn-writes as writes to immutable tables.
-          moveTaskOutputs(fs, src, src, tblPath, true, true); // dryRun = true, immutable = true
-          moveTaskOutputs(fs, src, src, tblPath, false, true);
-          if (!src.equals(tblPath)){
-            fs.delete(src, true);
+          if (!customDynamicLocationUsed) {
+            Path src = new Path(ptnRootLocation);
+            // check here for each dir we're copying out, to see if it
+            // already exists, error out if so.
+            // Also, treat dyn-writes as writes to immutable tables.
+            moveTaskOutputs(fs, src, src, tblPath, true, true); // dryRun = true, immutable = true
+            moveTaskOutputs(fs, src, src, tblPath, false, true);
+            if (!src.equals(tblPath)) {
+              fs.delete(src, true);
+            }
+          } else {
+            moveCustomLocationTaskOutputs(fs, table, hiveConf);
           }
-        } else {
-          moveCustomLocationTaskOutputs(fs, table, hiveConf);
-        }

-        try {
-          updateTableSchema(client, table, jobInfo.getOutputSchema());
-          LOG.info("HAR is being used. The table {} has new partitions {}.", table.getTableName(), ptnInfos);
-          client.add_partitions(partitionsToAdd);
-          partitionsAdded = partitionsToAdd;
-        } catch (Exception e){
-          // There was an error adding partitions : rollback fs copy and rethrow
-          for (Partition p : partitionsToAdd){
-            Path ptnPath = new Path(harProcessor.getParentFSPath(new Path(p.getSd().getLocation())));
-            if (fs.exists(ptnPath)){
-              fs.delete(ptnPath,true);
+          try {
+            updateTableSchema(client, table, jobInfo.getOutputSchema());
+            LOG.info("HAR is being used. The table {} has new partitions {}.", table.getTableName(), ptnInfos);
+            client.add_partitions(partitionsToAdd);
+            partitionsAdded = partitionsToAdd;
+          } catch (Exception e) {
+            // There was an error adding partitions : rollback fs copy and rethrow
+            for (Partition p : partitionsToAdd) {
+              Path ptnPath = new Path(harProcessor.getParentFSPath(new Path(p.getSd().getLocation())));
+              if (fs.exists(ptnPath)) {
+                fs.delete(ptnPath, true);
+              }
             }
+            throw e;
           }
-          throw e;
         }
-      }else{
+      } else {
         // no harProcessor, regular operation

         updateTableSchema(client, table, jobInfo.getOutputSchema());
@@ -979,10 +1163,37 @@ private void registerPartitions(JobContext context) throws IOException{
         }
       }

-      // Set permissions appropriately for each of the partitions we just created
-      // so as to have their permissions mimic the table permissions
-      for (Partition p : partitionsAdded){
-        applyGroupAndPerms(fs,new Path(p.getSd().getLocation()), tblStat.getPermission(), acls, tblStat.getGroup(), true);
+      // Examine the first partition to determine whether group/permissions need to be
+      // updated. Test whether the group can be changed.
+      if (partitionsAdded.size() > 0) {
+        Path path = new Path(partitionsAdded.get(0).getSd().getLocation());
+
+        // Check for group differences.
+        FileStatus fstat = fs.getFileStatus(path);
+        String newGroup = compareGroups(fs, fstat, tblGroup);
+
+        // Check for permissions differences.
+        FsPermission newPerms = compareDirPermissions(fstat, tblPerms);
+
+        // Check permissions for one of the files.
+        // In some cases the directory permissions are correct, but the file permissions are not.
+        if (newPerms == null) {
+          RemoteIterator<FileStatus> iter = fs.listStatusIterator(path);
+          fstat = iter.hasNext() ? iter.next() : null;
+
+          if (fstat != null) {
+            newPerms = compareFilePermissions(fstat, tblPerms);
+          }
+        }
+
+        // Set groups/permissions to mimic the table.
+        if (newGroup != null || newPerms != null || tblAcls != null) {
+          for (Partition part : partitionsAdded) {
+            path = new Path(part.getSd().getLocation());
+            LOG.info("Group and/or permissions differ from table for partition: " + path);
+            applyGroupAndPerms(fs, path, newPerms, tblAcls, newGroup, true);
+          }
+        }
       }
     }
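Reviewer note: compareDirPermissions() and compareFilePermissions() (added below) differ only in the mask: directories compare all rwx bits (0777), while files ignore the execute bits (0666), so a non-executable file under an executable directory does not trigger a rewrite. A worked example of the masked comparison:

    short tblPerms  = 0750; // table directory: rwxr-x---
    short filePerms = 0640; // data file:       rw-r-----
    boolean dirDiffers  = (tblPerms & 0777) != (filePerms & 0777); // true  (0750 vs 0640)
    boolean fileDiffers = (tblPerms & 0666) != (filePerms & 0666); // false (0640 vs 0640)
    // Under the 0666 mask the file already mimics the table, so no update is issued.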
@@ -1125,6 +1336,104 @@ public boolean apply(AclEntry acl) {
     return defaults;
   }

+  // TODO: CALEB: Delete? HIVE-13989 no longer calls getDefaultAclEntries() from registerPartitions().
+  /**
+   * Returns a list of AclEntry including DEFAULT ACLs and ACCESS ACLs as derived from the DEFAULTs.
+   * @param stat the AclStatus of the parent directory.
+   * @param perms the FsPermission of the parent directory.
+   * @return a list of AclEntry objects.
+   */
+  private ArrayList<AclEntry> getDefaultAclEntries(AclStatus stat, FsPermission perms) {
+    ArrayList<AclEntry> defaults = new ArrayList<AclEntry>();
+
+    boolean[] hasDefaults = { false, false, false, false };
+
+    for (AclEntry e : stat.getEntries()) {
+      if (e.getScope() == AclEntryScope.DEFAULT) {
+        AclEntry acl = new AclEntry.Builder().setName(e.getName()).setScope(AclEntryScope.ACCESS)
+          .setType(e.getType()).setPermission(e.getPermission()).build();
+
+        defaults.add(acl);
+        defaults.add(e);
+
+        if (e.getName() == null) {
+          if (e.getType() == AclEntryType.USER) {
+            hasDefaults[0] = true;
+          }
+          if (e.getType() == AclEntryType.GROUP) {
+            hasDefaults[1] = true;
+          }
+          if (e.getType() == AclEntryType.OTHER) {
+            hasDefaults[2] = true;
+          }
+          if (e.getType() == AclEntryType.MASK) {
+            hasDefaults[3] = true;
+          }
+        }
+      }
+    }
+
+    if (! hasDefaults[0]) {
+      AclEntry acl = new AclEntry.Builder().setScope(AclEntryScope.ACCESS)
+        .setType(AclEntryType.USER).setPermission(perms.getUserAction()).build();
+      defaults.add(acl);
+
+      hasDefaults[0] = true;
+    }
+
+    if (! hasDefaults[1]) {
+      for (AclEntry e : stat.getEntries()) {
+        if ((e.getScope() == AclEntryScope.ACCESS) && (e.getType() == AclEntryType.GROUP) && (e.getName() == null)) {
+          AclEntry acl = new AclEntry.Builder().setScope(AclEntryScope.ACCESS)
+            .setType(AclEntryType.GROUP).setPermission(e.getPermission()).build();
+          defaults.add(acl);
+
+          hasDefaults[1] = true;
+        }
+      }
+    }
+
+    if (! hasDefaults[2]) {
+      AclEntry acl = new AclEntry.Builder().setScope(AclEntryScope.ACCESS)
+        .setType(AclEntryType.OTHER).setPermission(perms.getOtherAction()).build();
+      defaults.add(acl);
+
+      hasDefaults[2] = true;
+    }
+
+    if (! hasDefaults[3]) {
+      AclEntry acl = new AclEntry.Builder().setScope(AclEntryScope.ACCESS)
+        .setType(AclEntryType.MASK).setPermission(perms.getGroupAction()).build();
+      defaults.add(acl);
+
+      hasDefaults[3] = true;
+    }
+
+    return defaults;
+  }
+
+  /**
+   * Returns a list of AclEntry excluding the DEFAULT ACLs.
+   * Hadoop will throw an exception if DEFAULT ACLs are applied to files.
+   * @param acls the list of ACLs for the parent directory.
+   * @return a list of AclEntry excluding the DEFAULT ACLs.
+   */
+  private List<AclEntry> getFileAclEntries(List<AclEntry> acls) {
+    ArrayList<AclEntry> fileAcls = null;
+
+    if (acls != null) {
+      fileAcls = new ArrayList<AclEntry>();
+
+      for (AclEntry entry : acls) {
+        if (entry.getScope() != AclEntryScope.DEFAULT) {
+          fileAcls.add(entry);
+        }
+      }
+    }
+
+    return fileAcls;
+  }
+
   private List<AclEntry> removeDefaultAcls(List<AclEntry> acls) {
     List<AclEntry> nonDefaults = null;

@@ -1143,4 +1452,40 @@ public boolean apply(AclEntry acl) {

     return nonDefaults;
   }
+
+  private String compareGroups(FileSystem fs, FileStatus fstat, String group) throws IOException {
+    if (! group.equals(fstat.getGroup())) {
+      // Attempt to change the group on one file to determine if the user is part of the group.
+      try {
+        fs.setOwner(fstat.getPath(), null, group);
+        return group;
+      } catch (AccessControlException e) {
+        LOG.warn("Unable to change group to: " + group, e);
+      }
+    }
+
+    return null;
+  }
+
+  private FsPermission comparePermissions(FileStatus fstat, FsPermission perms, short mask) {
+    short permsMasked = (short) (perms.toShort() & mask);
+    short perm = (short) (fstat.getPermission().toShort() & mask);
+
+    if (permsMasked != perm) {
+      return perms;
+    }
+
+    return null;
+  }
+
+  private FsPermission compareDirPermissions(FileStatus fstat, FsPermission perms) {
+    short mask = 00777;
+
+    return comparePermissions(fstat, perms, mask);
+  }
+
+  private FsPermission compareFilePermissions(FileStatus fstat, FsPermission perms) {
+    short mask = 00666;
+
+    return comparePermissions(fstat, perms, mask);
+  }
 }
diff --git a/shims/0.23/src/main/java/org/apache/hadoop/hive/shims/Hadoop23Shims.java b/shims/0.23/src/main/java/org/apache/hadoop/hive/shims/Hadoop23Shims.java
index 985a5bd169..f2be92b297 100644
--- a/shims/0.23/src/main/java/org/apache/hadoop/hive/shims/Hadoop23Shims.java
+++ b/shims/0.23/src/main/java/org/apache/hadoop/hive/shims/Hadoop23Shims.java
@@ -814,18 +814,18 @@ public ProxyFileSystem23(FileSystem fs, URI uri) {
     public RemoteIterator<LocatedFileStatus> listLocatedStatus(final Path f)
       throws FileNotFoundException, IOException {
       return new RemoteIterator<LocatedFileStatus>() {
-        private final RemoteIterator<LocatedFileStatus> stats =
+        private final RemoteIterator<LocatedFileStatus> locatedFileStatusIterator =
             ProxyFileSystem23.super.listLocatedStatus(
                 ProxyFileSystem23.super.swizzleParamPath(f));

         @Override
         public boolean hasNext() throws IOException {
-          return stats.hasNext();
+          return locatedFileStatusIterator.hasNext();
         }

         @Override
         public LocatedFileStatus next() throws IOException {
-          LocatedFileStatus result = stats.next();
+          LocatedFileStatus result = locatedFileStatusIterator.next();
           return new LocatedFileStatus(
               ProxyFileSystem23.super.swizzleFileStatus(result, false),
               result.getBlockLocations());
@@ -833,6 +833,25 @@ public LocatedFileStatus next() throws IOException {
       };
     }

+    @Override
+    public RemoteIterator<FileStatus> listStatusIterator(final Path f) throws IOException {
+      return new RemoteIterator<FileStatus>() {
+        private final RemoteIterator<FileStatus> fileStatusIterator =
+            ProxyFileSystem23.super.listStatusIterator(
+                ProxyFileSystem23.super.swizzleParamPath(f));
+
+        @Override
+        public boolean hasNext() throws IOException {
+          return fileStatusIterator.hasNext();
+        }
+
+        @Override
+        public FileStatus next() throws IOException {
+          return new FileStatus(ProxyFileSystem23.super.swizzleFileStatus(fileStatusIterator.next(), false));
+        }
+      };
+    }
+
     /**
      * Proxy file system also needs to override the access() method behavior.
      * Cannot add Override annotation since FileSystem.access() may not exist in
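Reviewer note: the listStatusIterator() override is needed because registerPartitions() now calls fs.listStatusIterator(path) to peek at a single file; without it, the proxy filesystem used in Hive tests would not translate (swizzle) paths between the proxy scheme and the underlying filesystem. For reference, the call pattern RemoteIterator requires (it is not a java.util.Iterator; hasNext()/next() throw IOException, so for-each does not apply):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.RemoteIterator;

    public class ListStatusIteratorDemo {
      public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());
        RemoteIterator<FileStatus> it = fs.listStatusIterator(new Path("/tmp"));
        while (it.hasNext()) { // plain while loop required
          FileStatus st = it.next();
          System.out.println(st.getPath() + " " + st.getPermission());
        }
      }
    }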