Index: src/java/org/apache/hcatalog/mapreduce/FileRecordWriterContainer.java =================================================================== --- src/java/org/apache/hcatalog/mapreduce/FileRecordWriterContainer.java (revision 1391581) +++ src/java/org/apache/hcatalog/mapreduce/FileRecordWriterContainer.java (working copy) @@ -146,8 +146,11 @@ if (baseOutputCommitter.needsTaskCommit(currContext)) { baseOutputCommitter.commitTask(currContext); } - org.apache.hadoop.mapred.JobContext currJobContext = HCatMapRedUtil.createJobContext(currContext); - baseOutputCommitter.commitJob(currJobContext); + // HCATALOG-513: commitJob() removes the temporary job directory, which makes it unavailable to other + // running tasks and causes them all to fail. We need a way/place to do checkOutputSpecs() and commitJob() + // without interfering with other tasks. + // org.apache.hadoop.mapred.JobContext currJobContext = HCatMapRedUtil.createJobContext(currContext); + // baseOutputCommitter.commitJob(currJobContext); } } else { getBaseRecordWriter().close(reporter); @@ -205,7 +208,7 @@ //As it can throw a FileAlreadyExistsException when more than one mapper is writing to a partition //See HCATALOG-490, also to avoid contacting the namenode for each new FileOutputFormat instance //In general this should be ok for most FileOutputFormat implementations -//but may become an issue for cases when the method is used to perform other setup tasks +//but may become an issue for cases when the method is used to perform other setup tasks //setupJob() baseOutputCommitter.setupJob(currJobContext);