Index: src/main/java/org/apache/hadoop/hbase/io/Reference.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/io/Reference.java	(revision 1161985)
+++ src/main/java/org/apache/hadoop/hbase/io/Reference.java	(working copy)
@@ -125,8 +125,7 @@
   public Path write(final FileSystem fs, final Path p)
   throws IOException {
-    FSUtils.create(fs, p);
-    FSDataOutputStream out = fs.create(p);
+    FSDataOutputStream out = fs.create(p, false);
     try {
       write(out);
     } finally {
Index: src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransaction.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransaction.java	(revision 1211314)
+++ src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransaction.java	(working copy)
@@ -526,7 +526,14 @@
    */
   private static void createSplitDir(final FileSystem fs, final Path splitdir)
   throws IOException {
-    if (fs.exists(splitdir)) throw new IOException("Splitdir already exits? " + splitdir);
+    if (fs.exists(splitdir)) {
+      LOG.info("The " + splitdir
+          + " directory already exists. Deleting it so it can be recreated");
+      if (!fs.delete(splitdir, true)) {
+        throw new IOException("Failed deletion of " + splitdir
+            + " before creating it again.");
+      }
+    }
     if (!fs.mkdirs(splitdir)) throw new IOException("Failed create of " + splitdir);
   }
@@ -586,6 +593,10 @@
           this.fileSplitTimeout, TimeUnit.MILLISECONDS);
       if (stillRunning) {
         threadPool.shutdownNow();
+        // Wait for the thread pool to shut down completely.
+        while (!threadPool.isTerminated()) {
+          Thread.sleep(50);
+        }
         throw new IOException("Took too long to split the" +
             " files and create the references, aborting split");
       }
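
For context, a minimal sketch (not part of the patch) of the behaviour the Reference.java hunk relies on: FileSystem.create(path, false) refuses to overwrite an existing file and throws an IOException instead, so a duplicate attempt to write the same reference file is detected rather than silently clobbered. The class name, local-filesystem setup, and the /tmp path below are illustrative assumptions, not HBase code.

// Sketch only: shows create(path, overwrite=false) failing on an existing file.
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class CreateNoOverwriteSketch {
  public static void main(String[] args) throws IOException {
    FileSystem fs = FileSystem.getLocal(new Configuration());
    Path p = new Path("/tmp/reference-sketch");   // illustrative path
    fs.delete(p, true);                           // clean up any previous run

    // First create succeeds because the file does not exist yet.
    FSDataOutputStream out = fs.create(p, false);
    try {
      out.writeUTF("first writer");
    } finally {
      out.close();
    }

    // A second create with overwrite=false fails instead of silently
    // overwriting the existing file, so the duplicate write is surfaced.
    try {
      fs.create(p, false).close();
    } catch (IOException e) {
      System.out.println("Second create rejected: " + e.getMessage());
    }
  }
}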