Index: src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLog.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLog.java (revision 1441237)
+++ src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLog.java (working copy)
@@ -636,9 +636,9 @@
         nextHdfsOut = ((SequenceFileLogWriter)nextWriter).getWriterFSDataOutputStream();
       }
+      // Clean up current writer.
+      Path oldFile = cleanupCurrentWriter(currentFilenum);
       synchronized (updateLock) {
-        // Clean up current writer.
-        Path oldFile = cleanupCurrentWriter(currentFilenum);
         this.writer = nextWriter;
         this.hdfs_out = nextHdfsOut;
@@ -851,13 +851,16 @@
   /*
    * Cleans up current writer closing and adding to outputfiles.
-   * Presumes we're operating inside an updateLock scope.
    * @return Path to current writer or null if none.
    * @throws IOException
    */
   Path cleanupCurrentWriter(final long currentfilenum) throws IOException {
     Path oldFile = null;
-    if (this.writer != null) {
+    synchronized(updateLock) {
+      if (this.writer == null) {
+        return null;
+      }
+    }
       // Close the current writer, get a new one.
       try {
         // Wait till all current transactions are written to the hlog.
@@ -869,9 +872,11 @@
             " synced till here " + syncedTillHere);
           sync();
         }
-        this.writer.close();
-        this.writer = null;
-        closeErrorCount.set(0);
+        synchronized(updateLock) {
+          this.writer.close();
+          this.writer = null;
+          closeErrorCount.set(0);
+        }
       } catch (IOException e) {
         LOG.error("Failed close of HLog writer", e);
         int errors = closeErrorCount.incrementAndGet();
@@ -894,7 +899,6 @@
       oldFile = computeFilename(currentfilenum);
       this.outputfiles.put(Long.valueOf(this.logSeqNum.get()), oldFile);
     }
-  }
     return oldFile;
   }