Index: src/main/java/org/apache/hadoop/hbase/regionserver/wal/SequenceFileLogWriter.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/regionserver/wal/SequenceFileLogWriter.java	(revision 1304523)
+++ src/main/java/org/apache/hadoop/hbase/regionserver/wal/SequenceFileLogWriter.java	(working copy)
@@ -311,7 +311,11 @@
 
   @Override
   public long getLength() throws IOException {
-    return this.writer.getLength();
+    try {
+      return this.writer.getLength();
+    } catch (Exception x) {
+      throw new IOException(x);
+    }
   }
 
   /**
Index: src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLog.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLog.java	(revision 1304523)
+++ src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLog.java	(working copy)
@@ -1231,10 +1231,19 @@
       // Atomically fetch all existing pending writes. New writes
       // will start accumulating in a new list.
       List<Entry> pending = getPendingWrites();
-
-      // write out all accumulated Entries to hdfs.
-      for (Entry e : pending) {
-        writer.append(e);
+      try {
+        // write out all accumulated Entries to hdfs.
+        for (Entry e : pending) {
+          writer.append(e);
+        }
+      } catch (IOException io) {
+        // try again with lock held
+        // (guard against concurrent close of writer)
+        synchronized (HLog.this.updateLock) {
+          for (Entry e : pending) {
+            writer.append(e);
+          }
+        }
       }
     }
 
@@ -1288,8 +1297,16 @@
         syncTime.inc(System.currentTimeMillis() - now);
         if (!this.logRollRunning) {
           checkLowReplication();
-          if (this.writer.getLength() > this.logrollsize) {
-            requestLogRoll();
+          try {
+            if (this.writer.getLength() > this.logrollsize) {
+              requestLogRoll();
+            }
+          } catch (IOException io) {
+            // not really a problem:
+            // if the writer was just closed we're fine anyway;
+            // other rolling conditions will be checked again
+            // next time
+            LOG.debug("Log roll failed", io);
           }
         }
       } catch (IOException e) {
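
Note: the HLog hunk above retries the pending appends while holding updateLock when the first, unlocked attempt fails with an IOException, guarding against the writer being closed concurrently by a log roll. Below is a minimal standalone sketch of that retry-under-lock pattern, not HBase code; the class SketchSyncer, its Writer interface, and appendAll() are hypothetical names introduced only for illustration.

import java.io.IOException;
import java.util.List;

// Minimal sketch of the retry-under-lock pattern used in the HLog hunk above.
// SketchSyncer, Writer, appendAll() and updateLock are hypothetical stand-ins,
// not HBase APIs.
class SketchSyncer {
  interface Writer {
    void append(String entry) throws IOException;
  }

  private final Object updateLock = new Object();
  private volatile Writer writer;

  SketchSyncer(Writer writer) {
    this.writer = writer;
  }

  void flushPending(List<String> pending) throws IOException {
    try {
      // First attempt runs without the lock; a concurrent log roll may close
      // and replace the writer underneath us, surfacing as an IOException.
      appendAll(pending);
    } catch (IOException io) {
      // Retry once with the update lock held so the writer cannot be
      // closed while we append.
      synchronized (updateLock) {
        appendAll(pending);
      }
    }
  }

  private void appendAll(List<String> pending) throws IOException {
    for (String e : pending) {
      writer.append(e);
    }
  }
}

If the retry under the lock also fails, the IOException simply propagates to the caller, which matches the behavior of the patched code.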