Index: src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLogSplitter.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLogSplitter.java	(revision 105886)
+++ src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLogSplitter.java	(revision 108439)
@@ -22,7 +22,6 @@
 import static org.apache.hadoop.hbase.util.FSUtils.recoverFileLease;
 
 import java.io.EOFException;
-import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.io.InterruptedIOException;
 import java.lang.reflect.Constructor;
@@ -44,22 +43,21 @@
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.io.HeapSize;
-import org.apache.hadoop.hbase.master.SplitLogManager.TaskFinisher.Status;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.RemoteExceptionHandler;
+import org.apache.hadoop.hbase.io.HeapSize;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.wal.HLog.Entry;
 import org.apache.hadoop.hbase.regionserver.wal.HLog.Reader;
 import org.apache.hadoop.hbase.regionserver.wal.HLog.Writer;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.CancelableProgressable;
+import org.apache.hadoop.hbase.util.ClassSize;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.FSUtils;
-import org.apache.hadoop.hbase.util.ClassSize;
+import org.apache.hadoop.hbase.zookeeper.ZKSplitLog;
 import org.apache.hadoop.io.MultipleIOException;
-import org.apache.hadoop.hbase.zookeeper.ZKSplitLog;
 
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;
@@ -594,10 +592,24 @@
     if (isCreate && !fs.exists(dir)) {
       if (!fs.mkdirs(dir)) LOG.warn("mkdir failed on " + dir);
     }
-    return new Path(dir, formatRecoveredEditsFileName(logEntry.getKey()
-      .getLogSeqNum()));
+    // Write the file with a .tmp suffix so the region's replayRecoveredEdits
+    // will not delete it while it is still being written
+    return new Path(dir,
+        getTmpRecoveredEditsFileName(formatRecoveredEditsFileName(logEntry
+            .getKey().getLogSeqNum())));
   }
 
+  static String getTmpRecoveredEditsFileName(String fileName) {
+    return fileName + ".tmp";
+  }
+
+  static Path getCompletedRecoveredEditsFilePath(Path srcPath) {
+    String fileName = srcPath.getName();
+    if (fileName.endsWith(".tmp"))
+      fileName = fileName.split(".tmp")[0];
+    return new Path(srcPath.getParent(), fileName);
+  }
+
   static String formatRecoveredEditsFileName(final long seqid) {
     return String.format("%019d", seqid);
   }
@@ -1103,9 +1115,17 @@
         thrown.add(ioe);
         continue;
       }
-      paths.add(wap.p);
       LOG.info("Closed path " + wap.p +" (wrote " + wap.editsWritten + " edits in "
          + (wap.nanosSpent / 1000/ 1000) + "ms)");
+      Path dst = getCompletedRecoveredEditsFilePath(wap.p);
+      try {
+        fs.rename(wap.p, dst);
+      } catch (IOException ioe) {
+        LOG.error("Couldn't rename " + wap.p + " to " + dst, ioe);
+        thrown.add(ioe);
+        continue;
+      }
+      paths.add(dst);
     }
     if (!thrown.isEmpty()) {
       throw MultipleIOException.createIOException(thrown);
Index: src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLog.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLog.java	(revision 99701)
+++ src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLog.java	(revision 108439)
@@ -40,7 +40,6 @@
 import java.util.concurrent.CopyOnWriteArrayList;
 import java.util.concurrent.atomic.AtomicInteger;
 import java.util.concurrent.atomic.AtomicLong;
-import java.util.concurrent.locks.Condition;
 import java.util.concurrent.locks.Lock;
 import java.util.concurrent.locks.ReentrantLock;
 import java.util.regex.Matcher;
@@ -1361,6 +1360,10 @@
           // it a timestamp suffix. See moveAsideBadEditsFile.
           Matcher m = EDITFILES_NAME_PATTERN.matcher(p.getName());
           result = fs.isFile(p) && m.matches();
+          // Skip files whose names end with .tmp; the log-splitting thread
+          // is still writing them.
+          if (p.getName().endsWith(".tmp"))
+            result = false;
         } catch (IOException e) {
           LOG.warn("Failed isFile check on " + p);
         }
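
For reference, the following is a minimal, self-contained sketch of the naming round trip the patch introduces: recovered-edits files are written under a ".tmp" suffix and renamed to their final sequence-number name only after the writer closes cleanly, so a concurrent replay pass never sees a half-written file. It uses plain java.io.File instead of Hadoop's FileSystem/Path, and the class name RecoveredEditsNaming and its helper methods are illustrative only, not part of HBase.

import java.io.File;

// Hypothetical demo class -- not part of HBase; it only mirrors the naming
// logic of getTmpRecoveredEditsFileName / getCompletedRecoveredEditsFilePath.
public class RecoveredEditsNaming {

  private static final String TMP_SUFFIX = ".tmp";

  // Same zero-padded format as HLogSplitter.formatRecoveredEditsFileName.
  static String formatRecoveredEditsFileName(long seqid) {
    return String.format("%019d", seqid);
  }

  // While the split task is still writing, the file carries a .tmp suffix so
  // the region's recovered-edits replay will neither replay nor delete it.
  static String toTmpName(String fileName) {
    return fileName + TMP_SUFFIX;
  }

  // After a clean close the suffix is stripped and the file is renamed to its
  // final, replayable name (the fs.rename() call added in the patch).
  static File toCompletedPath(File tmpPath) {
    String name = tmpPath.getName();
    if (name.endsWith(TMP_SUFFIX)) {
      name = name.substring(0, name.length() - TMP_SUFFIX.length());
    }
    return new File(tmpPath.getParentFile(), name);
  }

  public static void main(String[] args) {
    File editsDir = new File("/hbase/table/region/recovered.edits");
    File tmp = new File(editsDir, toTmpName(formatRecoveredEditsFileName(1234L)));
    File done = toCompletedPath(tmp);
    // prints .../recovered.edits/0000000000000001234.tmp -> .../0000000000000001234
    System.out.println(tmp + " -> " + done);
  }
}

Note that the sketch strips the suffix with substring, whereas the patch itself uses fileName.split(".tmp")[0]; for these zero-padded sequence-number names the two behave the same.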