Index: src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogActionsListener.java =================================================================== --- src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogActionsListener.java (revision 946813) +++ src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogActionsListener.java (working copy) @@ -84,7 +84,7 @@ public void testActionListener() throws Exception { DummyLogActionsListener list = new DummyLogActionsListener(); DummyLogActionsListener laterList = new DummyLogActionsListener(); - HLog hlog = new HLog(fs, logDir, oldLogDir, conf, null, list); + HLog hlog = new HLog(fs, logDir, oldLogDir, conf, null, list, "null"); HRegionInfo hri = new HRegionInfo(new HTableDescriptor(SOME_BYTES), SOME_BYTES, SOME_BYTES, false); Index: src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestHLog.java =================================================================== --- src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestHLog.java (revision 946813) +++ src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestHLog.java (working copy) @@ -150,7 +150,7 @@ // gives you EOFE. wal.sync(); // Open a Reader. - Path walPath = wal.computeFilename(wal.getFilenum()); + Path walPath = wal.computeFilename(); HLog.Reader reader = HLog.getReader(fs, walPath, conf); int count = 0; HLog.Entry entry = new HLog.Entry(); @@ -281,7 +281,7 @@ long logSeqId = log.startCacheFlush(); log.completeCacheFlush(regionName, tableName, logSeqId, info.isMetaRegion()); log.close(); - Path filename = log.computeFilename(log.getFilenum()); + Path filename = log.computeFilename(); log = null; // Now open a reader on the log and assert append worked. 
reader = HLog.getReader(fs, filename, conf); @@ -349,7 +349,7 @@ long logSeqId = log.startCacheFlush(); log.completeCacheFlush(hri.getRegionName(), tableName, logSeqId, false); log.close(); - Path filename = log.computeFilename(log.getFilenum()); + Path filename = log.computeFilename(); log = null; // Now open a reader on the log and assert append worked. reader = HLog.getReader(fs, filename, conf); Index: src/test/java/org/apache/hadoop/hbase/master/TestOldLogsCleaner.java =================================================================== --- src/test/java/org/apache/hadoop/hbase/master/TestOldLogsCleaner.java (revision 946813) +++ src/test/java/org/apache/hadoop/hbase/master/TestOldLogsCleaner.java (working copy) @@ -21,6 +21,7 @@ import static org.junit.Assert.assertEquals; +import org.apache.hadoop.fs.FileStatus; import org.junit.After; import org.junit.AfterClass; import org.junit.Before; @@ -34,6 +35,7 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.conf.Configuration; +import java.net.URLEncoder; import java.util.concurrent.atomic.AtomicBoolean; public class TestOldLogsCleaner { @@ -74,32 +76,41 @@ Configuration c = TEST_UTIL.getConfiguration(); Path oldLogDir = new Path(TEST_UTIL.getTestDir(), HConstants.HREGION_OLDLOGDIR_NAME); + String fakeMachineName = URLEncoder.encode("regionserver:60020", "UTF-8"); FileSystem fs = FileSystem.get(c); AtomicBoolean stop = new AtomicBoolean(false); OldLogsCleaner cleaner = new OldLogsCleaner(1000, stop,c, fs, oldLogDir); + // Create 2 invalid files, 1 "recent" file, 1 very new file and 30 old files long now = System.currentTimeMillis(); fs.delete(oldLogDir, true); fs.mkdirs(oldLogDir); fs.createNewFile(new Path(oldLogDir, "a")); - fs.createNewFile(new Path(oldLogDir, "1.hlog.dat.a")); - fs.createNewFile(new Path(oldLogDir, "1.hlog.dat." + now)); - for(int i = 0; i < 30; i++) { - fs.createNewFile(new Path(oldLogDir, 1 + "hlog.dat." 
+ - (now - 6000000 - i))); + fs.createNewFile(new Path(oldLogDir, fakeMachineName + "." + "a")); + fs.createNewFile(new Path(oldLogDir, fakeMachineName + "." + now)); + System.out.println("Now is: " + now); + for (int i = 0; i < 30; i++) { + fs.createNewFile(new Path(oldLogDir, fakeMachineName + "." + (now - 6000000 - i) )); } - fs.createNewFile(new Path(oldLogDir, "a.hlog.dat." + (now + 10000))); + for (FileStatus stat : fs.listStatus(oldLogDir)) { + System.out.println(stat.getPath().toString()); + } + fs.createNewFile(new Path(oldLogDir, fakeMachineName + "." + (now + 10000) )); + assertEquals(34, fs.listStatus(oldLogDir).length); + // This will take care of 20 old log files (default max we can delete) cleaner.chore(); assertEquals(14, fs.listStatus(oldLogDir).length); + // We will delete all remaining log files and those that are invalid cleaner.chore(); - assertEquals(1, fs.listStatus(oldLogDir).length); + // We end up with the current log file and a newer one + assertEquals(2, fs.listStatus(oldLogDir).length); } } Index: src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java =================================================================== --- src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java (revision 946813) +++ src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java (working copy) @@ -86,6 +86,7 @@ import java.lang.reflect.Constructor; import java.net.BindException; import java.net.InetSocketAddress; +import java.net.URLEncoder; import java.util.ArrayList; import java.util.Collection; import java.util.Collections; @@ -939,7 +940,8 @@ // instantiate protected HLog instantiateHLog(Path logdir, Path oldLogDir) throws IOException { - HLog newlog = new HLog(fs, logdir, oldLogDir, conf, hlogRoller); + HLog newlog = new HLog(fs, logdir, oldLogDir, conf, hlogRoller, null, + URLEncoder.encode(serverInfo.getServerAddress().toString(), "UTF-8")); return newlog; } Index: 
src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLog.java =================================================================== --- src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLog.java (revision 946813) +++ src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLog.java (working copy) @@ -123,6 +123,7 @@ private final long optionalFlushInterval; private final long blocksize; private final int flushlogentries; + private final String prefix; private final AtomicInteger unflushedEntries = new AtomicInteger(0); private final Path oldLogDir; private final List actionListeners = @@ -262,7 +263,7 @@ public HLog(final FileSystem fs, final Path dir, final Path oldLogDir, final Configuration conf, final LogRollListener listener) throws IOException { - this(fs, dir, oldLogDir, conf, listener, null); + this(fs, dir, oldLogDir, conf, listener, null, null); } /** @@ -278,11 +279,12 @@ * @param conf configuration to use * @param listener listerner used to request log rolls * @param actionListener optional listener for hlog actions like archiving + * @param prefix what will prefix the file names, can be null * @throws IOException */ public HLog(final FileSystem fs, final Path dir, final Path oldLogDir, final Configuration conf, final LogRollListener listener, - final LogActionsListener actionListener) + final LogActionsListener actionListener, final String prefix) throws IOException { super(); this.fs = fs; @@ -316,6 +318,7 @@ if (actionListener != null) { addLogActionsListerner(actionListener); } + this.prefix = prefix == null ? "hlog" : prefix; // rollWriter sets this.hdfs_out if it can. rollWriter(); @@ -414,7 +417,7 @@ // Clean up current writer. 
Path oldFile = cleanupCurrentWriter(this.filenum); this.filenum = System.currentTimeMillis(); - Path newPath = computeFilename(this.filenum); + Path newPath = computeFilename(); this.writer = createWriter(fs, newPath, HBaseConfiguration.create(conf)); this.initialReplication = fs.getFileStatus(newPath).getReplication(); @@ -627,7 +630,7 @@ throw e; } if (currentfilenum >= 0) { - oldFile = computeFilename(currentfilenum); + oldFile = computeFilename(); this.outputfiles.put(Long.valueOf(this.logSeqNum.get() - 1), oldFile); } } @@ -650,12 +653,11 @@ /** * This is a convenience method that computes a new filename with a given * file-number. - * @param fn * @return Path */ - public Path computeFilename(final long fn) { - if (fn < 0) return null; - return new Path(dir, HLOG_DATFILE + fn); + public Path computeFilename() { + if (filenum < 0) return null; + return new Path(dir, prefix + "." + filenum); } /** Index: src/main/java/org/apache/hadoop/hbase/master/TimeToLiveLogCleaner.java =================================================================== --- src/main/java/org/apache/hadoop/hbase/master/TimeToLiveLogCleaner.java (revision 946813) +++ src/main/java/org/apache/hadoop/hbase/master/TimeToLiveLogCleaner.java (working copy) @@ -40,10 +40,9 @@ public boolean isLogDeletable(Path filePath) { long time = 0; long currentTime = System.currentTimeMillis(); - System.out.println(filePath.getName()); String[] parts = filePath.getName().split("\\."); try { - time = Long.parseLong(parts[3]); + time = Long.parseLong(parts[parts.length-1]); } catch (NumberFormatException e) { e.printStackTrace(); } Index: src/main/java/org/apache/hadoop/hbase/master/OldLogsCleaner.java =================================================================== --- src/main/java/org/apache/hadoop/hbase/master/OldLogsCleaner.java (revision 946813) +++ src/main/java/org/apache/hadoop/hbase/master/OldLogsCleaner.java (working copy) @@ -48,8 +48,8 @@ private final Path oldLogDir; private final 
LogCleanerDelegate logCleaner; private final Configuration conf; - // We expect a file looking like hlog.dat.ts - private final Pattern pattern = Pattern.compile("\\d*\\.hlog\\.dat\\.\\d*"); + // We expect a file looking like encoded_hostnameAndPort.ts + private final Pattern pattern = Pattern.compile(".*\\.\\d+"); /** *