diff --git a/conf/hbase-default.xml b/conf/hbase-default.xml
index 9181a6c..e03f965 100644
--- a/conf/hbase-default.xml
+++ b/conf/hbase-default.xml
@@ -283,6 +283,13 @@
+ hbase.master.logcleaner.ttl
+ 60000
+ Maximum time a log can stay in the .oldlogs directory,
+ after which it will be cleaned by a master thread.
+
+
+
hbase.regions.percheckin
10
Maximum number of regions that can be assigned in a single go
diff --git a/src/contrib/transactional/src/java/org/apache/hadoop/hbase/regionserver/transactional/THLog.java b/src/contrib/transactional/src/java/org/apache/hadoop/hbase/regionserver/transactional/THLog.java
index d2515c3..8ce5fe2 100644
--- a/src/contrib/transactional/src/java/org/apache/hadoop/hbase/regionserver/transactional/THLog.java
+++ b/src/contrib/transactional/src/java/org/apache/hadoop/hbase/regionserver/transactional/THLog.java
@@ -41,9 +41,9 @@ import org.apache.hadoop.hbase.regionserver.wal.LogRollListener;
*/
class THLog extends HLog {
- public THLog(FileSystem fs, Path dir, HBaseConfiguration conf,
- LogRollListener listener) throws IOException {
- super(fs, dir, conf, listener);
+ public THLog(FileSystem fs, Path dir, Path oldLogDir,
+ HBaseConfiguration conf, LogRollListener listener) throws IOException {
+ super(fs, dir, oldLogDir, conf, listener);
}
@Override
diff --git a/src/contrib/transactional/src/java/org/apache/hadoop/hbase/regionserver/transactional/TransactionalRegionServer.java b/src/contrib/transactional/src/java/org/apache/hadoop/hbase/regionserver/transactional/TransactionalRegionServer.java
index af8dfeb..9723704 100644
--- a/src/contrib/transactional/src/java/org/apache/hadoop/hbase/regionserver/transactional/TransactionalRegionServer.java
+++ b/src/contrib/transactional/src/java/org/apache/hadoop/hbase/regionserver/transactional/TransactionalRegionServer.java
@@ -107,10 +107,11 @@ public class TransactionalRegionServer extends HRegionServer implements
}
@Override
- protected HLog instantiateHLog(Path logdir) throws IOException {
+ protected HLog instantiateHLog(Path logdir, Path oldLogDir) throws IOException {
conf.set("hbase.regionserver.hlog.keyclass",
THLogKey.class.getCanonicalName());
- HLog newlog = new THLog(super.getFileSystem(), logdir, conf, super.getLogRoller());
+ HLog newlog = new THLog(super.getFileSystem(), logdir, oldLogDir,
+ conf, super.getLogRoller());
return newlog;
}
diff --git a/src/java/org/apache/hadoop/hbase/HConstants.java b/src/java/org/apache/hadoop/hbase/HConstants.java
index 07446cf..55492e4 100644
--- a/src/java/org/apache/hadoop/hbase/HConstants.java
+++ b/src/java/org/apache/hadoop/hbase/HConstants.java
@@ -124,6 +124,9 @@ public interface HConstants {
* Use '.' as a special character to seperate the log files from table data */
static final String HREGION_LOGDIR_NAME = ".logs";
+ /** Like the previous, but for old logs that are about to be deleted */
+ static final String HREGION_OLDLOGDIR_NAME = ".oldlogs";
+
/** Name of old log file for reconstruction */
static final String HREGION_OLDLOGFILE_NAME = "oldlogfile.log";
diff --git a/src/java/org/apache/hadoop/hbase/HMerge.java b/src/java/org/apache/hadoop/hbase/HMerge.java
index 9f0a7e7..6072080 100644
--- a/src/java/org/apache/hadoop/hbase/HMerge.java
+++ b/src/java/org/apache/hadoop/hbase/HMerge.java
@@ -114,8 +114,9 @@ class HMerge implements HConstants {
);
Path logdir = new Path(tabledir, "merge_" + System.currentTimeMillis() +
HREGION_LOGDIR_NAME);
+ Path oldLogDir = new Path(tabledir, HREGION_OLDLOGDIR_NAME);
this.hlog =
- new HLog(fs, logdir, conf, null);
+ new HLog(fs, logdir, oldLogDir, conf, null);
}
void process() throws IOException {
diff --git a/src/java/org/apache/hadoop/hbase/master/HMaster.java b/src/java/org/apache/hadoop/hbase/master/HMaster.java
index 2428eb1..db22fbe 100644
--- a/src/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/src/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -135,6 +135,8 @@ public class HMaster extends Thread implements HConstants, HMasterInterface,
private final FileSystem fs;
// Is the fileystem ok?
private volatile boolean fsOk = true;
+ // The Path to the old logs dir
+ private final Path oldLogDir;
// Queues for RegionServerOperation events. Includes server open, shutdown,
// and region open and close.
@@ -166,6 +168,12 @@ public class HMaster extends Thread implements HConstants, HMasterInterface,
this.fs = FileSystem.get(this.conf);
checkRootDir(this.rootdir, this.conf, this.fs);
+ // Make sure the region servers can archive their old logs
+ this.oldLogDir = new Path(this.rootdir, HREGION_OLDLOGDIR_NAME);
+ if(!this.fs.exists(this.oldLogDir)) {
+ this.fs.mkdirs(this.oldLogDir);
+ }
+
// Get my address and create an rpc server instance. The rpc-server port
// can be ephemeral...ensure we have the correct info
HServerAddress a = new HServerAddress(getMyAddress(this.conf));
@@ -390,6 +398,14 @@ public class HMaster extends Thread implements HConstants, HMasterInterface,
}
/**
+ * Get the directory where old logs go
+ * @return the dir
+ */
+ public Path getOldLogDir() {
+ return this.oldLogDir;
+ }
+
+ /**
* Add to the passed m servers that are loaded less than
* l.
* @param l
@@ -624,7 +640,7 @@ public class HMaster extends Thread implements HConstants, HMasterInterface,
Path logDir =
new Path(this.rootdir, HLog.getHLogDirectoryName(serverName));
try {
- HLog.splitLog(this.rootdir, logDir, this.fs, getConfiguration());
+ HLog.splitLog(this.rootdir, logDir, oldLogDir, this.fs, getConfiguration());
} catch (IOException e) {
LOG.error("Failed splitting " + logDir.toString(), e);
} finally {
diff --git a/src/java/org/apache/hadoop/hbase/master/OldLogsCleaner.java b/src/java/org/apache/hadoop/hbase/master/OldLogsCleaner.java
new file mode 100644
index 0000000..537dfd3
--- /dev/null
+++ b/src/java/org/apache/hadoop/hbase/master/OldLogsCleaner.java
@@ -0,0 +1,83 @@
+/**
+ * Copyright 2009 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.master;
+
+import org.apache.hadoop.hbase.Chore;
+import org.apache.hadoop.hbase.RemoteExceptionHandler;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.io.IOException;
+
+public class OldLogsCleaner extends Chore {
+
+ static final Log LOG = LogFactory.getLog(OldLogsCleaner.class.getName());
+
+ private final HMaster master;
+ private final long ttl;
+ private final int maxDeletedLogs = 20;
+
+ public OldLogsCleaner(final int p, final AtomicBoolean s, HMaster master) {
+ super(p,s);
+ this.master = master;
+ this.ttl = master.getConfiguration().
+ getLong("hbase.master.logcleaner.ttl", 60000);
+ }
+
+ @Override
+ protected void chore() {
+ try {
+ FileStatus[] files =
+ this.master.getFileSystem().listStatus(this.master.getOldLogDir());
+ long currentTime = System.currentTimeMillis();
+ int nbDeletedLog = 0;
+ for(FileStatus file : files) {
+
+ String[] parts = file.getPath().getName().split("\\.");
+ if(parts.length == 4) {
+ long time = Long.parseLong(parts[3]);
+ long life = currentTime - time;
+
+ if(life < 0) {
+ LOG.warn("Found a log newer than current time," +
+ " probably a clock skew");
+ continue;
+ }
+
+ if(life > ttl) {
+ this.master.getFileSystem().delete(file.getPath(), true);
+ nbDeletedLog++;
+ }
+
+ if(nbDeletedLog >= maxDeletedLogs) {
+ break;
+ }
+ }
+ }
+
+ } catch (IOException e) {
+ e = RemoteExceptionHandler.checkIOException(e);
+ LOG.warn("Error while cleaning the logs", e);
+ master.checkFileSystem();
+ }
+ }
+}
diff --git a/src/java/org/apache/hadoop/hbase/master/ProcessServerShutdown.java b/src/java/org/apache/hadoop/hbase/master/ProcessServerShutdown.java
index 0d44f93..f95e517 100644
--- a/src/java/org/apache/hadoop/hbase/master/ProcessServerShutdown.java
+++ b/src/java/org/apache/hadoop/hbase/master/ProcessServerShutdown.java
@@ -36,6 +36,7 @@ import org.apache.hadoop.hbase.ipc.HRegionInterface;
import org.apache.hadoop.hbase.regionserver.wal.HLog;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.FSUtils;
/**
* Instantiated when a server's lease has expired, meaning it has crashed.
@@ -47,7 +48,7 @@ class ProcessServerShutdown extends RegionServerOperation {
private boolean isRootServer;
private List metaRegions;
- private Path oldLogDir;
+ private Path rsLogDir;
private boolean logSplit;
private boolean rootRescanned;
private HServerAddress deadServerAddress;
@@ -73,7 +74,7 @@ class ProcessServerShutdown extends RegionServerOperation {
this.deadServerAddress = serverInfo.getServerAddress();
this.logSplit = false;
this.rootRescanned = false;
- this.oldLogDir =
+ this.rsLogDir =
new Path(master.getRootDir(), HLog.getHLogDirectoryName(serverInfo));
// check to see if I am responsible for either ROOT or any of the META tables.
@@ -275,13 +276,13 @@ class ProcessServerShutdown extends RegionServerOperation {
master.getRegionManager().numOnlineMetaRegions());
if (!logSplit) {
// Process the old log file
- if (this.master.getFileSystem().exists(oldLogDir)) {
+ if (this.master.getFileSystem().exists(rsLogDir)) {
if (!master.getRegionManager().splitLogLock.tryLock()) {
return false;
}
try {
- HLog.splitLog(master.getRootDir(), oldLogDir,
- this.master.getFileSystem(),
+ HLog.splitLog(master.getRootDir(), rsLogDir,
+ this.master.getOldLogDir(), this.master.getFileSystem(),
this.master.getConfiguration());
} finally {
master.getRegionManager().splitLogLock.unlock();
diff --git a/src/java/org/apache/hadoop/hbase/master/ServerManager.java b/src/java/org/apache/hadoop/hbase/master/ServerManager.java
index 68f4108..b8dce3d 100644
--- a/src/java/org/apache/hadoop/hbase/master/ServerManager.java
+++ b/src/java/org/apache/hadoop/hbase/master/ServerManager.java
@@ -47,6 +47,7 @@ import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.ipc.HRegionInterface;
import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.Threads;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.Watcher.Event.EventType;
@@ -95,6 +96,8 @@ public class ServerManager implements HConstants {
private int minimumServerCount;
+ private final OldLogsCleaner oldLogCleaner;
+
/*
* Dumps into log current stats on dead servers and number of servers
* TODO: Make this a metric; dump metrics into log.
@@ -142,6 +145,12 @@ public class ServerManager implements HConstants {
this.serverMonitorThread = new ServerMonitor(metaRescanInterval,
this.master.getShutdownRequested());
this.serverMonitorThread.start();
+ this.oldLogCleaner = new OldLogsCleaner(
+ c.getInt("hbase.master.meta.thread.rescanfrequency",
+ 60 * 1000), this.master.getShutdownRequested(), master);
+ Threads.setDaemonThreadRunning(oldLogCleaner,
+ "ServerManager.oldLogCleaner");
+
}
/**
diff --git a/src/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/src/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index 9577b39..dade39e 100644
--- a/src/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ b/src/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -1859,7 +1859,8 @@ public class HRegion implements HConstants, HeapSize { // , Writable{
FileSystem fs = FileSystem.get(conf);
fs.mkdirs(regionDir);
HRegion region = new HRegion(tableDir,
- new HLog(fs, new Path(regionDir, HREGION_LOGDIR_NAME), conf, null),
+ new HLog(fs, new Path(regionDir, HREGION_LOGDIR_NAME),
+ new Path(regionDir, HREGION_OLDLOGDIR_NAME), conf, null),
fs, conf, info, null);
region.initialize(null, null);
return region;
@@ -2532,7 +2533,8 @@ public class HRegion implements HConstants, HeapSize { // , Writable{
FileSystem fs = FileSystem.get(c);
Path logdir = new Path(c.get("hbase.tmp.dir"),
"hlog" + tableDir.getName() + System.currentTimeMillis());
- HLog log = new HLog(fs, logdir, c, null);
+ Path oldLogDir = new Path(c.get("hbase.tmp.dir"), HREGION_OLDLOGDIR_NAME);
+ HLog log = new HLog(fs, logdir, oldLogDir, c, null);
try {
processTable(fs, tableDir, log, c, majorCompact);
} finally {
diff --git a/src/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/src/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
index 10a1a7d..c280711 100644
--- a/src/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
+++ b/src/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
@@ -940,7 +940,7 @@ public class HRegionServer implements HConstants, HRegionInterface,
private HLog setupHLog() throws RegionServerRunningException,
IOException {
-
+ Path oldLogDir = new Path(rootDir, HREGION_OLDLOGDIR_NAME);
Path logdir = new Path(rootDir, HLog.getHLogDirectoryName(this.serverInfo));
if (LOG.isDebugEnabled()) {
LOG.debug("Log dir " + logdir);
@@ -950,13 +950,13 @@ public class HRegionServer implements HConstants, HRegionInterface,
"running at " + this.serverInfo.getServerAddress().toString() +
" because logdir " + logdir.toString() + " exists");
}
- HLog newlog = instantiateHLog(logdir);
+ HLog newlog = instantiateHLog(logdir, oldLogDir);
return newlog;
}
// instantiate
- protected HLog instantiateHLog(Path logdir) throws IOException {
- HLog newlog = new HLog(fs, logdir, conf, hlogRoller);
+ protected HLog instantiateHLog(Path logdir, Path oldLogDir) throws IOException {
+ HLog newlog = new HLog(fs, logdir, oldLogDir, conf, hlogRoller);
return newlog;
}
diff --git a/src/java/org/apache/hadoop/hbase/regionserver/wal/HLog.java b/src/java/org/apache/hadoop/hbase/regionserver/wal/HLog.java
index e868c3f..a947070 100644
--- a/src/java/org/apache/hadoop/hbase/regionserver/wal/HLog.java
+++ b/src/java/org/apache/hadoop/hbase/regionserver/wal/HLog.java
@@ -114,6 +114,7 @@ public class HLog implements HConstants, Syncable {
private final long blocksize;
private final int flushlogentries;
private final AtomicInteger unflushedEntries = new AtomicInteger(0);
+ private final Path oldLogDir;
public interface Reader {
@@ -217,8 +218,8 @@ public class HLog implements HConstants, Syncable {
* @param listener
* @throws IOException
*/
- public HLog(final FileSystem fs, final Path dir, final HBaseConfiguration conf,
- final LogRollListener listener)
+ public HLog(final FileSystem fs, final Path dir, final Path oldLogDir,
+ final HBaseConfiguration conf, final LogRollListener listener)
throws IOException {
super();
this.fs = fs;
@@ -238,6 +239,10 @@ public class HLog implements HConstants, Syncable {
throw new IOException("Target HLog directory already exists: " + dir);
}
fs.mkdirs(dir);
+ this.oldLogDir = oldLogDir;
+ if(!fs.exists(oldLogDir)) {
+ fs.mkdirs(this.oldLogDir);
+ }
this.maxLogs = conf.getInt("hbase.regionserver.maxlogs", 32);
this.enabled = conf.getBoolean("hbase.regionserver.hlog.enabled", true);
LOG.info("HLog configuration: blocksize=" + this.blocksize +
@@ -332,7 +337,7 @@ public class HLog implements HConstants, Syncable {
// flushed (and removed from the lastSeqWritten map). Means can
// remove all but currently open log file.
for (Map.Entry e : this.outputfiles.entrySet()) {
- deleteLogFile(e.getValue(), e.getKey());
+ archiveLogFile(e.getValue(), e.getKey());
}
this.outputfiles.clear();
} else {
@@ -422,7 +427,7 @@ public class HLog implements HConstants, Syncable {
}
if (sequenceNumbers.size() > 0) {
for (Long seq : sequenceNumbers) {
- deleteLogFile(this.outputfiles.remove(seq), seq);
+ archiveLogFile(this.outputfiles.remove(seq), seq);
}
}
int countOfLogs = this.outputfiles.size() - sequenceNumbers.size();
@@ -484,10 +489,12 @@ public class HLog implements HConstants, Syncable {
return oldFile;
}
- private void deleteLogFile(final Path p, final Long seqno) throws IOException {
- LOG.info("removing old hlog file " + FSUtils.getPath(p) +
- " whose highest sequence/edit id is " + seqno);
- this.fs.delete(p, true);
+ private void archiveLogFile(final Path p, final Long seqno) throws IOException {
+ Path newPath = getHLogArchivePath(this.oldLogDir, p);
+ LOG.info("moving old hlog file " + FSUtils.getPath(p) +
+ " whose highest sequence/edit id is " + seqno + " to " +
+ FSUtils.getPath(newPath));
+ this.fs.rename(p, newPath);
}
/**
@@ -508,6 +515,13 @@ public class HLog implements HConstants, Syncable {
*/
public void closeAndDelete() throws IOException {
close();
+ FileStatus[] files = fs.listStatus(this.dir);
+ for(FileStatus file : files) {
+ fs.rename(file.getPath(),
+ getHLogArchivePath(this.oldLogDir, file.getPath()));
+ }
+ LOG.debug("Moved " + files.length + " log files to " +
+ FSUtils.getPath(this.oldLogDir));
fs.delete(dir, true);
}
@@ -911,12 +925,12 @@ public class HLog implements HConstants, Syncable {
* @param rootDir qualified root directory of the HBase instance
* @param srcDir Directory of log files to split: e.g.
* ${ROOTDIR}/log_HOST_PORT
- * @param fs FileSystem
- * @param conf HBaseConfiguration
- * @throws IOException
+ * @param oldLogDir the directory where old hlog files are archived
+ * @param fs FileSystem
+ * @param conf HBaseConfiguration @throws IOException
*/
public static List splitLog(final Path rootDir, final Path srcDir,
- final FileSystem fs, final HBaseConfiguration conf)
+ Path oldLogDir, final FileSystem fs, final HBaseConfiguration conf)
throws IOException {
long millis = System.currentTimeMillis();
List splits = null;
@@ -931,8 +945,17 @@ public class HLog implements HConstants, Syncable {
}
LOG.info("Splitting " + logfiles.length + " hlog(s) in " +
srcDir.toString());
- splits = splitLog(rootDir, logfiles, fs, conf);
+ splits = splitLog(rootDir, oldLogDir, logfiles, fs, conf);
try {
+ FileStatus[] files = fs.listStatus(srcDir);
+ for(FileStatus file : files) {
+ Path newPath = getHLogArchivePath(oldLogDir, file.getPath());
+ LOG.debug("Moving " + FSUtils.getPath(file.getPath()) + " to " +
+ FSUtils.getPath(newPath));
+ fs.rename(file.getPath(), newPath);
+ }
+ LOG.debug("Moved " + files.length + " log files to " +
+ FSUtils.getPath(oldLogDir));
fs.delete(srcDir, true);
} catch (IOException e) {
e = RemoteExceptionHandler.checkIOException(e);
@@ -982,8 +1005,8 @@ public class HLog implements HConstants, Syncable {
* @return List of splits made.
*/
private static List splitLog(final Path rootDir,
- final FileStatus [] logfiles, final FileSystem fs,
- final HBaseConfiguration conf)
+ Path oldLogDir, final FileStatus[] logfiles, final FileSystem fs,
+ final HBaseConfiguration conf)
throws IOException {
final Map logWriters =
new TreeMap(Bytes.BYTES_COMPARATOR);
@@ -1059,12 +1082,13 @@ public class HLog implements HConstants, Syncable {
} catch (IOException e) {
LOG.warn("Close in finally threw exception -- continuing", e);
}
- // Delete the input file now so we do not replay edits. We could
+ // Archive the input file now so we do not replay edits. We could
// have gotten here because of an exception. If so, probably
// nothing we can do about it. Replaying it, it could work but we
// could be stuck replaying for ever. Just continue though we
// could have lost some edits.
- fs.delete(logfiles[i].getPath(), true);
+ fs.rename(logfiles[i].getPath(),
+ getHLogArchivePath(oldLogDir, logfiles[i].getPath()));
}
}
ExecutorService threadPool =
@@ -1249,6 +1273,10 @@ public class HLog implements HConstants, Syncable {
return dirName.toString();
}
+ private static Path getHLogArchivePath(Path oldLogDir, Path p) {
+ return new Path(oldLogDir, System.currentTimeMillis() + "." + p.getName());
+ }
+
private static void usage() {
System.err.println("Usage: java org.apache.hbase.HLog" +
" {--dump ... | --split ...}");
@@ -1279,6 +1307,7 @@ public class HLog implements HConstants, Syncable {
HBaseConfiguration conf = new HBaseConfiguration();
FileSystem fs = FileSystem.get(conf);
Path baseDir = new Path(conf.get(HBASE_DIR));
+ Path oldLogDir = new Path(baseDir, HREGION_OLDLOGDIR_NAME);
for (int i = 1; i < args.length; i++) {
Path logPath = new Path(args[i]);
if (!fs.exists(logPath)) {
@@ -1301,7 +1330,7 @@ public class HLog implements HConstants, Syncable {
if (!fs.getFileStatus(logPath).isDir()) {
throw new IOException(args[i] + " is not a directory");
}
- splitLog(baseDir, logPath, fs, conf);
+ splitLog(baseDir, logPath, oldLogDir, fs, conf);
}
}
}
diff --git a/src/java/org/apache/hadoop/hbase/util/MetaUtils.java b/src/java/org/apache/hadoop/hbase/util/MetaUtils.java
index d1c0dfc..cf26da5 100644
--- a/src/java/org/apache/hadoop/hbase/util/MetaUtils.java
+++ b/src/java/org/apache/hadoop/hbase/util/MetaUtils.java
@@ -95,7 +95,9 @@ public class MetaUtils {
if (this.log == null) {
Path logdir = new Path(this.fs.getHomeDirectory(),
HConstants.HREGION_LOGDIR_NAME + "_" + System.currentTimeMillis());
- this.log = new HLog(this.fs, logdir, this.conf, null);
+ Path oldLogDir = new Path(this.fs.getHomeDirectory(),
+ HConstants.HREGION_OLDLOGDIR_NAME);
+ this.log = new HLog(this.fs, logdir, oldLogDir, this.conf, null);
}
return this.log;
}
diff --git a/src/test/org/apache/hadoop/hbase/regionserver/TestStore.java b/src/test/org/apache/hadoop/hbase/regionserver/TestStore.java
index e59c2a2..6184444 100644
--- a/src/test/org/apache/hadoop/hbase/regionserver/TestStore.java
+++ b/src/test/org/apache/hadoop/hbase/regionserver/TestStore.java
@@ -3,11 +3,7 @@ package org.apache.hadoop.hbase.regionserver;
import junit.framework.TestCase;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.HBaseConfiguration;
-import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.*;
import org.apache.hadoop.hbase.regionserver.wal.HLog;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.io.hfile.HFile.Writer;
@@ -72,6 +68,7 @@ public class TestStore extends TestCase {
//Setting up a Store
Path basedir = new Path(DIR+methodName);
Path logdir = new Path(DIR+methodName+"/logs");
+ Path oldLogDir = new Path(basedir, HConstants.HREGION_OLDLOGDIR_NAME);
HColumnDescriptor hcd = new HColumnDescriptor(family);
HBaseConfiguration conf = new HBaseConfiguration();
FileSystem fs = FileSystem.get(conf);
@@ -83,7 +80,7 @@ public class TestStore extends TestCase {
HTableDescriptor htd = new HTableDescriptor(table);
htd.addFamily(hcd);
HRegionInfo info = new HRegionInfo(htd, null, null, false);
- HLog hlog = new HLog(fs, logdir, conf, null);
+ HLog hlog = new HLog(fs, logdir, oldLogDir, conf, null);
HRegion region = new HRegion(basedir, hlog, fs, conf, info, null);
store = new Store(basedir, region, hcd, fs, reconstructionLog, conf,
diff --git a/src/test/org/apache/hadoop/hbase/regionserver/TestStoreReconstruction.java b/src/test/org/apache/hadoop/hbase/regionserver/TestStoreReconstruction.java
index 4736e72..56fdad9 100644
--- a/src/test/org/apache/hadoop/hbase/regionserver/TestStoreReconstruction.java
+++ b/src/test/org/apache/hadoop/hbase/regionserver/TestStoreReconstruction.java
@@ -94,7 +94,9 @@ public class TestStoreReconstruction {
HTableDescriptor htd = new HTableDescriptor(TABLE);
htd.addFamily(hcd);
HRegionInfo info = new HRegionInfo(htd, null, null, false);
- HLog log = new HLog(cluster.getFileSystem(), this.dir,conf, null);
+ Path oldLogDir = new Path(this.dir, HConstants.HREGION_OLDLOGDIR_NAME);
+ HLog log = new HLog(cluster.getFileSystem(),
+ this.dir, oldLogDir, conf, null);
HRegion region = new HRegion(dir, log,
cluster.getFileSystem(),conf, info, null);
List result = new ArrayList();
@@ -131,7 +133,7 @@ public class TestStoreReconstruction {
List splits =
HLog.splitLog(new Path(conf.get(HConstants.HBASE_DIR)),
- this.dir, cluster.getFileSystem(),conf);
+ this.dir, oldLogDir, cluster.getFileSystem(),conf);
// Split should generate only 1 file since there's only 1 region
assertTrue(splits.size() == 1);
diff --git a/src/test/org/apache/hadoop/hbase/regionserver/wal/TestHLog.java b/src/test/org/apache/hadoop/hbase/regionserver/wal/TestHLog.java
index 9a78911..45493a5 100644
--- a/src/test/org/apache/hadoop/hbase/regionserver/wal/TestHLog.java
+++ b/src/test/org/apache/hadoop/hbase/regionserver/wal/TestHLog.java
@@ -37,6 +37,7 @@ import org.apache.hadoop.hdfs.MiniDFSCluster;
/** JUnit test case for HLog */
public class TestHLog extends HBaseTestCase implements HConstants {
private Path dir;
+ private Path oldLogDir;
private MiniDFSCluster cluster;
@Override
@@ -55,6 +56,8 @@ public class TestHLog extends HBaseTestCase implements HConstants {
if (fs.exists(dir)) {
fs.delete(dir, true);
}
+ this.oldLogDir = new Path("/hbase", HConstants.HREGION_OLDLOGDIR_NAME);
+
}
@Override
@@ -74,7 +77,7 @@ public class TestHLog extends HBaseTestCase implements HConstants {
public void testSplit() throws IOException {
final byte [] tableName = Bytes.toBytes(getName());
final byte [] rowName = tableName;
- HLog log = new HLog(this.fs, this.dir, this.conf, null);
+ HLog log = new HLog(this.fs, this.dir, this.oldLogDir, this.conf, null);
final int howmany = 3;
// Add edits for three regions.
try {
@@ -95,7 +98,7 @@ public class TestHLog extends HBaseTestCase implements HConstants {
log.rollWriter();
}
List splits =
- HLog.splitLog(this.testDir, this.dir, this.fs, this.conf);
+ HLog.splitLog(this.testDir, this.dir, this.oldLogDir, this.fs, this.conf);
verifySplits(splits, howmany);
log = null;
} finally {
@@ -124,7 +127,7 @@ public class TestHLog extends HBaseTestCase implements HConstants {
out.close();
in.close();
Path subdir = new Path(this.dir, "hlogdir");
- HLog wal = new HLog(this.fs, subdir, this.conf, null);
+ HLog wal = new HLog(this.fs, subdir, this.oldLogDir, this.conf, null);
final int total = 20;
for (int i = 0; i < total; i++) {
List kvs = new ArrayList();
@@ -225,7 +228,7 @@ public class TestHLog extends HBaseTestCase implements HConstants {
final byte [] tableName = Bytes.toBytes("tablename");
final byte [] row = Bytes.toBytes("row");
HLog.Reader reader = null;
- HLog log = new HLog(fs, dir, this.conf, null);
+ HLog log = new HLog(fs, dir, this.oldLogDir, this.conf, null);
try {
// Write columns named 1, 2, 3, etc. and then values of single byte
// 1, 2, 3...
diff --git a/src/test/org/apache/hadoop/hbase/util/TestMergeTool.java b/src/test/org/apache/hadoop/hbase/util/TestMergeTool.java
index 7c599d3..935caf2 100644
--- a/src/test/org/apache/hadoop/hbase/util/TestMergeTool.java
+++ b/src/test/org/apache/hadoop/hbase/util/TestMergeTool.java
@@ -246,7 +246,8 @@ public class TestMergeTool extends HBaseTestCase {
Path logPath = new Path("/tmp", HConstants.HREGION_LOGDIR_NAME + "_" +
System.currentTimeMillis());
LOG.info("Creating log " + logPath.toString());
- HLog log = new HLog(this.fs, logPath, this.conf, null);
+ Path oldLogDir = new Path("/tmp", HConstants.HREGION_OLDLOGDIR_NAME);
+ HLog log = new HLog(this.fs, logPath, oldLogDir, this.conf, null);
try {
// Merge Region 0 and Region 1
HRegion merged = mergeAndVerify("merging regions 0 and 1",