diff --git hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
index 1eec691..33281b9 100644
--- hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
+++ hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
@@ -293,6 +293,15 @@ public final class HConstants {
/** Parameter name for HBase instance root directory */
public static final String HBASE_DIR = "hbase.rootdir";
+  /** Parameter name for HBase instance root directory permission*/
+ public static final String HBASE_DIR_PERMS = "hbase.rootdir.perms";
+
+ /** Parameter name for HBase instance log directory */
+ public static final String HBASE_LOG_DIR = "hbase.regionserver.hlog.dir";
+
+ /** Parameter name for HBase instance log directory permission*/
+ public static final String HBASE_LOG_DIR_PERMS = "hbase.regionserver.hlog.dir.perms";
+
/** Parameter name for HBase client IPC pool type */
public static final String HBASE_CLIENT_IPC_POOL_TYPE = "hbase.client.ipc.pool.type";
diff --git hbase-common/src/main/resources/hbase-default.xml hbase-common/src/main/resources/hbase-default.xml
index dfa3270..4ab50d6 100644
--- hbase-common/src/main/resources/hbase-default.xml
+++ hbase-common/src/main/resources/hbase-default.xml
@@ -1204,6 +1204,13 @@ possible configurations would overwhelm and obscure the important.
if it does not match.
+ hbase.regionserver.hlog.dir.perms
+ 700
+ FS Permissions for the log directory in a secure(kerberos) setup.
+    When master starts, it creates the logdir with these permissions or sets the permissions
+ if it does not match.
+
+
hbase.data.umask.enable
false
Enable, if true, that file permissions should be assigned
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/io/WALLink.java hbase-server/src/main/java/org/apache/hadoop/hbase/io/WALLink.java
index 344d496..c2ad71a 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/io/WALLink.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/io/WALLink.java
@@ -45,17 +45,17 @@ public class WALLink extends FileLink {
*/
public WALLink(final Configuration conf,
final String serverName, final String logName) throws IOException {
- this(FSUtils.getRootDir(conf), serverName, logName);
+ this(FSUtils.getLogRootDir(conf), serverName, logName);
}
/**
- * @param rootDir Path to the root directory where hbase files are stored
+   * @param logRootDir Path to the log root directory where hbase WAL files are stored
* @param serverName Region Server owner of the log
* @param logName WAL file name
*/
- public WALLink(final Path rootDir, final String serverName, final String logName) {
- final Path oldLogDir = new Path(rootDir, HConstants.HREGION_OLDLOGDIR_NAME);
- final Path logDir = new Path(new Path(rootDir, HConstants.HREGION_LOGDIR_NAME), serverName);
+ public WALLink(final Path logRootDir, final String serverName, final String logName) {
+ final Path oldLogDir = new Path(logRootDir, HConstants.HREGION_OLDLOGDIR_NAME);
+ final Path logDir = new Path(new Path(logRootDir, HConstants.HREGION_LOGDIR_NAME), serverName);
setLocations(new Path(logDir, logName), new Path(oldLogDir, logName));
}
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
index 3ab4678..4bd3fa0 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
@@ -516,15 +516,15 @@ public class AssignmentManager {
Set queuedDeadServers = serverManager.getRequeuedDeadServers().keySet();
if (!queuedDeadServers.isEmpty()) {
Configuration conf = server.getConfiguration();
- Path rootdir = FSUtils.getRootDir(conf);
- FileSystem fs = rootdir.getFileSystem(conf);
+ Path logRootdir = FSUtils.getLogRootDir(conf);
+ FileSystem logFs = FSUtils.getLogRootDirFileSystem(conf);
for (ServerName serverName: queuedDeadServers) {
// In the case of a clean exit, the shutdown handler would have presplit any WALs and
// removed empty directories.
- Path logDir = new Path(rootdir,
+ Path logDir = new Path(logRootdir,
AbstractFSWALProvider.getWALDirectoryName(serverName.toString()));
Path splitDir = logDir.suffix(AbstractFSWALProvider.SPLITTING_EXT);
- if (checkWals(fs, logDir) || checkWals(fs, splitDir)) {
+ if (checkWals(logFs, logDir) || checkWals(logFs, splitDir)) {
LOG.debug("Found queued dead server " + serverName);
failover = true;
break;
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index a87c38e..cb41d93 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -163,6 +163,7 @@ import org.apache.hadoop.hbase.util.Pair;
import org.apache.hadoop.hbase.util.Threads;
import org.apache.hadoop.hbase.util.VersionInfo;
import org.apache.hadoop.hbase.util.ZKDataMigrator;
+import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.zookeeper.DrainingServerTracker;
import org.apache.hadoop.hbase.zookeeper.LoadBalancerTracker;
import org.apache.hadoop.hbase.zookeeper.MasterAddressTracker;
@@ -1081,10 +1082,10 @@ public class HMaster extends HRegionServer implements MasterServices {
private void startProcedureExecutor() throws IOException {
final MasterProcedureEnv procEnv = new MasterProcedureEnv(this);
- final Path logDir = new Path(fileSystemManager.getRootDir(),
+ final Path logDir = new Path(FSUtils.getLogRootDir(this.conf),
MasterProcedureConstants.MASTER_PROCEDURE_LOGDIR);
- procedureStore = new WALProcedureStore(conf, fileSystemManager.getFileSystem(), logDir,
+ procedureStore = new WALProcedureStore(conf, logDir.getFileSystem(conf), logDir,
new MasterProcedureEnv.WALStoreLeaseRecovery(this));
procedureStore.registerListener(new MasterProcedureEnv.MasterProcedureStoreListener(this));
procedureExecutor = new ProcedureExecutor(conf, procEnv, procedureStore,
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java
index a8f81ee..503bd6e 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java
@@ -61,10 +61,14 @@ public class MasterFileSystem {
private ClusterId clusterId;
// Keep around for convenience.
private final FileSystem fs;
- // root hbase directory on the FS
+ // Keep around for convenience.
+ private final FileSystem logFs;
+  // root hbase directory on the FS
private final Path rootdir;
// hbase temp directory used for table construction and deletion
private final Path tempdir;
+  // root log directory on the FS
+ private final Path logRootDir;
/*
@@ -99,6 +103,10 @@ public class MasterFileSystem {
// Cover both bases, the old way of setting default fs and the new.
// We're supposed to run on 0.20 and 0.21 anyways.
this.fs = this.rootdir.getFileSystem(conf);
+ this.logRootDir = FSUtils.getLogRootDir(conf);
+ this.logFs = FSUtils.getLogRootDirFileSystem(conf);
+ FSUtils.setFsDefault(conf, new Path(this.logFs.getUri()));
+ logFs.setConf(conf);
FSUtils.setFsDefault(conf, new Path(this.fs.getUri()));
// make sure the fs has the same conf
fs.setConf(conf);
@@ -123,20 +131,33 @@ public class MasterFileSystem {
final String[] protectedSubDirs = new String[] {
HConstants.BASE_NAMESPACE_DIR,
HConstants.HFILE_ARCHIVE_DIRECTORY,
- HConstants.HREGION_LOGDIR_NAME,
- HConstants.HREGION_OLDLOGDIR_NAME,
- MasterProcedureConstants.MASTER_PROCEDURE_LOGDIR,
- HConstants.CORRUPT_DIR_NAME,
HConstants.HBCK_SIDELINEDIR_NAME,
MobConstants.MOB_DIR_NAME
};
+
+ final String[] protectedSubLogDirs = new String[] {
+ HConstants.HREGION_LOGDIR_NAME,
+ HConstants.HREGION_OLDLOGDIR_NAME,
+ HConstants.CORRUPT_DIR_NAME,
+ MasterProcedureConstants.MASTER_PROCEDURE_LOGDIR
+ };
// check if the root directory exists
checkRootDir(this.rootdir, conf, this.fs);
// Check the directories under rootdir.
checkTempDir(this.tempdir, conf, this.fs);
for (String subDir : protectedSubDirs) {
- checkSubDir(new Path(this.rootdir, subDir));
+ checkSubDir(new Path(this.rootdir, subDir), HConstants.HBASE_DIR_PERMS);
+ }
+
+ final String perms;
+ if (!this.logRootDir.equals(this.rootdir)) {
+ perms = HConstants.HBASE_LOG_DIR_PERMS;
+ } else {
+ perms = HConstants.HBASE_DIR_PERMS;
+ }
+ for (String subDir : protectedSubLogDirs) {
+ checkSubDir(new Path(this.logRootDir, subDir), perms);
}
checkStagingDir();
@@ -165,6 +186,8 @@ public class MasterFileSystem {
return this.fs;
}
+ public FileSystem getLogFileSystem() { return this.logFs; }
+
public Configuration getConfiguration() {
return this.conf;
}
@@ -177,6 +200,11 @@ public class MasterFileSystem {
}
/**
+ * @return HBase root log dir.
+ */
+ public Path getLogRootDir() { return this.logRootDir; }
+
+ /**
* @return HBase temp dir.
*/
public Path getTempDir() {
@@ -296,7 +324,9 @@ public class MasterFileSystem {
* @param p
* @throws IOException
*/
- private void checkSubDir(final Path p) throws IOException {
+ private void checkSubDir(final Path p, final String dirPermsConfName) throws IOException {
+ FileSystem fs = p.getFileSystem(conf);
+ FsPermission dirPerms = new FsPermission(conf.get(dirPermsConfName, "700"));
if (!fs.exists(p)) {
if (isSecurityEnabled) {
if (!fs.mkdirs(p, secureRootSubDirPerms)) {
@@ -309,14 +339,14 @@ public class MasterFileSystem {
}
}
else {
- if (isSecurityEnabled && !secureRootSubDirPerms.equals(fs.getFileStatus(p).getPermission())) {
+ if (isSecurityEnabled && !dirPerms.equals(fs.getFileStatus(p).getPermission())) {
// check whether the permission match
LOG.warn("Found HBase directory permissions NOT matching expected permissions for "
+ p.toString() + " permissions=" + fs.getFileStatus(p).getPermission()
- + ", expecting " + secureRootSubDirPerms + ". Automatically setting the permissions. "
- + "You can change the permissions by setting \"hbase.rootdir.perms\" in hbase-site.xml "
+ + ", expecting " + dirPerms + ". Automatically setting the permissions. "
+ + "You can change the permissions by setting \"" + dirPermsConfName + "\" in hbase-site.xml "
+ "and restarting the master");
- fs.setPermission(p, secureRootSubDirPerms);
+ fs.setPermission(p, dirPerms);
}
}
}
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterWalManager.java hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterWalManager.java
index f5c7326..2f1266c 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterWalManager.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterWalManager.java
@@ -90,8 +90,8 @@ public class MasterWalManager {
private volatile boolean fsOk = true;
public MasterWalManager(MasterServices services) throws IOException {
- this(services.getConfiguration(), services.getMasterFileSystem().getFileSystem(),
- services.getMasterFileSystem().getRootDir(), services);
+ this(services.getConfiguration(), services.getMasterFileSystem().getLogFileSystem(),
+ services.getMasterFileSystem().getLogRootDir(), services);
}
public MasterWalManager(Configuration conf, FileSystem fs, Path rootDir, MasterServices services)
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitLogManager.java hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitLogManager.java
index 589da14..f0fdb04 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitLogManager.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitLogManager.java
@@ -262,7 +262,7 @@ public class SplitLogManager {
// recover-lease is done. totalSize will be under in most cases and the
// metrics that it drives will also be under-reported.
totalSize += lf.getLen();
- String pathToLog = FSUtils.removeRootPath(lf.getPath(), conf);
+ String pathToLog = FSUtils.removeLogRootPath(lf.getPath(), conf);
if (!enqueueSplitTask(pathToLog, batch)) {
throw new IOException("duplicate log split scheduled for " + lf.getPath());
}
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
index 3c9d54f..b295315 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
@@ -306,6 +306,7 @@ public class HRegionServer extends HasThread implements
// If false, the file system has become unavailable
protected volatile boolean fsOk;
protected HFileSystem fs;
+ protected HFileSystem logFs;
// Set when a report to the master comes back with a message asking us to
// shutdown. Also set by call to stop when debugging or running unit tests
@@ -327,6 +328,7 @@ public class HRegionServer extends HasThread implements
protected final Configuration conf;
private Path rootDir;
+ private Path logRootDir;
protected final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
@@ -646,13 +648,16 @@ public class HRegionServer extends HasThread implements
}
private void initializeFileSystem() throws IOException {
+ // Get fs instance used by this RS. Do we use checksum verification in the hbase? If hbase
+ // checksum verification enabled, then automatically switch off hdfs checksum verification.
+ boolean useHBaseChecksum = conf.getBoolean(HConstants.HBASE_CHECKSUM_VERIFICATION, true);
+ FSUtils.setFsDefault(this.conf, FSUtils.getLogRootDir(this.conf));
+ this.logFs = new HFileSystem(this.conf, useHBaseChecksum);
+ this.logRootDir = FSUtils.getLogRootDir(this.conf);
// Set 'fs.defaultFS' to match the filesystem on hbase.rootdir else
// underlying hadoop hdfs accessors will be going against wrong filesystem
// (unless all is set to defaults).
FSUtils.setFsDefault(this.conf, FSUtils.getRootDir(this.conf));
- // Get fs instance used by this RS. Do we use checksum verification in the hbase? If hbase
- // checksum verification enabled, then automatically switch off hdfs checksum verification.
- boolean useHBaseChecksum = conf.getBoolean(HConstants.HBASE_CHECKSUM_VERIFICATION, true);
this.fs = new HFileSystem(this.conf, useHBaseChecksum);
this.rootDir = FSUtils.getRootDir(this.conf);
this.tableDescriptors = getFsTableDescriptors();
@@ -1722,19 +1727,19 @@ public class HRegionServer extends HasThread implements
*/
private WALFactory setupWALAndReplication() throws IOException {
// TODO Replication make assumptions here based on the default filesystem impl
- final Path oldLogDir = new Path(rootDir, HConstants.HREGION_OLDLOGDIR_NAME);
+ final Path oldLogDir = new Path(logRootDir, HConstants.HREGION_OLDLOGDIR_NAME);
final String logName = AbstractFSWALProvider.getWALDirectoryName(this.serverName.toString());
- Path logdir = new Path(rootDir, logName);
- if (LOG.isDebugEnabled()) LOG.debug("logdir=" + logdir);
- if (this.fs.exists(logdir)) {
+ Path logDir = new Path(logRootDir, logName);
+ if (LOG.isDebugEnabled()) LOG.debug("logDir=" + logDir);
+ if (this.logFs.exists(logDir)) {
throw new RegionServerRunningException("Region server has already " +
"created directory at " + this.serverName.toString());
}
// Instantiate replication manager if replication enabled. Pass it the
// log directories.
- createNewReplicationInstance(conf, this, this.fs, logdir, oldLogDir);
+ createNewReplicationInstance(conf, this, this.logFs, logDir, oldLogDir);
// listeners the wal factory will add to wals it creates.
final List listeners = new ArrayList();
@@ -2711,6 +2716,20 @@ public class HRegionServer extends HasThread implements
return fs;
}
+ /**
+ * @return Return the logRootDir.
+ */
+ protected Path getLogRootDir() {
+ return logRootDir;
+ }
+
+ /**
+ * @return Return the logFs.
+ */
+ public FileSystem getLogFileSystem() {
+ return logFs;
+ }
+
@Override
public String toString() {
return getServerName().toString();
@@ -2777,7 +2796,7 @@ public class HRegionServer extends HasThread implements
* Load the replication service objects, if any
*/
static private void createNewReplicationInstance(Configuration conf,
- HRegionServer server, FileSystem fs, Path logDir, Path oldLogDir) throws IOException{
+ HRegionServer server, FileSystem logFs, Path logDir, Path oldLogDir) throws IOException{
if ((server instanceof HMaster) &&
(!BaseLoadBalancer.userTablesOnMaster(conf))) {
@@ -2797,21 +2816,21 @@ public class HRegionServer extends HasThread implements
if (sourceClassname.equals(sinkClassname)) {
server.replicationSourceHandler = (ReplicationSourceService)
newReplicationInstance(sourceClassname,
- conf, server, fs, logDir, oldLogDir);
+ conf, server, logFs, logDir, oldLogDir);
server.replicationSinkHandler = (ReplicationSinkService)
server.replicationSourceHandler;
} else {
server.replicationSourceHandler = (ReplicationSourceService)
newReplicationInstance(sourceClassname,
- conf, server, fs, logDir, oldLogDir);
+ conf, server, logFs, logDir, oldLogDir);
server.replicationSinkHandler = (ReplicationSinkService)
newReplicationInstance(sinkClassname,
- conf, server, fs, logDir, oldLogDir);
+ conf, server, logFs, logDir, oldLogDir);
}
}
static private ReplicationService newReplicationInstance(String classname,
- Configuration conf, HRegionServer server, FileSystem fs, Path logDir,
+ Configuration conf, HRegionServer server, FileSystem logFs, Path logDir,
Path oldLogDir) throws IOException{
Class> clazz = null;
@@ -2825,7 +2844,7 @@ public class HRegionServer extends HasThread implements
// create an instance of the replication object.
ReplicationService service = (ReplicationService)
ReflectionUtils.newInstance(clazz, conf);
- service.initialize(server, fs, logDir, oldLogDir);
+ service.initialize(server, logFs, logDir, oldLogDir);
return service;
}
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitLogWorker.java hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitLogWorker.java
index 0eceb92..a1297a0 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitLogWorker.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitLogWorker.java
@@ -88,11 +88,11 @@ public class SplitLogWorker implements Runnable {
this(server, conf, server, new TaskExecutor() {
@Override
public Status exec(String filename, RecoveryMode mode, CancelableProgressable p) {
- Path rootdir;
+ Path logDir;
FileSystem fs;
try {
- rootdir = FSUtils.getRootDir(conf);
- fs = rootdir.getFileSystem(conf);
+ logDir = FSUtils.getLogRootDir(conf);
+ fs = logDir.getFileSystem(conf);
} catch (IOException e) {
LOG.warn("could not find root dir or fs", e);
return Status.RESIGNED;
@@ -101,7 +101,7 @@ public class SplitLogWorker implements Runnable {
// interrupted or has encountered a transient error and when it has
// encountered a bad non-retry-able persistent error.
try {
- if (!WALSplitter.splitLogFile(rootdir, fs.getFileStatus(new Path(rootdir, filename)),
+ if (!WALSplitter.splitLogFile(logDir, fs.getFileStatus(new Path(logDir, filename)),
fs, conf, p, sequenceIdChecker, server.getCoordinatedStateManager(), mode, factory)) {
return Status.PREEMPTED;
}
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java
index 34103dd..afeed4b 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java
@@ -801,7 +801,7 @@ public class FSHLog extends AbstractFSWAL {
+ Bytes.SIZEOF_INT + (3 * Bytes.SIZEOF_LONG));
private static void split(final Configuration conf, final Path p) throws IOException {
- FileSystem fs = FileSystem.get(conf);
+ FileSystem fs = FSUtils.getLogRootDirFileSystem(conf);
if (!fs.exists(p)) {
throw new FileNotFoundException(p.toString());
}
@@ -809,7 +809,7 @@ public class FSHLog extends AbstractFSWAL {
throw new IOException(p + " is not a directory");
}
- final Path baseDir = FSUtils.getRootDir(conf);
+ final Path baseDir = FSUtils.getLogRootDir(conf);
final Path archiveDir = new Path(baseDir, HConstants.HREGION_OLDLOGDIR_NAME);
WALSplitter.split(baseDir, p, archiveDir, fs, conf, WALFactory.getInstance(conf));
}
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
index 7a229eb..ca9f58d 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
@@ -962,9 +962,9 @@ public class ReplicationSource extends Thread
// to look at)
List deadRegionServers = this.replicationQueueInfo.getDeadRegionServers();
LOG.info("NB dead servers : " + deadRegionServers.size());
- final Path rootDir = FSUtils.getRootDir(conf);
+ final Path logDir = FSUtils.getLogRootDir(conf);
for (String curDeadServerName : deadRegionServers) {
- final Path deadRsDirectory = new Path(rootDir,
+ final Path deadRsDirectory = new Path(logDir,
AbstractFSWALProvider.getWALDirectoryName(curDeadServerName));
Path[] locs = new Path[] { new Path(deadRsDirectory, currentPath.getName()),
new Path(deadRsDirectory.suffix(AbstractFSWALProvider.SPLITTING_EXT),
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSyncUp.java hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSyncUp.java
index bfc6421..6e62682 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSyncUp.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSyncUp.java
@@ -76,7 +76,7 @@ public class ReplicationSyncUp extends Configured implements Tool {
Replication replication;
ReplicationSourceManager manager;
FileSystem fs;
- Path oldLogDir, logDir, rootDir;
+ Path oldLogDir, logDir, logRootDir;
ZooKeeperWatcher zkw;
Abortable abortable = new Abortable() {
@@ -94,10 +94,10 @@ public class ReplicationSyncUp extends Configured implements Tool {
new ZooKeeperWatcher(conf, "syncupReplication" + System.currentTimeMillis(), abortable,
true);
- rootDir = FSUtils.getRootDir(conf);
- fs = FileSystem.get(conf);
- oldLogDir = new Path(rootDir, HConstants.HREGION_OLDLOGDIR_NAME);
- logDir = new Path(rootDir, HConstants.HREGION_LOGDIR_NAME);
+ logRootDir = FSUtils.getLogRootDir(conf);
+ fs = FSUtils.getLogRootDirFileSystem(conf);
+ oldLogDir = new Path(logRootDir, HConstants.HREGION_OLDLOGDIR_NAME);
+ logDir = new Path(logRootDir, HConstants.HREGION_LOGDIR_NAME);
System.out.println("Start Replication Server start");
replication = new Replication(new DummyServer(zkw), fs, logDir, oldLogDir);
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java
index abddd78..fd0dc6e 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java
@@ -99,6 +99,10 @@ import org.apache.hadoop.util.Progressable;
import org.apache.hadoop.util.ReflectionUtils;
import org.apache.hadoop.util.StringUtils;
+
+import static org.apache.hadoop.hbase.HConstants.HBASE_DIR;
+import static org.apache.hadoop.hbase.HConstants.HBASE_LOG_DIR;
+
/**
* Utility methods for interacting with the underlying file system.
*/
@@ -949,22 +953,22 @@ public abstract class FSUtils {
return root;
} catch (URISyntaxException e) {
IOException io = new IOException("Root directory path is not a valid " +
- "URI -- check your " + HConstants.HBASE_DIR + " configuration");
+ "URI -- check your " + HBASE_DIR + " configuration");
io.initCause(e);
throw io;
}
}
/**
- * Checks for the presence of the root path (using the provided conf object) in the given path. If
+   * Checks for the presence of the WAL root path (using the provided conf object) in the given path. If
* it exists, this method removes it and returns the String representation of remaining relative path.
* @param path
* @param conf
* @return String representation of the remaining relative path
* @throws IOException
*/
- public static String removeRootPath(Path path, final Configuration conf) throws IOException {
- Path root = FSUtils.getRootDir(conf);
+ public static String removeLogRootPath(Path path, final Configuration conf) throws IOException {
+ Path root = FSUtils.getLogRootDir(conf);
String pathStr = path.toString();
// check that the path is absolute... it has the root path in it.
if (!pathStr.startsWith(root.toString())) return pathStr;
@@ -1011,18 +1015,18 @@ public abstract class FSUtils {
/**
* @param c configuration
- * @return Path to hbase root directory: i.e. hbase.rootdir from
+   * @return {@link Path} to hbase root directory: i.e. {@value HConstants#HBASE_DIR} from
* configuration as a qualified Path.
* @throws IOException e
*/
public static Path getRootDir(final Configuration c) throws IOException {
- Path p = new Path(c.get(HConstants.HBASE_DIR));
+ Path p = new Path(c.get(HBASE_DIR));
FileSystem fs = p.getFileSystem(c);
return p.makeQualified(fs);
}
public static void setRootDir(final Configuration c, final Path root) throws IOException {
- c.set(HConstants.HBASE_DIR, root.toString());
+ c.set(HBASE_DIR, root.toString());
}
public static void setFsDefault(final Configuration c, final Path root) throws IOException {
@@ -1030,6 +1034,32 @@ public abstract class FSUtils {
}
/**
+ * @param c configuration
+   * @return {@link Path} to hbase log root directory: i.e. {@value HConstants#HBASE_LOG_DIR} from
+   * configuration as a qualified Path. Defaults to {@value HConstants#HBASE_DIR}
+ * @throws IOException e
+ */
+ public static Path getLogRootDir(final Configuration c) throws IOException {
+ Path p = new Path(c.get(HBASE_LOG_DIR, c.get(HBASE_DIR)));
+ FileSystem fs = p.getFileSystem(c);
+ return p.makeQualified(fs);
+ }
+
+ public static void setLogRootDir(final Configuration c, final Path root) throws IOException {
+ c.set(HBASE_LOG_DIR, root.toString());
+ }
+
+ public static FileSystem getLogRootDirFileSystem(final Configuration c) throws IOException {
+ Path p = getLogRootDir(c);
+ return p.getFileSystem(c);
+ }
+
+ public static FileSystem getRootDirFileSystem(final Configuration c) throws IOException {
+ Path p = getRootDir(c);
+ return p.getFileSystem(c);
+ }
+
+ /**
* Checks if meta region exists
*
* @param fs file system
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AsyncFSWALProvider.java hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AsyncFSWALProvider.java
index b8fffb3..92841ce 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AsyncFSWALProvider.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AsyncFSWALProvider.java
@@ -56,7 +56,7 @@ public class AsyncFSWALProvider extends AbstractFSWALProvider {
@Override
protected AsyncFSWAL createWAL() throws IOException {
- return new AsyncFSWAL(FileSystem.get(conf), FSUtils.getRootDir(conf),
+ return new AsyncFSWAL(FSUtils.getLogRootDirFileSystem(conf), FSUtils.getLogRootDir(conf),
getWALDirectoryName(factory.factoryId), HConstants.HREGION_OLDLOGDIR_NAME, conf, listeners,
true, logPrefix, META_WAL_PROVIDER_ID.equals(providerId) ? META_WAL_PROVIDER_ID : null,
eventLoopGroup.next());
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/wal/DisabledWALProvider.java hbase-server/src/main/java/org/apache/hadoop/hbase/wal/DisabledWALProvider.java
index 7f10d7d..7ce852b 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/wal/DisabledWALProvider.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/wal/DisabledWALProvider.java
@@ -62,7 +62,7 @@ class DisabledWALProvider implements WALProvider {
if (null == providerId) {
providerId = "defaultDisabled";
}
- disabled = new DisabledWAL(new Path(FSUtils.getRootDir(conf), providerId), conf, null);
+ disabled = new DisabledWAL(new Path(FSUtils.getLogRootDir(conf), providerId), conf, null);
}
@Override
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/wal/FSHLogProvider.java hbase-server/src/main/java/org/apache/hadoop/hbase/wal/FSHLogProvider.java
index 5631db8..434616a 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/wal/FSHLogProvider.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/wal/FSHLogProvider.java
@@ -75,7 +75,7 @@ public class FSHLogProvider extends AbstractFSWALProvider {
@Override
protected FSHLog createWAL() throws IOException {
- return new FSHLog(FileSystem.get(conf), FSUtils.getRootDir(conf),
+ return new FSHLog(FSUtils.getLogRootDirFileSystem(conf), FSUtils.getLogRootDir(conf),
getWALDirectoryName(factory.factoryId), HConstants.HREGION_OLDLOGDIR_NAME, conf, listeners,
true, logPrefix, META_WAL_PROVIDER_ID.equals(providerId) ? META_WAL_PROVIDER_ID : null);
}
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java
index 42d70f4..eebde32 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java
@@ -432,7 +432,7 @@ public class WALSplitter {
*/
public static void finishSplitLogFile(String logfile,
Configuration conf) throws IOException {
- Path rootdir = FSUtils.getRootDir(conf);
+ Path rootdir = FSUtils.getLogRootDir(conf);
Path oldLogDir = new Path(rootdir, HConstants.HREGION_OLDLOGDIR_NAME);
Path logPath;
if (FSUtils.isStartingWithPath(rootdir, logfile)) {
@@ -475,7 +475,7 @@ public class WALSplitter {
final List corruptedLogs,
final List processedLogs, final Path oldLogDir,
final FileSystem fs, final Configuration conf) throws IOException {
- final Path corruptDir = new Path(FSUtils.getRootDir(conf), HConstants.CORRUPT_DIR_NAME);
+ final Path corruptDir = new Path(FSUtils.getLogRootDir(conf), HConstants.CORRUPT_DIR_NAME);
if (conf.get("hbase.regionserver.hlog.splitlog.corrupt.dir") != null) {
LOG.warn("hbase.regionserver.hlog.splitlog.corrupt.dir is deprecated. Default to "
+ corruptDir);
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
index 47fed8d..d18fc62 100644
--- hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
@@ -868,6 +868,16 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
}
/**
+ * Start up a minicluster of hbase, dfs, and zookeeper where WAL's logDir is created separately.
+ * @throws Exception
+ * @return Mini hbase cluster instance created.
+ * @see {@link #shutdownMiniDFSCluster()}
+ */
+ public MiniHBaseCluster startMiniClusterWithLogDir() throws Exception {
+ return startMiniClusterWithLogDir(1, 1);
+ }
+
+ /**
* Start up a minicluster of hbase, dfs, and zookeeper.
* Set the create flag to create root or data directory path or not
* (will overwrite if dir already exists)
@@ -898,6 +908,11 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
return startMiniCluster(1, numSlaves, false);
}
+ public MiniHBaseCluster startMiniClusterWithLogDir(final int numSlaves)
+ throws Exception {
+ return startMiniClusterWithLogDir(1, numSlaves, false);
+ }
+
/**
* Start minicluster. Whether to create a new root or data dir path even if such a path
* has been created earlier is decided based on flag create
@@ -911,6 +926,12 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
return startMiniCluster(numMasters, numSlaves, null, create);
}
+ public MiniHBaseCluster startMiniClusterWithLogDir(final int numMasters,
+ final int numSlaves, boolean create)
+ throws Exception {
+ return startMiniClusterWithLogDir(numMasters, numSlaves, null, create);
+ }
+
/**
* start minicluster
* @throws Exception
@@ -923,6 +944,12 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
return startMiniCluster(numMasters, numSlaves, null, false);
}
+ public MiniHBaseCluster startMiniClusterWithLogDir(final int numMasters,
+ final int numSlaves)
+ throws Exception {
+ return startMiniClusterWithLogDir(numMasters, numSlaves, null, false);
+ }
+
public MiniHBaseCluster startMiniCluster(final int numMasters,
final int numSlaves, final String[] dataNodeHosts, boolean create)
throws Exception {
@@ -930,6 +957,13 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
null, null, create);
}
+ public MiniHBaseCluster startMiniClusterWithLogDir(final int numMasters,
+ final int numSlaves, final String[] dataNodeHosts, boolean create)
+ throws Exception {
+ return startMiniClusterWithLogDir(numMasters, numSlaves, numSlaves, dataNodeHosts,
+ null, null, create);
+ }
+
/**
* Start up a minicluster of hbase, optionally dfs, and zookeeper.
* Modifies Configuration. Homes the cluster data directory under a random
@@ -1055,12 +1089,57 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
// Start the MiniHBaseCluster
return startMiniHBaseCluster(numMasters, numSlaves, masterClass,
- regionserverClass, create);
+ regionserverClass, create, false);
+ }
+
+ /**
+ * Same as {@link #startMiniCluster(int, int, String[], Class, Class)}, but with custom
+ * number of datanodes.
+ * @param numDataNodes Number of data nodes.
+ * @param create Set this flag to create a new
+ * root or data directory path or not (will overwrite if exists already).
+ */
+ public MiniHBaseCluster startMiniClusterWithLogDir(final int numMasters,
+ final int numSlaves, int numDataNodes, final String[] dataNodeHosts,
+ Class<? extends HMaster> masterClass,
+ Class<? extends MiniHBaseCluster.MiniHBaseClusterRegionServer> regionserverClass,
+ boolean create)
+ throws Exception {
+ if (dataNodeHosts != null && dataNodeHosts.length != 0) {
+ numDataNodes = dataNodeHosts.length;
+ }
+
+ LOG.info("Starting up minicluster with " + numMasters + " master(s) and " +
+ numSlaves + " regionserver(s) and " + numDataNodes + " datanode(s)");
+
+ // If we already put up a cluster, fail.
+ if (miniClusterRunning) {
+ throw new IllegalStateException("A mini-cluster is already running");
+ }
+ miniClusterRunning = true;
+
+ setupClusterTestDir();
+ System.setProperty(TEST_DIRECTORY_KEY, this.clusterTestDir.getPath());
+
+ // Bring up mini dfs cluster. This spews a bunch of warnings about missing
+ // scheme. Complaints are 'Scheme is undefined for build/test/data/dfs/name1'.
+ if (this.dfsCluster == null) {
+ dfsCluster = startMiniDFSCluster(numDataNodes, dataNodeHosts);
+ }
+
+ // Start up a zk cluster.
+ if (this.zkCluster == null) {
+ startMiniZKCluster(clusterTestDir);
+ }
+
+ // Start the MiniHBaseCluster
+ return startMiniHBaseCluster(numMasters, numSlaves, masterClass,
+ regionserverClass, create, true);
}
public MiniHBaseCluster startMiniHBaseCluster(final int numMasters, final int numSlaves)
throws IOException, InterruptedException{
- return startMiniHBaseCluster(numMasters, numSlaves, null, null, false);
+ return startMiniHBaseCluster(numMasters, numSlaves, null, null, false, false);
}
/**
@@ -1079,11 +1158,13 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
public MiniHBaseCluster startMiniHBaseCluster(final int numMasters,
final int numSlaves, Class<? extends HMaster> masterClass,
Class<? extends MiniHBaseCluster.MiniHBaseClusterRegionServer> regionserverClass,
- boolean create)
+ boolean create, boolean withLogDir)
throws IOException, InterruptedException {
// Now do the mini hbase cluster. Set the hbase.rootdir in config.
createRootDir(create);
-
+ if (withLogDir) {
+ createLogRootDir(create);
+ }
// Set the hbase.fs.tmp.dir config to make sure that we have some default value. This is
// for tests that do not read hbase-defaults.xml
setHBaseFsTmpDir();
@@ -1273,6 +1354,40 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
return createRootDir(false);
}
+ /**
+ * Creates an hbase log root dir in the user's home directory. Also creates the
+ * hbase version file. Normally you won't make use of this method. The log root
+ * dir is created for you as part of mini cluster startup. You'd only use this
+ * method if you were doing manual operation.
+ *
+ * @param create This flag decides whether to get a new
+ * root or data directory path or not, if it has been fetched already.
+ * Note : Directory will be made irrespective of whether path has been fetched or not.
+ * If directory already exists, it will be overwritten.
+ * @return Fully qualified path to the hbase log root dir
+ * @throws IOException
+ */
+ public Path createLogRootDir(boolean create) throws IOException {
+ FileSystem fs = FileSystem.get(this.conf);
+ String randomStr = UUID.randomUUID().toString();
+ Path logBase = new Path(getDefaultRootDirPath(create), randomStr);
+ Path hbaseLogRootdir = new Path(logBase, "logRootDir");
+ FSUtils.setLogRootDir(this.conf, hbaseLogRootdir);
+ fs.mkdirs(hbaseLogRootdir);
+ FSUtils.setVersion(fs, hbaseLogRootdir);
+ return hbaseLogRootdir;
+ }
+
+ /**
+ * Same as {@link HBaseTestingUtility#createLogRootDir(boolean create)}
+ * except that create flag is false.
+ *
+ * @return Fully qualified path to the hbase log root dir
+ * @throws IOException
+ */
+ public Path createLogRootDir() throws IOException {
+ return createLogRootDir(false);
+ }
private void setHBaseFsTmpDir() throws IOException {
String hbaseFsTmpDirInString = this.conf.get("hbase.fs.tmp.dir");
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestWALObserver.java hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestWALObserver.java
index 6eca7f0..afad1e8 100644
--- hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestWALObserver.java
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestWALObserver.java
@@ -100,6 +100,7 @@ public class TestWALObserver {
private FileSystem fs;
private Path dir;
private Path hbaseRootDir;
+ private Path hbaseLogRootDir;
private String logName;
private Path oldLogDir;
private Path logDir;
@@ -117,8 +118,11 @@ public class TestWALObserver {
TEST_UTIL.startMiniCluster(1);
Path hbaseRootDir = TEST_UTIL.getDFSCluster().getFileSystem()
.makeQualified(new Path("/hbase"));
+ Path hbaseLogRootDir = TEST_UTIL.getDFSCluster().getFileSystem()
+ .makeQualified(new Path("/hbaseLogRoot"));
LOG.info("hbase.rootdir=" + hbaseRootDir);
FSUtils.setRootDir(conf, hbaseRootDir);
+ FSUtils.setLogRootDir(conf, hbaseLogRootDir);
}
@AfterClass
@@ -132,16 +136,20 @@ public class TestWALObserver {
// this.cluster = TEST_UTIL.getDFSCluster();
this.fs = TEST_UTIL.getDFSCluster().getFileSystem();
this.hbaseRootDir = FSUtils.getRootDir(conf);
+ this.hbaseLogRootDir = FSUtils.getLogRootDir(conf);
this.dir = new Path(this.hbaseRootDir, TestWALObserver.class.getName());
- this.oldLogDir = new Path(this.hbaseRootDir,
+ this.oldLogDir = new Path(this.hbaseLogRootDir,
HConstants.HREGION_OLDLOGDIR_NAME);
- this.logDir = new Path(this.hbaseRootDir,
+ this.logDir = new Path(this.hbaseLogRootDir,
AbstractFSWALProvider.getWALDirectoryName(currentTest.getMethodName()));
this.logName = HConstants.HREGION_LOGDIR_NAME;
if (TEST_UTIL.getDFSCluster().getFileSystem().exists(this.hbaseRootDir)) {
TEST_UTIL.getDFSCluster().getFileSystem().delete(this.hbaseRootDir, true);
}
+ if (TEST_UTIL.getDFSCluster().getFileSystem().exists(this.hbaseLogRootDir)) {
+ TEST_UTIL.getDFSCluster().getFileSystem().delete(this.hbaseLogRootDir, true);
+ }
this.wals = new WALFactory(conf, null, currentTest.getMethodName());
}
@@ -155,6 +163,7 @@ public class TestWALObserver {
LOG.debug("details of failure to close wal factory.", exception);
}
TEST_UTIL.getDFSCluster().getFileSystem().delete(this.hbaseRootDir, true);
+ TEST_UTIL.getDFSCluster().getFileSystem().delete(this.hbaseLogRootDir, true);
}
/**
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALPlayer.java hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALPlayer.java
index c17d408..c8a9a8e 100644
--- hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALPlayer.java
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALPlayer.java
@@ -30,6 +30,7 @@ import java.io.PrintStream;
import java.util.ArrayList;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
@@ -45,6 +46,7 @@ import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapreduce.WALPlayer.WALKeyValueMapper;
+import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.hbase.testclassification.MapReduceTests;
@@ -69,15 +71,27 @@ import org.mockito.stubbing.Answer;
public class TestWALPlayer {
private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
private static MiniHBaseCluster cluster;
+ private static Path rootDir;
+ private static Path logRootDir;
+ private static FileSystem fs;
+ private static FileSystem logFs;
+ private static Configuration conf;
@BeforeClass
public static void beforeClass() throws Exception {
+ conf = TEST_UTIL.getConfiguration();
+ rootDir = TEST_UTIL.createRootDir();
+ logRootDir = TEST_UTIL.createLogRootDir();
+ fs = FSUtils.getRootDirFileSystem(conf);
+ logFs = FSUtils.getLogRootDirFileSystem(conf);
cluster = TEST_UTIL.startMiniCluster();
}
@AfterClass
public static void afterClass() throws Exception {
TEST_UTIL.shutdownMiniCluster();
+ fs.delete(rootDir, true);
+ logFs.delete(logRootDir, true);
}
/**
@@ -109,7 +123,7 @@ public class TestWALPlayer {
WAL log = cluster.getRegionServer(0).getWAL(null);
log.rollWriter();
String walInputDir = new Path(cluster.getMaster().getMasterFileSystem()
- .getRootDir(), HConstants.HREGION_LOGDIR_NAME).toString();
+ .getLogRootDir(), HConstants.HREGION_LOGDIR_NAME).toString();
Configuration configuration= TEST_UTIL.getConfiguration();
WALPlayer player = new WALPlayer(configuration);
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALRecordReader.java hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALRecordReader.java
index aee2a06..99ea818 100644
--- hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALRecordReader.java
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALRecordReader.java
@@ -44,6 +44,7 @@ import org.apache.hadoop.hbase.mapreduce.WALInputFormat.WALKeyRecordReader;
import org.apache.hadoop.hbase.mapreduce.WALInputFormat.WALRecordReader;
import org.apache.hadoop.hbase.regionserver.MultiVersionConcurrencyControl;
import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
+import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.wal.WAL;
import org.apache.hadoop.hbase.wal.WALFactory;
import org.apache.hadoop.hbase.wal.WALKey;
@@ -68,6 +69,8 @@ public class TestWALRecordReader {
private static Configuration conf;
private static FileSystem fs;
private static Path hbaseDir;
+ private static FileSystem logFs;
+ private static Path logRootDir;
// visible for TestHLogRecordReader
static final TableName tableName = TableName.valueOf(getName());
private static final byte [] rowName = tableName.getName();
@@ -88,12 +91,9 @@ public class TestWALRecordReader {
@Before
public void setUp() throws Exception {
+ fs.delete(hbaseDir, true);
+ logFs.delete(logRootDir, true);
mvcc = new MultiVersionConcurrencyControl();
- FileStatus[] entries = fs.listStatus(hbaseDir);
- for (FileStatus dir : entries) {
- fs.delete(dir.getPath(), true);
- }
-
}
@BeforeClass
public static void setUpBeforeClass() throws Exception {
@@ -107,8 +107,9 @@ public class TestWALRecordReader {
fs = TEST_UTIL.getDFSCluster().getFileSystem();
hbaseDir = TEST_UTIL.createRootDir();
-
- logDir = new Path(hbaseDir, HConstants.HREGION_LOGDIR_NAME);
+ logRootDir = TEST_UTIL.createLogRootDir();
+ logFs = FSUtils.getLogRootDirFileSystem(conf);
+ logDir = new Path(logRootDir, HConstants.HREGION_LOGDIR_NAME);
htd = new HTableDescriptor(tableName);
htd.addFamily(new HColumnDescriptor(family));
@@ -116,6 +117,8 @@ public class TestWALRecordReader {
@AfterClass
public static void tearDownAfterClass() throws Exception {
+ fs.delete(hbaseDir, true);
+ logFs.delete(logRootDir, true);
TEST_UTIL.shutdownMiniCluster();
}
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterFileSystemWithLogDir.java hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterFileSystemWithLogDir.java
new file mode 100644
index 0000000..265e3a9
--- /dev/null
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterFileSystemWithLogDir.java
@@ -0,0 +1,59 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.master;
+
+import static org.junit.Assert.assertEquals;
+
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.testclassification.MasterTests;
+import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.apache.hadoop.hbase.util.FSUtils;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+/**
+ * Test the master filesystem in a local cluster
+ */
+@Category({MasterTests.class, MediumTests.class})
+public class TestMasterFileSystemWithLogDir {
+
+ private static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
+
+ @BeforeClass
+ public static void setupTest() throws Exception {
+ UTIL.startMiniClusterWithLogDir();
+ }
+
+ @AfterClass
+ public static void teardownTest() throws Exception {
+ UTIL.shutdownMiniCluster();
+ }
+
+ @Test
+ public void testFsUriSetProperly() throws Exception {
+ HMaster master = UTIL.getMiniHBaseCluster().getMaster();
+ MasterFileSystem fs = master.getMasterFileSystem();
+ Path masterRoot = FSUtils.getRootDir(fs.getConfiguration());
+ Path rootDir = FSUtils.getRootDir(fs.getFileSystem().getConf());
+ assertEquals(masterRoot, rootDir);
+ assertEquals(FSUtils.getLogRootDir(UTIL.getConfiguration()), fs.getLogRootDir());
+ }
+}
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionServerBulkLoadWithLogDir.java hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionServerBulkLoadWithLogDir.java
new file mode 100644
index 0000000..38323be
--- /dev/null
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionServerBulkLoadWithLogDir.java
@@ -0,0 +1,36 @@
+package org.apache.hadoop.hbase.regionserver;
+
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.wal.WAL;
+import org.junit.Test;
+
+import static org.hamcrest.core.Is.is;
+import static org.junit.Assert.assertThat;
+
+public class TestHRegionServerBulkLoadWithLogDir extends TestHRegionServerBulkLoad {
+ public TestHRegionServerBulkLoadWithLogDir(int duration) {
+ super(duration);
+ }
+ /**
+ * Atomic bulk load.
+ */
+ @Test
+ public void testAtomicBulkLoad() throws Exception {
+ TableName TABLE_NAME = TableName.valueOf("atomicBulkLoad");
+
+ int millisToRun = 30000;
+ int numScanners = 50;
+
+ UTIL.startMiniClusterWithLogDir(1);
+ try {
+ WAL log = UTIL.getHBaseCluster().getRegionServer(0).getWAL(null);
+ FindBulkHBaseListener listener = new FindBulkHBaseListener();
+ log.registerWALActionsListener(listener);
+ runAtomicBulkloadTest(TABLE_NAME, millisToRun, numScanners);
+ assertThat(listener.isFound(), is(true));
+ } finally {
+ UTIL.shutdownMiniCluster();
+ }
+ }
+}
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestFSWAL.java hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestFSWAL.java
index 19759d1..9db0bec 100644
--- hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestFSWAL.java
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestFSWAL.java
@@ -21,6 +21,7 @@ import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
+import static org.junit.Assert.assertNotEquals;
import java.io.IOException;
import java.util.ArrayList;
@@ -89,7 +90,9 @@ public abstract class AbstractTestFSWAL {
FS.delete(dir.getPath(), true);
}
final Path hbaseDir = TEST_UTIL.createRootDir();
- DIR = new Path(hbaseDir, currentTest.getMethodName());
+ final Path hbaseLogDir = TEST_UTIL.createLogRootDir();
+ DIR = new Path(hbaseLogDir, currentTest.getMethodName());
+ assertNotEquals(hbaseDir, hbaseLogDir);
}
@BeforeClass
@@ -134,7 +137,7 @@ public abstract class AbstractTestFSWAL {
// test to see whether the coprocessor is loaded or not.
AbstractFSWAL<?> log = null;
try {
- log = newWAL(FS, FSUtils.getRootDir(CONF), DIR.toString(), HConstants.HREGION_OLDLOGDIR_NAME,
+ log = newWAL(FS, FSUtils.getLogRootDir(CONF), DIR.toString(), HConstants.HREGION_OLDLOGDIR_NAME,
CONF, null, true, null, null);
WALCoprocessorHost host = log.getCoprocessorHost();
Coprocessor c = host.findCoprocessor(SampleRegionWALObserver.class.getName());
@@ -182,7 +185,7 @@ public abstract class AbstractTestFSWAL {
AbstractFSWAL<?> wal1 = null;
AbstractFSWAL<?> walMeta = null;
try {
- wal1 = newWAL(FS, FSUtils.getRootDir(CONF), DIR.toString(), HConstants.HREGION_OLDLOGDIR_NAME,
+ wal1 = newWAL(FS, FSUtils.getLogRootDir(CONF), DIR.toString(), HConstants.HREGION_OLDLOGDIR_NAME,
CONF, null, true, null, null);
LOG.debug("Log obtained is: " + wal1);
Comparator<Path> comp = wal1.LOG_NAME_COMPARATOR;
@@ -193,7 +196,7 @@ public abstract class AbstractTestFSWAL {
// comparing with different filenum.
assertTrue(comp.compare(p1, p2) < 0);
walMeta =
- newWAL(FS, FSUtils.getRootDir(CONF), DIR.toString(), HConstants.HREGION_OLDLOGDIR_NAME,
+ newWAL(FS, FSUtils.getLogRootDir(CONF), DIR.toString(), HConstants.HREGION_OLDLOGDIR_NAME,
CONF, null, true, null, AbstractFSWALProvider.META_WAL_PROVIDER_ID);
Comparator<Path> compMeta = walMeta.LOG_NAME_COMPARATOR;
@@ -240,7 +243,7 @@ public abstract class AbstractTestFSWAL {
LOG.debug("testFindMemStoresEligibleForFlush");
Configuration conf1 = HBaseConfiguration.create(CONF);
conf1.setInt("hbase.regionserver.maxlogs", 1);
- AbstractFSWAL<?> wal = newWAL(FS, FSUtils.getRootDir(conf1), DIR.toString(),
+ AbstractFSWAL<?> wal = newWAL(FS, FSUtils.getLogRootDir(conf1), DIR.toString(),
HConstants.HREGION_OLDLOGDIR_NAME, conf1, null, true, null, null);
HTableDescriptor t1 =
new HTableDescriptor(TableName.valueOf("t1")).addFamily(new HColumnDescriptor("row"));
@@ -325,7 +328,7 @@ public abstract class AbstractTestFSWAL {
@Test(expected = IOException.class)
public void testFailedToCreateWALIfParentRenamed() throws IOException {
final String name = "testFailedToCreateWALIfParentRenamed";
- AbstractFSWAL<?> log = newWAL(FS, FSUtils.getRootDir(CONF), name,
+ AbstractFSWAL<?> log = newWAL(FS, FSUtils.getLogRootDir(CONF), name,
HConstants.HREGION_OLDLOGDIR_NAME, CONF, null, true, null, null);
long filenum = System.currentTimeMillis();
Path path = log.computeFilename(filenum);
@@ -364,7 +367,7 @@ public abstract class AbstractTestFSWAL {
scopes.put(fam, 0);
}
// subclass and doctor a method.
- AbstractFSWAL<?> wal = newSlowWAL(FS, FSUtils.getRootDir(CONF), DIR.toString(), testName, CONF,
+ AbstractFSWAL<?> wal = newSlowWAL(FS, FSUtils.getLogRootDir(CONF), DIR.toString(), testName, CONF,
null, true, null, null, new Runnable() {
@Override
@@ -424,7 +427,7 @@ public abstract class AbstractTestFSWAL {
@Test
public void testSyncNoAppend() throws IOException {
String testName = currentTest.getMethodName();
- AbstractFSWAL<?> wal = newWAL(FS, FSUtils.getRootDir(CONF), DIR.toString(), testName, CONF,
+ AbstractFSWAL<?> wal = newWAL(FS, FSUtils.getLogRootDir(CONF), DIR.toString(), testName, CONF,
null, true, null, null);
try {
wal.sync();
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRollAbort.java hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRollAbort.java
index 9851815..8e47507 100644
--- hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRollAbort.java
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRollAbort.java
@@ -71,7 +71,8 @@ public class TestLogRollAbort {
/* For the split-then-roll test */
private static final Path HBASEDIR = new Path("/hbase");
- private static final Path OLDLOGDIR = new Path(HBASEDIR, HConstants.HREGION_OLDLOGDIR_NAME);
+ private static final Path HBASELOGDIR = new Path("/hbaselog");
+ private static final Path OLDLOGDIR = new Path(HBASELOGDIR, HConstants.HREGION_OLDLOGDIR_NAME);
// Need to override this setup so we can edit the config before it gets sent
// to the HDFS & HBase cluster startup.
@@ -111,6 +112,7 @@ public class TestLogRollAbort {
// disable region rebalancing (interferes with log watching)
cluster.getMaster().balanceSwitch(false);
FSUtils.setRootDir(conf, HBASEDIR);
+ FSUtils.setLogRootDir(conf, HBASELOGDIR);
}
@After
@@ -176,7 +178,7 @@ public class TestLogRollAbort {
public void testLogRollAfterSplitStart() throws IOException {
LOG.info("Verify wal roll after split starts will fail.");
String logName = "testLogRollAfterSplitStart";
- Path thisTestsDir = new Path(HBASEDIR, AbstractFSWALProvider.getWALDirectoryName(logName));
+ Path thisTestsDir = new Path(HBASELOGDIR, AbstractFSWALProvider.getWALDirectoryName(logName));
final WALFactory wals = new WALFactory(conf, null, logName);
try {
@@ -218,7 +220,7 @@ public class TestLogRollAbort {
LOG.debug("Renamed region directory: " + rsSplitDir);
LOG.debug("Processing the old log files.");
- WALSplitter.split(HBASEDIR, rsSplitDir, OLDLOGDIR, fs, conf, wals);
+ WALSplitter.split(HBASELOGDIR, rsSplitDir, OLDLOGDIR, fs, conf, wals);
LOG.debug("Trying to roll the WAL.");
try {
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALActionsListener.java hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALActionsListener.java
index b6bb7a0..47c3c5f 100644
--- hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALActionsListener.java
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALActionsListener.java
@@ -55,21 +55,27 @@ public class TestWALActionsListener {
new HBaseTestingUtility();
private final static byte[] SOME_BYTES = Bytes.toBytes("t");
- private static FileSystem fs;
private static Configuration conf;
+ private static Path rootDir;
+ private static Path logRootDir;
+ private static FileSystem fs;
+ private static FileSystem logFs;
@BeforeClass
public static void setUpBeforeClass() throws Exception {
conf = TEST_UTIL.getConfiguration();
conf.setInt("hbase.regionserver.maxlogs", 5);
- fs = FileSystem.get(conf);
- FSUtils.setRootDir(conf, TEST_UTIL.getDataTestDir());
+ rootDir = TEST_UTIL.createRootDir();
+ logRootDir = TEST_UTIL.createLogRootDir();
+ fs = FSUtils.getRootDirFileSystem(conf);
+ logFs = FSUtils.getLogRootDirFileSystem(conf);
}
@Before
public void setUp() throws Exception {
- fs.delete(new Path(TEST_UTIL.getDataTestDir(), HConstants.HREGION_LOGDIR_NAME), true);
- fs.delete(new Path(TEST_UTIL.getDataTestDir(), HConstants.HREGION_OLDLOGDIR_NAME), true);
+ fs.delete(rootDir, true);
+ logFs.delete(new Path(logRootDir, HConstants.HREGION_LOGDIR_NAME), true);
+ logFs.delete(new Path(logRootDir, HConstants.HREGION_OLDLOGDIR_NAME), true);
}
@After
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSUtils.java hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSUtils.java
index 0d8e2ef..5e26fdc 100644
--- hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSUtils.java
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSUtils.java
@@ -384,6 +384,43 @@ public class TestFSUtils {
verifyFileInDirWithStoragePolicy("1772");
}
+ @Test
+ public void testSetLogRootDir() throws Exception {
+ HBaseTestingUtility htu = new HBaseTestingUtility();
+ Configuration conf = htu.getConfiguration();
+ Path p = new Path("file:///hbase/root");
+ FSUtils.setLogRootDir(conf, p);
+ assertEquals(p.toString(), conf.get(HConstants.HBASE_LOG_DIR));
+ }
+
+ @Test
+ public void testGetLogRootDir() throws Exception {
+ HBaseTestingUtility htu = new HBaseTestingUtility();
+ Configuration conf = htu.getConfiguration();
+ Path root = new Path("file:///hbase/root");
+ Path logRoot = new Path("file:///hbase/logroot");
+ FSUtils.setRootDir(conf, root);
+ assertEquals(FSUtils.getRootDir(conf), root);
+ assertEquals(FSUtils.getLogRootDir(conf), root);
+ FSUtils.setLogRootDir(conf, logRoot);
+ assertEquals(FSUtils.getLogRootDir(conf), logRoot);
+ }
+
+ @Test
+ public void testRemoveLogRootPath() throws Exception {
+ HBaseTestingUtility htu = new HBaseTestingUtility();
+ Configuration conf = htu.getConfiguration();
+ FSUtils.setRootDir(conf, new Path("file:///user/hbase"));
+ Path testFile = new Path(FSUtils.getRootDir(conf), "test/testfile");
+ Path tmpFile = new Path("file:///test/testfile");
+ assertEquals(FSUtils.removeLogRootPath(testFile, conf), "test/testfile");
+ assertEquals(FSUtils.removeLogRootPath(tmpFile, conf), tmpFile.toString());
+ FSUtils.setLogRootDir(conf, new Path("file:///user/hbaseLogDir"));
+ assertEquals(FSUtils.removeLogRootPath(testFile, conf), testFile.toString());
+ Path logFile = new Path(FSUtils.getLogRootDir(conf), "test/testlog");
+ assertEquals(FSUtils.removeLogRootPath(logFile, conf), "test/testlog");
+ }
+
/**
* Ugly test that ensures we can get at the hedged read counters in dfsclient.
* Does a bit of preading with hedged reads enabled using code taken from hdfs TestPread.
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/wal/IOTestProvider.java hbase-server/src/test/java/org/apache/hadoop/hbase/wal/IOTestProvider.java
index ec5f037..faab916 100644
--- hbase-server/src/test/java/org/apache/hadoop/hbase/wal/IOTestProvider.java
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/wal/IOTestProvider.java
@@ -100,7 +100,7 @@ public class IOTestProvider implements WALProvider {
providerId = DEFAULT_PROVIDER_ID;
}
final String logPrefix = factory.factoryId + WAL_FILE_NAME_DELIMITER + providerId;
- log = new IOTestWAL(FileSystem.get(conf), FSUtils.getRootDir(conf),
+ log = new IOTestWAL(FileSystem.get(conf), FSUtils.getLogRootDir(conf),
AbstractFSWALProvider.getWALDirectoryName(factory.factoryId),
HConstants.HREGION_OLDLOGDIR_NAME, conf, listeners, true, logPrefix,
META_WAL_PROVIDER_ID.equals(providerId) ? META_WAL_PROVIDER_ID : null);
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALFactory.java hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALFactory.java
index b95176b..f03272a 100644
--- hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALFactory.java
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALFactory.java
@@ -83,6 +83,7 @@ public class TestWALFactory {
private static MiniDFSCluster cluster;
protected final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
protected static Path hbaseDir;
+ protected static Path hbaseLogDir;
protected FileSystem fs;
protected Path dir;
@@ -141,6 +142,7 @@ public class TestWALFactory {
cluster = TEST_UTIL.getDFSCluster();
hbaseDir = TEST_UTIL.createRootDir();
+ hbaseLogDir = TEST_UTIL.createLogRootDir();
}
@AfterClass
@@ -163,12 +165,12 @@ public class TestWALFactory {
final TableName tableName = TableName.valueOf(currentTest.getMethodName());
final byte [] rowName = tableName.getName();
final MultiVersionConcurrencyControl mvcc = new MultiVersionConcurrencyControl(1);
- final Path logdir = new Path(hbaseDir,
+ final Path logdir = new Path(hbaseLogDir,
AbstractFSWALProvider.getWALDirectoryName(currentTest.getMethodName()));
- Path oldLogDir = new Path(hbaseDir, HConstants.HREGION_OLDLOGDIR_NAME);
+ Path oldLogDir = new Path(hbaseLogDir, HConstants.HREGION_OLDLOGDIR_NAME);
final int howmany = 3;
HRegionInfo[] infos = new HRegionInfo[3];
- Path tabledir = FSUtils.getTableDir(hbaseDir, tableName);
+ Path tabledir = FSUtils.getTableDir(hbaseLogDir, tableName);
fs.mkdirs(tabledir);
for(int i = 0; i < howmany; i++) {
infos[i] = new HRegionInfo(tableName,
@@ -207,7 +209,7 @@ public class TestWALFactory {
}
}
wals.shutdown();
- List<Path> splits = WALSplitter.split(hbaseDir, logdir, oldLogDir, fs, conf, wals);
+ List<Path> splits = WALSplitter.split(hbaseLogDir, logdir, oldLogDir, fs, conf, wals);
verifySplits(splits, howmany);
}
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALLogDir.java hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALLogDir.java
new file mode 100644
index 0000000..514ec2c
--- /dev/null
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALLogDir.java
@@ -0,0 +1,150 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.wal;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
+import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.FSUtils;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+
+import static org.junit.Assert.assertEquals;
+
+@Category(MediumTests.class)
+public class TestWALLogDir {
+ private static final Log LOG = LogFactory.getLog(TestWALLogDir.class);
+ private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
+ private static Configuration conf;
+ private static FileSystem fs;
+ private static FileSystem logFs;
+ static final TableName tableName = TableName.valueOf("TestWALLogDir");
+ private static final byte [] rowName = Bytes.toBytes("row");
+ private static final byte [] family = Bytes.toBytes("column");
+ private static HTableDescriptor htd;
+ private static Path logRootDir;
+ private static Path rootDir;
+ private static WALFactory wals;
+
+ @Before
+ public void setUp() throws Exception {
+ cleanup();
+ }
+
+ @BeforeClass
+ public static void setUpBeforeClass() throws Exception {
+ conf = TEST_UTIL.getConfiguration();
+ TEST_UTIL.startMiniDFSCluster(1);
+ rootDir = TEST_UTIL.createRootDir();
+ logRootDir = TEST_UTIL.createLogRootDir();
+ fs = FSUtils.getRootDirFileSystem(conf);
+ logFs = FSUtils.getLogRootDirFileSystem(conf);
+ htd = new HTableDescriptor(tableName);
+ htd.addFamily(new HColumnDescriptor(family));
+ }
+
+ @AfterClass
+ public static void tearDownAfterClass() throws Exception {
+ cleanup();
+ TEST_UTIL.shutdownMiniDFSCluster();
+ }
+
+ @Test
+ public void testWALRootDir() throws Exception {
+ // Use the class-level logFs initialized in setUpBeforeClass(); a local here would shadow it.
+ HRegionInfo regionInfo = new HRegionInfo(tableName);
+ wals = new WALFactory(conf, null, "testWALRootDir");
+ WAL log = wals.getWAL(regionInfo.getEncodedNameAsBytes(), regionInfo.getTable().getNamespace());
+
+ assertEquals(1, getWALFiles(fs, rootDir).size());
+ byte [] value = Bytes.toBytes("value");
+ WALEdit edit = new WALEdit();
+ edit.add(new KeyValue(rowName, family, Bytes.toBytes("1"),
+ System.currentTimeMillis(), value));
+ long txid = log.append(regionInfo, getWalKey(System.currentTimeMillis(), regionInfo), edit, true);
+ log.sync(txid);
+ assertEquals("Expect 1 log have been created", 1, getWALFiles(logFs, logRootDir).size());
+ log.rollWriter();
+ //Create 1 more WAL, and put old WAL into oldwal dir
+ assertEquals(1, getWALFiles(logFs, new Path(logRootDir, HConstants.HREGION_OLDLOGDIR_NAME)).size());
+ assertEquals(1, getWALFiles(logFs, new Path(logRootDir, HConstants.HREGION_LOGDIR_NAME)).size());
+ edit.add(new KeyValue(rowName, family, Bytes.toBytes("2"),
+ System.currentTimeMillis(), value));
+ txid = log.append(regionInfo, getWalKey(System.currentTimeMillis(), regionInfo), edit, true);
+ log.sync(txid);
+ log.rollWriter();
+ log.shutdown();
+
+ assertEquals("Expect 2 logs in oldWALs dir", 2, getWALFiles(logFs, new Path(logRootDir, HConstants.HREGION_OLDLOGDIR_NAME)).size());
+ assertEquals("Expect 1 logs in WALs dir", 1, getWALFiles(logFs, new Path(logRootDir, HConstants.HREGION_LOGDIR_NAME)).size());
+ }
+
+ protected WALKey getWalKey(final long time, HRegionInfo hri) {
+ return new WALKey(hri.getEncodedNameAsBytes(), tableName, time);
+ }
+
+ private List<FileStatus> getWALFiles(FileSystem fs, Path dir)
+ throws IOException {
+ List<FileStatus> result = new ArrayList<>();
+ LOG.debug("Scanning " + dir.toString() + " for WAL files");
+
+ FileStatus[] files = fs.listStatus(dir);
+ if (files == null) return Collections.emptyList();
+ for (FileStatus file : files) {
+ if (file.isDirectory()) {
+ // recurse into sub directories
+ result.addAll(getWALFiles(fs, file.getPath()));
+ } else {
+ String name = file.getPath().getName();
+ if (!name.startsWith(".")) {
+ result.add(file);
+ }
+ }
+ }
+ return result;
+ }
+
+ private static void cleanup() throws Exception{
+ logFs.delete(logRootDir, true);
+ fs.delete(rootDir, true);
+ }
+
+}
+
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALSplit.java hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALSplit.java
index 055e4f5..05afe5f 100644
--- hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALSplit.java
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALSplit.java
@@ -119,6 +119,7 @@ public class TestWALSplit {
protected final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
private Path HBASEDIR;
+ private Path HBASELOGDIR;
private Path WALDIR;
private Path OLDLOGDIR;
private Path CORRUPTDIR;
@@ -181,8 +182,9 @@ public class TestWALSplit {
LOG.info("Cleaning up cluster for new test.");
fs = TEST_UTIL.getDFSCluster().getFileSystem();
HBASEDIR = TEST_UTIL.createRootDir();
- OLDLOGDIR = new Path(HBASEDIR, HConstants.HREGION_OLDLOGDIR_NAME);
- CORRUPTDIR = new Path(HBASEDIR, HConstants.CORRUPT_DIR_NAME);
+ HBASELOGDIR = TEST_UTIL.createLogRootDir();
+ OLDLOGDIR = new Path(HBASELOGDIR, HConstants.HREGION_OLDLOGDIR_NAME);
+ CORRUPTDIR = new Path(HBASELOGDIR, HConstants.CORRUPT_DIR_NAME);
TABLEDIR = FSUtils.getTableDir(HBASEDIR, TABLE_NAME);
REGIONS.clear();
Collections.addAll(REGIONS, "bbb", "ccc");
@@ -190,7 +192,7 @@ public class TestWALSplit {
this.mode = (conf.getBoolean(HConstants.DISTRIBUTED_LOG_REPLAY_KEY, false) ?
RecoveryMode.LOG_REPLAY : RecoveryMode.LOG_SPLITTING);
wals = new WALFactory(conf, null, name.getMethodName());
- WALDIR = new Path(HBASEDIR, AbstractFSWALProvider.getWALDirectoryName(name.getMethodName()));
+ WALDIR = new Path(HBASELOGDIR, AbstractFSWALProvider.getWALDirectoryName(name.getMethodName()));
//fs.mkdirs(WALDIR);
}
@@ -206,6 +208,7 @@ public class TestWALSplit {
} finally {
wals = null;
fs.delete(HBASEDIR, true);
+ fs.delete(HBASELOGDIR, true);
}
}
@@ -1114,7 +1117,7 @@ public class TestWALSplit {
useDifferentDFSClient();
WALSplitter.split(HBASEDIR, WALDIR, OLDLOGDIR, fs, conf, wals);
- final Path corruptDir = new Path(FSUtils.getRootDir(conf), HConstants.CORRUPT_DIR_NAME);
+ final Path corruptDir = new Path(FSUtils.getLogRootDir(conf), HConstants.CORRUPT_DIR_NAME);
assertEquals(1, fs.listStatus(corruptDir).length);
}