diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
index 0c439c7..2a0b0bb 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
@@ -274,17 +274,16 @@ public final class HConstants {
/** Default HBase client operation timeout, which is tantamount to a blocking call */
public static final int DEFAULT_HBASE_CLIENT_OPERATION_TIMEOUT = Integer.MAX_VALUE;
- /** Used to construct the name of the log directory for a region server
- * Use '.' as a special character to seperate the log files from table data */
- public static final String HREGION_LOGDIR_NAME = ".logs";
+ /** Used to construct the name of the log directory for a region server */
+ public static final String HREGION_LOGDIR_NAME = "WALs";
/** Used to construct the name of the splitlog directory for a region server */
- public static final String SPLIT_LOGDIR_NAME = "splitlog";
-
- public static final String CORRUPT_DIR_NAME = ".corrupt";
+ public static final String SPLIT_LOGDIR_NAME = "splitWAL";
/** Like the previous, but for old logs that are about to be deleted */
- public static final String HREGION_OLDLOGDIR_NAME = ".oldlogs";
+ public static final String HREGION_OLDLOGDIR_NAME = "oldWALs";
+
+ public static final String CORRUPT_DIR_NAME = "corrupt";
/** Used by HBCK to sideline backup data */
public static final String HBCK_SIDELINEDIR_NAME = ".hbck";
@@ -352,7 +351,7 @@ public final class HConstants {
// be the first to be reassigned if the server(s) they are being served by
// should go down.
- public static final String BASE_NAMESPACE_DIR = ".data";
+ public static final String BASE_NAMESPACE_DIR = "data";
/** delimiter used between portions of a region name */
public static final int META_ROW_DELIMITER = ',';
@@ -815,7 +814,7 @@ public final class HConstants {
public static final int REPLAY_QOS = 6; // REPLICATION_QOS < REPLAY_QOS < high_QOS
/** Directory under /hbase where archived hfiles are stored */
- public static final String HFILE_ARCHIVE_DIRECTORY = ".archive";
+ public static final String HFILE_ARCHIVE_DIRECTORY = "archive";
/**
* Name of the directory to store all snapshots. See SnapshotDescriptionUtils for
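// Illustrative sketch (not part of the patch): how code typically resolves the renamed
// root-level directories from the constants above. 'rootDir' is assumed to be the
// org.apache.hadoop.fs.Path for hbase.rootdir.
//   Path walDir     = new Path(rootDir, HConstants.HREGION_LOGDIR_NAME);      // <root>/WALs     (was .logs)
//   Path oldWalDir  = new Path(rootDir, HConstants.HREGION_OLDLOGDIR_NAME);   // <root>/oldWALs  (was .oldlogs)
//   Path dataDir    = new Path(rootDir, HConstants.BASE_NAMESPACE_DIR);       // <root>/data     (was .data)
//   Path archiveDir = new Path(rootDir, HConstants.HFILE_ARCHIVE_DIRECTORY);  // <root>/archive  (was .archive)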
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/migration/NamespaceUpgrade.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/migration/NamespaceUpgrade.java
index 9b0107d..9ed43f9 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/migration/NamespaceUpgrade.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/migration/NamespaceUpgrade.java
@@ -19,7 +19,10 @@
*/
package org.apache.hadoop.hbase.migration;
-import com.google.common.collect.Lists;
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.List;
+
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
@@ -36,8 +39,7 @@ import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.util.Tool;
-import java.io.IOException;
-import java.util.List;
+import com.google.common.collect.Lists;
/**
* Upgrades old 0.94 filesystem layout to namespace layout
@@ -46,6 +48,8 @@ import java.util.List;
* - creates system namespace directory and move .META. table there
* renaming .META. table to hbase:meta,
* this in turn would require to re-encode the region directory name
+ *
+ * The pre-0.96 paths and dir names are hardcoded in here.
*/
public class NamespaceUpgrade implements Tool {
private static final Log LOG = LogFactory.getLog(NamespaceUpgrade.class);
@@ -58,26 +62,44 @@ public class NamespaceUpgrade implements Tool {
private Path sysNsDir;
private Path defNsDir;
private Path baseDirs[];
+ // First move everything to this tmp .data dir in case there is a table named 'data'
+ private static final String TMP_DATA_DIR = ".data";
+ // Old dir names to migrate.
+ private static final String DOT_LOGS = ".logs";
+ private static final String DOT_OLD_LOGS = ".oldlogs";
+ private static final String DOT_CORRUPT = ".corrupt";
+ private static final String DOT_SPLITLOG = "splitlog";
+ private static final String DOT_ARCHIVE = ".archive";
+ /** Directories that are not HBase table directories */
+ static final List<String> NON_USER_TABLE_DIRS = Arrays.asList(new String[] {
+ DOT_LOGS,
+ DOT_OLD_LOGS,
+ DOT_CORRUPT,
+ DOT_SPLITLOG,
+ HConstants.HBCK_SIDELINEDIR_NAME,
+ DOT_ARCHIVE,
+ HConstants.SNAPSHOT_DIR_NAME,
+ HConstants.HBASE_TEMP_DIRECTORY,
+ TMP_DATA_DIR});
public NamespaceUpgrade() throws IOException {
+ super();
}
public void init() throws IOException {
this.rootDir = FSUtils.getRootDir(conf);
this.fs = FileSystem.get(conf);
- sysNsDir = FSUtils.getNamespaceDir(rootDir, NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR);
- defNsDir = FSUtils.getNamespaceDir(rootDir, NamespaceDescriptor.DEFAULT_NAMESPACE_NAME_STR);
+ Path tmpDataDir = new Path(rootDir, TMP_DATA_DIR);
+ sysNsDir = new Path(tmpDataDir, NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR);
+ defNsDir = new Path(tmpDataDir, NamespaceDescriptor.DEFAULT_NAMESPACE_NAME_STR);
baseDirs = new Path[]{rootDir,
new Path(rootDir, HConstants.HFILE_ARCHIVE_DIRECTORY),
new Path(rootDir, HConstants.HBASE_TEMP_DIRECTORY)};
}
- public void upgradeTableDirs()
- throws IOException, DeserializationException {
-
-
- //if new version is written then upgrade is done
+ public void upgradeTableDirs() throws IOException, DeserializationException {
+ // if new version is written then upgrade is done
if (verifyNSUpgrade(fs, rootDir)) {
return;
}
@@ -90,9 +112,75 @@ public class NamespaceUpgrade implements Tool {
migrateMeta();
+ migrateDotDirs();
+
+ deleteRoot();
+
FSUtils.setVersion(fs, rootDir);
}
+ /**
+ * Remove the -ROOT- dir. No longer of use.
+ * @throws IOException
+ */
+ public void deleteRoot() throws IOException {
+ Path rootTableDir = new Path(this.rootDir, "-ROOT-");
+ if (this.fs.exists(rootTableDir)) {
+ if (!this.fs.delete(rootTableDir, true)) LOG.info("Failed remove of " + rootTableDir);
+ else LOG.info("Deleted " + rootTableDir);
+ }
+ }
+
+ /**
+ * Rename the old dot dirs -- .data, .logs, .oldlogs, .corrupt, .archive -- as data, WALs, oldWALs, corrupt, archive.
+ * @throws IOException
+ */
+ public void migrateDotDirs() throws IOException {
+ // Dot dirs to rename. Leave the tmp dir named '.tmp' and snapshots as .hbase-snapshot.
+ final Path archiveDir = new Path(rootDir, HConstants.HFILE_ARCHIVE_DIRECTORY);
+ Path [][] dirs = new Path[][] {
+ new Path [] {new Path(rootDir, DOT_CORRUPT), new Path(rootDir, HConstants.CORRUPT_DIR_NAME)},
+ new Path [] {new Path(rootDir, DOT_LOGS), new Path(rootDir, HConstants.HREGION_LOGDIR_NAME)},
+ new Path [] {new Path(rootDir, DOT_OLD_LOGS),
+ new Path(rootDir, HConstants.HREGION_OLDLOGDIR_NAME)},
+ new Path [] {new Path(rootDir, TMP_DATA_DIR),
+ new Path(rootDir, HConstants.BASE_NAMESPACE_DIR)}};
+ for (Path [] dir: dirs) {
+ Path src = dir[0];
+ Path tgt = dir[1];
+ if (!this.fs.exists(src)) {
+ LOG.info("Does not exist: " + src);
+ continue;
+ }
+ rename(src, tgt);
+ }
+ // Do the .archive dir. Need to move its subdirs to the default ns dir under data dir... so
+ // from '.archive/foo', to 'archive/data/default/foo'.
+ Path oldArchiveDir = new Path(rootDir, DOT_ARCHIVE);
+ if (this.fs.exists(oldArchiveDir)) {
+ // Doing two namenode calls here is a pain but it keeps this portable across hadoop 1 and hadoop 2.
+ mkdirs(archiveDir);
+ Path archiveDataDir = new Path(archiveDir, HConstants.BASE_NAMESPACE_DIR);
+ mkdirs(archiveDataDir);
+ rename(oldArchiveDir, new Path(archiveDataDir,
+ NamespaceDescriptor.DEFAULT_NAMESPACE_NAME_STR));
+ }
+ }
+
+ private void mkdirs(final Path p) throws IOException {
+ if (!this.fs.mkdirs(p)) throw new IOException("Failed make of " + p);
+ }
+
+ private void rename(final Path src, final Path tgt) throws IOException {
+ if (!fs.rename(src, tgt)) {
+ throw new IOException("Failed move " + src + " to " + tgt);
+ }
+ }
+
+ /**
+ * Create the system and default namespace dirs
+ * @throws IOException
+ */
public void makeNamespaceDirs() throws IOException {
if (!fs.exists(sysNsDir)) {
if (!fs.mkdirs(sysNsDir)) {
@@ -106,27 +194,34 @@ public class NamespaceUpgrade implements Tool {
}
}
+ /**
+ * Migrate all tables into respective namespaces, either default or system. We put them into
+ * a temporary location, '.data', in case a user table is named 'data'. In a later step we will
+ * move stuff from .data to data.
+ * @throws IOException
+ */
public void migrateTables() throws IOException {
List<String> sysTables = Lists.newArrayList("-ROOT-",".META.");
- //migrate tables including archive and tmp
- for(Path baseDir: baseDirs) {
+ // Migrate tables including archive and tmp
+ for (Path baseDir: baseDirs) {
if (!fs.exists(baseDir)) continue;
List<Path> oldTableDirs = FSUtils.getLocalTableDirs(fs, baseDir);
- for(Path oldTableDir: oldTableDirs) {
- if (!sysTables.contains(oldTableDir.getName())) {
- Path nsDir = FSUtils.getTableDir(baseDir,
- TableName.valueOf(oldTableDir.getName()));
- if(!fs.exists(nsDir.getParent())) {
- if(!fs.mkdirs(nsDir.getParent())) {
- throw new IOException("Failed to create namespace dir "+nsDir.getParent());
- }
+ for (Path oldTableDir: oldTableDirs) {
+ if (NON_USER_TABLE_DIRS.contains(oldTableDir.getName())) continue;
+ if (sysTables.contains(oldTableDir.getName())) continue;
+ // Make the new directory under the ns to which we will move the table.
+ Path nsDir = new Path(this.defNsDir,
+ TableName.valueOf(oldTableDir.getName()).getQualifierAsString());
+ if (!fs.exists(nsDir.getParent())) {
+ if (!fs.mkdirs(nsDir.getParent())) {
+ throw new IOException("Failed to create namespace dir "+nsDir.getParent());
}
- if (sysTables.indexOf(oldTableDir.getName()) < 0) {
- LOG.info("Migrating table " + oldTableDir.getName() + " to " + nsDir);
- if (!fs.rename(oldTableDir, nsDir)) {
- throw new IOException("Failed to move "+oldTableDir+" to namespace dir "+nsDir);
- }
+ }
+ if (sysTables.indexOf(oldTableDir.getName()) < 0) {
+ LOG.info("Migrating table " + oldTableDir.getName() + " to " + nsDir);
+ if (!fs.rename(oldTableDir, nsDir)) {
+ throw new IOException("Failed to move "+oldTableDir+" to namespace dir "+nsDir);
}
}
}
@@ -163,8 +258,9 @@ public class NamespaceUpgrade implements Tool {
}
public void migrateMeta() throws IOException {
- Path newMetaRegionDir = HRegion.getRegionDir(rootDir, HRegionInfo.FIRST_META_REGIONINFO);
- Path newMetaDir = FSUtils.getTableDir(rootDir, TableName.META_TABLE_NAME);
+ Path newMetaDir = new Path(this.sysNsDir, TableName.META_TABLE_NAME.getQualifierAsString());
+ Path newMetaRegionDir =
+ new Path(newMetaDir, HRegionInfo.FIRST_META_REGIONINFO.getEncodedName());
Path oldMetaDir = new Path(rootDir, ".META.");
if (fs.exists(oldMetaDir)) {
LOG.info("Migrating meta table " + oldMetaDir.getName() + " to " + newMetaDir);
@@ -174,10 +270,9 @@ public class NamespaceUpgrade implements Tool {
}
}
- //since meta table name has changed
- //rename meta region dir from it's old encoding to new one
+ // Since the meta table name has changed, rename the meta region dir from its old encoding to the new one
Path oldMetaRegionDir = HRegion.getRegionDir(rootDir,
- new Path(newMetaDir, "1028785192").toString());
+ new Path(newMetaDir, "1028785192").toString());
if (fs.exists(oldMetaRegionDir)) {
LOG.info("Migrating meta region " + oldMetaRegionDir + " to " + newMetaRegionDir);
if (!fs.rename(oldMetaRegionDir, newMetaRegionDir)) {
@@ -199,7 +294,7 @@ public class NamespaceUpgrade implements Tool {
@Override
public int run(String[] args) throws Exception {
- if(args.length < 1 || !args[0].equals("--upgrade")) {
+ if (args.length < 1 || !args[0].equals("--upgrade")) {
System.out.println("Usage: --upgrade");
return 0;
}
@@ -217,4 +312,4 @@ public class NamespaceUpgrade implements Tool {
public Configuration getConf() {
return conf;
}
-}
+}
\ No newline at end of file
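// Illustrative sketch (not part of the patch): NamespaceUpgrade is a Hadoop Tool, so an
// upgrade of an existing 0.94 rootdir is driven through ToolRunner, the same way the
// test below invokes it. 'conf' is assumed to already have hbase.rootdir pointing at the
// old layout.
//   Configuration conf = HBaseConfiguration.create();
//   int ret = ToolRunner.run(conf, new NamespaceUpgrade(), new String[] {"--upgrade"});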
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestNamespace.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestNamespace.java
index 90216d5..fd7276c 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestNamespace.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestNamespace.java
@@ -29,6 +29,7 @@ import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.constraint.ConstraintException;
import org.apache.hadoop.hbase.master.HMaster;
+import org.apache.hadoop.hbase.migration.NamespaceUpgrade;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.zookeeper.ZKUtil;
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/migration/TestNamespaceUpgrade.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/migration/TestNamespaceUpgrade.java
index e3a3f46..c70cacb 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/migration/TestNamespaceUpgrade.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/migration/TestNamespaceUpgrade.java
@@ -19,12 +19,14 @@
*/
package org.apache.hadoop.hbase.migration;
-import static org.junit.Assert.*;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
import java.io.File;
import java.io.IOException;
-
-import junit.framework.Assert;
+import java.util.ArrayList;
+import java.util.List;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
@@ -47,6 +49,7 @@ import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.util.ToolRunner;
import org.junit.AfterClass;
+import org.junit.Assert;
import org.junit.BeforeClass;
import org.junit.Test;
import org.junit.experimental.categories.Category;
@@ -56,12 +59,12 @@ import org.junit.experimental.categories.Category;
* Mainly tests that tables are migrated and consistent. Also verifies
* that snapshots have been migrated correctly.
*
- * Uses a tarball which is an image of an 0.94 hbase.rootdir.
+ * Uses a tarball which is an image of a 0.94 hbase.rootdir.
*
- * Contains tables with currentKeys as the stored keys:
+ * Contains tables with currentKeys as the stored keys:
* foo, ns1.foo, ns2.foo
*
- * Contains snapshots with snapshot{num}Keys as the contents:
+ * Contains snapshots with snapshot{num}Keys as the contents:
* snapshot1Keys, snapshot2Keys
*
*/
@@ -103,7 +106,6 @@ public class TestNamespaceUpgrade {
Configuration toolConf = TEST_UTIL.getConfiguration();
conf.set(HConstants.HBASE_DIR, TEST_UTIL.getDefaultRootDirPath().toString());
ToolRunner.run(toolConf, new NamespaceUpgrade(), new String[]{"--upgrade"});
-
assertTrue(FSUtils.getVersion(fs, hbaseRootDir).equals(HConstants.FILE_SYSTEM_VERSION));
TEST_UTIL.startMiniHBaseCluster(1, 1);
@@ -154,7 +156,7 @@ public class TestNamespaceUpgrade {
@Test
public void testSnapshots() throws IOException, InterruptedException {
String snapshots[][] = {snapshot1Keys, snapshot2Keys};
- for(int i=1; i<=snapshots.length; i++) {
+ for(int i = 1; i <= snapshots.length; i++) {
for(String table: tables) {
TEST_UTIL.getHBaseAdmin().cloneSnapshot(table+"_snapshot"+i, table+"_clone"+i);
FSUtils.logFileSystemState(FileSystem.get(TEST_UTIL.getConfiguration()),
@@ -171,7 +173,7 @@ public class TestNamespaceUpgrade {
}
@Test
- public void testRenameUsingSnapshots() throws IOException, InterruptedException {
+ public void testRenameUsingSnapshots() throws Exception {
String newNS = "newNS";
TEST_UTIL.getHBaseAdmin().createNamespace(NamespaceDescriptor.create(newNS).build());
for(String table: tables) {
@@ -180,10 +182,9 @@ public class TestNamespaceUpgrade {
Scan())) {
assertEquals(currentKeys[count++], Bytes.toString(res.getRow()));
}
- TEST_UTIL.getHBaseAdmin().snapshot(table+"_snapshot3", table);
- final String newTableName =
- newNS+ TableName.NAMESPACE_DELIM+table+"_clone3";
- TEST_UTIL.getHBaseAdmin().cloneSnapshot(table+"_snapshot3", newTableName);
+ TEST_UTIL.getHBaseAdmin().snapshot(table + "_snapshot3", table);
+ final String newTableName = newNS + TableName.NAMESPACE_DELIM + table + "_clone3";
+ TEST_UTIL.getHBaseAdmin().cloneSnapshot(table + "_snapshot3", newTableName);
Thread.sleep(1000);
count = 0;
for(Result res: new HTable(TEST_UTIL.getConfiguration(), newTableName).getScanner(new
@@ -211,12 +212,12 @@ public class TestNamespaceUpgrade {
String nextNS = "nextNS";
TEST_UTIL.getHBaseAdmin().createNamespace(NamespaceDescriptor.create(nextNS).build());
for(String table: tables) {
- String srcTable = newNS+TableName.NAMESPACE_DELIM+table+"_clone3";
- TEST_UTIL.getHBaseAdmin().snapshot(table+"_snapshot4", srcTable);
- String newTableName = nextNS+TableName.NAMESPACE_DELIM+table+"_clone4";
+ String srcTable = newNS + TableName.NAMESPACE_DELIM + table + "_clone3";
+ TEST_UTIL.getHBaseAdmin().snapshot(table + "_snapshot4", srcTable);
+ String newTableName = nextNS + TableName.NAMESPACE_DELIM + table + "_clone4";
TEST_UTIL.getHBaseAdmin().cloneSnapshot(table+"_snapshot4", newTableName);
- FSUtils.logFileSystemState(TEST_UTIL.getTestFileSystem(), TEST_UTIL.getDefaultRootDirPath()
- , LOG);
+ FSUtils.logFileSystemState(TEST_UTIL.getTestFileSystem(), TEST_UTIL.getDefaultRootDirPath(),
+ LOG);
int count = 0;
for(Result res: new HTable(TEST_UTIL.getConfiguration(), newTableName).getScanner(new
Scan())) {
@@ -224,7 +225,31 @@ public class TestNamespaceUpgrade {
}
Assert.assertEquals(newTableName, currentKeys.length, count);
}
+ }
+ @Test
+ public void testOldDirsAreGonePostMigration() throws IOException {
+ FileSystem fs = FileSystem.get(TEST_UTIL.getConfiguration());
+ Path hbaseRootDir = TEST_UTIL.getDefaultRootDirPath();
+ List<String> dirs = new ArrayList<String>(NamespaceUpgrade.NON_USER_TABLE_DIRS);
+ // Remove those that are not renamed
+ dirs.remove(HConstants.HBCK_SIDELINEDIR_NAME);
+ dirs.remove(HConstants.SNAPSHOT_DIR_NAME);
+ dirs.remove(HConstants.HBASE_TEMP_DIRECTORY);
+ for (String dir: dirs) {
+ assertFalse(fs.exists(new Path(hbaseRootDir, dir)));
+ }
}
-}
+ @Test
+ public void testNewDirsArePresentPostMigration() throws IOException {
+ FileSystem fs = FileSystem.get(TEST_UTIL.getConfiguration());
+ // Below list does not include 'corrupt' because there is no 'corrupt' in the tgz
+ String [] newdirs = new String [] {HConstants.BASE_NAMESPACE_DIR,
+ HConstants.HREGION_LOGDIR_NAME};
+ Path hbaseRootDir = TEST_UTIL.getDefaultRootDirPath();
+ for (String dir: newdirs) {
+ assertTrue(dir, fs.exists(new Path(hbaseRootDir, dir)));
+ }
+ }
+}
\ No newline at end of file
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestHLog.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestHLog.java
index 5b148a7..4226e94 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestHLog.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestHLog.java
@@ -726,17 +726,18 @@ public class TestHLog {
Assert.assertNull(HLogUtil.getServerNameFromHLogDirectoryName(conf, hl + "qdf"));
Assert.assertNull(HLogUtil.getServerNameFromHLogDirectoryName(conf, "sfqf" + hl + "qdf"));
+ final String wals = "/WALs/";
ServerName parsed = HLogUtil.getServerNameFromHLogDirectoryName(conf,
- FSUtils.getRootDir(conf).toUri().toString() +
- "/.logs/" + sn + "/localhost%2C32984%2C1343316388997.1343316390417");
+ FSUtils.getRootDir(conf).toUri().toString() + wals + sn +
+ "/localhost%2C32984%2C1343316388997.1343316390417");
Assert.assertEquals("standard", sn, parsed);
parsed = HLogUtil.getServerNameFromHLogDirectoryName(conf, hl + "/qdf");
Assert.assertEquals("subdir", sn, parsed);
parsed = HLogUtil.getServerNameFromHLogDirectoryName(conf,
- FSUtils.getRootDir(conf).toUri().toString() +
- "/.logs/" + sn + "-splitting/localhost%3A57020.1340474893931");
+ FSUtils.getRootDir(conf).toUri().toString() + wals + sn +
+ "-splitting/localhost%3A57020.1340474893931");
Assert.assertEquals("split", sn, parsed);
}
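// Illustrative sketch (assumed shape, not part of the patch): under the new layout a
// region server's WAL directory is <hbase.rootdir>/WALs/<server-name>, and the updated
// assertions above check that the server name can be parsed back out of such a path.
// 'conf' and 'sn' are assumed to be the Configuration and ServerName the test builds.
//   Path walRoot = new Path(FSUtils.getRootDir(conf), HConstants.HREGION_LOGDIR_NAME);
//   Path serverWalDir = new Path(walRoot, sn.toString());
//   ServerName parsed = HLogUtil.getServerNameFromHLogDirectoryName(conf, serverWalDir.toString());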