Hbase runs on top of Hadoop. Hadoop has its own set of
requirements and instructions. Make sure to set
JAVA_HOME to the root of your Java installation when configuring Hadoop.
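+
+For example, in ${HADOOP_HOME}/conf/hadoop-env.sh (the JDK path shown is
+illustrative; point it at your own installation):
+
+export JAVA_HOME=/usr/lib/jvm/java-6-sun
+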
@@ -17,13 +17,19 @@
+What follows presumes you are installing hbase for the first time. If upgrading your
+hbase instance, see Upgrading.
+
+
Start by defining the following directory variables for your convenience:
+
${HADOOP_HOME}: The root directory of your Hadoop installation.
-${HBASE_HOME}: The HBase root, located at
+${HBASE_HOME}: The Hbase root, located at
${HADOOP_HOME}/src/contrib/hbase.
+
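+For example, with Hadoop unpacked under /home/user/hadoop-0.16.0 (an
+illustrative path), you might use:
+
+export HADOOP_HOME=/home/user/hadoop-0.16.0
+export HBASE_HOME=${HADOOP_HOME}/src/contrib/hbase
+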
If you are running a standalone operation, proceed to Running
and Confirming Your Installation. If you are running a distributed operation, continue below.
@@ -34,7 +40,7 @@
Make sure you have followed
Hadoop's instructions for running a distributed operation.
-Configuring HBase for a distributed operation requires modification of the following two
+Configuring Hbase for a distributed operation requires modification of the following two
files: ${HBASE_HOME}/conf/hbase-site.xml and
${HBASE_HOME}/conf/regionservers.
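+
+The regionservers file lists the hosts that will run region servers, one
+hostname per line, for example (hostnames are illustrative):
+
+host1.example.org
+host2.example.org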
@@ -44,7 +50,7 @@
should never be modified). At a minimum, the hbase.master property should be redefined
in hbase-site.xml to specify the host:port pair on which to run the
HMaster (read about the
-HBase master, regionservers, etc):
+Hbase master, regionservers, etc):
<configuration>
@@ -52,7 +58,7 @@
<property>
<name>hbase.master</name>
<value>[YOUR_HOST]:[PORT]</value>
- <description>The host and port that the HBase master runs at.
+ <description>The host and port that the Hbase master runs at.
</description>
</property>
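+
+For example, a filled-in definition might look as follows (the hostname is
+illustrative; 60000 is the port customarily used by the HMaster):
+
+<property>
+<name>hbase.master</name>
+<value>master.example.org:60000</value>
+</property>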
@@ -65,8 +71,8 @@
-- Hadoop and HBase must be set up on each host you plan to use.
-- Additional (optional) HBase-specific variables such as HBASE_HEAPSIZE and HBASE_CLASSPATH
+- Hadoop and Hbase must be set up on each host you plan to use.
+- Additional (optional) Hbase-specific variables such as HBASE_HEAPSIZE and HBASE_CLASSPATH
can be set in
${HBASE_HOME}/conf/hbase-env.sh.
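+
+For example (values are illustrative; HBASE_HEAPSIZE is the heap size in
+megabytes):
+
+export HBASE_HEAPSIZE=1000
+export HBASE_CLASSPATH=/path/to/extra/classes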
@@ -74,42 +80,49 @@
If you are running in standalone, non-distributed mode, hbase by default uses
the local filesystem.
If you are running a distributed cluster you will need to start the Hadoop DFS daemons
-before starting HBase and stop the daemons after HBase has shut down. Start and
+before starting Hbase and stop the daemons after Hbase has shut down. Start and
stop the Hadoop DFS daemons as per the Hadoop
-instructions. HBase
+instructions. Hbase
does not normally use the mapreduce daemons. These do not need to be started.
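+
+With a typical Hadoop layout this amounts to something like the following
+(script names are those shipped with Hadoop; consult the Hadoop instructions
+if your version differs):
+
+${HADOOP_HOME}/bin/start-dfs.sh
+... start Hbase, work with it, stop it ...
+${HADOOP_HOME}/bin/stop-dfs.sh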
-Start HBase with the following command:
+Start Hbase with the following command:
${HBASE_HOME}/bin/start-hbase.sh
-Once HBase has started, enter ${HBASE_HOME}/bin/hbase shell to obtain a
-shell against HBase from which you can execute HBase commands. In the HBase shell, type
-help; to see a list of supported commands. Note that all commands in the HBase
+Once Hbase has started, enter ${HBASE_HOME}/bin/hbase shell to obtain a
+shell against Hbase from which you can execute Hbase commands. In the Hbase shell, type
+help; to see a list of supported commands. Note that all commands in the Hbase
shell must end with ;. Test your installation by creating, viewing, and dropping
a table, as per the help instructions. Be patient with the create and
drop operations as they may each take 10 seconds or more. To stop hbase, exit the
-HBase shell and enter:
+Hbase shell and enter:
${HBASE_HOME}/bin/stop-hbase.sh
-If you are running a distributed operation, be sure to wait until HBase has shut down completely
+If you are running a distributed operation, be sure to wait until Hbase has shut down completely
before stopping the Hadoop daemons.
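+
+To recap, a minimal standalone session might look like the following (the
+hbase> prompt and the exit; command are illustrative; help; lists what your
+version actually supports):
+
+${HBASE_HOME}/bin/start-hbase.sh
+${HBASE_HOME}/bin/hbase shell
+hbase> help;
+hbase> exit;
+${HBASE_HOME}/bin/stop-hbase.sh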
The default location for logs is ${HADOOP_HOME}/logs.
-HBase also puts up a UI listing vital attributes. By default its deployed on the master host
+Hbase also puts up a UI listing vital attributes. By default it is deployed on the master host
at port 60010.
+
+After installing the new hbase, before starting your cluster, run the
+${HBASE_HOME}/bin/hbase migrate migration script. It will make any
+adjustments to the filesystem data under hbase.rootdir necessary to run
+the new hbase version.
+
+
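+For example, to check whether a migration is needed and then perform it (the
+subcommands and the option syntax follow the migrate tool's usage text):
+
+${HBASE_HOME}/bin/hbase migrate check
+${HBASE_HOME}/bin/hbase migrate migrate -logfiles=prompt
+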
Index: src/contrib/hbase/src/java/org/apache/hadoop/hbase/HRegion.java
===================================================================
--- src/contrib/hbase/src/java/org/apache/hadoop/hbase/HRegion.java (revision 613773)
+++ src/contrib/hbase/src/java/org/apache/hadoop/hbase/HRegion.java (working copy)
@@ -1870,7 +1870,7 @@
* @param info HRegionInfo for the region
* @return qualified path of region directory
*/
- static Path getRegionDir(final Path rootdir, final HRegionInfo info) {
+ public static Path getRegionDir(final Path rootdir, final HRegionInfo info) {
return new Path(
HTableDescriptor.getTableDir(rootdir, info.getTableDesc().getName()),
info.getEncodedName()
Index: src/contrib/hbase/src/java/org/apache/hadoop/hbase/util/Migrate.java
===================================================================
--- src/contrib/hbase/src/java/org/apache/hadoop/hbase/util/Migrate.java (revision 613773)
+++ src/contrib/hbase/src/java/org/apache/hadoop/hbase/util/Migrate.java (working copy)
@@ -21,6 +21,7 @@
package org.apache.hadoop.hbase.util;
import java.io.BufferedReader;
+import java.io.FileNotFoundException;
import java.io.InputStreamReader;
import java.io.IOException;
@@ -32,13 +33,9 @@
import java.util.TreeMap;
import org.apache.commons.cli.CommandLine;
-import org.apache.commons.cli.CommandLineParser;
-import org.apache.commons.cli.GnuParser;
-import org.apache.commons.cli.HelpFormatter;
import org.apache.commons.cli.Option;
import org.apache.commons.cli.OptionBuilder;
import org.apache.commons.cli.Options;
-import org.apache.commons.cli.ParseException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
@@ -76,8 +73,9 @@
private final HBaseConfiguration conf;
- /** Action to take when an extra file is found */
- private static enum EXTRA_FILES {
+ /** Action to take when an extra file or unrecovered log file is found */
+ private static final String ACTIONS = "abort|ignore|delete|prompt";
+ private static enum ACTION {
/** Stop conversion */
ABORT,
/** print a warning message, but otherwise ignore */
@@ -88,19 +86,22 @@
PROMPT
}
- private static final Map<String, EXTRA_FILES> options =
- new HashMap<String, EXTRA_FILES>();
+ private static final Map<String, ACTION> options =
+ new HashMap<String, ACTION>();
static {
- options.put("abort", EXTRA_FILES.ABORT);
- options.put("ignore", EXTRA_FILES.IGNORE);
- options.put("delete", EXTRA_FILES.DELETE);
- options.put("prompt", EXTRA_FILES.PROMPT);
+ options.put("abort", ACTION.ABORT);
+ options.put("ignore", ACTION.IGNORE);
+ options.put("delete", ACTION.DELETE);
+ options.put("prompt", ACTION.PROMPT);
}
-
- private EXTRA_FILES logFiles = EXTRA_FILES.ABORT;
- private EXTRA_FILES otherFiles = EXTRA_FILES.IGNORE;
+ private boolean readOnly = false;
+ private boolean migrationNeeded = false;
+ private boolean newRootRegion = false;
+ private ACTION logFiles = ACTION.IGNORE;
+ private ACTION otherFiles = ACTION.IGNORE;
+
private BufferedReader reader = null;
private final Set<String> references = new HashSet<String>();
@@ -121,55 +122,87 @@
/** {@inheritDoc} */
public int run(String[] args) throws Exception {
- parseArgs(args);
-
+ if (parseArgs(args) != 0) {
+ return -1;
+ }
+
+ LOG.info("Verifying that HBase is not running...");
try {
HBaseAdmin admin = new HBaseAdmin(conf);
if (admin.isMasterRunning()) {
throw new IllegalStateException(
- "HBase cluster must be off-line while being upgraded");
+ "HBase cluster must be off-line while being upgraded");
}
} catch (MasterNotRunningException e) {
// ignore
}
+
+ LOG.info("Starting migration" + (readOnly ? " check" : ""));
+
FileSystem fs = FileSystem.get(conf); // get DFS handle
- Path rootdir = fs.makeQualified(new Path( // get path for instance
+ Path rootdir = fs.makeQualified(new Path( // get HBase root dir
conf.get(HConstants.HBASE_DIR, HConstants.DEFAULT_HBASE_DIR)));
+ if (!fs.exists(rootdir)) {
+ throw new FileNotFoundException("HBase root directory " +
+ rootdir.toString() + " does not exist.");
+ }
+
// See if there is a file system version file
if (FSUtils.checkVersion(fs, rootdir)) {
- LOG.info("file system is at current level, no upgrade necessary");
+ LOG.info("No migration necessary");
return 0;
}
+ // check to see if new root region dir exists
+
+ checkNewRootRegionDirExists(fs, rootdir);
+
// check for "extra" files and for old upgradable regions
extraFiles(fs, rootdir);
- // find root region
+ if (!newRootRegion) {
+ // find root region
- Path rootRegion = new Path(rootdir,
- OLD_PREFIX + HRegionInfo.rootRegionInfo.getEncodedName());
+ Path rootRegion = new Path(rootdir,
+ OLD_PREFIX + HRegionInfo.rootRegionInfo.getEncodedName());
- if (!fs.exists(rootRegion)) {
- throw new IOException("cannot find root region " + rootRegion.toString());
- }
+ if (!fs.exists(rootRegion)) {
+ throw new IOException("Cannot find root region " +
+ rootRegion.toString());
+ } else if (readOnly) {
+ migrationNeeded = true;
+ } else {
+ migrateRegionDir(fs, rootdir, HConstants.ROOT_TABLE_NAME, rootRegion);
+ scanRootRegion(fs, rootdir);
- processRegionDir(fs, rootdir, HConstants.ROOT_TABLE_NAME, rootRegion);
- scanRootRegion(fs, rootdir);
+ // scan for left over regions
- // scan for left over regions
-
- extraRegions(fs, rootdir);
+ extraRegions(fs, rootdir);
+ }
+ }
- // set file system version
-
- FSUtils.setVersion(fs, rootdir);
+ if (!readOnly) {
+ // set file system version
+ LOG.info("Setting file system version");
+ FSUtils.setVersion(fs, rootdir);
+ } else if (migrationNeeded) {
+ LOG.info("Migration needed");
+ }
return 0;
}
-
+
+ private void checkNewRootRegionDirExists(FileSystem fs, Path rootdir)
+ throws IOException {
+ Path rootRegionDir =
+ HRegion.getRegionDir(rootdir, HRegionInfo.rootRegionInfo);
+ newRootRegion = fs.exists(rootRegionDir);
+ migrationNeeded = !newRootRegion;
+ }
+ // Check for files that should not be there or should be migrated
private void extraFiles(FileSystem fs, Path rootdir) throws IOException {
FileStatus[] stats = fs.listStatus(rootdir);
if (stats == null || stats.length == 0) {
@@ -178,44 +211,52 @@
}
for (int i = 0; i < stats.length; i++) {
String name = stats[i].getPath().getName();
- if (!name.startsWith(OLD_PREFIX)) {
+ if (name.startsWith(OLD_PREFIX)) {
+ if (!newRootRegion) {
+ // We need to migrate if the new root region directory doesn't exist
+ migrationNeeded = true;
+ String regionName = name.substring(OLD_PREFIX.length());
+ try {
+ Integer.parseInt(regionName);
+
+ } catch (NumberFormatException e) {
+ extraFile(otherFiles, "Old region format can not be converted: " +
+ name, fs, stats[i].getPath());
+ }
+ } else {
+ // Since the new root region directory exists, we assume that this
+ // directory is not necessary
+ extraFile(otherFiles, "Old region directory found: " + name, fs,
+ stats[i].getPath());
+ }
+ } else {
+ // File name does not start with "hregion_"
if (name.startsWith("log_")) {
- String message = "unrecovered region server log file " + name;
+ String message = "Unrecovered region server log file " + name +
+ " this file can be recovered by the master when it starts.";
extraFile(logFiles, message, fs, stats[i].getPath());
- } else {
- String message = "unrecognized file " + name;
+ } else if (!newRootRegion) {
+ // new root region directory does not exist. This is an extra file
+ String message = "Unrecognized file " + name;
extraFile(otherFiles, message, fs, stats[i].getPath());
}
- } else {
- String regionName = name.substring(OLD_PREFIX.length());
- try {
- Integer.parseInt(regionName);
-
- } catch (NumberFormatException e) {
- extraFile(otherFiles, "old region format can not be converted: " +
- name, fs, stats[i].getPath());
- }
}
}
}
- private void extraFile(EXTRA_FILES action, String message, FileSystem fs,
+ private void extraFile(ACTION action, String message, FileSystem fs,
Path p) throws IOException {
- if (action == EXTRA_FILES.ABORT) {
+ if (action == ACTION.ABORT) {
throw new IOException(message + " aborting");
-
- } else if (action == EXTRA_FILES.IGNORE) {
+ } else if (action == ACTION.IGNORE) {
LOG.info(message + " ignoring");
-
- } else if (action == EXTRA_FILES.DELETE) {
+ } else if (action == ACTION.DELETE) {
LOG.info(message + " deleting");
fs.delete(p);
-
} else {
- // logFiles == EXTRA_FILES.PROMPT
+ // ACTION.PROMPT
String response = prompt(message + " delete? [y/n]");
-
if (response.startsWith("Y") || response.startsWith("y")) {
LOG.info(message + " deleting");
fs.delete(p);
@@ -223,7 +264,7 @@
}
}
- private void processRegionDir(FileSystem fs, Path rootdir, Text tableName,
+ private void migrateRegionDir(FileSystem fs, Path rootdir, Text tableName,
Path oldPath) throws IOException {
// Create directory where table will live
@@ -300,7 +341,7 @@
// First move the meta region to where it should be and rename
// subdirectories as necessary
- processRegionDir(fs, rootdir, HConstants.META_TABLE_NAME,
+ migrateRegionDir(fs, rootdir, HConstants.META_TABLE_NAME,
new Path(rootdir, OLD_PREFIX + info.getEncodedName()));
// Now scan and process the meta table
@@ -348,7 +389,7 @@
// Move the region to where it should be and rename
// subdirectories as necessary
- processRegionDir(fs, rootdir, region.getTableDesc().getName(),
+ migrateRegionDir(fs, rootdir, region.getTableDesc().getName(),
new Path(rootdir, OLD_PREFIX + region.getEncodedName()));
results.clear();
@@ -376,11 +417,11 @@
String message;
if (references.contains(encodedName)) {
message =
- "region not in meta table but other regions reference it " + name;
+ "Region not in meta table but other regions reference it " + name;
} else {
message =
- "region not in meta table and no other regions reference it " + name;
+ "Region not in meta table and no other regions reference it " + name;
}
extraFile(otherFiles, message, fs, stats[i].getPath());
}
@@ -388,15 +429,15 @@
}
@SuppressWarnings("static-access")
- private void parseArgs(String[] args) {
+ private int parseArgs(String[] args) {
Options opts = new Options();
- Option logFiles = OptionBuilder.withArgName("abort|ignore|delete|prompt")
+ Option logFiles = OptionBuilder.withArgName(ACTIONS)
.hasArg()
.withDescription(
"disposition of unrecovered region server logs: {abort|ignore|delete|prompt}")
.create("logfiles");
- Option extraFiles = OptionBuilder.withArgName("abort|ignore|delete|prompt")
+ Option extraFiles = OptionBuilder.withArgName(ACTIONS)
.hasArg()
.withDescription("disposition of 'extra' files: {abort|ignore|delete|prompt}")
.create("extrafiles");
@@ -404,23 +445,64 @@
opts.addOption(logFiles);
opts.addOption(extraFiles);
- CommandLineParser parser = new GnuParser();
- try {
- CommandLine commandLine = parser.parse(opts, args, true);
+ GenericOptionsParser parser =
+ new GenericOptionsParser(this.getConf(), opts, args);
+
+ String[] remainingArgs = parser.getRemainingArgs();
+ if (remainingArgs.length != 1) {
+ usage();
+ return -1;
+ }
+ if (remainingArgs[0].compareTo("check") == 0) {
+ this.readOnly = true;
+ } else if (remainingArgs[0].compareTo("migrate") != 0) {
+ usage();
+ return -1;
+ }
+
+ if (readOnly) {
+ this.logFiles = ACTION.IGNORE;
+ this.otherFiles = ACTION.IGNORE;
+
+ } else {
+ CommandLine commandLine = parser.getCommandLine();
+
+ ACTION action = null;
if (commandLine.hasOption("log-files")) {
- this.logFiles = options.get(commandLine.getOptionValue("log-files"));
+ action = options.get(commandLine.getOptionValue("logfiles"));
+ if (action == null) {
+ usage();
+ return -1;
+ }
+ this.logFiles = action;
}
if (commandLine.hasOption("extra-files")) {
- this.otherFiles = options.get(commandLine.getOptionValue("extra-files"));
+ action = options.get(commandLine.getOptionValue("extrafiles"));
+ if (action == null) {
+ usage();
+ return -1;
+ }
+ this.otherFiles = action;
}
- } catch (ParseException e) {
- LOG.error("options parsing failed", e);
-
- HelpFormatter formatter = new HelpFormatter();
- formatter.printHelp("options are: ", opts);
}
+ return 0;
}
+ private void usage() {
+ System.err.println("Usage: bin/hbase migrate { check | migrate } [options]\n");
+ System.err.println(" check perform migration checks only.");
+ System.err.println(" migrate perform migration checks and modify hbase.\n");
+ System.err.println(" Options are:");
+ System.err.println(" -logfiles={abort|ignore|delete|prompt}");
+ System.err.println(" action to take when unrecovered region");
+ System.err.println(" server log files are found.\n");
+ System.err.println(" -extrafiles={abort|ignore|delete|prompt}");
+ System.err.println(" action to take if \"extra\" files are found.\n");
+ System.err.println(" -conf specify an application configuration file");
+ System.err.println(" -D use value for given property");
+ System.err.println(" -fs specify a namenode");
+ }
+
private synchronized String prompt(String prompt) {
System.out.print(prompt + " > ");
System.out.flush();
@@ -441,13 +523,9 @@
* @param args command line arguments
*/
public static void main(String[] args) {
- Tool t = new Migrate();
- GenericOptionsParser hadoopOpts =
- new GenericOptionsParser(t.getConf(), args);
-
int status = 0;
try {
- status = ToolRunner.run(t, hadoopOpts.getRemainingArgs());
+ status = ToolRunner.run(new Migrate(), args);
} catch (Exception e) {
LOG.error("exiting due to error", e);
status = -1;
Index: src/contrib/hbase/bin/hbase
===================================================================
--- src/contrib/hbase/bin/hbase (revision 613773)
+++ src/contrib/hbase/bin/hbase (working copy)
@@ -63,6 +63,7 @@
echo " regionserver run an Hbase HRegionServer node"
echo " rest run an Hbase REST server"
echo " thrift run an Hbase Thrift server"
+ echo " migrate upgrade an hbase.rootdir"
echo " or"
echo " CLASSNAME run the class named CLASSNAME"
echo "Most commands print help when invoked w/o parameters."
@@ -219,6 +220,8 @@
CLASS='org.apache.hadoop.hbase.rest.Dispatcher'
elif [ "$COMMAND" = "thrift" ] ; then
CLASS='org.apache.hadoop.hbase.thrift.ThriftServer'
+elif [ "$COMMAND" = "migrate" ] ; then
+ CLASS='org.apache.hadoop.hbase.util.Migrate'
else
CLASS=$COMMAND
fi