diff --git bin/hbase bin/hbase
index 6430dc5..e02ca45 100755
--- bin/hbase
+++ bin/hbase
@@ -88,6 +88,8 @@ if [ $# = 0 ]; then
echo " wal Write-ahead-log analyzer"
echo " hfile Store file analyzer"
echo " zkcli Run the ZooKeeper shell"
+ echo " backup backup tables for recovery"
+ echo " restore restore tables from existing backup image"
echo " upgrade Upgrade hbase"
echo " master Run an HBase HMaster node"
echo " regionserver Run an HBase HRegionServer node"
@@ -305,6 +307,10 @@ elif [ "$COMMAND" = "hfile" ] ; then
CLASS='org.apache.hadoop.hbase.io.hfile.HFilePrettyPrinter'
elif [ "$COMMAND" = "zkcli" ] ; then
CLASS="org.apache.hadoop.hbase.zookeeper.ZooKeeperMainServer"
+elif [ "$COMMAND" = "backup" ] ; then
+ CLASS='org.apache.hadoop.hbase.backup.BackupClient'
+elif [ "$COMMAND" = "restore" ] ; then
+ CLASS='org.apache.hadoop.hbase.backup.RestoreClient'
elif [ "$COMMAND" = "upgrade" ] ; then
CLASS="org.apache.hadoop.hbase.migration.UpgradeTo96"
elif [ "$COMMAND" = "snapshot" ] ; then
diff --git hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
index 32f07cb..80f4514 100644
--- hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
+++ hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
@@ -1192,6 +1192,16 @@ public final class HConstants {
public static final String REGION_SPLIT_THREADS_MAX =
"hbase.regionserver.region.split.threads.max";
+ /**
+ * Backup/Restore constants
+ */
+
+ public static final String BACKUP_ENABLE_KEY = "hbase.backup.enable";
+ public static final boolean BACKUP_ENABLE_DEFAULT = true;
+ public static final String BACKUP_SYSTEM_TTL_KEY = "hbase.backup.system.ttl";
+ // Default TTL = 1 year
+ public static final int BACKUP_SYSTEM_TTL_DEFAULT = 365 * 24 * 3600;
+
private HConstants() {
// Can't be instantiated with this ctor.
}
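
A minimal sketch of how the new constants would be read through the standard Hadoop Configuration API once the patch is applied; the class and variable names here are illustrative, not part of the patch.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HConstants;

    public class BackupConfigCheck {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Feature toggle; defaults to true (BACKUP_ENABLE_DEFAULT)
        boolean enabled = conf.getBoolean(HConstants.BACKUP_ENABLE_KEY,
            HConstants.BACKUP_ENABLE_DEFAULT);
        // TTL for backup system data in seconds; defaults to one year
        int ttl = conf.getInt(HConstants.BACKUP_SYSTEM_TTL_KEY,
            HConstants.BACKUP_SYSTEM_TTL_DEFAULT);
        System.out.println("backup enabled=" + enabled + ", system ttl=" + ttl + "s");
      }
    }
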
diff --git hbase-server/pom.xml hbase-server/pom.xml
index 229d11d..33436c3 100644
--- hbase-server/pom.xml
+++ hbase-server/pom.xml
@@ -357,6 +357,11 @@
 <artifactId>commons-collections</artifactId>
 </dependency>
 <dependency>
+ <groupId>org.apache.hadoop</groupId>
+ <artifactId>hadoop-distcp</artifactId>
+ <version>${hadoop-two.version}</version>
+ </dependency>
+ <dependency>
 <groupId>org.apache.hbase</groupId>
 <artifactId>hbase-hadoop-compat</artifactId>
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupClient.java hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupClient.java
new file mode 100644
index 0000000..de32bdb
--- /dev/null
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupClient.java
@@ -0,0 +1,547 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.backup;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Set;
+
+import org.apache.commons.cli.CommandLine;
+import org.apache.commons.cli.Options;
+import org.apache.commons.cli.ParseException;
+import org.apache.commons.cli.PosixParser;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.backup.BackupHandler.BACKUPSTATUS;
+import org.apache.hadoop.hbase.backup.BackupRestoreConstants.BACKUP_COMMAND;
+import org.apache.hadoop.hbase.backup.BackupUtil.BackupCompleteData;
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+import org.apache.log4j.Level;
+import org.apache.log4j.Logger;
+
+/**
+ * Backup HBase tables locally or on a remote cluster
+ *
+ * Serves as the client entry point for the following features:
+ * - Full backup: provides local and remote backup/restore for a list of tables
+ * - Incremental backup: builds on top of a full backup as a daily/weekly backup
+ * - Convert incremental backup WAL files into HFiles
+ * - Merge several backup images into one (e.g. merge weekly images into a monthly one)
+ * - Add and remove tables to and from a backup image
+ * - Cancel a backup process
+ * - Full backup based on an existing snapshot
+ * - Describe the information of a backup image
+ */
+
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public class BackupClient {
+ private static final Log LOG = LogFactory.getLog(BackupClient.class);
+ private static Options opt;
+ private static Configuration conf = null;
+
+ public static void init() throws IOException {
+ // define supported options
+ opt = new Options();
+
+ opt.addOption("all", false, "All tables");
+ opt.addOption("debug", false, "Enable debug loggings");
+ opt.addOption("t", true, "Table name");
+ opt.addOption("b", true, "Bandwidth (MB/s)");
+ opt.addOption("w", true, "Number of workers");
+ opt.addOption("n", true, "History length");
+ opt.addOption("set", true, "Backup set name");
+
+ // create configuration instance
+ conf = getConf();
+
+ // disable irrelevant loggers to avoid them messing up the command output
+ disableUselessLoggers();
+
+ }
+
+ /**
+ * @param args command line arguments
+ * @throws IOException
+ */
+ public static void main(String[] args) throws IOException {
+ init();
+ parseAndRun(args);
+ System.exit(0);
+ }
+
+ /**
+ * Set the configuration from a given one.
+ * @param newConf A new given configuration
+ */
+ public synchronized static void setConf(Configuration newConf) {
+ conf = newConf;
+ BackupUtil.setConf(newConf);
+ }
+
+ public static Configuration getConf() {
+ if (conf == null) {
+ conf = BackupUtil.getConf();
+ }
+ return conf;
+ }
+
+ private static void disableUselessLoggers() {
+ // disable the zookeeper logger to avoid it messing up the command output
+ Logger zkLogger = Logger.getLogger("org.apache.zookeeper");
+ LOG.debug("Zookeeper log level before set: " + zkLogger.getLevel());
+ zkLogger.setLevel(Level.OFF);
+ LOG.debug("Zookeeper log level after set: " + zkLogger.getLevel());
+
+ // disable the hbase zookeeper tool logger to avoid it messing up the command output
+ Logger hbaseZkLogger = Logger.getLogger("org.apache.hadoop.hbase.zookeeper");
+ LOG.debug("HBase zookeeper log level before set: " + hbaseZkLogger.getLevel());
+ hbaseZkLogger.setLevel(Level.OFF);
+ LOG.debug("HBase Zookeeper log level after set: " + hbaseZkLogger.getLevel());
+
+ // disable the hbase client logger to avoid it messing up the command output
+ Logger hbaseClientLogger = Logger.getLogger("org.apache.hadoop.hbase.client");
+ LOG.debug("HBase client log level before set: " + hbaseClientLogger.getLevel());
+ hbaseClientLogger.setLevel(Level.OFF);
+ LOG.debug("HBase client log level after set: " + hbaseClientLogger.getLevel());
+ }
+
+ public static void parseAndRun(String[] args) throws IOException {
+
+ String cmd = null;
+ String[] remainArgs = null;
+ if (args == null || args.length == 0) {
+ BackupCommands.createCommand(BackupRestoreConstants.BACKUP_COMMAND.HELP, null).execute();
+ } else {
+ cmd = args[0];
+ remainArgs = new String[args.length - 1];
+ if (args.length > 1) {
+ System.arraycopy(args, 1, remainArgs, 0, args.length - 1);
+ }
+ }
+ CommandLine cmdline = null;
+ try {
+ cmdline = new PosixParser().parse(opt, remainArgs);
+ } catch (ParseException e) {
+ LOG.error("Could not parse command", e);
+ System.exit(-1);
+ }
+
+ BACKUP_COMMAND type = BACKUP_COMMAND.HELP;
+ if (BACKUP_COMMAND.CREATE.name().equalsIgnoreCase(cmd)) {
+ type = BACKUP_COMMAND.CREATE;
+ } else if (BACKUP_COMMAND.HELP.name().equalsIgnoreCase(cmd)) {
+ type = BACKUP_COMMAND.HELP;
+ } else {
+ System.out.println("Unsupported command for backup: " + cmd);
+ }
+
+ // enable debug logging
+ Logger backupClientLogger = Logger.getLogger("org.apache.hadoop.hbase.backup");
+ if (cmdline.hasOption("debug")) {
+ backupClientLogger.setLevel(Level.DEBUG);
+ } else {
+ backupClientLogger.setLevel(Level.INFO);
+ }
+
+ BackupCommands.createCommand(type, cmdline).execute();
+ }
+
+ public static String create(String backupType, String backupRootPath, String tableListStr,
+ String snapshot) throws IOException {
+ return create(backupType, backupRootPath, tableListStr, snapshot, -1, -1);
+ }
+
+ /**
+ * Send a backup request to the server, and monitor the progress if necessary
+ * @param backupType : full or incremental
+ * @param backupRootPath : the root path specified by the user
+ * @param tableListStr : the table list specified by the user
+ * @param snapshot : use an existing snapshot if specified by the user (in a future jira)
+ * @param workers : number of parallel workers (mappers in M/R)
+ * @param bandwidth : bandwidth per worker in MB/sec (IO throttling)
+ * @return backupId
+ * @throws IOException
+ */
+ public static String create(String backupType, String backupRootPath, String tableListStr,
+ String snapshot, int workers, int bandwidth) throws IOException {
+
+ String backupId = BackupRestoreConstants.BACKUPID_PREFIX + EnvironmentEdgeManager.currentTime();
+
+ // check the target root path first; it is reused if it already exists
+ boolean isTargetExist = false;
+ try {
+ isTargetExist = HBackupFileSystem.checkPathExist(backupRootPath, conf);
+ } catch (IOException e) {
+ String expMsg = e.getMessage();
+ String newMsg = null;
+ if (expMsg.contains("No FileSystem for scheme")) {
+ newMsg =
+ "Unsupported filesystem scheme found in the backup target url. Error Message: "
+ + newMsg;
+ LOG.error(newMsg);
+ throw new IOException(newMsg);
+ } else if (expMsg.contains("no authority supported")) {
+ newMsg = "webhdfs url will be supported in a future jira";
+ LOG.error(newMsg);
+ throw new IOException(newMsg);
+ } else {
+ throw e;
+ }
+ } catch (RuntimeException e) {
+ LOG.error(e.getMessage());
+ throw e;
+ }
+
+ if (isTargetExist) {
+ LOG.info("Using existing backup root dir: " + backupRootPath);
+ } else {
+ LOG.info("Backup root dir " + backupRootPath + " does not exist. Will be created.");
+ }
+
+ // table list specified for backup, trigger backup on specified tables
+ String tableList =
+ (tableListStr == null) ? null : tableListStr.replaceAll(
+ BackupRestoreConstants.TABLENAME_DELIMITER_IN_COMMAND,
+ BackupRestoreConstants.TABLENAME_DELIMITER_IN_ZNODE);
+ try {
+ requestBackup(backupId, backupType, tableList, backupRootPath,
+ snapshot, workers, bandwidth);
+ } catch (RuntimeException e) {
+ String errMsg = e.getMessage();
+ if (errMsg != null
+ && (errMsg.startsWith("Non-existing tables found") || errMsg
+ .startsWith("Snapshot is not found"))) {
+ LOG.error(errMsg + ", please check your command");
+ throw e;
+ } else {
+ throw e;
+ }
+ }
+ return backupId;
+ }
+
+ /**
+ * Prepare and submit a backup request
+ * @param backupId : backup_timestamp (something like backup_1398729212626)
+ * @param backupType : full or incremental
+ * @param tableList : tables to be backed up
+ * @param targetRootDir : specified by the user
+ * @param snapshot : use an existing snapshot if specified by the user (for a future jira)
+ * @param workers : number of parallel workers (mappers in M/R)
+ * @param bandwidth : bandwidth per worker in MB/sec
+ * @throws IOException
+ */
+ protected static void requestBackup(String backupId, String backupType, String tableList,
+ String targetRootDir, String snapshot, int workers, int bandwidth) throws IOException {
+
+ Configuration conf = getConf();
+ BackupManager backupManager = null;
+ BackupContext backupContext = null;
+ if (snapshot != null) {
+ LOG.warn("Snapshot option specified, backup type and table option will be ignored,\n"
+ + "full backup will be taken based on the given snapshot.");
+ throw new IOException("backup using existing Snapshot will be implemented in future jira");
+ }
+
+ Admin hbadmin = null;
+ Connection conn = null;
+ try {
+
+ backupManager = new BackupManager(conf);
+ String tables = tableList;
+ if (backupType.equals(BackupRestoreConstants.BACKUP_TYPE_INCR)) {
+ Set<String> incrTableSet = backupManager.getIncrementalBackupTableSet();
+ if (incrTableSet.isEmpty()) {
+ LOG.warn("Incremental backup table set contains no table.\n"
+ + "Use 'backup create full' or 'backup stop' to \n "
+ + "change the tables covered by incremental backup.");
+ throw new RuntimeException("No table covered by incremental backup.");
+ }
+ StringBuilder sb = new StringBuilder();
+ for (String tableName : incrTableSet) {
+ sb.append(tableName + " ");
+ }
+ LOG.info("Incremental backup for the following table set: " + sb.toString());
+ tables =
+ sb.toString().trim()
+ .replaceAll(" ", BackupRestoreConstants.TABLENAME_DELIMITER_IN_ZNODE);
+ }
+
+ // check whether table exists first before starting real request
+ if (tables != null) {
+ String[] tableNames = tables.split(BackupRestoreConstants.TABLENAME_DELIMITER_IN_ZNODE);
+ ArrayList<String> noneExistingTableList = null;
+ conn = ConnectionFactory.createConnection(conf);
+ hbadmin = conn.getAdmin();
+ for (String tableName : tableNames) {
+ if (!hbadmin.tableExists(TableName.valueOf(tableName))) {
+ if (noneExistingTableList == null) {
+ noneExistingTableList = new ArrayList<String>();
+ }
+ noneExistingTableList.add(tableName);
+ }
+ }
+ if (noneExistingTableList != null) {
+ if (backupType.equals(BackupRestoreConstants.BACKUP_TYPE_INCR)) {
+ LOG.warn("Incremental backup table set contains no-exising table: "
+ + noneExistingTableList);
+ } else {
+ // Throw exception only in full mode - we try to back up a non-existing table
+ throw new RuntimeException("Non-existing tables found in the table list: "
+ + noneExistingTableList);
+ }
+ }
+ }
+
+ // if any target table backup dir already exist, then no backup action taken
+ String[] tableNames = null;
+ if (tables != null && !tables.equals("")) {
+ tableNames = tables.split(BackupRestoreConstants.TABLENAME_DELIMITER_IN_ZNODE);
+ }
+ if (tableNames != null && tableNames.length > 0) {
+ for (String table : tableNames) {
+ String targetTableBackupDir =
+ HBackupFileSystem.getTableBackupDir(targetRootDir, backupId, table);
+ Path targetTableBackupDirPath = new Path(targetTableBackupDir);
+ FileSystem outputFs = FileSystem.get(targetTableBackupDirPath.toUri(), conf);
+ if (outputFs.exists(targetTableBackupDirPath)) {
+ throw new IOException("Target backup directory " + targetTableBackupDir
+ + " exists already.");
+ }
+ }
+ }
+ backupContext =
+ backupManager.createBackupContext(backupId, backupType, tables,
+ targetRootDir, snapshot, workers, bandwidth);
+ backupManager.initialize();
+ backupManager.dispatchRequest(backupContext);
+ } catch (BackupException e) {
+ // suppress the backup exception wrapped within #initialize or #dispatchRequest, backup
+ // exception has already been handled normally
+ StackTraceElement[] stes = e.getStackTrace();
+ for (StackTraceElement ste : stes) {
+ LOG.info(ste);
+ }
+ LOG.error("Backup Exception " + e.getMessage());
+ } finally {
+ if (hbadmin != null) {
+ hbadmin.close();
+ }
+ if (conn != null) {
+ conn.close();
+ }
+ }
+ }
+
+ public static void describeBackupImage(String backupId) {
+ Configuration conf = getConf();
+ BackupContext backupContext = null;
+ try {
+ BackupSystemTable systemTable = BackupSystemTable.getTable(conf);
+ backupContext = systemTable.readBackupStatus(backupId);
+ if (backupContext != null) {
+ System.out.println(backupContext.getShortDescription());
+ } else {
+ System.out.println("No information found for backupID=" + backupId);
+ }
+ } catch (IOException e) {
+ System.out.println("describe failed: ");
+ e.printStackTrace();
+ }
+ }
+
+ public static void showProgress(String backupId) {
+ Configuration conf = getConf();
+ BackupContext backupContext = null;
+
+ try {
+ BackupSystemTable systemTable = BackupSystemTable.getTable(conf);
+
+ if (backupId == null) {
+ ArrayList<BackupContext> recentSessions =
+ systemTable.getBackupContexts(BACKUPSTATUS.ONGOING);
+ if (recentSessions.isEmpty()) {
+ System.out.println("No ongoing sessions found.");
+ return;
+ }
+ // else show status for all ongoing sessions,
+ // there can be at most one
+ for (BackupContext context : recentSessions) {
+ System.out.println(context.getStatusAndProgressAsString());
+ }
+ } else {
+
+ backupContext = systemTable.readBackupStatus(backupId);
+ if (backupContext != null) {
+ System.out.println(backupContext.getStatusAndProgressAsString());
+ } else {
+ System.out.println("No information found for backupID=" + backupId);
+ }
+ }
+ } catch (IOException e) {
+ System.out.println("describe failed: ");
+ e.printStackTrace();
+ }
+ }
+
+ public static void deleteBackups(String[] backupIds) {
+ Configuration conf = getConf();
+ BackupContext backupContext = null;
+ String backupId = null;
+ try {
+ BackupSystemTable systemTable = BackupSystemTable.getTable(conf);
+ for (int i = 0; i < backupIds.length; i++) {
+ backupId = backupIds[i];
+ backupContext = systemTable.readBackupStatus(backupId);
+ if (backupContext != null) {
+ BackupUtil.cleanupBackupData(backupContext);
+ systemTable.deleteBackupStatus(backupContext.getBackupId());
+ System.out.println("Delete backup for backupID=" + backupId + " completed.");
+ } else {
+ System.out.println("Delete backup failed: no information found for backupID=" + backupId);
+ }
+ }
+ } catch (IOException e) {
+ System.out.println("delete failed: " + backupId);
+ e.printStackTrace();
+ }
+ }
+
+ public static void cancelBackup(String backupId) {
+ // Kill distributed job if active
+ Configuration conf = getConf();
+ try {
+ BackupSystemTable systemTable = BackupSystemTable.getTable(conf);
+ BackupContext backupContext = systemTable.readBackupStatus(backupId);
+ if (backupContext != null) {
+ BackupUtil.cleanupBackupData(backupContext);
+ systemTable.deleteBackupStatus(backupContext.getBackupId());
+ byte[] jobId = backupContext.getJobId();
+ BackupCopyService service = BackupRestoreServiceFactory.getBackupCopyService(conf);
+ service.cancelJob(jobId);
+ } else {
+ System.out.println("No information found for backupID=" + backupId);
+ }
+ } catch (IOException e) {
+ System.out.println("delete failed: " + backupId);
+ e.printStackTrace();
+ }
+ // then clean backup image
+ deleteBackups(new String[] { backupId });
+ }
+
+ public static void showHistory(int n) {
+ Configuration conf = getConf();
+ try {
+ BackupSystemTable systemTable = BackupSystemTable.getTable(conf);
+ List<BackupCompleteData> history = systemTable.getBackupHistory();
+
+ int max = Math.min(n, history.size());
+ for (int i = 0; i < max; i++) {
+ printBackupCompleteData(history.get(i));
+ }
+ } catch (IOException e) {
+ System.out.println("history failed");
+ e.printStackTrace();
+ }
+ }
+
+ private static void printBackupCompleteData(BackupCompleteData backupCompleteData) {
+ System.out.println(backupCompleteData.toString());
+ }
+
+ public static void backupSetList() {
+ Configuration conf = getConf();
+ try {
+ BackupSystemTable systemTable = BackupSystemTable.getTable(conf);
+ List<String> list = systemTable.backupSetList();
+ for (String s : list) {
+ System.out.println(s);
+ }
+ System.out.println("Found " + list.size() + " records");
+ } catch (IOException e) {
+ System.out.println("Backup set list failed");
+ e.printStackTrace();
+ }
+ }
+
+ public static void backupSetDescribe(String name) {
+ Configuration conf = getConf();
+ try {
+ BackupSystemTable systemTable = BackupSystemTable.getTable(conf);
+ String[] list = systemTable.backupSetDescribe(name);
+ for (String s : list) {
+ System.out.println(s);
+ }
+ System.out.println("Found " + list.length + " records");
+ } catch (IOException e) {
+ System.out.println("Backup set describe failed for " + name);
+ e.printStackTrace();
+ }
+ }
+
+ public static void backupSetDelete(String name) {
+ Configuration conf = getConf();
+ try {
+ BackupSystemTable systemTable = BackupSystemTable.getTable(conf);
+ systemTable.backupSetDelete(name);
+ System.out.println("Deleted "+name);
+ } catch (IOException e) {
+ System.out.println("Backup set delete failed for "+name);
+ e.printStackTrace();
+ }
+ }
+
+ public static void backupSetAdd(String name, String[] tables) {
+ Configuration conf = getConf();
+ try {
+ BackupSystemTable systemTable = BackupSystemTable.getTable(conf);
+ systemTable.backupSetAdd(name, tables);
+ System.out.println("Added tables to '"+name+"'");
+ } catch (IOException e) {
+ System.out.println("Backup set add failed for "+name);
+ e.printStackTrace();
+ }
+ }
+
+ public static void backupSetRemove(String name, String[] tables) {
+ Configuration conf = getConf();
+ try {
+ BackupSystemTable systemTable = BackupSystemTable.getTable(conf);
+ systemTable.backupSetRemove(name, tables);
+ System.out.println("Removed tables from '"+name+"'");
+ } catch (IOException e) {
+ System.out.println("Backup set remove failed for "+name);
+ e.printStackTrace();
+ }
+ }
+}
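
BackupClient.create(...) is also usable programmatically, mirroring "hbase backup create" on the command line. A minimal sketch, assuming the patch above is applied; the HDFS path and table names are placeholders.

    import org.apache.hadoop.hbase.backup.BackupClient;

    public class FullBackupExample {
      public static void main(String[] args) throws Exception {
        // Type is "full" or "incremental"; the table list is comma separated,
        // as on the command line; a null snapshot means no snapshot-based backup.
        String backupId = BackupClient.create("full",
            "hdfs://namenode:8020/backup", "t1,t2", null);
        System.out.println("Backup requested, id=" + backupId);
      }
    }
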
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupCommands.java hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupCommands.java
new file mode 100644
index 0000000..f80f7cb
--- /dev/null
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupCommands.java
@@ -0,0 +1,492 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.backup;
+
+import java.io.IOException;
+
+import org.apache.commons.cli.CommandLine;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+import org.apache.hadoop.hbase.backup.BackupRestoreConstants;
+import org.apache.hadoop.hbase.backup.BackupRestoreConstants.BACKUP_COMMAND;
+
+/**
+ * General backup commands, options and usage messages
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+class BackupCommands {
+
+ private static final String USAGE = "Usage: hbase backup COMMAND\n"
+ + "where COMMAND is one of:\n"
+ + " create create a new backup image\n"
+ + " cancel cancel an ongoing backup\n"
+ + " delete delete an existing backup image\n"
+ + " describe show the detailed information of a backup image\n"
+ + " history show history of all successful backups\n"
+ + " progress show the progress of the latest backup request\n"
+ + " convert convert incremental backup WAL files into HFiles\n"
+ + " merge merge backup images\n"
+ + " set backup set management\n"
+ + "Enter \'help COMMAND\' to see help message for each command\n";
+
+ private static final String CREATE_CMD_USAGE =
+ "Usage: hbase backup create <type> <backup_root_path> [tables] [-s name] [-convert] "
+ + "[-silent] [-w workers] [-b bandwidth]\n"
+ + " type \"full\" to create a full backup image;\n"
+ + " \"incremental\" to create an incremental backup image\n"
+ + " backup_root_path The full root path to store the backup image,\n"
+ + " the prefix can be gpfs, hdfs or webhdfs\n"
+ + " Options:\n"
+ + " tables If no tables (\"\") are specified, all tables are backed up.\n"
+ + " Otherwise it is a comma separated list of tables.\n"
+ + " -s name Use the specified snapshot for full backup\n"
+ + " -convert For an incremental backup, convert WAL files to HFiles\n"
+ + " -w workers Number of parallel workers\n"
+ + " -b bandwidth Bandwidth per worker (in MB/sec)";
+
+ private static final String PROGRESS_CMD_USAGE = "Usage: hbase backup progress <backupId>\n"
+ + " backupId backup image id;\n";
+
+ private static final String DESCRIBE_CMD_USAGE = "Usage: hbase backup describe <backupId>\n"
+ + " backupId backup image id\n";
+
+ private static final String HISTORY_CMD_USAGE = "Usage: hbase backup history [-n N]\n"
+ + " -n N show up to N last backup sessions, default - 10;\n";
+
+ private static final String DELETE_CMD_USAGE = "Usage: hbase backup delete <backupId>\n"
+ + " backupId backup image id;\n";
+
+ private static final String CANCEL_CMD_USAGE = "Usage: hbase backup cancel <backupId>\n"
+ + " backupId backup image id;\n";
+
+ private static final String SET_CMD_USAGE = "Usage: hbase backup set COMMAND [name] [tables]\n"
+ + " name Backup set name\n"
+ + " tables If no tables (\"\") are specified, all tables will belong to the set. "
+ + "Otherwise it is a\n" + " comma separated list of tables.\n"
+ + "where COMMAND is one of:\n"
+ + " add add tables to a set, create the set if needed\n"
+ + " remove remove tables from a set\n"
+ + " list list all backup sets\n"
+ + " describe describe a set\n"
+ + " delete delete a backup set\n";
+
+
+ interface Command {
+ void execute() throws IOException;
+ }
+
+ static Command createCommand(BACKUP_COMMAND type, CommandLine cmdline) {
+ Command cmd = null;
+ switch (type) {
+ case CREATE:
+ cmd = new CreateCommand(cmdline);
+ break;
+ case DESCRIBE:
+ cmd = new DescribeCommand(cmdline);
+ break;
+ case PROGRESS:
+ cmd = new ProgressCommand(cmdline);
+ break;
+ case DELETE:
+ cmd = new DeleteCommand(cmdline);
+ break;
+ case CANCEL:
+ cmd = new CancelCommand(cmdline);
+ break;
+ case HISTORY:
+ cmd = new HistoryCommand(cmdline);
+ break;
+ case SET:
+ cmd = new BackupSetCommand(cmdline);
+ break;
+ case HELP:
+ default:
+ cmd = new HelpCommand(cmdline);
+ break;
+ }
+
+ return cmd;
+ }
+
+ private static class CreateCommand implements Command {
+ CommandLine cmdline;
+
+ CreateCommand(CommandLine cmdline) {
+ this.cmdline = cmdline;
+ }
+
+ @Override
+ public void execute() throws IOException {
+ if (cmdline == null || cmdline.getArgs() == null) {
+ System.out.println("ERROR: missing arguments");
+ System.out.println(CREATE_CMD_USAGE);
+ System.exit(-1);
+ }
+ String[] args = cmdline.getArgs();
+ if (args.length < 2) {
+ System.out.println("ERROR: wrong number of arguments");
+ System.out.println(CREATE_CMD_USAGE);
+ System.exit(-1);
+ }
+
+ if (!BackupRestoreConstants.BACKUP_TYPE_FULL.equalsIgnoreCase(args[0])
+ && !BackupRestoreConstants.BACKUP_TYPE_INCR.equalsIgnoreCase(args[0])) {
+ System.out.println("ERROR: invalid backup type");
+ System.out.println(CREATE_CMD_USAGE);
+ System.exit(-1);
+ }
+
+ String snapshot = cmdline.hasOption('s') ? cmdline.getOptionValue('s') : null;
+ String tables = null;
+ // Check backup set
+ if(cmdline.hasOption("set")){
+ String setName = cmdline.getOptionValue("set");
+ tables = getTablesForSet(setName);
+ if(tables == null)
+ throw new IOException("Backup set '"+setName +"' is either empty or does not exist");
+ } else{
+ tables = (args.length == 3) ? args[2] : null;
+ }
+ int bandwidth = cmdline.hasOption('b') ? Integer.parseInt(cmdline.getOptionValue('b')) : -1;
+ int workers = cmdline.hasOption('w') ? Integer.parseInt(cmdline.getOptionValue('w')) : -1;
+ try {
+ BackupClient.create(args[0], args[1], tables, snapshot, workers, bandwidth);
+ } catch (RuntimeException e) {
+ System.out.println("ERROR: " + e.getMessage());
+ System.exit(-1);
+ }
+ }
+
+ private String getTablesForSet(String name) throws IOException {
+ BackupSystemTable table =
+ BackupSystemTable.getTable(BackupClient.getConf());
+ String[] tables = table.backupSetDescribe(name);
+ if (tables == null) {
+ return null;
+ }
+ StringBuilder sb = new StringBuilder();
+ for (int i = 0; i < tables.length; i++) {
+ sb.append(tables[i]);
+ if (i < tables.length - 1) {
+ sb.append(BackupRestoreConstants.TABLENAME_DELIMITER_IN_COMMAND);
+ }
+ }
+ return sb.toString();
+ }
+ }
+
+ private static class HelpCommand implements Command {
+ CommandLine cmdline;
+
+ HelpCommand(CommandLine cmdline) {
+ this.cmdline = cmdline;
+ }
+
+ @Override
+ public void execute() throws IOException {
+ if (cmdline == null) {
+ System.out.println(USAGE);
+ System.exit(0);
+ }
+
+ String[] args = cmdline.getArgs();
+ if (args == null || args.length == 0) {
+ System.out.println(USAGE);
+ System.exit(0);
+ }
+
+ if (args.length != 1) {
+ System.out.println("Only support check help message of a single command type");
+ System.out.println(USAGE);
+ System.exit(0);
+ }
+
+ String type = args[0];
+
+ if (BACKUP_COMMAND.CREATE.name().equalsIgnoreCase(type)) {
+ System.out.println(CREATE_CMD_USAGE);
+ } else if (BACKUP_COMMAND.DESCRIBE.name().equalsIgnoreCase(type)) {
+ System.out.println(DESCRIBE_CMD_USAGE);
+ } else if (BACKUP_COMMAND.HISTORY.name().equalsIgnoreCase(type)) {
+ System.out.println(HISTORY_CMD_USAGE);
+ } else if (BACKUP_COMMAND.PROGRESS.name().equalsIgnoreCase(type)) {
+ System.out.println(PROGRESS_CMD_USAGE);
+ } else if (BACKUP_COMMAND.DELETE.name().equalsIgnoreCase(type)) {
+ System.out.println(DELETE_CMD_USAGE);
+ } else if (BACKUP_COMMAND.CANCEL.name().equalsIgnoreCase(type)) {
+ System.out.println(CANCEL_CMD_USAGE);
+ } else if (BACKUP_COMMAND.SET.name().equalsIgnoreCase(type)) {
+ System.out.println(SET_CMD_USAGE);
+ } else {
+ System.out.println("Unknown command : " + type);
+ System.out.println(USAGE);
+ }
+ System.exit(0);
+ }
+ }
+
+ private static class DescribeCommand implements Command {
+ CommandLine cmdline;
+
+ DescribeCommand(CommandLine cmdline) {
+ this.cmdline = cmdline;
+ }
+
+ @Override
+ public void execute() throws IOException {
+ if (cmdline == null || cmdline.getArgs() == null) {
+ System.out.println("ERROR: missing arguments");
+ System.out.println(DESCRIBE_CMD_USAGE);
+ System.exit(-1);
+ }
+ String[] args = cmdline.getArgs();
+ if (args.length != 1) {
+ System.out.println("ERROR: wrong number of arguments");
+ System.out.println(DESCRIBE_CMD_USAGE);
+ System.exit(-1);
+ }
+
+ String backupId = args[0];
+ try {
+ BackupClient.describeBackupImage(backupId);
+ } catch (RuntimeException e) {
+ System.out.println("ERROR: " + e.getMessage());
+ System.exit(-1);
+ }
+ }
+ }
+
+ private static class ProgressCommand implements Command {
+ CommandLine cmdline;
+
+ ProgressCommand(CommandLine cmdline) {
+ this.cmdline = cmdline;
+ }
+
+ @Override
+ public void execute() throws IOException {
+ String[] args = cmdline == null ? null : cmdline.getArgs();
+ if (args == null || args.length == 0) {
+ System.out.println("No backup id was specified, "
+ + "will retrieve the most recent (ongoing) sessions");
+ } else if (args.length > 1) {
+ System.out.println("ERROR: wrong number of arguments: " + args.length);
+ System.out.println(PROGRESS_CMD_USAGE);
+ System.exit(-1);
+ }
+
+ String backupId = (args == null || args.length == 0) ? null : args[0];
+ try {
+ BackupClient.showProgress(backupId);
+ } catch (RuntimeException e) {
+ System.out.println("ERROR: " + e.getMessage());
+ System.exit(-1);
+ }
+ }
+ }
+
+ private static class DeleteCommand implements Command {
+ CommandLine cmdline;
+
+ DeleteCommand(CommandLine cmdline) {
+ this.cmdline = cmdline;
+ }
+
+ @Override
+ public void execute() throws IOException {
+ if (cmdline == null || cmdline.getArgs() == null || cmdline.getArgs().length == 0) {
+ System.out.println("No backup id(s) were specified");
+ System.out.println(DELETE_CMD_USAGE);
+ System.exit(-1);
+ }
+ String[] args = cmdline.getArgs();
+
+ try {
+ BackupClient.deleteBackups(args);
+ } catch (RuntimeException e) {
+ System.out.println("ERROR: " + e.getMessage());
+ System.exit(-1);
+ }
+ }
+ }
+
+ private static class CancelCommand implements Command {
+ CommandLine cmdline;
+
+ CancelCommand(CommandLine cmdline) {
+ this.cmdline = cmdline;
+ }
+
+ @Override
+ public void execute() throws IOException {
+ if (cmdline == null || cmdline.getArgs() == null) {
+ System.out.println("No backup id(s) was specified, will use the most recent one");
+ }
+ String[] args = cmdline.getArgs();
+ String backupId = args == null || args.length == 0 ? null : args[0];
+ try {
+ BackupClient.cancelBackup(backupId);
+ } catch (RuntimeException e) {
+ System.out.println("ERROR: " + e.getMessage());
+ System.exit(-1);
+ }
+ }
+ }
+
+ private static class HistoryCommand implements Command {
+ CommandLine cmdline;
+
+ HistoryCommand(CommandLine cmdline) {
+ this.cmdline = cmdline;
+ }
+
+ @Override
+ public void execute() throws IOException {
+
+ int n =
+ cmdline == null || cmdline.getArgs() == null || cmdline.getArgs().length == 0 ? 10
+ : parseHistoryLength();
+ try {
+ BackupClient.showHistory(n);
+ } catch (RuntimeException e) {
+ System.out.println("ERROR: " + e.getMessage());
+ System.exit(-1);
+ }
+ }
+
+ private int parseHistoryLength() {
+ String value = cmdline.getOptionValue("n");
+ if (value == null) {
+ throw new RuntimeException("Invalid command line: -n requires a value");
+ }
+ return Integer.parseInt(value);
+ }
+ }
+
+ private static class BackupSetCommand implements Command {
+ private final static String SET_ADD_CMD = "add";
+ private final static String SET_REMOVE_CMD = "remove";
+ private final static String SET_DELETE_CMD = "delete";
+ private final static String SET_DESCRIBE_CMD = "describe";
+ private final static String SET_LIST_CMD = "list";
+
+ CommandLine cmdline;
+
+ BackupSetCommand(CommandLine cmdline) {
+ this.cmdline = cmdline;
+ }
+
+ @Override
+ public void execute() throws IOException {
+
+ // Command-line must have at least one element
+ if (cmdline == null || cmdline.getArgs() == null
+ || cmdline.getArgs().length == 0) {
+ throw new IOException("Invalid command line format");
+ }
+ String[] args = cmdline.getArgs();
+ String cmdStr = args[0];
+ BACKUP_COMMAND cmd = getCommand(cmdStr);
+
+ try {
+
+ switch (cmd) {
+ case SET_ADD:
+ processSetAdd(args);
+ break;
+ case SET_REMOVE:
+ processSetRemove(args);
+ break;
+ case SET_DELETE:
+ processSetDelete(args);
+ break;
+ case SET_DESCRIBE:
+ processSetDescribe(args);
+ break;
+ case SET_LIST:
+ processSetList(args);
+ break;
+ default:
+ break;
+ }
+ } catch (RuntimeException e) {
+ System.out.println("ERROR: " + e.getMessage());
+ System.exit(-1);
+ }
+ }
+
+ private void processSetList(String[] args) {
+ // List all backup set names
+ // does not expect any args
+ BackupClient.backupSetList();
+ }
+
+ private void processSetDescribe(String[] args) {
+ if (args == null || args.length != 2) {
+ throw new RuntimeException("Wrong number of arguments for 'set describe'");
+ }
+ String setName = args[1];
+ BackupClient.backupSetDescribe(setName);
+ }
+
+ private void processSetDelete(String[] args) {
+ if (args == null || args.length != 2) {
+ throw new RuntimeException("Wrong number of arguments for 'set delete'");
+ }
+ String setName = args[1];
+ BackupClient.backupSetDelete(setName);
+ }
+
+ private void processSetRemove(String[] args) {
+ if (args == null || args.length != 3) {
+ throw new RuntimeException("Wrong number of arguments for 'set remove'");
+ }
+ String setName = args[1];
+ String[] tables = args[2].split(",");
+ BackupClient.backupSetRemove(setName, tables);
+ }
+
+ private void processSetAdd(String[] args) {
+ if (args == null || args.length != 3) {
+ throw new RuntimeException("Wrong number of arguments for 'set add'");
+ }
+ String setName = args[1];
+ String[] tables = args[2].split(",");
+ BackupClient.backupSetAdd(setName, tables);
+ }
+
+ private BACKUP_COMMAND getCommand(String cmdStr) throws IOException {
+ if (cmdStr.equals(SET_ADD_CMD)) {
+ return BACKUP_COMMAND.SET_ADD;
+ } else if (cmdStr.equals(SET_REMOVE_CMD)) {
+ return BACKUP_COMMAND.SET_REMOVE;
+ } else if (cmdStr.equals(SET_DELETE_CMD)) {
+ return BACKUP_COMMAND.SET_DELETE;
+ } else if (cmdStr.equals(SET_DESCRIBE_CMD)) {
+ return BACKUP_COMMAND.SET_DESCRIBE;
+ } else if (cmdStr.equals(SET_LIST_CMD)) {
+ return BACKUP_COMMAND.SET_LIST;
+ } else {
+ throw new IOException("Unknown command for 'set': " + cmdStr);
+ }
+ }
+
+ }
+}
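
The class is a straightforward factory plus command pattern: BackupClient.parseAndRun builds a CommandLine and dispatches through createCommand. A hedged sketch of driving it directly; it must live in the org.apache.hadoop.hbase.backup package, since BackupCommands is package-private.

    package org.apache.hadoop.hbase.backup;

    import org.apache.commons.cli.CommandLine;
    import org.apache.commons.cli.Options;
    import org.apache.commons.cli.PosixParser;
    import org.apache.hadoop.hbase.backup.BackupRestoreConstants.BACKUP_COMMAND;

    public class CommandDemo {
      public static void main(String[] args) throws Exception {
        // Parse an empty command line and dispatch HELP, which prints the
        // USAGE text above and exits.
        CommandLine cmdline = new PosixParser().parse(new Options(), new String[0]);
        BackupCommands.createCommand(BACKUP_COMMAND.HELP, cmdline).execute();
      }
    }
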
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupContext.java hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupContext.java
new file mode 100644
index 0000000..8bba3d7
--- /dev/null
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupContext.java
@@ -0,0 +1,380 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.backup;
+
+import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.io.ObjectInputStream;
+import java.io.ObjectOutputStream;
+import java.io.Serializable;
+import java.util.ArrayList;
+import java.util.Calendar;
+import java.util.Date;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Set;
+
+import org.apache.hadoop.hbase.backup.BackupHandler.BACKUPSTATUS;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+
+/**
+ * An object to encapsulate the information for each backup request
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public class BackupContext implements Serializable {
+
+ private static final long serialVersionUID = 2401435114454300992L;
+
+ // backup id: a timestamp when we request the backup
+ private String backupId;
+ // distributed copy job id
+ byte[] jobId;
+
+ public byte[] getJobId() {
+ return jobId;
+ }
+
+ public void setJobId(byte[] jobId) {
+ this.jobId = jobId;
+ }
+
+ // backup type, full or incremental
+ private String type;
+
+ // target root directory for storing the backup files
+ private String targetRootDir;
+
+ // Number of parallel workers
+ private int workers = -1;
+
+ // Bandwidth per worker in MB per sec
+ private int bandwidth = -1;
+
+ // overall backup status
+ private BackupHandler.BACKUPSTATUS flag;
+
+ // overall backup phase
+ private BackupHandler.BACKUPPHASE phase;
+
+ // overall backup failure message
+ private String failedMsg;
+
+ // backup status map for all tables
+ private Map<String, BackupStatus> backupStatusMap;
+
+ // actual start timestamp of the backup process
+ private long startTs;
+
+ // actual end timestamp of the backup process, could be fail or complete
+ private long endTs;
+
+ // the total bytes of incremental logs copied
+ private long totalBytesCopied;
+
+ // for incremental backup, the location of the backed-up hlogs
+ private String hlogTargetDir = null;
+
+ // incremental backup file list
+ private transient List<String> incrBackupFileList;
+
+ // new region server log timestamps for table set after distributed log roll
+ // key - table name, value - map of RegionServer hostname -> last log rolled timestamp
+ private transient HashMap<String, HashMap<String, String>> tableSetTimestampMap;
+
+ // cancel flag
+ private boolean cancelled = false;
+ // backup progress string
+
+ private String progress;
+
+ public BackupContext() {
+ }
+
+ public BackupContext(String backupId, String type, String[] tables, String targetRootDir,
+ String snapshot, int workers, int bandwidth) {
+ super();
+
+ if (backupStatusMap == null) {
+ backupStatusMap = new HashMap<String, BackupStatus>();
+ }
+
+ this.backupId = backupId;
+ this.type = type;
+ this.targetRootDir = targetRootDir;
+
+ this.addTables(tables);
+
+ if (type.equals(BackupRestoreConstants.BACKUP_TYPE_INCR)) {
+ setHlogTargetDir(HBackupFileSystem.getLogBackupDir(targetRootDir, backupId));
+ }
+
+ this.startTs = this.endTs = 0;
+
+ }
+
+ /**
+ * Set the progress string
+ * @param msg progress message
+ */
+ public void setProgress(String msg) {
+ this.progress = msg;
+ }
+
+ /**
+ * Get the current progress message
+ */
+ public String getProgress() {
+ return progress;
+ }
+
+ /**
+ * Mark cancel flag.
+ */
+ public void markCancel() {
+ this.cancelled = true;
+ }
+
+ /**
+ * Whether this backup has been marked as cancelled.
+ * @return true if marked as cancelled
+ */
+ public boolean isCancelled() {
+ return this.cancelled;
+ }
+
+ public String getBackupId() {
+ return backupId;
+ }
+
+ public void setBackupId(String backupId) {
+ this.backupId = backupId;
+ }
+
+ public BackupStatus getBackupStatus(String table) {
+ return this.backupStatusMap.get(table);
+ }
+
+ public String getFailedMsg() {
+ return failedMsg;
+ }
+
+ public void setFailedMsg(String failedMsg) {
+ this.failedMsg = failedMsg;
+ }
+
+ public long getStartTs() {
+ return startTs;
+ }
+
+ public void setStartTs(long startTs) {
+ this.startTs = startTs;
+ }
+
+ public long getEndTs() {
+ return endTs;
+ }
+
+ public void setEndTs(long endTs) {
+ this.endTs = endTs;
+ }
+
+ public long getTotalBytesCopied() {
+ return totalBytesCopied;
+ }
+
+ public BackupHandler.BACKUPSTATUS getFlag() {
+ return flag;
+ }
+
+ public void setFlag(BackupHandler.BACKUPSTATUS flag) {
+ this.flag = flag;
+ }
+
+ public BackupHandler.BACKUPPHASE getPhase() {
+ return phase;
+ }
+
+ public void setPhase(BackupHandler.BACKUPPHASE phase) {
+ this.phase = phase;
+ }
+
+ public String getType() {
+ return type;
+ }
+
+ public void setSnapshotName(String table, String snapshotName) {
+ this.backupStatusMap.get(table).setSnapshotName(snapshotName);
+ }
+
+ public String getSnapshotName(String table) {
+ return this.backupStatusMap.get(table).getSnapshotName();
+ }
+
+ public List<String> getSnapshotNames() {
+ List<String> snapshotNames = new ArrayList<String>();
+ for (BackupStatus backupStatus : this.backupStatusMap.values()) {
+ snapshotNames.add(backupStatus.getSnapshotName());
+ }
+ return snapshotNames;
+ }
+
+ public Set<String> getTables() {
+ return this.backupStatusMap.keySet();
+ }
+
+ public String getTableListAsString() {
+ return BackupUtil.concat(backupStatusMap.keySet(), ";");
+ }
+
+ public void addTables(String[] tables) {
+ for (String table : tables) {
+ BackupStatus backupStatus = new BackupStatus(table, this.targetRootDir, this.backupId);
+ this.backupStatusMap.put(table, backupStatus);
+ }
+ }
+
+ public String getTargetRootDir() {
+ return targetRootDir;
+ }
+
+ public void setHlogTargetDir(String hlogTargetDir) {
+ this.hlogTargetDir = hlogTargetDir;
+ }
+
+ public String getHLogTargetDir() {
+ return hlogTargetDir;
+ }
+
+ public List<String> getIncrBackupFileList() {
+ return incrBackupFileList;
+ }
+
+ public List<String> setIncrBackupFileList(List<String> incrBackupFileList) {
+ return this.incrBackupFileList = incrBackupFileList;
+ }
+
+ /**
+ * Set the new region server log timestamps after distributed log roll
+ * @param newTableSetTimestampMap region server log timestamps after log roll
+ */
+ public void setIncrTimestampMap(
+ HashMap<String, HashMap<String, String>> newTableSetTimestampMap) {
+ this.tableSetTimestampMap = newTableSetTimestampMap;
+ }
+
+ /**
+ * Get new region server log timestamps after distributed log roll
+ * @return new region server log timestamps
+ */
+ public HashMap<String, HashMap<String, String>> getIncrTimestampMap() {
+ return this.tableSetTimestampMap;
+ }
+
+ /**
+ * Get existing snapshot if backing up from existing snapshot.
+ * @return The existing snapshot, null if not backing up from existing snapshot
+ */
+ public String getExistingSnapshot() {
+ // this feature will be supported in another Jira
+ return null;
+ }
+
+ public String getShortDescription() {
+ StringBuffer sb = new StringBuffer();
+ sb.append("ID : " + backupId).append("\n");
+ sb.append("Tables : " + getTableListAsString()).append("\n");
+ sb.append("Status : " + getFlag()).append("\n");
+ Date date = null;
+ Calendar cal = Calendar.getInstance();
+ cal.setTimeInMillis(getStartTs());
+ date = cal.getTime();
+ sb.append("Start time : " + date).append("\n");
+ if (flag == BACKUPSTATUS.FAILED) {
+ sb.append("Failed message : " + getFailedMsg()).append("\n");
+ } else if (flag == BACKUPSTATUS.ONGOING) {
+ sb.append("Phase : " + getPhase()).append("\n");
+ sb.append("Progress : " + getProgress()).append("\n");
+ } else if (flag == BACKUPSTATUS.COMPLETE) {
+ cal = Calendar.getInstance();
+ cal.setTimeInMillis(getEndTs());
+ date = cal.getTime();
+ sb.append("End time : " + date).append("\n");
+ }
+ return sb.toString();
+ }
+
+ public String getStatusAndProgressAsString() {
+ StringBuffer sb = new StringBuffer();
+ sb.append("id: ").append(getBackupId()).append(" state: ").append(getFlag())
+ .append(" progress: ").append(getProgress());
+ return sb.toString();
+ }
+
+ /**
+ * Check whether this backup context is for backing up from an existing snapshot or not.
+ * @return true if it is for backing up from an existing snapshot, otherwise false
+ */
+ public boolean fromExistingSnapshot() {
+ // this feature will be supported in later jiras
+ return false;
+ }
+
+ public String getTableBySnapshot(String snapshotName) {
+ for (Entry entry : this.backupStatusMap.entrySet()) {
+ if (snapshotName.equals(entry.getValue().getSnapshotName())) {
+ return entry.getKey();
+ }
+ }
+ return null;
+ }
+
+ public byte[] toByteArray() throws IOException {
+ ByteArrayOutputStream baos = new ByteArrayOutputStream();
+ ObjectOutputStream oos = new ObjectOutputStream(baos);
+ oos.writeObject(this);
+ return baos.toByteArray();
+ }
+
+ public static BackupContext fromByteArray(byte[] data) throws IOException, ClassNotFoundException {
+ ByteArrayInputStream bais = new ByteArrayInputStream(data);
+ ObjectInputStream ois = new ObjectInputStream(bais);
+ return (BackupContext) ois.readObject();
+ }
+
+ public int getBandwidth() {
+ return bandwidth;
+ }
+
+ public void setBandwidth(int bandwidth) {
+ this.bandwidth = bandwidth;
+ }
+
+ public int getWorkers() {
+ return workers;
+ }
+
+ public void setWorkers(int workers) {
+ this.workers = workers;
+ }
+}
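
The context is persisted via plain Java serialization (toByteArray/fromByteArray), which is what the BackupSystemTable calls above suggest is stored. A round-trip sketch with illustrative values:

    package org.apache.hadoop.hbase.backup;

    public class ContextRoundTrip {
      public static void main(String[] args) throws Exception {
        BackupContext ctx = new BackupContext("backup_1398729212626", "full",
            new String[] { "t1" }, "hdfs://namenode:8020/backup", null, -1, -1);
        // Serialize and deserialize the context, as the system table would
        byte[] bytes = ctx.toByteArray();
        BackupContext restored = BackupContext.fromByteArray(bytes);
        System.out.println(restored.getShortDescription());
      }
    }
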
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupCopyService.java hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupCopyService.java
new file mode 100644
index 0000000..ab34f24
--- /dev/null
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupCopyService.java
@@ -0,0 +1,40 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.backup;
+
+import java.io.IOException;
+
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+import org.apache.hadoop.conf.Configurable;
+import org.apache.hadoop.conf.Configuration;
+
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public interface BackupCopyService extends Configurable {
+ static enum Type {
+ FULL, INCREMENTAL
+ }
+
+ public int copy(BackupHandler backupHandler, Configuration conf, BackupCopyService.Type copyType,
+ String[] options) throws IOException;
+
+ public void cancelJob(byte[] jobId) throws IOException;
+
+}
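
BackupCopyService is the pluggable copy layer obtained through BackupRestoreServiceFactory (see cancelBackup in BackupClient above). A minimal no-op sketch of an implementor, for illustration only; a real implementation would wrap a DistCp or WAL copy job.

    package org.apache.hadoop.hbase.backup;

    import java.io.IOException;

    import org.apache.hadoop.conf.Configuration;

    public class NoOpBackupCopyService implements BackupCopyService {
      private Configuration conf;

      @Override
      public void setConf(Configuration conf) {
        this.conf = conf;
      }

      @Override
      public Configuration getConf() {
        return conf;
      }

      @Override
      public int copy(BackupHandler backupHandler, Configuration conf,
          BackupCopyService.Type copyType, String[] options) throws IOException {
        // A real service would launch a DistCp (FULL) or WAL copy (INCREMENTAL)
        // job here and return its exit code; 0 means success.
        return 0;
      }

      @Override
      public void cancelJob(byte[] jobId) throws IOException {
        // A real service would kill the running distributed job by its id.
      }
    }
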
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupException.java hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupException.java
new file mode 100644
index 0000000..649df34
--- /dev/null
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupException.java
@@ -0,0 +1,85 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.backup;
+
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+import org.apache.hadoop.hbase.HBaseIOException;
+
+/**
+ * Backup exception
+ */
+@SuppressWarnings("serial")
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public class BackupException extends HBaseIOException {
+ private BackupContext description;
+
+ /**
+ * An exception happened for a backup where we don't even know which backup it was about
+ * @param msg Full description of the failure
+ */
+ public BackupException(String msg) {
+ super(msg);
+ }
+
+ /**
+ * Some exception happened for a backup, with a root cause
+ * @param cause root cause of the failure
+ */
+ public BackupException(Throwable cause) {
+ super(cause);
+ }
+
+ /**
+ * Exception for the given backup that has no previous root cause
+ * @param msg reason why the backup failed
+ * @param desc description of the backup that is being failed
+ */
+ public BackupException(String msg, BackupContext desc) {
+ super(msg);
+ this.description = desc;
+ }
+
+ /**
+ * Exception for the given backup due to another exception
+ * @param msg reason why the backup failed
+ * @param cause root cause of the failure
+ * @param desc description of the backup that is being failed
+ */
+ public BackupException(String msg, Throwable cause, BackupContext desc) {
+ super(msg, cause);
+ this.description = desc;
+ }
+
+ /**
+ * Exception when the description of the backup cannot be determined, due to some other root
+ * cause
+ * @param message description of what caused the failure
+ * @param e root cause
+ */
+ public BackupException(String message, Exception e) {
+ super(message, e);
+ }
+
+ public BackupContext getBackupContext() {
+ return this.description;
+ }
+
+}
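
The context-carrying constructors let callers attach the failing BackupContext for later inspection. A small hedged usage sketch, with an illustrative message and context:

    package org.apache.hadoop.hbase.backup;

    public class BackupExceptionDemo {
      static void failBackup(BackupContext ctx) throws BackupException {
        // Attach the failing context so the caller can inspect it
        throw new BackupException("copy phase failed", ctx);
      }

      public static void main(String[] args) {
        try {
          failBackup(new BackupContext());
        } catch (BackupException e) {
          System.out.println(e.getMessage() + ", context=" + e.getBackupContext());
        }
      }
    }
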
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupHandler.java hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupHandler.java
new file mode 100644
index 0000000..0ce38f4
--- /dev/null
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupHandler.java
@@ -0,0 +1,832 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.backup;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.concurrent.Callable;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.backup.BackupManifest.BackupImage;
+import org.apache.hadoop.hbase.backup.master.LogRollMasterProcedureManager;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
+import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+import org.apache.hadoop.hbase.util.FSUtils;
+import org.apache.zookeeper.KeeperException;
+import org.apache.zookeeper.KeeperException.NoNodeException;
+
+
+/**
+ * A handler that carries out the operations of a backup request and tracks its progress
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public class BackupHandler implements Callable