{
+ RSRollLogTask() {
+ }
+
+ @Override
+ public Void call() throws Exception {
+ hlog = (FSHLog) rss.getWAL(null);
+ long filename = hlog.getFilenum();
+ LOG.info("Trying to roll log in backup subprocedure, current log number: " + filename);
+ hlog.rollWriter(true);
+ LOG.info("After roll log in backup subprocedure, current log number: " + hlog.getFilenum());
+ // write the log number to ZK.
+ String logrollZNode = ZKUtil.joinZNode(zkw.baseZNode, BackupRestoreConstants.LOG_ROLL_ZNODE);
+ String thisRSNode = ZKUtil.joinZNode(logrollZNode, rss.getServerName().getHostname());
+ ZKUtil.createSetData(zkw, thisRSNode, Long.toString(filename).getBytes());
+
+ return null;
+ }
+
+ }
+
+ private void rolllog() throws ForeignException {
+
+ monitor.rethrowException();
+
+ taskManager.submitTask(new RSRollLogTask());
+ monitor.rethrowException();
+
+ // wait for everything to complete.
+ taskManager.waitForOutstandingTasks();
+ monitor.rethrowException();
+
+ }
+
+ @Override
+ public void acquireBarrier() throws ForeignException {
+ // Do nothing; all the work for this procedure happens in the inside-barrier step.
+ }
+
+ /**
+ * Perform a log roll on this region server for backup purposes.
+ * @return data returned to the coordinator; always null for this subprocedure
+ */
+ @Override
+ public byte[] insideBarrier() throws ForeignException {
+ rolllog();
+ // FIXME
+ return null;
+ }
+
+ /**
+ * Cancel threads if they haven't finished.
+ */
+ @Override
+ public void cleanup(Exception e) {
+ taskManager.abort("Aborting log roll subprocedure tasks for backup due to error", e);
+ }
+
+ /**
+ * Nothing to release; this subprocedure holds no barrier resources.
+ */
+ public void releaseBarrier() {
+ // NO OP
+ }
+
+}
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/backup/regionserver/RegionServerBackupManager.java hbase-server/src/main/java/org/apache/hadoop/hbase/backup/regionserver/RegionServerBackupManager.java
new file mode 100644
index 0000000..a200dcc
--- /dev/null
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/backup/regionserver/RegionServerBackupManager.java
@@ -0,0 +1,166 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.backup.regionserver;
+
+
+import java.io.IOException;
+import java.util.concurrent.ThreadPoolExecutor;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher;
+import org.apache.hadoop.hbase.procedure.ProcedureMember;
+import org.apache.hadoop.hbase.procedure.ProcedureMemberRpcs;
+import org.apache.hadoop.hbase.procedure.RegionServerProcedureManager;
+import org.apache.hadoop.hbase.procedure.Subprocedure;
+import org.apache.hadoop.hbase.procedure.SubprocedureFactory;
+import org.apache.hadoop.hbase.procedure.ZKProcedureMemberRpcs;
+import org.apache.hadoop.hbase.regionserver.HRegionServer;
+import org.apache.hadoop.hbase.regionserver.RegionServerServices;
+import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
+import org.apache.zookeeper.KeeperException;
+
+import org.apache.hadoop.hbase.backup.master.LogRollMasterProcedureManager;
+
+/**
+ * This manager class handles the work dealing with backup for a {@link HRegionServer}.
+ *
+ * This provides the mechanism necessary to kick off a backup specific {@link Subprocedure} that is
+ * responsible by this region server. If any failures occur with the subprocedure, the manager's
+ * procedure member notifies the procedure coordinator to abort all others.
+ *
+ * On startup, requires {@link #start()} to be called.
+ *
+ * On shutdown, requires {@link org.apache.hadoop.hbase.procedure.ProcedureMember#close()} to be
+ * called.
+ */
+public class RegionServerBackupManager extends RegionServerProcedureManager {
+
+ private static final Log LOG = LogFactory.getLog(RegionServerBackupManager.class);
+
+ /** Conf key for number of request threads to start backup on regionservers */
+ public static final String BACKUP_REQUEST_THREADS_KEY = "hbase.backup.region.pool.threads";
+ /** # of threads for backup work on the rs. */
+ public static final int BACKUP_REQUEST_THREADS_DEFAULT = 10;
+
+ public static final String BACKUP_TIMEOUT_MILLIS_KEY = "hbase.backup.timeout";
+ public static final long BACKUP_TIMEOUT_MILLIS_DEFAULT = 60000;
+
+ /** Conf key for millis between checks to see if backup work completed or if there are errors */
+ public static final String BACKUP_REQUEST_WAKE_MILLIS_KEY = "hbase.backup.region.wakefrequency";
+ /** Default amount of time to check for errors while regions finish backup work */
+ private static final long BACKUP_REQUEST_WAKE_MILLIS_DEFAULT = 500;
+
+ private RegionServerServices rss;
+ private ProcedureMemberRpcs memberRpcs;
+ private ProcedureMember member;
+
+ /**
+ * Create a default backup procedure manager
+ */
+ public RegionServerBackupManager() {
+ }
+
+ /**
+ * Start accepting backup procedure requests.
+ */
+ @Override
+ public void start() {
+ this.memberRpcs.start(rss.getServerName().toString(), member);
+ LOG.info("Started region server backup manager.");
+ }
+
+ /**
+ * Close this and all running backup procedure tasks
+ * @param force forcefully stop all running tasks
+ * @throws IOException
+ */
+ @Override
+ public void stop(boolean force) throws IOException {
+ String mode = force ? "abruptly" : "gracefully";
+ LOG.info("Stopping RegionServerBackupManager " + mode + ".");
+
+ try {
+ this.member.close();
+ } finally {
+ this.memberRpcs.close();
+ }
+ }
+
+ /**
+ * If in a running state, creates the specified subprocedure for handling a backup procedure.
+ * @return Subprocedure to submit to the ProcedureMember.
+ */
+ public Subprocedure buildSubprocedure() {
+
+ // don't run a backup if the parent is stop(ping)
+ if (rss.isStopping() || rss.isStopped()) {
+ throw new IllegalStateException("Can't start backup procedure on RS: " + rss.getServerName()
+ + ", because stopping/stopped!");
+ }
+
+ LOG.info("Attempting to run a roll log procedure for backup.");
+ ForeignExceptionDispatcher errorDispatcher = new ForeignExceptionDispatcher();
+ Configuration conf = rss.getConfiguration();
+ long timeoutMillis = conf.getLong(BACKUP_TIMEOUT_MILLIS_KEY, BACKUP_TIMEOUT_MILLIS_DEFAULT);
+ long wakeMillis =
+ conf.getLong(BACKUP_REQUEST_WAKE_MILLIS_KEY, BACKUP_REQUEST_WAKE_MILLIS_DEFAULT);
+
+ BackupSubprocedurePool taskManager =
+ new BackupSubprocedurePool(rss.getServerName().toString(), conf);
+ return new LogRollBackupSubprocedure(rss, member, errorDispatcher, wakeMillis, timeoutMillis,
+ taskManager);
+
+ }
+
+ /**
+ * Build the actual backup procedure runner that will do all the 'hard' work
+ */
+ public class BackupSubprocedureBuilder implements SubprocedureFactory {
+
+ @Override
+ public Subprocedure buildSubprocedure(String name, byte[] data) {
+ return RegionServerBackupManager.this.buildSubprocedure();
+ }
+ }
+
+ @Override
+ public void initialize(RegionServerServices rss) throws KeeperException {
+ this.rss = rss;
+ ZooKeeperWatcher zkw = rss.getZooKeeper();
+ this.memberRpcs =
+ new ZKProcedureMemberRpcs(zkw, LogRollMasterProcedureManager.ROLLLOG_PROCEDURE_SIGNATURE);
+
+ // read in the backup handler configuration properties
+ Configuration conf = rss.getConfiguration();
+ long keepAlive = conf.getLong(BACKUP_TIMEOUT_MILLIS_KEY, BACKUP_TIMEOUT_MILLIS_DEFAULT);
+ int opThreads = conf.getInt(BACKUP_REQUEST_THREADS_KEY, BACKUP_REQUEST_THREADS_DEFAULT);
+ // create the actual cohort member
+ ThreadPoolExecutor pool =
+ ProcedureMember.defaultPool(rss.getServerName().toString(), opThreads, keepAlive);
+ this.member = new ProcedureMember(memberRpcs, pool, new BackupSubprocedureBuilder());
+ }
+
+ @Override
+ public String getProcedureSignature() {
+ return "backup-proc";
+ }
+
+}
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java
index b118ecd..adbb7cd 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java
@@ -922,6 +922,16 @@ public class FSHLog implements WAL {
return computeFilename(this.filenum.get());
}
+
+ /**
+ * Retained to support the old WAL API; exposes the current log file number.
+ * @return current file number (timestamp)
+ */
+ public long getFilenum()
+ {
+ return filenum.get();
+ }
+
@Override
public String toString() {
return "FSHLog " + logFilePrefix + ":" + logFileSuffix + "(num " + filenum + ")";
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotCopy.java hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotCopy.java
new file mode 100644
index 0000000..0360000
--- /dev/null
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotCopy.java
@@ -0,0 +1,42 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.snapshot;
+
+import org.apache.hadoop.hbase.backup.BackupHandler;
+
+/* This class will be extended in a future JIRA to support progress reporting. */
+public class SnapshotCopy extends ExportSnapshot {
+ private BackupHandler backupHandler;
+ private String table;
+
+ public SnapshotCopy(BackupHandler backupHandler, String table) {
+ super();
+ this.backupHandler = backupHandler;
+ this.table = table;
+ }
+
+ public BackupHandler getBackupHandler() {
+ return this.backupHandler;
+ }
+
+ public String getTable() {
+ return this.table;
+ }
+
+}
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java
new file mode 100644
index 0000000..c5bbc38
--- /dev/null
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java
@@ -0,0 +1,180 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.backup;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
+import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.snapshot.SnapshotTestingUtils;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster;
+import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+
+/**
+ * This class is only a base for other integration-level backup tests.
+ * Do not add tests here.
+ * TestBackupSmallTests is where tests that don't require bringing machines up/down should go.
+ * All other tests should have their own classes and extend this one
+ */
+public class TestBackupBase {
+
+ private static final Log LOG = LogFactory.getLog(TestBackupBase.class);
+
+ protected static Configuration conf1;
+ protected static Configuration conf2;
+
+ protected static ZooKeeperWatcher zkw1;
+ protected static ZooKeeperWatcher zkw2;
+
+ protected static HBaseTestingUtility TEST_UTIL;
+ protected static HBaseTestingUtility TEST_UTIL2;
+
+ protected static TableName table1;
+ protected static TableName table2;
+ protected static TableName table3;
+ protected static TableName table4;
+
+ protected static String table1_restore = "table1_restore";
+ protected static String table2_restore = "table2_restore";
+ protected static String table3_restore = "table3_restore";
+ protected static String table4_restore = "table4_restore";
+
+ protected static final int NB_ROWS_IN_BATCH = 100;
+ protected static final byte[] qualName = Bytes.toBytes("q1");
+ protected static final byte[] famName = Bytes.toBytes("f");
+
+ protected static String BACKUP_ROOT_DIR = "/backupUT";
+ protected static String BACKUP_REMOTE_ROOT_DIR = "/backupUT";
+
+ protected static final String BACKUP_ZNODE = "/backup/hbase";
+ protected static final String BACKUP_SUCCEED_NODE = "complete";
+ protected static final String BACKUP_FAILED_NODE = "failed";
+
+
+ /**
+ * @throws java.lang.Exception
+ */
+ @BeforeClass
+ public static void setUpBeforeClass() throws Exception {
+
+ TEST_UTIL = new HBaseTestingUtility();
+ TEST_UTIL.getConfiguration().set("hbase.procedure.regionserver.classes",
+ "org.apache.hadoop.hbase.backup.regionserver.RegionServerBackupManager");
+ TEST_UTIL.getConfiguration().set("hbase.procedure.master.classes",
+ "org.apache.hadoop.hbase.backup.master.LogRollMasterProcedureManager");
+ TEST_UTIL.getConfiguration().set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/1");
+ TEST_UTIL.startMiniZKCluster();
+ MiniZooKeeperCluster miniZK = TEST_UTIL.getZkCluster();
+
+ conf1 = TEST_UTIL.getConfiguration();
+ conf2 = HBaseConfiguration.create(conf1);
+ conf2.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/2");
+ TEST_UTIL2 = new HBaseTestingUtility(conf2);
+ TEST_UTIL2.setZkCluster(miniZK);
+ TEST_UTIL.startMiniCluster();
+ TEST_UTIL2.startMiniCluster();
+ conf1 = TEST_UTIL.getConfiguration();
+
+ TEST_UTIL.startMiniMapReduceCluster();
+ BACKUP_ROOT_DIR = TEST_UTIL.getConfiguration().get("fs.defaultFS") + "/backupUT";
+ LOG.info("ROOTDIR " + BACKUP_ROOT_DIR);
+ BACKUP_REMOTE_ROOT_DIR = TEST_UTIL2.getConfiguration().get("fs.defaultFS") + "/backupUT";
+ LOG.info("REMOTE ROOTDIR " + BACKUP_REMOTE_ROOT_DIR);
+
+ zkw1 = HBaseTestingUtility.getZooKeeperWatcher(TEST_UTIL); //
+ // zkw2 = HBaseTestingUtility.getZooKeeperWatcher(TEST_UTIL2);
+ BackupClient.setConf(conf1);
+ RestoreClient.setConf(conf1);
+ createTables();
+ }
+
+ /**
+ * @throws java.lang.Exception
+ */
+ @AfterClass
+ public static void tearDownAfterClass() throws Exception {
+ SnapshotTestingUtils.deleteAllSnapshots(TEST_UTIL.getHBaseAdmin());
+ SnapshotTestingUtils.deleteArchiveDirectory(TEST_UTIL);
+ zkw1.close();
+ TEST_UTIL2.shutdownMiniCluster();
+ TEST_UTIL.shutdownMiniCluster();
+ TEST_UTIL.shutdownMiniMapReduceCluster();
+ }
+
+ protected static void loadTable(HTable table) throws Exception {
+
+ Put p; // 100 + 1 row to t1_syncup
+ for (int i = 0; i < NB_ROWS_IN_BATCH; i++) {
+ p = new Put(Bytes.toBytes("row" + i));
+ p.add(famName, qualName, Bytes.toBytes("val" + i));
+ table.put(p);
+ }
+ }
+
+ protected static void createTables() throws Exception {
+
+ long tid = System.currentTimeMillis();
+ table1 = TableName.valueOf("test-" + tid);
+
+ HBaseAdmin ha = TEST_UTIL.getHBaseAdmin();
+ HTableDescriptor desc = new HTableDescriptor(table1);
+ HColumnDescriptor fam = new HColumnDescriptor(famName);
+ desc.addFamily(fam);
+ ha.createTable(desc);
+ Connection conn = ConnectionFactory.createConnection(conf1);
+
+ HTable table = (HTable) conn.getTable(table1);
+ loadTable(table);
+ table.close();
+
+ table2 = TableName.valueOf("test-" + tid + 1);
+ desc = new HTableDescriptor(table2);
+ desc.addFamily(fam);
+ ha.createTable(desc);
+ table = (HTable) conn.getTable(table2);
+ loadTable(table);
+ table.close();
+
+ table3 = TableName.valueOf("test-" + tid + 2);
+ table = TEST_UTIL.createTable(table3.getName(), famName);
+ table.close();
+
+ table4 = TableName.valueOf("test-" + tid + 3);
+ table = TEST_UTIL.createTable(table4.getName(), famName);
+ table.close();
+ ha.close();
+ conn.close();
+ }
+
+
+}
+
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBoundaryTests.java hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBoundaryTests.java
new file mode 100644
index 0000000..07650fa
--- /dev/null
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBoundaryTests.java
@@ -0,0 +1,105 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.backup;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.testclassification.LargeTests;
+import org.apache.hadoop.hbase.zookeeper.ZKUtil;
+import org.hamcrest.CoreMatchers;
+import org.junit.Assert;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+@Category(LargeTests.class)
+public class TestBackupBoundaryTests extends TestBackupBase {
+
+ private static final Log LOG = LogFactory.getLog(TestBackupBoundaryTests.class);
+
+ /**
+ * Verify that full backup is created on a single empty table correctly.
+ * @throws Exception
+ */
+ @Test
+ public void testFullBackupSingleEmpty() throws Exception {
+
+ LOG.info("create full backup image on single table");
+ LOG.info(ZKUtil.listChildrenNoWatch(zkw1, zkw1.baseZNode).toString());
+
+ String backupId =
+ BackupClient.create("full", BACKUP_ROOT_DIR, table3.getNameAsString(), null);
+ LOG.info("Finished Backup");
+ String succeedZNode = ZKUtil.joinZNode(BACKUP_ZNODE, BACKUP_SUCCEED_NODE);
+ String backupNode = ZKUtil.joinZNode(succeedZNode, backupId);
+ Assert.assertThat(ZKUtil.checkExists(zkw1, backupNode), CoreMatchers.not(-1));
+
+ }
+
+ /**
+ * Verify that full backup is created on multiple empty tables correctly.
+ * @throws Exception
+ */
+ @Test
+ public void testFullBackupMultipleEmpty() throws Exception {
+ LOG.info("create full backup image on mulitple empty tables");
+ String tableset =
+ table3.getNameAsString() + BackupRestoreConstants.TABLENAME_DELIMITER_IN_COMMAND
+ + table4.getNameAsString();
+ String backupId = BackupClient.create("full", BACKUP_ROOT_DIR, tableset, null);
+ String succeedZNode = ZKUtil.joinZNode(BACKUP_ZNODE, BACKUP_SUCCEED_NODE);
+ String backupNode = ZKUtil.joinZNode(succeedZNode, backupId);
+ Assert.assertThat(ZKUtil.checkExists(zkw1, backupNode), CoreMatchers.not(-1));
+ }
+
+ /**
+ * Verify that full backup fails on a single table that does not exist.
+ * @throws Exception
+ */
+ @Test(expected = RuntimeException.class)
+ public void testFullBackupSingleDNE() throws Exception {
+
+ LOG.info("test full backup fails on a single table that does not exist");
+ BackupClient.create("full", BACKUP_ROOT_DIR, "tabledne", null);
+ }
+
+ /**
+ * Verify that full backup fails on multiple tables that do not exist.
+ * @throws Exception
+ */
+ @Test(expected = RuntimeException.class)
+ public void testFullBackupMultipleDNE() throws Exception {
+
+ LOG.info("test full backup fails on multiple tables that do not exist");
+ BackupClient.create("full", BACKUP_ROOT_DIR, "table1dne,table2dne", null);
+ }
+
+ /**
+ * Verify that full backup fails on tableset containing real and fake tables.
+ * @throws Exception
+ */
+ @Test(expected = RuntimeException.class)
+ public void testFullBackupMixExistAndDNE() throws Exception {
+ LOG.info("create full backup fails on tableset containing real and fake table");
+ String tableset =
+ table1.getNameAsString() + BackupRestoreConstants.TABLENAME_DELIMITER_IN_COMMAND
+ + "tabledne";
+ BackupClient.create("full", BACKUP_ROOT_DIR, tableset, null);
+ }
+
+}
\ No newline at end of file
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestFullBackup.java hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestFullBackup.java
new file mode 100644
index 0000000..0b258bb
--- /dev/null
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestFullBackup.java
@@ -0,0 +1,88 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
+ * agreements. See the NOTICE file distributed with this work for additional information regarding
+ * copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License. You may obtain a
+ * copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable
+ * law or agreed to in writing, software distributed under the License is distributed on an "AS IS"
+ * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License
+ * for the specific language governing permissions and limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.backup;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.testclassification.LargeTests;
+import org.apache.hadoop.hbase.zookeeper.ZKUtil;
+import org.hamcrest.CoreMatchers;
+import org.junit.Assert;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+@Category(LargeTests.class)
+public class TestFullBackup extends TestBackupBase {
+
+ private static final Log LOG = LogFactory.getLog(TestFullBackup.class);
+
+ /**
+ * Verify that full backup is created on a single table with data correctly.
+ * @throws Exception
+ */
+ @Test
+ public void testFullBackupSingle() throws Exception {
+
+ LOG.info("test full backup on a single table with data");
+ String backupId =
+ BackupClient.create("full", BACKUP_ROOT_DIR, table1.getNameAsString(), null);
+ LOG.info("backup complete");
+ String succeedZNode = ZKUtil.joinZNode(BACKUP_ZNODE, BACKUP_SUCCEED_NODE);
+ String backupNode = ZKUtil.joinZNode(succeedZNode, backupId);
+ Assert.assertThat(ZKUtil.checkExists(zkw1, backupNode), CoreMatchers.not(-1));
+ }
+
+ /**
+ * Verify that full backup is created on multiple tables correctly.
+ * @throws Exception
+ */
+ @Test
+ public void testFullBackupMultiple() throws Exception {
+ LOG.info("create full backup image on multiple tables with data");
+ String tableset =
+ table1.getNameAsString() + BackupRestoreConstants.TABLENAME_DELIMITER_IN_COMMAND
+ + table2.getNameAsString();
+ String backupId = BackupClient.create("full", BACKUP_ROOT_DIR, tableset, null);
+ String succeedZNode = ZKUtil.joinZNode(BACKUP_ZNODE, BACKUP_SUCCEED_NODE);
+ String backupNode = ZKUtil.joinZNode(succeedZNode, backupId);
+ Assert.assertThat(ZKUtil.checkExists(zkw1, backupNode), CoreMatchers.not(-1));
+ }
+
+ /**
+ * Verify that full backup is created on all tables correctly.
+ * @throws Exception
+ */
+ @Test
+ public void testFullBackupAll() throws Exception {
+ LOG.info("create full backup image on all tables");
+ String backupId = BackupClient.create("full", BACKUP_ROOT_DIR, null, null);
+ String succeedZNode = ZKUtil.joinZNode(BACKUP_ZNODE, BACKUP_SUCCEED_NODE);
+ String backupNode = ZKUtil.joinZNode(succeedZNode, backupId);
+ Assert.assertThat(ZKUtil.checkExists(zkw1, backupNode), CoreMatchers.not(-1));
+ }
+
+ /**
+ * Verify that full backup is created on a table correctly using a snapshot.
+ * @throws Exception
+ */
+ //@Test
+ //public void testFullBackupUsingSnapshot() throws Exception {
+ // HBaseAdmin hba = new HBaseAdmin(conf1);
+ //String snapshot = "snapshot";
+ //hba.snapshot(snapshot, table1);
+ //LOG.info("create full backup image on a table using snapshot");
+ //String backupId =
+ // BackupClient.create("full", BACKUP_ROOT_DIR, table1.getNameAsString(),
+ // snapshot);
+ // }
+
+}
\ No newline at end of file
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestFullRestore.java hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestFullRestore.java
new file mode 100644
index 0000000..7211b67
--- /dev/null
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestFullRestore.java
@@ -0,0 +1,180 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
+ * agreements. See the NOTICE file distributed with this work for additional information regarding
+ * copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License. You may obtain a
+ * copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable
+ * law or agreed to in writing, software distributed under the License is distributed on an "AS IS"
+ * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License
+ * for the specific language governing permissions and limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.backup;
+
+import static org.junit.Assert.assertTrue;
+
+import java.io.IOException;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.testclassification.LargeTests;
+import org.apache.hadoop.hbase.zookeeper.ZKUtil;
+import org.hamcrest.CoreMatchers;
+import org.junit.Assert;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+@Category(LargeTests.class)
+public class TestFullRestore extends TestBackupBase {
+
+ private static final Log LOG = LogFactory.getLog(TestFullRestore.class);
+
+ /**
+ * Verify that a single table is restored to a new table
+ * @throws Exception
+ */
+ @Test
+ public void testFullRestoreSingle() throws Exception {
+
+ LOG.info("test full restore on a single table empty table");
+ String backupId =
+ BackupClient.create("full", BACKUP_ROOT_DIR, table1.getNameAsString(), null);
+ LOG.info("backup complete");
+ String succeedZNode = ZKUtil.joinZNode(BACKUP_ZNODE, BACKUP_SUCCEED_NODE);
+ String backupNode = ZKUtil.joinZNode(succeedZNode, backupId);
+ Assert.assertThat(ZKUtil.checkExists(zkw1, backupNode), CoreMatchers.not(-1));
+ String[] tableset = new String[] { table1.getNameAsString() };
+ String[] tablemap = new String[] { table1_restore };
+ Path path = new Path(BACKUP_ROOT_DIR);
+ HBackupFileSystem hbfs = new HBackupFileSystem(conf1, path, backupId);
+ RestoreClient.restore_stage1(hbfs, BACKUP_ROOT_DIR, backupId, false, false, tableset, tablemap,
+ false);
+ HBaseAdmin hba = TEST_UTIL.getHBaseAdmin();
+ assertTrue(hba.tableExists(table1_restore));
+ TEST_UTIL.deleteTable(table1_restore);
+ hba.close();
+ }
+
+ /**
+ * Verify that multiple tables are restored to new tables.
+ * @throws Exception
+ */
+ @Test
+ public void testFullRestoreMultiple() throws Exception {
+ LOG.info("create full backup image on multiple tables");
+ String tableset =
+ table2.getNameAsString() + BackupRestoreConstants.TABLENAME_DELIMITER_IN_COMMAND
+ + table3.getNameAsString();
+ String backupId = BackupClient.create("full", BACKUP_ROOT_DIR, tableset, null);
+ String succeedZNode = ZKUtil.joinZNode(BACKUP_ZNODE, BACKUP_SUCCEED_NODE);
+ String backupNode = ZKUtil.joinZNode(succeedZNode, backupId);
+ Assert.assertThat(ZKUtil.checkExists(zkw1, backupNode), CoreMatchers.not(-1));
+ String[] restore_tableset = new String[] { table2.getNameAsString(), table3.getNameAsString() };
+ String[] tablemap = new String[] { table2_restore, table3_restore };
+ Path path = new Path(BACKUP_ROOT_DIR);
+ HBackupFileSystem hbfs = new HBackupFileSystem(conf1, path, backupId);
+ RestoreClient
+.restore_stage1(hbfs, BACKUP_ROOT_DIR, backupId, false, false, restore_tableset,
+ tablemap,
+ false);
+ HBaseAdmin hba = TEST_UTIL.getHBaseAdmin();
+ assertTrue(hba.tableExists(table2_restore));
+ assertTrue(hba.tableExists(table3_restore));
+ TEST_UTIL.deleteTable(table2_restore);
+ TEST_UTIL.deleteTable(table3_restore);
+ hba.close();
+ }
+
+ /**
+ * Verify that a single table is restored using overwrite
+ * @throws Exception
+ */
+ @Test
+ public void testFullRestoreSingleOverwrite() throws Exception {
+
+ LOG.info("test full restore on a single table empty table");
+ String backupId =
+ BackupClient.create("full", BACKUP_ROOT_DIR, table1.getNameAsString(), null);
+ LOG.info("backup complete");
+ String succeedZNode = ZKUtil.joinZNode(BACKUP_ZNODE, BACKUP_SUCCEED_NODE);
+ String backupNode = ZKUtil.joinZNode(succeedZNode, backupId);
+ Assert.assertThat(ZKUtil.checkExists(zkw1, backupNode), CoreMatchers.not(-1));
+ String[] tableset = new String[] { table1.getNameAsString() };
+ Path path = new Path(BACKUP_ROOT_DIR);
+ HBackupFileSystem hbfs = new HBackupFileSystem(conf1, path, backupId);
+ RestoreClient.restore_stage1(hbfs, BACKUP_ROOT_DIR, backupId, false, false, tableset, null,
+ true);
+ }
+
+ /**
+ * Verify that multiple tables are restored to new tables using overwrite.
+ * @throws Exception
+ */
+ @Test
+ public void testFullRestoreMultipleOverwrite() throws Exception {
+ LOG.info("create full backup image on multiple tables");
+ String tableset =
+ table2.getNameAsString() + BackupRestoreConstants.TABLENAME_DELIMITER_IN_COMMAND
+ + table3.getNameAsString();
+ String backupId = BackupClient.create("full", BACKUP_ROOT_DIR, tableset, null);
+ String succeedZNode = ZKUtil.joinZNode(BACKUP_ZNODE, BACKUP_SUCCEED_NODE);
+ String backupNode = ZKUtil.joinZNode(succeedZNode, backupId);
+ Assert.assertThat(ZKUtil.checkExists(zkw1, backupNode), CoreMatchers.not(-1));
+ String[] restore_tableset = new String[] { table2.getNameAsString(), table3.getNameAsString() };
+ Path path = new Path(BACKUP_ROOT_DIR);
+ HBackupFileSystem hbfs = new HBackupFileSystem(conf1, path, backupId);
+ RestoreClient
+.restore_stage1(hbfs, BACKUP_ROOT_DIR, backupId, false, false, restore_tableset,
+ null,
+ true);
+ }
+
+ /**
+ * Verify that restore fails on a single table that does not exist.
+ * @throws Exception
+ */
+ @Test(expected = IOException.class)
+ public void testFullRestoreSingleDNE() throws Exception {
+
+ LOG.info("test restore fails on a single table that does not exist");
+ String backupId =
+ BackupClient.create("full", BACKUP_ROOT_DIR, table1.getNameAsString(), null);
+ LOG.info("backup complete");
+ String succeedZNode = ZKUtil.joinZNode(BACKUP_ZNODE, BACKUP_SUCCEED_NODE);
+ String backupNode = ZKUtil.joinZNode(succeedZNode, backupId);
+ Assert.assertThat(ZKUtil.checkExists(zkw1, backupNode), CoreMatchers.not(-1));
+ String[] tableset = new String[] { "faketable" };
+ String[] tablemap = new String[] { table1_restore };
+ Path path = new Path(BACKUP_ROOT_DIR);
+ HBackupFileSystem hbfs = new HBackupFileSystem(conf1, path, backupId);
+ RestoreClient.restore_stage1(hbfs, BACKUP_ROOT_DIR, backupId, false, false, tableset, tablemap,
+ false);
+ }
+
+ /**
+ * Verify that restore fails on multiple tables that do not exist.
+ * @throws Exception
+ */
+ @Test(expected = IOException.class)
+ public void testFullRestoreMultipleDNE() throws Exception {
+
+ LOG.info("test restore fails on multiple tables that do not exist");
+ String tableset =
+ table2.getNameAsString() + BackupRestoreConstants.TABLENAME_DELIMITER_IN_COMMAND
+ + table3.getNameAsString();
+ String backupId = BackupClient.create("full", BACKUP_ROOT_DIR, tableset, null);
+ String succeedZNode = ZKUtil.joinZNode(BACKUP_ZNODE, BACKUP_SUCCEED_NODE);
+ String backupNode = ZKUtil.joinZNode(succeedZNode, backupId);
+ Assert.assertThat(ZKUtil.checkExists(zkw1, backupNode), CoreMatchers.not(-1));
+ String[] restore_tableset = new String[] { "faketable1", "faketable2" };
+ String[] tablemap = new String[] { table2_restore, table3_restore };
+ Path path = new Path(BACKUP_ROOT_DIR);
+ HBackupFileSystem hbfs = new HBackupFileSystem(conf1, path, backupId);
+ RestoreClient
+.restore_stage1(hbfs, BACKUP_ROOT_DIR, backupId, false, false, restore_tableset,
+ tablemap,
+ false);
+ }
+}
\ No newline at end of file
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackup.java hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackup.java
new file mode 100644
index 0000000..35d3558
--- /dev/null
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackup.java
@@ -0,0 +1,181 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.backup;
+
+import static org.junit.Assert.assertTrue;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
+import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.testclassification.LargeTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.backup.HBackupFileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.zookeeper.ZKUtil;
+import org.hamcrest.CoreMatchers;
+import org.junit.Assert;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+@Category(LargeTests.class)
+public class TestIncrementalBackup extends TestBackupBase {
+ private static final Log LOG = LogFactory.getLog(TestIncrementalBackup.class);
+ // implement all test cases in one test, since the incremental backup/restore steps depend on each other
+ @Test
+ public void TestIncBackupRestore() throws Exception {
+ HBackupFileSystem hbfs;
+
+ // #1 - create full backup for all tables
+ LOG.info("create full backup image for all tables");
+ String tablesetFull =
+ table1.getNameAsString() + BackupRestoreConstants.TABLENAME_DELIMITER_IN_COMMAND
+ + table2.getNameAsString() + BackupRestoreConstants.TABLENAME_DELIMITER_IN_COMMAND
+ + table3.getNameAsString() + BackupRestoreConstants.TABLENAME_DELIMITER_IN_COMMAND
+ + table4.getNameAsString();
+
+ String backupIdFull =
+ BackupClient.create("full", BACKUP_ROOT_DIR, tablesetFull, null);
+ String succeedZNode = ZKUtil.joinZNode(BACKUP_ZNODE, BACKUP_SUCCEED_NODE);
+ String backupNode = ZKUtil.joinZNode(succeedZNode, backupIdFull);
+ Assert.assertThat(ZKUtil.checkExists(zkw1, backupNode), CoreMatchers.not(-1));
+ Connection conn = ConnectionFactory.createConnection(conf1);
+ // #2 - insert some data to table
+ HTable t1 = (HTable) conn.getTable(table1);
+ Put p1;
+ for (int i = 0; i < NB_ROWS_IN_BATCH; i++) {
+ p1 = new Put(Bytes.toBytes("row-t1" + i));
+ p1.add(famName, qualName, Bytes.toBytes("val" + i));
+ t1.put(p1);
+ }
+
+ Assert.assertThat(TEST_UTIL.countRows(t1), CoreMatchers.equalTo(NB_ROWS_IN_BATCH * 2));
+ t1.close();
+
+ HTable t2 = (HTable) conn.getTable(table2);
+ Put p2;
+ for (int i = 0; i < 5; i++) {
+ p2 = new Put(Bytes.toBytes("row-t2" + i));
+ p2.add(famName, qualName, Bytes.toBytes("val" + i));
+ t2.put(p2);
+ }
+
+ Assert.assertThat(TEST_UTIL.countRows(t2), CoreMatchers.equalTo(NB_ROWS_IN_BATCH + 5));
+ t2.close();
+
+ // #3 - incremental backup for multiple tables
+ String tablesetIncMultiple =
+ table1.getNameAsString() + BackupRestoreConstants.TABLENAME_DELIMITER_IN_COMMAND
+ + table2.getNameAsString() + BackupRestoreConstants.TABLENAME_DELIMITER_IN_COMMAND
+ + table3.getNameAsString();
+
+ String backupIdIncMultiple = BackupClient.create("incremental", BACKUP_ROOT_DIR,
+ tablesetIncMultiple, null);
+ String backupNodeIncMultiple = ZKUtil.joinZNode(succeedZNode, backupIdIncMultiple);
+ Assert.assertThat(ZKUtil.checkExists(zkw1, backupNodeIncMultiple), CoreMatchers.not(-1));
+
+ // #4 - restore full backup for all tables, without overwrite
+ String[] tablesRestoreFull =
+ new String[] { table1.getNameAsString(), table2.getNameAsString(),
+ table3.getNameAsString(), table4.getNameAsString() };
+
+ String[] tablesMapFull =
+ new String[] { table1_restore, table2_restore, table3_restore, table4_restore };
+
+ hbfs = new HBackupFileSystem(conf1, new Path(BACKUP_ROOT_DIR), backupIdFull);
+ RestoreClient.restore_stage1(hbfs, BACKUP_ROOT_DIR, backupIdFull, false, false,
+ tablesRestoreFull,
+ tablesMapFull, false);
+
+ // #5.1 - check tables for full restore
+ HBaseAdmin hAdmin = TEST_UTIL.getHBaseAdmin();
+ assertTrue(hAdmin.tableExists(table1_restore));
+ assertTrue(hAdmin.tableExists(table2_restore));
+ assertTrue(hAdmin.tableExists(table3_restore));
+ assertTrue(hAdmin.tableExists(table4_restore));
+
+ hAdmin.close();
+
+ // #5.2 - checking row count of tables for full restore
+ HTable hTable = (HTable) conn.getTable(TableName.valueOf(table1_restore));
+ Assert.assertThat(TEST_UTIL.countRows(hTable), CoreMatchers.equalTo(NB_ROWS_IN_BATCH));
+ hTable.close();
+
+ hTable = (HTable) conn.getTable(TableName.valueOf(table2_restore));
+ Assert.assertThat(TEST_UTIL.countRows(hTable), CoreMatchers.equalTo(NB_ROWS_IN_BATCH));
+ hTable.close();
+
+ hTable = (HTable) conn.getTable(TableName.valueOf(table3_restore));
+ Assert.assertThat(TEST_UTIL.countRows(hTable), CoreMatchers.equalTo(0));
+ hTable.close();
+
+ hTable = (HTable) conn.getTable(TableName.valueOf(table4_restore));
+ Assert.assertThat(TEST_UTIL.countRows(hTable), CoreMatchers.equalTo(0));
+ hTable.close();
+
+ // #6 - restore incremental backup for multiple tables, with overwrite
+ String[] tablesRestoreIncMultiple =
+ new String[]
+ { table1.getNameAsString(), table2.getNameAsString(), table3.getNameAsString() };
+ String[] tablesMapIncMultiple =
+ new String[] { table1_restore, table2_restore, table3_restore };
+ hbfs = new HBackupFileSystem(conf1, new Path(BACKUP_ROOT_DIR), backupIdIncMultiple);
+ RestoreClient.restore_stage1(hbfs, BACKUP_ROOT_DIR, backupIdIncMultiple, false, false,
+ tablesRestoreIncMultiple, tablesMapIncMultiple, true);
+
+ hTable = (HTable) conn.getTable(TableName.valueOf(table1_restore));
+ Assert.assertThat(TEST_UTIL.countRows(hTable), CoreMatchers.equalTo(NB_ROWS_IN_BATCH * 2));
+ hTable.close();
+
+ hTable = (HTable) conn.getTable(TableName.valueOf(table2_restore));
+ Assert.assertThat(TEST_UTIL.countRows(hTable), CoreMatchers.equalTo(NB_ROWS_IN_BATCH + 5));
+ hTable.close();
+
+ hTable = (HTable) conn.getTable(TableName.valueOf(table3_restore));
+ Assert.assertThat(TEST_UTIL.countRows(hTable), CoreMatchers.equalTo(0));
+ hTable.close();
+
+ // #7 - incremental backup for single, empty table
+
+ String tablesetIncEmpty = table4.getNameAsString();
+ String backupIdIncEmpty =
+ BackupClient.create("incremental", BACKUP_ROOT_DIR, tablesetIncEmpty, null);
+ String backupNodeIncEmpty = ZKUtil.joinZNode(succeedZNode, backupIdIncEmpty);
+ Assert.assertThat(ZKUtil.checkExists(zkw1, backupNodeIncEmpty), CoreMatchers.not(-1));
+
+ // #8 - restore incremental backup for single empty table, with overwrite
+ String[] tablesRestoreIncEmpty = new String[] { table4.getNameAsString() };
+ String[] tablesMapIncEmpty = new String[] { table4_restore };
+ hbfs = new HBackupFileSystem(conf1, new Path(BACKUP_ROOT_DIR), backupIdIncEmpty);
+ RestoreClient.restore_stage1(hbfs, BACKUP_ROOT_DIR, backupIdIncEmpty, false, false,
+ tablesRestoreIncEmpty,
+ tablesMapIncEmpty, true);
+
+ hTable = (HTable) conn.getTable(TableName.valueOf(table4_restore));
+ Assert.assertThat(TEST_UTIL.countRows(hTable), CoreMatchers.equalTo(0));
+ hTable.close();
+ conn.close();
+
+ }
+
+}
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestRemoteBackup.java hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestRemoteBackup.java
new file mode 100644
index 0000000..85e6ba8
--- /dev/null
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestRemoteBackup.java
@@ -0,0 +1,47 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
+ * agreements. See the NOTICE file distributed with this work for additional information regarding
+ * copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License. You may obtain a
+ * copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable
+ * law or agreed to in writing, software distributed under the License is distributed on an "AS IS"
+ * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License
+ * for the specific language governing permissions and limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.backup;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.testclassification.LargeTests;
+import org.apache.hadoop.hbase.zookeeper.ZKUtil;
+import org.hamcrest.CoreMatchers;
+import org.junit.Assert;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+@Category(LargeTests.class)
+public class TestRemoteBackup extends TestBackupBase {
+
+ private static final Log LOG = LogFactory.getLog(TestRemoteBackup.class);
+
+ /**
+ * Verify that a remote full backup is created correctly on a single table with data.
+ * @throws Exception
+ */
+ @Test
+ public void testFullBackupRemote() throws Exception {
+
+ LOG.info("test remote full backup on a single table");
+
+ // String rootdir = TEST_UTIL2.getDefaultRootDirPath() + BACKUP_ROOT_DIR;
+ // LOG.info("ROOTDIR " + rootdir);
+ String backupId =
+ BackupClient.create("full", BACKUP_REMOTE_ROOT_DIR, table1.getNameAsString(), null);
+ LOG.info("backup complete");
+ String succeedZNode = ZKUtil.joinZNode(BACKUP_ZNODE, BACKUP_SUCCEED_NODE);
+ String backupNode = ZKUtil.joinZNode(succeedZNode, backupId);
+ Assert.assertThat(ZKUtil.checkExists(zkw1, backupNode), CoreMatchers.not(-1));
+ }
+
+}
\ No newline at end of file
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestRemoteRestore.java hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestRemoteRestore.java
new file mode 100644
index 0000000..73a9225
--- /dev/null
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestRemoteRestore.java
@@ -0,0 +1,58 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
+ * agreements. See the NOTICE file distributed with this work for additional information regarding
+ * copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License. You may obtain a
+ * copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable
+ * law or agreed to in writing, software distributed under the License is distributed on an "AS IS"
+ * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License
+ * for the specific language governing permissions and limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.backup;
+
+import static org.junit.Assert.assertTrue;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.testclassification.LargeTests;
+import org.apache.hadoop.hbase.zookeeper.ZKUtil;
+import org.hamcrest.CoreMatchers;
+import org.junit.Assert;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+@Category(LargeTests.class)
+public class TestRemoteRestore extends TestBackupBase {
+
+ private static final Log LOG = LogFactory.getLog(TestRemoteRestore.class);
+
+ /**
+ * Verify that a remote restore on a single table is successful.
+ * @throws Exception
+ */
+ @Test
+ public void testFullRestoreRemote() throws Exception {
+
+ LOG.info("test remote full restore on a single table");
+ String backupId =
+ BackupClient.create("full", BACKUP_REMOTE_ROOT_DIR, table1.getNameAsString(), null);
+ LOG.info("backup complete");
+ String succeedZNode = ZKUtil.joinZNode(BACKUP_ZNODE, BACKUP_SUCCEED_NODE);
+ String backupNode = ZKUtil.joinZNode(succeedZNode, backupId);
+ Assert.assertThat(ZKUtil.checkExists(zkw1, backupNode), CoreMatchers.not(-1));
+ String[] tableset = new String[] { table1.getNameAsString() };
+ String[] tablemap = new String[] { table1_restore };
+ Path path = new Path(BACKUP_REMOTE_ROOT_DIR);
+ HBackupFileSystem hbfs = new HBackupFileSystem(conf1, path, backupId);
+ RestoreClient.restore_stage1(hbfs, BACKUP_REMOTE_ROOT_DIR, backupId, false, false, tableset,
+ tablemap, false);
+ HBaseAdmin hba = TEST_UTIL.getHBaseAdmin();
+ assertTrue(hba.tableExists(table1_restore));
+ TEST_UTIL.deleteTable(table1_restore);
+ hba.close();
+ }
+
+}
\ No newline at end of file
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestRestoreBoundaryTests.java hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestRestoreBoundaryTests.java
new file mode 100644
index 0000000..0ec156c
--- /dev/null
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestRestoreBoundaryTests.java
@@ -0,0 +1,91 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.backup;
+
+import static org.junit.Assert.assertTrue;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.testclassification.LargeTests;
+import org.apache.hadoop.hbase.zookeeper.ZKUtil;
+import org.hamcrest.CoreMatchers;
+import org.junit.Assert;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+@Category(LargeTests.class)
+public class TestRestoreBoundaryTests extends TestBackupBase {
+
+ private static final Log LOG = LogFactory.getLog(TestRestoreBoundaryTests.class);
+
+ /**
+ * Verify that a single empty table is restored to a new table
+ * @throws Exception
+ */
+ @Test
+ public void testFullRestoreSingleEmpty() throws Exception {
+
+ LOG.info("test full restore on a single empty table");
+ String backupId =
+ BackupClient.create("full", BACKUP_ROOT_DIR, table1.getNameAsString(), null);
+ LOG.info("backup complete");
+ String succeedZNode = ZKUtil.joinZNode(BACKUP_ZNODE, BACKUP_SUCCEED_NODE);
+ String backupNode = ZKUtil.joinZNode(succeedZNode, backupId);
+ Assert.assertThat(ZKUtil.checkExists(zkw1, backupNode), CoreMatchers.not(-1));
+ String[] tableset = new String[] { table1.getNameAsString() };
+ String[] tablemap = new String[] { table1_restore };
+ Path path = new Path(BACKUP_ROOT_DIR);
+ HBackupFileSystem hbfs = new HBackupFileSystem(conf1, path, backupId);
+ RestoreClient.restore_stage1(hbfs, BACKUP_ROOT_DIR, backupId, false, false, tableset, tablemap,
+ false);
+ HBaseAdmin hba = TEST_UTIL.getHBaseAdmin();
+ assertTrue(hba.tableExists(table1_restore));
+ TEST_UTIL.deleteTable(table1_restore);
+ }
+
+ /**
+ * Verify that multiple tables are restored to new tables.
+ * @throws Exception
+ */
+ @Test
+ public void testFullRestoreMultipleEmpty() throws Exception {
+ LOG.info("create full backup image on multiple tables");
+ String tableset =
+ table2.getNameAsString() + BackupRestoreConstants.TABLENAME_DELIMITER_IN_COMMAND
+ + table3.getNameAsString();
+ String backupId = BackupClient.create("full", BACKUP_ROOT_DIR, tableset, null);
+ String succeedZNode = ZKUtil.joinZNode(BACKUP_ZNODE, BACKUP_SUCCEED_NODE);
+ String backupNode = ZKUtil.joinZNode(succeedZNode, backupId);
+ Assert.assertThat(ZKUtil.checkExists(zkw1, backupNode), CoreMatchers.not(-1));
+ String[] restore_tableset = new String[] { table2.getNameAsString(), table3.getNameAsString() };
+ String[] tablemap = new String[] { table2_restore, table3_restore };
+ Path path = new Path(BACKUP_ROOT_DIR);
+ HBackupFileSystem hbfs = new HBackupFileSystem(conf1, path, backupId);
+ RestoreClient.restore_stage1(hbfs, BACKUP_ROOT_DIR, backupId, false, false, restore_tableset,
+ tablemap,
+ false);
+ HBaseAdmin hba = TEST_UTIL.getHBaseAdmin();
+ assertTrue(hba.tableExists(table2_restore));
+ assertTrue(hba.tableExists(table3_restore));
+ TEST_UTIL.deleteTable(table2_restore);
+ TEST_UTIL.deleteTable(table3_restore);
+ }
+}
\ No newline at end of file