{
+ RSRollLogTask() {
+ }
+
+ @Override
+ public Void call() throws Exception {
+ hlog = (FSHLog) rss.getWAL(null);
+ long filenum = hlog.getFilenum();
+ LOG.info("Trying to roll log in backup subprocedure, current log number: " + filenum);
+ hlog.rollWriter(true);
+ LOG.info("After roll log in backup subprocedure, current log number: " + hlog.getFilenum());
+ // write the log number to hbase:backup.
+ BackupSystemTable table = BackupSystemTable.getTable(rss.getConfiguration());
+ table.writeRegionServerLastLogRollResult(rss.getServerName().getHostname(), Long.toString(filenum));
+ // TODO: potential leak of the HBase connection held by BackupSystemTable
+ // BackupSystemTable.close();
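+ // A possible fix (assuming BackupSystemTable holds a closeable connection) would be to
+ // close the table in a finally block so the connection is released even if the write fails.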
+ return null;
+ }
+
+ }
+
+ private void rollLog() throws ForeignException {
+
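+ // Re-check for errors propagated from the procedure coordinator before, between and after
+ // the task steps, so a failure anywhere in the global procedure aborts the roll promptly.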
+ monitor.rethrowException();
+
+ taskManager.submitTask(new RSRollLogTask());
+ monitor.rethrowException();
+
+ // wait for everything to complete.
+ taskManager.waitForOutstandingTasks();
+ monitor.rethrowException();
+
+ }
+
+ @Override
+ public void acquireBarrier() throws ForeignException {
+ // do nothing; the real work happens in the inside-barrier step.
+ }
+
+ /**
+ * Do a log roll on this region server.
+ * @return a payload for the coordinator; currently always null (see the FIXME below)
+ */
+ @Override
+ public byte[] insideBarrier() throws ForeignException {
+ rollLog();
+ // FIXME: nothing useful to return yet; the roll result is persisted to hbase:backup by
+ // RSRollLogTask rather than passed back through the barrier payload.
+ return null;
+ }
+
+ /**
+ * Cancel threads if they haven't finished.
+ */
+ @Override
+ public void cleanup(Exception e) {
+ taskManager.abort("Aborting log roll subprocedure tasks for backup due to error", e);
+ }
+
+ /**
+ * Nothing to release; the barrier release step is a no-op for log roll.
+ */
+ public void releaseBarrier() {
+ // NO OP
+ }
+
+}
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/backup/regionserver/RegionServerBackupManager.java hbase-server/src/main/java/org/apache/hadoop/hbase/backup/regionserver/RegionServerBackupManager.java
new file mode 100644
index 0000000..a200dcc
--- /dev/null
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/backup/regionserver/RegionServerBackupManager.java
@@ -0,0 +1,166 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.backup.regionserver;
+
+
+import java.io.IOException;
+import java.util.concurrent.ThreadPoolExecutor;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher;
+import org.apache.hadoop.hbase.procedure.ProcedureMember;
+import org.apache.hadoop.hbase.procedure.ProcedureMemberRpcs;
+import org.apache.hadoop.hbase.procedure.RegionServerProcedureManager;
+import org.apache.hadoop.hbase.procedure.Subprocedure;
+import org.apache.hadoop.hbase.procedure.SubprocedureFactory;
+import org.apache.hadoop.hbase.procedure.ZKProcedureMemberRpcs;
+import org.apache.hadoop.hbase.regionserver.HRegionServer;
+import org.apache.hadoop.hbase.regionserver.RegionServerServices;
+import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
+import org.apache.zookeeper.KeeperException;
+
+import org.apache.hadoop.hbase.backup.master.LogRollMasterProcedureManager;
+
+/**
+ * This manager class handles the backup-related work for a {@link HRegionServer}.
+ *
+ * It provides the mechanism necessary to kick off the backup-specific {@link Subprocedure} that
+ * this region server is responsible for. If any failure occurs in the subprocedure, the manager's
+ * procedure member notifies the procedure coordinator so it can abort all the others.
+ *
+ * On startup, requires {@link #start()} to be called.
+ *
+ * On shutdown, requires {@link org.apache.hadoop.hbase.procedure.ProcedureMember#close()} to be
+ * called.
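+ *
+ * The manager must be registered on region servers via configuration; the tests in this patch
+ * enable it like this:
+ * <pre>
+ * conf.set("hbase.procedure.regionserver.classes",
+ *   "org.apache.hadoop.hbase.backup.regionserver.RegionServerBackupManager");
+ * </pre>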
+ */
+public class RegionServerBackupManager extends RegionServerProcedureManager {
+
+ private static final Log LOG = LogFactory.getLog(RegionServerBackupManager.class);
+
+ /** Conf key for number of request threads to start backup on regionservers */
+ public static final String BACKUP_REQUEST_THREADS_KEY = "hbase.backup.region.pool.threads";
+ /** Default number of threads for backup work on each region server. */
+ public static final int BACKUP_REQUEST_THREADS_DEFAULT = 10;
+
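+ /** Conf key for the timeout, in milliseconds, of backup subprocedures */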
+ public static final String BACKUP_TIMEOUT_MILLIS_KEY = "hbase.backup.timeout";
+ public static final long BACKUP_TIMEOUT_MILLIS_DEFAULT = 60000;
+
+ /** Conf key for millis between checks to see if backup work completed or if there are errors */
+ public static final String BACKUP_REQUEST_WAKE_MILLIS_KEY = "hbase.backup.region.wakefrequency";
+ /** Default interval, in milliseconds, between error checks while backup work completes */
+ private static final long BACKUP_REQUEST_WAKE_MILLIS_DEFAULT = 500;
+
+ private RegionServerServices rss;
+ private ProcedureMemberRpcs memberRpcs;
+ private ProcedureMember member;
+
+ /**
+ * Create a default backup procedure manager
+ */
+ public RegionServerBackupManager() {
+ }
+
+ /**
+ * Start accepting backup procedure requests.
+ */
+ @Override
+ public void start() {
+ this.memberRpcs.start(rss.getServerName().toString(), member);
+ LOG.info("Started region server backup manager.");
+ }
+
+ /**
+ * Close this and all running backup procedure tasks
+ * @param force forcefully stop all running tasks
+ * @throws IOException
+ */
+ @Override
+ public void stop(boolean force) throws IOException {
+ String mode = force ? "abruptly" : "gracefully";
+ LOG.info("Stopping RegionServerBackupManager " + mode + ".");
+
+ try {
+ this.member.close();
+ } finally {
+ this.memberRpcs.close();
+ }
+ }
+
+ /**
+ * If in a running state, creates the specified subprocedure for handling a backup procedure.
+ * @return Subprocedure to submit to the ProcedureMember.
+ */
+ public Subprocedure buildSubprocedure() {
+
+ // don't run a backup if the parent is stop(ping)
+ if (rss.isStopping() || rss.isStopped()) {
+ throw new IllegalStateException("Can't start backup procedure on RS: " + rss.getServerName()
+ + ", because stopping/stopped!");
+ }
+
+ LOG.info("Attempting to run a roll log procedure for backup.");
+ ForeignExceptionDispatcher errorDispatcher = new ForeignExceptionDispatcher();
+ Configuration conf = rss.getConfiguration();
+ long timeoutMillis = conf.getLong(BACKUP_TIMEOUT_MILLIS_KEY, BACKUP_TIMEOUT_MILLIS_DEFAULT);
+ long wakeMillis =
+ conf.getLong(BACKUP_REQUEST_WAKE_MILLIS_KEY, BACKUP_REQUEST_WAKE_MILLIS_DEFAULT);
+
+ BackupSubprocedurePool taskManager =
+ new BackupSubprocedurePool(rss.getServerName().toString(), conf);
+ return new LogRollBackupSubprocedure(rss, member, errorDispatcher, wakeMillis, timeoutMillis,
+ taskManager);
+
+ }
+
+ /**
+ * Build the actual backup procedure runner that will do all the 'hard' work
+ */
+ public class BackupSubprocedureBuilder implements SubprocedureFactory {
+
+ @Override
+ public Subprocedure buildSubprocedure(String name, byte[] data) {
+ return RegionServerBackupManager.this.buildSubprocedure();
+ }
+ }
+
+ @Override
+ public void initialize(RegionServerServices rss) throws KeeperException {
+ this.rss = rss;
+ ZooKeeperWatcher zkw = rss.getZooKeeper();
+ this.memberRpcs =
+ new ZKProcedureMemberRpcs(zkw, LogRollMasterProcedureManager.ROLLLOG_PROCEDURE_SIGNATURE);
+
+ // read in the backup handler configuration properties
+ Configuration conf = rss.getConfiguration();
+ long keepAlive = conf.getLong(BACKUP_TIMEOUT_MILLIS_KEY, BACKUP_TIMEOUT_MILLIS_DEFAULT);
+ int opThreads = conf.getInt(BACKUP_REQUEST_THREADS_KEY, BACKUP_REQUEST_THREADS_DEFAULT);
+ // create the actual cohort member
+ ThreadPoolExecutor pool =
+ ProcedureMember.defaultPool(rss.getServerName().toString(), opThreads, keepAlive);
+ this.member = new ProcedureMember(memberRpcs, pool, new BackupSubprocedureBuilder());
+ }
+
+ @Override
+ public String getProcedureSignature() {
+ return "backup-proc";
+ }
+
+}
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/WALPlayer.java hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/WALPlayer.java
index c067fc3..23b5851 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/WALPlayer.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/WALPlayer.java
@@ -85,6 +85,9 @@ public class WALPlayer extends Configured implements Tool {
private final static String JOB_NAME_CONF_KEY = "mapreduce.job.name";
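+
+ /**
+ * No-arg constructor. Presumably required so the backup code can instantiate the tool
+ * reflectively and configure it afterwards (an assumption; the call site is not in this hunk).
+ */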
+ public WALPlayer() {
+ }
+
protected WALPlayer(final Configuration c) {
super(c);
}
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java
index b118ecd..adbb7cd 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java
@@ -922,6 +922,16 @@ public class FSHLog implements WAL {
return computeFilename(this.filenum.get());
}
+
+ /**
+ * Kept for compatibility with the old API.
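+ * The counter is read atomically, so no additional locking is needed here.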
+ * @return current file number (timestamp)
+ */
+ public long getFilenum() {
+ return filenum.get();
+ }
+
@Override
public String toString() {
return "FSHLog " + logFilePrefix + ":" + logFileSuffix + "(num " + filenum + ")";
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotCopy.java hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotCopy.java
new file mode 100644
index 0000000..0360000
--- /dev/null
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotCopy.java
@@ -0,0 +1,42 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.snapshot;
+
+import org.apache.hadoop.hbase.backup.BackupHandler;
+
+/** This class will be extended in a future JIRA to support progress reporting. */
+public class SnapshotCopy extends ExportSnapshot {
+ private BackupHandler backupHandler;
+ private String table;
+
+ public SnapshotCopy(BackupHandler backupHandler, String table) {
+ super();
+ this.backupHandler = backupHandler;
+ this.table = table;
+ }
+
+ public BackupHandler getBackupHandler() {
+ return this.backupHandler;
+ }
+
+ public String getTable() {
+ return this.table;
+ }
+
+}
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java
new file mode 100644
index 0000000..efc42f9
--- /dev/null
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java
@@ -0,0 +1,197 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.backup;
+
+import java.io.IOException;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.backup.BackupHandler.BACKUPSTATUS;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
+import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.snapshot.SnapshotTestingUtils;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+
+/**
+ * This class is only a base for other integration-level backup tests.
+ * Do not add tests here.
+ * TestBackupSmallTests is where tests that don't require bringing machines up/down should go.
+ * All other tests should have their own classes and extend this one.
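+ *
+ * A typical subclass (a sketch mirroring the tests in this patch) looks like:
+ * <pre>
+ * public class TestMyBackup extends TestBackupBase {
+ *   public void testSomething() throws Exception { // annotated with &#64;Test
+ *     String backupId = BackupClient.create("full", BACKUP_ROOT_DIR,
+ *       table1.getNameAsString(), null);
+ *     assertTrue(checkSucceeded(backupId));
+ *   }
+ * }
+ * </pre>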
+ */
+public class TestBackupBase {
+
+ private static final Log LOG = LogFactory.getLog(TestBackupBase.class);
+
+ protected static Configuration conf1;
+ protected static Configuration conf2;
+
+ protected static HBaseTestingUtility TEST_UTIL;
+ protected static HBaseTestingUtility TEST_UTIL2;
+
+ protected static TableName table1;
+ protected static TableName table2;
+ protected static TableName table3;
+ protected static TableName table4;
+
+ protected static String table1_restore = "table1_restore";
+ protected static String table2_restore = "table2_restore";
+ protected static String table3_restore = "table3_restore";
+ protected static String table4_restore = "table4_restore";
+
+ protected static final int NB_ROWS_IN_BATCH = 100;
+ protected static final byte[] qualName = Bytes.toBytes("q1");
+ protected static final byte[] famName = Bytes.toBytes("f");
+
+ protected static String BACKUP_ROOT_DIR = "/backupUT";
+ protected static String BACKUP_REMOTE_ROOT_DIR = "/backupUT";
+
+ protected static final String BACKUP_ZNODE = "/backup/hbase";
+ protected static final String BACKUP_SUCCEED_NODE = "complete";
+ protected static final String BACKUP_FAILED_NODE = "failed";
+
+
+ /**
+ * Sets up two mini clusters (local and remote) plus a MR cluster, and creates the test tables.
+ * @throws java.lang.Exception
+ */
+ @BeforeClass
+ public static void setUpBeforeClass() throws Exception {
+
+ TEST_UTIL = new HBaseTestingUtility();
+ TEST_UTIL.getConfiguration().set("hbase.procedure.regionserver.classes",
+ "org.apache.hadoop.hbase.backup.regionserver.RegionServerBackupManager");
+ TEST_UTIL.getConfiguration().set("hbase.procedure.master.classes",
+ "org.apache.hadoop.hbase.backup.master.LogRollMasterProcedureManager");
+ TEST_UTIL.getConfiguration().set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/1");
+ TEST_UTIL.startMiniZKCluster();
+ MiniZooKeeperCluster miniZK = TEST_UTIL.getZkCluster();
+
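+ // The second mini cluster shares the ZK ensemble (under a different znode parent) and serves
+ // as the "remote" cluster for the remote backup/restore tests.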
+ conf1 = TEST_UTIL.getConfiguration();
+ conf2 = HBaseConfiguration.create(conf1);
+ conf2.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/2");
+ TEST_UTIL2 = new HBaseTestingUtility(conf2);
+ TEST_UTIL2.setZkCluster(miniZK);
+ TEST_UTIL.startMiniCluster();
+ TEST_UTIL2.startMiniCluster();
+ conf1 = TEST_UTIL.getConfiguration();
+
+ TEST_UTIL.startMiniMapReduceCluster();
+ BACKUP_ROOT_DIR = TEST_UTIL.getConfiguration().get("fs.defaultFS") + "/backupUT";
+ LOG.info("ROOTDIR " + BACKUP_ROOT_DIR);
+ BACKUP_REMOTE_ROOT_DIR = TEST_UTIL2.getConfiguration().get("fs.defaultFS") + "/backupUT";
+ LOG.info("REMOTE ROOTDIR " + BACKUP_REMOTE_ROOT_DIR);
+
+ BackupClient.setConf(conf1);
+ RestoreClient.setConf(conf1);
+ createTables();
+ }
+
+ /**
+ * Shuts down both mini clusters and cleans up test snapshots.
+ * @throws java.lang.Exception
+ */
+ @AfterClass
+ public static void tearDownAfterClass() throws Exception {
+ SnapshotTestingUtils.deleteAllSnapshots(TEST_UTIL.getHBaseAdmin());
+ SnapshotTestingUtils.deleteArchiveDirectory(TEST_UTIL);
+ TEST_UTIL2.shutdownMiniCluster();
+ TEST_UTIL.shutdownMiniCluster();
+ TEST_UTIL.shutdownMiniMapReduceCluster();
+ }
+
+ protected static void loadTable(HTable table) throws Exception {
+
+ Put p; // load NB_ROWS_IN_BATCH rows
+ for (int i = 0; i < NB_ROWS_IN_BATCH; i++) {
+ p = new Put(Bytes.toBytes("row" + i));
+ p.addColumn(famName, qualName, Bytes.toBytes("val" + i));
+ table.put(p);
+ }
+ }
+
+ protected static void createTables() throws Exception {
+
+ long tid = System.currentTimeMillis();
+ table1 = TableName.valueOf("test-" + tid);
+
+ HBaseAdmin ha = TEST_UTIL.getHBaseAdmin();
+ HTableDescriptor desc = new HTableDescriptor(table1);
+ HColumnDescriptor fam = new HColumnDescriptor(famName);
+ desc.addFamily(fam);
+ ha.createTable(desc);
+ Connection conn = ConnectionFactory.createConnection(conf1);
+
+ HTable table = (HTable) conn.getTable(table1);
+ loadTable(table);
+ table.close();
+
+ table2 = TableName.valueOf("test-" + (tid + 1));
+ desc = new HTableDescriptor(table2);
+ desc.addFamily(fam);
+ ha.createTable(desc);
+ table = (HTable) conn.getTable(table2);
+ loadTable(table);
+ table.close();
+
+ table3 = TableName.valueOf("test-" + (tid + 2));
+ table = TEST_UTIL.createTable(table3.getName(), famName);
+ table.close();
+
+ table4 = TableName.valueOf("test-" + (tid + 3));
+ table = TEST_UTIL.createTable(table4.getName(), famName);
+ table.close();
+ ha.close();
+ conn.close();
+ }
+
+ protected boolean checkSucceeded(String backupId) throws IOException {
+ BackupContext status = getBackupContext(backupId);
+ if (status == null) return false;
+ return status.getFlag() == BACKUPSTATUS.COMPLETE;
+ }
+
+ protected boolean checkFailed(String backupId) throws IOException {
+ BackupContext status = getBackupContext(backupId);
+ if (status == null) return false;
+ return status.getFlag() == BACKUPSTATUS.FAILED;
+ }
+
+ private BackupContext getBackupContext(String backupId) throws IOException {
+ Configuration conf = BackupClient.getConf();
+ BackupSystemTable table = BackupSystemTable.getTable(conf);
+ BackupContext status = table.readBackupStatus(backupId);
+ return status;
+ }
+}
+
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBoundaryTests.java hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBoundaryTests.java
new file mode 100644
index 0000000..7f5031e
--- /dev/null
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBoundaryTests.java
@@ -0,0 +1,100 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.backup;
+
+import static org.junit.Assert.assertTrue;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.testclassification.LargeTests;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+@Category(LargeTests.class)
+public class TestBackupBoundaryTests extends TestBackupBase {
+
+ private static final Log LOG = LogFactory.getLog(TestBackupBoundaryTests.class);
+
+ /**
+ * Verify that full backup is created on a single empty table correctly.
+ * @throws Exception
+ */
+ @Test
+ public void testFullBackupSingleEmpty() throws Exception {
+
+ LOG.info("create full backup image on single table");
+
+ String backupId =
+ BackupClient.create("full", BACKUP_ROOT_DIR, table3.getNameAsString(), null);
+ LOG.info("Finished Backup");
+ assertTrue(checkSucceeded(backupId));
+
+ }
+
+ /**
+ * Verify that full backup is created on multiple empty tables correctly.
+ * @throws Exception
+ */
+ @Test
+ public void testFullBackupMultipleEmpty() throws Exception {
+ LOG.info("create full backup image on mulitple empty tables");
+ String tableset =
+ table3.getNameAsString() + BackupRestoreConstants.TABLENAME_DELIMITER_IN_COMMAND
+ + table4.getNameAsString();
+ String backupId = BackupClient.create("full", BACKUP_ROOT_DIR, tableset, null);
+ assertTrue(checkSucceeded(backupId));
+
+ }
+
+ /**
+ * Verify that full backup fails on a single table that does not exist.
+ * @throws Exception
+ */
+ @Test(expected = RuntimeException.class)
+ public void testFullBackupSingleDNE() throws Exception {
+
+ LOG.info("test full backup fails on a single table that does not exist");
+ BackupClient.create("full", BACKUP_ROOT_DIR, "tabledne", null);
+ }
+
+ /**
+ * Verify that full backup fails on multiple tables that do not exist.
+ * @throws Exception
+ */
+ @Test(expected = RuntimeException.class)
+ public void testFullBackupMultipleDNE() throws Exception {
+
+ LOG.info("test full backup fails on multiple tables that do not exist");
+ BackupClient.create("full", BACKUP_ROOT_DIR, "table1dne,table2dne", null);
+ }
+
+ /**
+ * Verify that full backup fails on tableset containing real and fake tables.
+ * @throws Exception
+ */
+ @Test(expected = RuntimeException.class)
+ public void testFullBackupMixExistAndDNE() throws Exception {
+ LOG.info("create full backup fails on tableset containing real and fake table");
+ String tableset =
+ table1.getNameAsString() + BackupRestoreConstants.TABLENAME_DELIMITER_IN_COMMAND
+ + "tabledne";
+ BackupClient.create("full", BACKUP_ROOT_DIR, tableset, null);
+ }
+
+}
\ No newline at end of file
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupSystemTable.java hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupSystemTable.java
new file mode 100644
index 0000000..b448cc1
--- /dev/null
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupSystemTable.java
@@ -0,0 +1,330 @@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.backup;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Set;
+import java.util.TreeSet;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.MiniHBaseCluster;
+import org.apache.hadoop.hbase.backup.BackupHandler.BACKUPSTATUS;
+import org.apache.hadoop.hbase.backup.BackupUtil.BackupCompleteData;
+import org.apache.hadoop.hbase.client.Admin;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+/**
+ * Test cases for hbase:backup API
+ *
+ */
+public class TestBackupSystemTable {
+
+ private static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
+ protected static Configuration conf = UTIL.getConfiguration();
+ protected static MiniHBaseCluster cluster;
+
+ @BeforeClass
+ public static void setUp() throws Exception {
+ cluster = UTIL.startMiniCluster();
+
+ }
+
+ @Test
+ public void testUpdateReadDeleteBackupStatus() throws IOException {
+ BackupSystemTable table = BackupSystemTable.getTable(conf);
+ BackupContext ctx = createBackupContext();
+ table.updateBackupStatus(ctx);
+ BackupContext readCtx = table.readBackupStatus(ctx.getBackupId());
+ assertTrue(compare(ctx, readCtx));
+
+ // try fake backup id
+ readCtx = table.readBackupStatus("fake");
+
+ assertNull(readCtx);
+ // delete backup context
+ table.deleteBackupStatus(ctx.getBackupId());
+ readCtx = table.readBackupStatus(ctx.getBackupId());
+ assertNull(readCtx);
+ cleanBackupTable();
+ }
+
+ @Test
+ public void testWriteReadBackupStartCode() throws IOException {
+ BackupSystemTable table = BackupSystemTable.getTable(conf);
+ String code = "100";
+ table.writeBackupStartCode(code);
+ String readCode = table.readBackupStartCode();
+ assertEquals(code, readCode);
+ cleanBackupTable();
+ }
+
+
+ private void cleanBackupTable() throws IOException {
+ Admin admin = UTIL.getHBaseAdmin();
+ admin.disableTable(BackupSystemTable.getTableName());
+ admin.truncateTable(BackupSystemTable.getTableName(), true);
+ if (admin.isTableDisabled(BackupSystemTable.getTableName())) {
+ admin.enableTable(BackupSystemTable.getTableName());
+ }
+ }
+
+ @Test
+ public void testBackupHistory() throws IOException {
+ BackupSystemTable table = BackupSystemTable.getTable(conf);
+ int n = 10;
+ List<BackupContext> list = createBackupContextList(n);
+
+ // Load data
+ for (BackupContext bc : list) {
+ // Make sure we set right status
+ bc.setFlag(BACKUPSTATUS.COMPLETE);
+ table.updateBackupStatus(bc);
+ }
+
+ // Reverse list for comparison
+ Collections.reverse(list);
+ ArrayList<BackupCompleteData> history = table.getBackupHistory();
+ assertTrue(history.size() == n);
+
+ for (int i = 0; i < n; i++) {
+ BackupContext ctx = list.get(i);
+ BackupCompleteData data = history.get(i);
+ assertTrue(compare(ctx, data));
+ }
+
+ cleanBackupTable();
+
+ }
+
+ @Test
+ public void testRegionServerLastLogRollResults() throws IOException {
+ BackupSystemTable table = BackupSystemTable.getTable(conf);
+
+ String[] servers = new String[]{"server1", "server2", "server3"};
+ String[] timestamps = new String[]{"100", "102", "107"};
+
+ for (int i = 0; i < servers.length; i++) {
+ table.writeRegionServerLastLogRollResult(servers[i], timestamps[i]);
+ }
+
+ HashMap<String, String> result = table.readRegionServerLastLogRollResult();
+ assertTrue(servers.length == result.size());
+ Set<String> keys = result.keySet();
+ String[] keysAsArray = new String[keys.size()];
+ keys.toArray(keysAsArray);
+ Arrays.sort(keysAsArray);
+
+ for (int i = 0; i < keysAsArray.length; i++) {
+ assertEquals(keysAsArray[i], servers[i]);
+ String ts1 = timestamps[i];
+ String ts2 = result.get(keysAsArray[i]);
+ assertEquals(ts1, ts2);
+ }
+
+ cleanBackupTable();
+
+ }
+
+ @Test
+ public void testIncrementalBackupTableSet() throws IOException {
+ BackupSystemTable table = BackupSystemTable.getTable(conf);
+
+ TreeSet<String> tables1 = new TreeSet<String>();
+
+ tables1.add("t1");
+ tables1.add("t2");
+ tables1.add("t3");
+
+ TreeSet<String> tables2 = new TreeSet<String>();
+
+ tables2.add("t3");
+ tables2.add("t4");
+ tables2.add("t5");
+
+ table.addIncrementalBackupTableSet(tables1);
+ TreeSet<String> res1 = (TreeSet<String>) table.getIncrementalBackupTableSet();
+ assertTrue(tables1.size() == res1.size());
+ Iterator<String> desc1 = tables1.descendingIterator();
+ Iterator<String> desc2 = res1.descendingIterator();
+ while (desc1.hasNext()) {
+ assertEquals(desc1.next(), desc2.next());
+ }
+
+ table.addIncrementalBackupTableSet(tables2);
+ TreeSet<String> res2 = (TreeSet<String>) table.getIncrementalBackupTableSet();
+ assertTrue((tables2.size() + tables1.size() - 1) == res2.size());
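+ // "t3" appears in both sets, so the merged set has one element fewer than the combined sizes.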
+
+ tables1.addAll(tables2);
+
+ desc1 = tables1.descendingIterator();
+ desc2 = res2.descendingIterator();
+
+ while (desc1.hasNext()) {
+ assertEquals(desc1.next(), desc2.next());
+ }
+ cleanBackupTable();
+
+ }
+
+ @Test
+ public void testRegionServerLogTimestampMap() throws IOException {
+ BackupSystemTable table = BackupSystemTable.getTable(conf);
+
+ TreeSet<String> tables = new TreeSet<String>();
+
+ tables.add("t1");
+ tables.add("t2");
+ tables.add("t3");
+
+ HashMap<String, String> rsTimestampMap = new HashMap<String, String>();
+
+ rsTimestampMap.put("rs1", "100");
+ rsTimestampMap.put("rs2", "101");
+ rsTimestampMap.put("rs3", "103");
+
+ table.writeRegionServerLogTimestamp(tables, rsTimestampMap);
+
+
+ HashMap<String, HashMap<String, String>> result = table.readLogTimestampMap();
+
+ assertTrue(tables.size() == result.size());
+
+ for (String t : tables) {
+ HashMap<String, String> rstm = result.get(t);
+ assertNotNull(rstm);
+ assertEquals(rstm.get("rs1"), "100");
+ assertEquals(rstm.get("rs2"), "101");
+ assertEquals(rstm.get("rs3"), "103");
+ }
+
+ Set<String> tables1 = new TreeSet<String>();
+
+ tables1.add("t3");
+ tables1.add("t4");
+ tables1.add("t5");
+
+ HashMap<String, String> rsTimestampMap1 = new HashMap<String, String>();
+
+ rsTimestampMap1.put("rs1", "200");
+ rsTimestampMap1.put("rs2", "201");
+ rsTimestampMap1.put("rs3", "203");
+
+ table.writeRegionServerLogTimestamp(tables1, rsTimestampMap1);
+
+ result = table.readLogTimestampMap();
+
+ assertTrue(5 == result.size());
+
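+ // "t3" was written in both batches: its timestamps should now come from the second write,
+ // while "t1" and "t2" keep the values from the first.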
+ for (String t : tables) {
+ HashMap<String, String> rstm = result.get(t);
+ assertNotNull(rstm);
+ if (!t.equals("t3")) {
+ assertEquals(rstm.get("rs1"), "100");
+ assertEquals(rstm.get("rs2"), "101");
+ assertEquals(rstm.get("rs3"), "103");
+ } else {
+ assertEquals(rstm.get("rs1"), "200");
+ assertEquals(rstm.get("rs2"), "201");
+ assertEquals(rstm.get("rs3"), "203");
+ }
+ }
+
+ for (String t : tables1) {
+ HashMap<String, String> rstm = result.get(t);
+ assertNotNull(rstm);
+ assertEquals(rstm.get("rs1"), "200");
+ assertEquals(rstm.get("rs2"), "201");
+ assertEquals(rstm.get("rs3"), "203");
+ }
+
+ cleanBackupTable();
+
+ }
+
+ private boolean compare(BackupContext ctx, BackupCompleteData data) {
+
+ return ctx.getBackupId().equals(data.getBackupToken()) &&
+ ctx.getTargetRootDir().equals(data.getBackupRootPath()) &&
+ ctx.getType().equals(data.getType()) &&
+ ctx.getStartTs() == Long.parseLong(data.getStartTime()) &&
+ ctx.getEndTs() == Long.parseLong(data.getEndTime());
+
+ }
+
+ private boolean compare(BackupContext one, BackupContext two) {
+ return one.getBackupId().equals(two.getBackupId()) &&
+ one.getType().equals(two.getType()) &&
+ one.getTargetRootDir().equals(two.getTargetRootDir()) &&
+ one.getStartTs() == two.getStartTs() &&
+ one.getEndTs() == two.getEndTs();
+ }
+
+ private BackupContext createBackupContext() {
+
+ BackupContext ctxt = new BackupContext("backup_" + System.nanoTime(),
+ "full", new String[]{"t1", "t2", "t3"}, "/hbase/backup", null);
+ ctxt.setStartTs(System.currentTimeMillis());
+ ctxt.setEndTs(System.currentTimeMillis() + 1);
+ return ctxt;
+ }
+
+ private List<BackupContext> createBackupContextList(int size) {
+ List<BackupContext> list = new ArrayList<BackupContext>();
+ for (int i = 0; i < size; i++) {
+ list.add(createBackupContext());
+ try {
+ // sleep so each context gets a distinct nanoTime-based backup id
+ Thread.sleep(10);
+ } catch (InterruptedException e) {
+ // restore the interrupt status rather than swallowing the exception
+ Thread.currentThread().interrupt();
+ }
+ }
+ return list;
+ }
+
+
+ @AfterClass
+ public static void tearDown() throws IOException {
+ if (cluster != null) cluster.shutdown();
+ }
+
+}
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestFullBackup.java hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestFullBackup.java
new file mode 100644
index 0000000..19caf37
--- /dev/null
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestFullBackup.java
@@ -0,0 +1,83 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
+ * agreements. See the NOTICE file distributed with this work for additional information regarding
+ * copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License. You may obtain a
+ * copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable
+ * law or agreed to in writing, software distributed under the License is distributed on an "AS IS"
+ * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License
+ * for the specific language governing permissions and limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.backup;
+
+import static org.junit.Assert.assertTrue;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.testclassification.LargeTests;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+@Category(LargeTests.class)
+public class TestFullBackup extends TestBackupBase {
+
+ private static final Log LOG = LogFactory.getLog(TestFullBackup.class);
+
+ /**
+ * Verify that full backup is created on a single table with data correctly.
+ * @throws Exception
+ */
+ @Test
+ public void testFullBackupSingle() throws Exception {
+
+ LOG.info("test full backup on a single table with data");
+ String backupId =
+ BackupClient.create("full", BACKUP_ROOT_DIR, table1.getNameAsString(), null);
+ LOG.info("backup complete");
+ assertTrue(checkSucceeded(backupId));
+ }
+
+ /**
+ * Verify that full backup is created on multiple tables correctly.
+ * @throws Exception
+ */
+ @Test
+ public void testFullBackupMultiple() throws Exception {
+ LOG.info("create full backup image on multiple tables with data");
+ String tableset =
+ table1.getNameAsString() + BackupRestoreConstants.TABLENAME_DELIMITER_IN_COMMAND
+ + table2.getNameAsString();
+ String backupId = BackupClient.create("full", BACKUP_ROOT_DIR, tableset, null);
+ assertTrue(checkSucceeded(backupId));
+
+ }
+
+ /**
+ * Verify that full backup is created on all tables correctly.
+ * @throws Exception
+ */
+ @Test
+ public void testFullBackupAll() throws Exception {
+ LOG.info("create full backup image on all tables");
+ String backupId = BackupClient.create("full", BACKUP_ROOT_DIR, null, null);
+ assertTrue(checkSucceeded(backupId));
+
+ }
+
+ /**
+ * Verify that full backup is created on a table correctly using a snapshot.
+ * @throws Exception
+ */
+ //@Test
+ //public void testFullBackupUsingSnapshot() throws Exception {
+ // HBaseAdmin hba = new HBaseAdmin(conf1);
+ //String snapshot = "snapshot";
+ //hba.snapshot(snapshot, table1);
+ //LOG.info("create full backup image on a table using snapshot");
+ //String backupId =
+ // BackupClient.create("full", BACKUP_ROOT_DIR, table1.getNameAsString(),
+ // snapshot);
+ // }
+
+}
\ No newline at end of file
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestFullRestore.java hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestFullRestore.java
new file mode 100644
index 0000000..a443f3c
--- /dev/null
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestFullRestore.java
@@ -0,0 +1,171 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
+ * agreements. See the NOTICE file distributed with this work for additional information regarding
+ * copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License. You may obtain a
+ * copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable
+ * law or agreed to in writing, software distributed under the License is distributed on an "AS IS"
+ * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License
+ * for the specific language governing permissions and limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.backup;
+
+import static org.junit.Assert.assertTrue;
+
+import java.io.IOException;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.testclassification.LargeTests;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+@Category(LargeTests.class)
+public class TestFullRestore extends TestBackupBase {
+
+ private static final Log LOG = LogFactory.getLog(TestFullRestore.class);
+
+ /**
+ * Verify that a single table is restored to a new table
+ * @throws Exception
+ */
+ @Test
+ public void testFullRestoreSingle() throws Exception {
+
+ LOG.info("test full restore on a single table empty table");
+ String backupId =
+ BackupClient.create("full", BACKUP_ROOT_DIR, table1.getNameAsString(), null);
+ LOG.info("backup complete");
+ assertTrue(checkSucceeded(backupId));
+
+ String[] tableset = new String[] { table1.getNameAsString() };
+ String[] tablemap = new String[] { table1_restore };
+ Path path = new Path(BACKUP_ROOT_DIR);
+ HBackupFileSystem hbfs = new HBackupFileSystem(conf1, path, backupId);
+ RestoreClient.restore_stage1(hbfs, BACKUP_ROOT_DIR, backupId, false, false, tableset, tablemap,
+ false);
+ HBaseAdmin hba = TEST_UTIL.getHBaseAdmin();
+ assertTrue(hba.tableExists(table1_restore));
+ TEST_UTIL.deleteTable(table1_restore);
+ hba.close();
+ }
+
+ /**
+ * Verify that multiple tables are restored to new tables.
+ * @throws Exception
+ */
+ @Test
+ public void testFullRestoreMultiple() throws Exception {
+ LOG.info("create full backup image on multiple tables");
+ String tableset =
+ table2.getNameAsString() + BackupRestoreConstants.TABLENAME_DELIMITER_IN_COMMAND
+ + table3.getNameAsString();
+ String backupId = BackupClient.create("full", BACKUP_ROOT_DIR, tableset, null);
+ assertTrue(checkSucceeded(backupId));
+
+ String[] restore_tableset = new String[] { table2.getNameAsString(), table3.getNameAsString() };
+ String[] tablemap = new String[] { table2_restore, table3_restore };
+ Path path = new Path(BACKUP_ROOT_DIR);
+ HBackupFileSystem hbfs = new HBackupFileSystem(conf1, path, backupId);
+ RestoreClient.restore_stage1(hbfs, BACKUP_ROOT_DIR, backupId, false, false, restore_tableset,
+ tablemap, false);
+ HBaseAdmin hba = TEST_UTIL.getHBaseAdmin();
+ assertTrue(hba.tableExists(table2_restore));
+ assertTrue(hba.tableExists(table3_restore));
+ TEST_UTIL.deleteTable(table2_restore);
+ TEST_UTIL.deleteTable(table3_restore);
+ hba.close();
+ }
+
+ /**
+ * Verify that a single table is restored using overwrite
+ * @throws Exception
+ */
+ @Test
+ public void testFullRestoreSingleOverwrite() throws Exception {
+
+ LOG.info("test full restore on a single table empty table");
+ String backupId =
+ BackupClient.create("full", BACKUP_ROOT_DIR, table1.getNameAsString(), null);
+ LOG.info("backup complete");
+ assertTrue(checkSucceeded(backupId));
+
+ String[] tableset = new String[] { table1.getNameAsString() };
+ Path path = new Path(BACKUP_ROOT_DIR);
+ HBackupFileSystem hbfs = new HBackupFileSystem(conf1, path, backupId);
+ RestoreClient.restore_stage1(hbfs, BACKUP_ROOT_DIR, backupId, false, false, tableset, null,
+ true);
+ }
+
+ /**
+ * Verify that multiple tables are restored to new tables using overwrite.
+ * @throws Exception
+ */
+ @Test
+ public void testFullRestoreMultipleOverwrite() throws Exception {
+ LOG.info("create full backup image on multiple tables");
+ String tableset =
+ table2.getNameAsString() + BackupRestoreConstants.TABLENAME_DELIMITER_IN_COMMAND
+ + table3.getNameAsString();
+ String backupId = BackupClient.create("full", BACKUP_ROOT_DIR, tableset, null);
+ assertTrue(checkSucceeded(backupId));
+
+ String[] restore_tableset = new String[] { table2.getNameAsString(), table3.getNameAsString() };
+ Path path = new Path(BACKUP_ROOT_DIR);
+ HBackupFileSystem hbfs = new HBackupFileSystem(conf1, path, backupId);
+ RestoreClient.restore_stage1(hbfs, BACKUP_ROOT_DIR, backupId, false, false, restore_tableset,
+ null, true);
+ }
+
+ /**
+ * Verify that restore fails on a single table that does not exist.
+ * @throws Exception
+ */
+ @Test(expected = IOException.class)
+ public void testFullRestoreSingleDNE() throws Exception {
+
+ LOG.info("test restore fails on a single table that does not exist");
+ String backupId =
+ BackupClient.create("full", BACKUP_ROOT_DIR, table1.getNameAsString(), null);
+ LOG.info("backup complete");
+ assertTrue(checkSucceeded(backupId));
+
+ String[] tableset = new String[] { "faketable" };
+ String[] tablemap = new String[] { table1_restore };
+ Path path = new Path(BACKUP_ROOT_DIR);
+ HBackupFileSystem hbfs = new HBackupFileSystem(conf1, path, backupId);
+ RestoreClient.restore_stage1(hbfs, BACKUP_ROOT_DIR, backupId, false, false, tableset, tablemap,
+ false);
+ }
+
+ /**
+ * Verify that restore fails on multiple tables that do not exist.
+ * @throws Exception
+ */
+ @Test(expected = IOException.class)
+ public void testFullRestoreMultipleDNE() throws Exception {
+
+ LOG.info("test restore fails on multiple tables that do not exist");
+ String tableset =
+ table2.getNameAsString() + BackupRestoreConstants.TABLENAME_DELIMITER_IN_COMMAND
+ + table3.getNameAsString();
+ String backupId = BackupClient.create("full", BACKUP_ROOT_DIR, tableset, null);
+ assertTrue(checkSucceeded(backupId));
+
+ String[] restore_tableset = new String[] { "faketable1", "faketable2" };
+ String[] tablemap = new String[] { table2_restore, table3_restore };
+ Path path = new Path(BACKUP_ROOT_DIR);
+ HBackupFileSystem hbfs = new HBackupFileSystem(conf1, path, backupId);
+ RestoreClient.restore_stage1(hbfs, BACKUP_ROOT_DIR, backupId, false, false, restore_tableset,
+ tablemap, false);
+ }
+}
\ No newline at end of file
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackup.java hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackup.java
new file mode 100644
index 0000000..6ecd227
--- /dev/null
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackup.java
@@ -0,0 +1,179 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.backup;
+
+import static org.junit.Assert.assertTrue;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
+import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.testclassification.LargeTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.backup.HBackupFileSystem;
+import org.apache.hadoop.fs.Path;
+import org.hamcrest.CoreMatchers;
+import org.junit.Assert;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+@Category(LargeTests.class)
+public class TestIncrementalBackup extends TestBackupBase {
+ private static final Log LOG = LogFactory.getLog(TestIncrementalBackup.class);
+ // Implement all test cases in one test, since the incremental backup/restore steps depend on each other.
+ @Test
+ public void testIncBackupRestore() throws Exception {
+ HBackupFileSystem hbfs;
+
+ // #1 - create full backup for all tables
+ LOG.info("create full backup image for all tables");
+ String tablesetFull =
+ table1.getNameAsString() + BackupRestoreConstants.TABLENAME_DELIMITER_IN_COMMAND
+ + table2.getNameAsString() + BackupRestoreConstants.TABLENAME_DELIMITER_IN_COMMAND
+ + table3.getNameAsString() + BackupRestoreConstants.TABLENAME_DELIMITER_IN_COMMAND
+ + table4.getNameAsString();
+
+ String backupIdFull =
+ BackupClient.create("full", BACKUP_ROOT_DIR, tablesetFull, null);
+ assertTrue(checkSucceeded(backupIdFull));
+
+ Connection conn = ConnectionFactory.createConnection(conf1);
+ // #2 - insert some data to table
+ HTable t1 = (HTable) conn.getTable(table1);
+ Put p1;
+ for (int i = 0; i < NB_ROWS_IN_BATCH; i++) {
+ p1 = new Put(Bytes.toBytes("row-t1" + i));
+ p1.addColumn(famName, qualName, Bytes.toBytes("val" + i));
+ t1.put(p1);
+ }
+
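+ // table1 already had NB_ROWS_IN_BATCH rows from setup, so the count is now doubled.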
+ Assert.assertThat(TEST_UTIL.countRows(t1), CoreMatchers.equalTo(NB_ROWS_IN_BATCH * 2));
+ t1.close();
+
+ HTable t2 = (HTable) conn.getTable(table2);
+ Put p2;
+ for (int i = 0; i < 5; i++) {
+ p2 = new Put(Bytes.toBytes("row-t2" + i));
+ p2.addColumn(famName, qualName, Bytes.toBytes("val" + i));
+ t2.put(p2);
+ }
+
+ Assert.assertThat(TEST_UTIL.countRows(t2), CoreMatchers.equalTo(NB_ROWS_IN_BATCH + 5));
+ t2.close();
+
+ // #3 - incremental backup for multiple tables
+ String tablesetIncMultiple =
+ table1.getNameAsString() + BackupRestoreConstants.TABLENAME_DELIMITER_IN_COMMAND
+ + table2.getNameAsString() + BackupRestoreConstants.TABLENAME_DELIMITER_IN_COMMAND
+ + table3.getNameAsString();
+
+ String backupIdIncMultiple = BackupClient.create("incremental", BACKUP_ROOT_DIR,
+ tablesetIncMultiple, null);
+ assertTrue(checkSucceeded(backupIdIncMultiple));
+
+
+ // #4 - restore full backup for all tables, without overwrite
+ String[] tablesRestoreFull =
+ new String[] { table1.getNameAsString(), table2.getNameAsString(),
+ table3.getNameAsString(), table4.getNameAsString() };
+
+ String[] tablesMapFull =
+ new String[] { table1_restore, table2_restore, table3_restore, table4_restore };
+
+ hbfs = new HBackupFileSystem(conf1, new Path(BACKUP_ROOT_DIR), backupIdFull);
+ RestoreClient.restore_stage1(hbfs, BACKUP_ROOT_DIR, backupIdFull, false, false,
+ tablesRestoreFull,
+ tablesMapFull, false);
+
+ // #5.1 - check tables for full restore
+ HBaseAdmin hAdmin = TEST_UTIL.getHBaseAdmin();
+ assertTrue(hAdmin.tableExists(table1_restore));
+ assertTrue(hAdmin.tableExists(table2_restore));
+ assertTrue(hAdmin.tableExists(table3_restore));
+ assertTrue(hAdmin.tableExists(table4_restore));
+
+ hAdmin.close();
+
+ // #5.2 - checking row count of tables for full restore
+ HTable hTable = (HTable) conn.getTable(TableName.valueOf(table1_restore));
+ Assert.assertThat(TEST_UTIL.countRows(hTable), CoreMatchers.equalTo(NB_ROWS_IN_BATCH));
+ hTable.close();
+
+ hTable = (HTable) conn.getTable(TableName.valueOf(table2_restore));
+ Assert.assertThat(TEST_UTIL.countRows(hTable), CoreMatchers.equalTo(NB_ROWS_IN_BATCH));
+ hTable.close();
+
+ hTable = (HTable) conn.getTable(TableName.valueOf(table3_restore));
+ Assert.assertThat(TEST_UTIL.countRows(hTable), CoreMatchers.equalTo(0));
+ hTable.close();
+
+ hTable = (HTable) conn.getTable(TableName.valueOf(table4_restore));
+ Assert.assertThat(TEST_UTIL.countRows(hTable), CoreMatchers.equalTo(0));
+ hTable.close();
+
+ // #6 - restore incremental backup for multiple tables, with overwrite
+ String[] tablesRestoreIncMultiple =
+ new String[]
+ { table1.getNameAsString(), table2.getNameAsString(), table3.getNameAsString() };
+ String[] tablesMapIncMultiple =
+ new String[] { table1_restore, table2_restore, table3_restore };
+ hbfs = new HBackupFileSystem(conf1, new Path(BACKUP_ROOT_DIR), backupIdIncMultiple);
+ RestoreClient.restore_stage1(hbfs, BACKUP_ROOT_DIR, backupIdIncMultiple, false, false,
+ tablesRestoreIncMultiple, tablesMapIncMultiple, true);
+
+ hTable = (HTable) conn.getTable(TableName.valueOf(table1_restore));
+ Assert.assertThat(TEST_UTIL.countRows(hTable), CoreMatchers.equalTo(NB_ROWS_IN_BATCH * 2));
+ hTable.close();
+
+ hTable = (HTable) conn.getTable(TableName.valueOf(table2_restore));
+ Assert.assertThat(TEST_UTIL.countRows(hTable), CoreMatchers.equalTo(NB_ROWS_IN_BATCH + 5));
+ hTable.close();
+
+ hTable = (HTable) conn.getTable(TableName.valueOf(table3_restore));
+ Assert.assertThat(TEST_UTIL.countRows(hTable), CoreMatchers.equalTo(0));
+ hTable.close();
+
+ // #7 - incremental backup for single, empty table
+
+ String tablesetIncEmpty = table4.getNameAsString();
+ String backupIdIncEmpty =
+ BackupClient.create("incremental", BACKUP_ROOT_DIR, tablesetIncEmpty, null);
+ assertTrue(checkSucceeded(backupIdIncEmpty));
+
+
+ // #8 - restore incremental backup for single empty table, with overwrite
+ String[] tablesRestoreIncEmpty = new String[] { table4.getNameAsString() };
+ String[] tablesMapIncEmpty = new String[] { table4_restore };
+ hbfs = new HBackupFileSystem(conf1, new Path(BACKUP_ROOT_DIR), backupIdIncEmpty);
+ RestoreClient.restore_stage1(hbfs, BACKUP_ROOT_DIR, backupIdIncEmpty, false, false,
+ tablesRestoreIncEmpty,
+ tablesMapIncEmpty, true);
+
+ hTable = (HTable) conn.getTable(TableName.valueOf(table4_restore));
+ Assert.assertThat(TEST_UTIL.countRows(hTable), CoreMatchers.equalTo(0));
+ hTable.close();
+ conn.close();
+
+ }
+
+}
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestRemoteBackup.java hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestRemoteBackup.java
new file mode 100644
index 0000000..03822a2
--- /dev/null
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestRemoteBackup.java
@@ -0,0 +1,44 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
+ * agreements. See the NOTICE file distributed with this work for additional information regarding
+ * copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License. You may obtain a
+ * copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable
+ * law or agreed to in writing, software distributed under the License is distributed on an "AS IS"
+ * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License
+ * for the specific language governing permissions and limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.backup;
+
+import static org.junit.Assert.assertTrue;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.testclassification.LargeTests;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+@Category(LargeTests.class)
+public class TestRemoteBackup extends TestBackupBase {
+
+ private static final Log LOG = LogFactory.getLog(TestRemoteBackup.class);
+
+ /**
+ * Verify that a remote full backup is created on a single table with data correctly.
+ * @throws Exception
+ */
+ @Test
+ public void testFullBackupRemote() throws Exception {
+
+ LOG.info("test remote full backup on a single table");
+
+ String backupId =
+ BackupClient.create("full", BACKUP_REMOTE_ROOT_DIR, table1.getNameAsString(), null);
+ LOG.info("backup complete");
+ assertTrue(checkSucceeded(backupId));
+ }
+
+}
\ No newline at end of file
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestRemoteRestore.java hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestRemoteRestore.java
new file mode 100644
index 0000000..e2b4d0a
--- /dev/null
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestRemoteRestore.java
@@ -0,0 +1,54 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
+ * agreements. See the NOTICE file distributed with this work for additional information regarding
+ * copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License. You may obtain a
+ * copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable
+ * law or agreed to in writing, software distributed under the License is distributed on an "AS IS"
+ * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License
+ * for the specific language governing permissions and limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.backup;
+
+import static org.junit.Assert.assertTrue;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.testclassification.LargeTests;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+@Category(LargeTests.class)
+public class TestRemoteRestore extends TestBackupBase {
+
+ private static final Log LOG = LogFactory.getLog(TestRemoteRestore.class);
+
+ /**
+ * Verify that a remote restore on a single table is successful.
+ * @throws Exception
+ */
+ @Test
+ public void testFullRestoreRemote() throws Exception {
+
+ LOG.info("test remote full backup on a single table");
+ String backupId =
+ BackupClient.create("full", BACKUP_REMOTE_ROOT_DIR, table1.getNameAsString(), null);
+ LOG.info("backup complete");
+ assertTrue(checkSucceeded(backupId));
+
+ String[] tableset = new String[] { table1.getNameAsString() };
+ String[] tablemap = new String[] { table1_restore };
+ Path path = new Path(BACKUP_REMOTE_ROOT_DIR);
+ HBackupFileSystem hbfs = new HBackupFileSystem(conf1, path, backupId);
+ RestoreClient.restore_stage1(hbfs, BACKUP_REMOTE_ROOT_DIR, backupId, false, false, tableset,
+ tablemap, false);
+ HBaseAdmin hba = TEST_UTIL.getHBaseAdmin();
+ assertTrue(hba.tableExists(table1_restore));
+ TEST_UTIL.deleteTable(table1_restore);
+ hba.close();
+ }
+
+}
\ No newline at end of file
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestRestoreBoundaryTests.java hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestRestoreBoundaryTests.java
new file mode 100644
index 0000000..593c774
--- /dev/null
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestRestoreBoundaryTests.java
@@ -0,0 +1,84 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.backup;
+
+import static org.junit.Assert.assertTrue;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.testclassification.LargeTests;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+@Category(LargeTests.class)
+public class TestRestoreBoundaryTests extends TestBackupBase {
+
+ private static final Log LOG = LogFactory.getLog(TestRestoreBoundaryTests.class);
+
+ /**
+ * Verify that a single empty table is restored to a new table
+ * @throws Exception
+ */
+ @Test
+ public void testFullRestoreSingleEmpty() throws Exception {
+
+ LOG.info("test full restore on a single table empty table");
+ String backupId =
+ BackupClient.create("full", BACKUP_ROOT_DIR, table1.getNameAsString(), null);
+ LOG.info("backup complete");
+ assertTrue(checkSucceeded(backupId));
+ String[] tableset = new String[] { table1.getNameAsString() };
+ String[] tablemap = new String[] { table1_restore };
+ Path path = new Path(BACKUP_ROOT_DIR);
+ HBackupFileSystem hbfs = new HBackupFileSystem(conf1, path, backupId);
+ RestoreClient.restore_stage1(hbfs, BACKUP_ROOT_DIR, backupId, false, false, tableset, tablemap,
+ false);
+ HBaseAdmin hba = TEST_UTIL.getHBaseAdmin();
+ assertTrue(hba.tableExists(table1_restore));
+ TEST_UTIL.deleteTable(table1_restore);
+ }
+
+ /**
+ * Verify that multiple tables are restored to new tables.
+ * @throws Exception
+ */
+ @Test
+ public void testFullRestoreMultipleEmpty() throws Exception {
+ LOG.info("create full backup image on multiple tables");
+ String tableset =
+ table2.getNameAsString() + BackupRestoreConstants.TABLENAME_DELIMITER_IN_COMMAND
+ + table3.getNameAsString();
+ String backupId = BackupClient.create("full", BACKUP_ROOT_DIR, tableset, null);
+ assertTrue(checkSucceeded(backupId));
+ String[] restore_tableset = new String[] { table2.getNameAsString(), table3.getNameAsString() };
+ String[] tablemap = new String[] { table2_restore, table3_restore };
+ Path path = new Path(BACKUP_ROOT_DIR);
+ HBackupFileSystem hbfs = new HBackupFileSystem(conf1, path, backupId);
+ RestoreClient.restore_stage1(hbfs, BACKUP_ROOT_DIR, backupId, false, false, restore_tableset,
+ tablemap, false);
+ HBaseAdmin hba = TEST_UTIL.getHBaseAdmin();
+ assertTrue(hba.tableExists(table2_restore));
+ assertTrue(hba.tableExists(table3_restore));
+ TEST_UTIL.deleteTable(table2_restore);
+ TEST_UTIL.deleteTable(table3_restore);
+ }
+}
\ No newline at end of file