diff --git hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestBackupRestore.java hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestBackupRestore.java
new file mode 100644
index 0000000..42a39b5
--- /dev/null
+++ hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestBackupRestore.java
@@ -0,0 +1,364 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Random;
+import java.util.Set;
+
+import org.apache.commons.cli.CommandLine;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.backup.BackupInfo;
+import org.apache.hadoop.hbase.backup.BackupRequest;
+import org.apache.hadoop.hbase.backup.BackupType;
+import org.apache.hadoop.hbase.backup.RestoreRequest;
+import org.apache.hadoop.hbase.backup.BackupInfo.BackupState;
+import org.apache.hadoop.hbase.backup.impl.BackupSystemTable;
+import org.apache.hadoop.hbase.chaos.factories.MonkeyFactory;
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.BackupAdmin;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.testclassification.IntegrationTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.RegionSplitter;
+import org.apache.hadoop.hbase.util.RegionSplitter.SplitAlgorithm;
+import org.apache.hadoop.util.ToolRunner;
+import org.hamcrest.CoreMatchers;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import com.google.common.base.Objects;
+import com.google.common.collect.Lists;
+
+import static org.junit.Assert.assertTrue;
+
+/**
+ * An integration test to detect regressions in HBASE-7912 (HBase backup and restore).
+ * It creates tables with many regions, loads data, performs a series of backup and restore
+ * operations, and then verifies the restored data.
+ * @see <a href="https://issues.apache.org/jira/browse/HBASE-7912">HBASE-7912</a>
+ */
+@Category(IntegrationTests.class)
+public class IntegrationTestBackupRestore extends IntegrationTestBase {
+
+  private static final String CLASS_NAME = IntegrationTestBackupRestore.class.getSimpleName();
+
+  protected static final Log LOG = LogFactory.getLog(IntegrationTestBackupRestore.class);
+  protected static final TableName TABLE_NAME1 = TableName.valueOf(CLASS_NAME + ".table1");
+  protected static final TableName TABLE_NAME2 = TableName.valueOf(CLASS_NAME + ".table2");
+  protected static final String COLUMN_NAME = "f";
+  protected static final String REGION_COUNT_KEY = String.format("hbase.%s.regions", CLASS_NAME);
+  protected static final String REGIONSERVER_COUNT_KEY =
+      String.format("hbase.%s.regionServers", CLASS_NAME);
+
+  protected static final int DEFAULT_REGION_COUNT = 50;
+  protected static final int DEFAULT_REGIONSERVER_COUNT = 5;
+
+  protected static int regionCount;
+  protected static int regionServerCount;
+
+  protected static final String NB_ROWS_IN_BATCH_KEY = "rowsInBatch.key";
+  protected static final int DEFAULT_NB_ROWS_IN_BATCH = 20000;
+  private static int rowsInBatch;
+
+  private static String BACKUP_ROOT_DIR = "backupIT";
+  private Random random = new Random();
+
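+  /**
+   * Reads the region, region server and row count settings from the configuration, starts the
+   * cluster, drops any leftover test tables, and waits for the backup system table.
+   */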
+  @Before
+  public void setUp() throws Exception {
+    util = new IntegrationTestingUtility();
+    regionCount = util.getConfiguration().getInt(REGION_COUNT_KEY, DEFAULT_REGION_COUNT);
+    regionServerCount =
+        util.getConfiguration().getInt(REGIONSERVER_COUNT_KEY, DEFAULT_REGIONSERVER_COUNT);
+    rowsInBatch = util.getConfiguration().getInt(NB_ROWS_IN_BATCH_KEY, DEFAULT_NB_ROWS_IN_BATCH);
+
+    LOG.info(String.format("Initializing cluster with %d region servers.", regionServerCount));
+    util.initializeCluster(regionServerCount);
+    LOG.info("Cluster initialized");
+
+    Admin admin = util.getConnection().getAdmin();
+    deleteIfExists(admin, TABLE_NAME1);
+    deleteIfExists(admin, TABLE_NAME2);
+    waitForSystemTable();
+    LOG.info("Cluster ready");
+  }
+
+  private void deleteIfExists(Admin admin, TableName table) throws IOException {
+    if (admin.tableExists(table)) {
+      LOG.info(String.format("Deleting existing table %s.", table));
+      if (admin.isTableEnabled(table)) {
+        admin.disableTable(table);
+      }
+      admin.deleteTable(table);
+      LOG.info(String.format("Existing table %s deleted.", table));
+    }
+  }
+
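+  /** Blocks until the backup system table exists and is available. */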
+  public void waitForSystemTable() throws Exception {
+    waitForTable(BackupSystemTable.getTableName());
+  }
+
+  public void waitForTable(TableName table) throws Exception {
+    try (Admin admin = util.getAdmin()) {
+      while (!admin.tableExists(table) || !admin.isTableAvailable(table)) {
+        Thread.sleep(1000);
+      }
+    }
+    LOG.debug("Table " + table + " exists and is available");
+  }
+
+  @After
+  public void tearDown() throws IOException {
+    LOG.info("Cleaning up after test.");
+    Admin admin = util.getConnection().getAdmin();
+    deleteIfExists(admin, TABLE_NAME1);
+    deleteIfExists(admin, TABLE_NAME2);
+    LOG.info("Restoring cluster.");
+    util.restoreCluster();
+    LOG.info("Cluster restored.");
+  }
+
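+  /**
+   * Sets the backup root directory under the cluster's default filesystem, creates two
+   * pre-split tables, and runs the backup/restore scenario in {@link #runTest()}.
+   */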
+  @Test
+  public void testBackupRestore() throws Exception {
+    BACKUP_ROOT_DIR = util.getConfiguration().get("fs.defaultFS") + "/backupIT";
+    try {
+      createTable(TABLE_NAME1);
+      createTable(TABLE_NAME2);
+      runTest();
+    } catch (Exception e) {
+      // Rethrow so that a failure actually fails the test instead of only being logged.
+      LOG.error("Failed", e);
+      throw e;
+    }
+  }
+
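+  /**
+   * Creates a table with a single column family, pre-split into {@code regionCount} regions
+   * using {@link RegionSplitter.UniformSplit}, and waits for it to become available.
+   */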
+  private void createTable(TableName tableName) throws Exception {
+    long startTime, endTime;
+    HTableDescriptor desc = new HTableDescriptor(tableName);
+    desc.addFamily(new HColumnDescriptor(COLUMN_NAME));
+    SplitAlgorithm algo = new RegionSplitter.UniformSplit();
+    byte[][] splits = algo.split(regionCount);
+
+    LOG.info(String.format("Creating table %s with %d splits.", tableName, regionCount));
+    startTime = System.currentTimeMillis();
+
+    Admin admin = util.getConnection().getAdmin();
+    admin.createTable(desc, splits);
+    waitForTable(tableName);
+    endTime = System.currentTimeMillis();
+    LOG.info(String.format("Pre-split table created successfully in %dms.", endTime - startTime));
+  }
+
+  private byte[] getRandomArray(int size) {
+    byte[] arr = new byte[size];
+    random.nextBytes(arr);
+    return arr;
+  }
+
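+  /** Writes {@code numRows} rows with random 100-byte row keys into the given table. */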
+  private void loadData(TableName table, int numRows) throws IOException {
+    Connection conn = util.getConnection();
+    HTable t1 = (HTable) conn.getTable(table);
+    Put p1;
+    for (int i = 0; i < numRows; i++) {
+      p1 = new Put(getRandomArray(100));
+      p1.addColumn(Bytes.toBytes(COLUMN_NAME), Bytes.toBytes("q"), Bytes.toBytes("val" + i));
+      t1.put(p1);
+    }
+    t1.flushCommits();
+    t1.close();
+  }
+
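+  /**
+   * Loads a batch of rows into both tables, takes a full backup, loads a second batch, takes an
+   * incremental backup, restores the full image and verifies one batch per table, then restores
+   * the incremental image with overwrite and verifies two batches per table.
+   */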
+  private void runTest() throws IOException {
+    Connection conn = util.getConnection();
+    // #0 - insert some data into tables TABLE_NAME1 and TABLE_NAME2
+    loadData(TABLE_NAME1, rowsInBatch);
+    loadData(TABLE_NAME2, rowsInBatch);
+
+    // #1 - create full backup for all tables
+    LOG.info("create full backup image for all tables");
+
+    List<TableName> tables = Lists.newArrayList(TABLE_NAME1, TABLE_NAME2);
+    HBaseAdmin admin = (HBaseAdmin) conn.getAdmin();
+
+    BackupRequest request = new BackupRequest();
+    request.setBackupType(BackupType.FULL).setTableList(tables)
+        .setTargetRootDir(BACKUP_ROOT_DIR);
+    String backupIdFull = admin.getBackupAdmin().backupTables(request);
+
+    assertTrue(checkSucceeded(backupIdFull));
+
+    // #2 - insert a second batch of data into both tables
+    loadData(TABLE_NAME1, rowsInBatch);
+    loadData(TABLE_NAME2, rowsInBatch);
+
+    HTable t1 = (HTable) conn.getTable(TABLE_NAME1);
+    Assert.assertThat(util.countRows(t1), CoreMatchers.equalTo(rowsInBatch * 2));
+    t1.close();
+    HTable t2 = (HTable) conn.getTable(TABLE_NAME2);
+    Assert.assertThat(util.countRows(t2), CoreMatchers.equalTo(rowsInBatch * 2));
+    t2.close();
+
+    // #3 - incremental backup for both tables
+    tables = Lists.newArrayList(TABLE_NAME1, TABLE_NAME2);
+    request = new BackupRequest();
+    request.setBackupType(BackupType.INCREMENTAL).setTableList(tables)
+        .setTargetRootDir(BACKUP_ROOT_DIR);
+    String backupIdIncMultiple = admin.getBackupAdmin().backupTables(request);
+    assertTrue(checkSucceeded(backupIdIncMultiple));
+
+    // #4 - restore full backup for all tables, without overwrite
+    TableName[] tablesRestoreFull = new TableName[] { TABLE_NAME1, TABLE_NAME2 };
+
+    BackupAdmin client = admin.getBackupAdmin();
+    client.restore(createRestoreRequest(BACKUP_ROOT_DIR, backupIdFull, false, tablesRestoreFull,
+        null, false));
+
+    // #5.1 - check that both tables exist after the full restore
+    Admin hAdmin = util.getConnection().getAdmin();
+    assertTrue(hAdmin.tableExists(TABLE_NAME1));
+    assertTrue(hAdmin.tableExists(TABLE_NAME2));
+    hAdmin.close();
+
+    // #5.2 - check row counts of both tables after the full restore
+    HTable hTable = (HTable) conn.getTable(TABLE_NAME1);
+    Assert.assertThat(util.countRows(hTable), CoreMatchers.equalTo(rowsInBatch));
+    hTable.close();
+    hTable = (HTable) conn.getTable(TABLE_NAME2);
+    Assert.assertThat(util.countRows(hTable), CoreMatchers.equalTo(rowsInBatch));
+    hTable.close();
+
+    // #6 - restore incremental backup for both tables, with overwrite
+    TableName[] tablesRestoreIncMultiple = new TableName[] { TABLE_NAME1, TABLE_NAME2 };
+    client.restore(createRestoreRequest(BACKUP_ROOT_DIR, backupIdIncMultiple, false,
+        tablesRestoreIncMultiple, null, true));
+
+    hTable = (HTable) conn.getTable(TABLE_NAME1);
+    Assert.assertThat(util.countRows(hTable), CoreMatchers.equalTo(rowsInBatch * 2));
+    hTable.close();
+    hTable = (HTable) conn.getTable(TABLE_NAME2);
+    Assert.assertThat(util.countRows(hTable), CoreMatchers.equalTo(rowsInBatch * 2));
+    hTable.close();
+    admin.close();
+  }
+
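+  /** Returns true if the backup identified by {@code backupId} reached the COMPLETE state. */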
+  protected boolean checkSucceeded(String backupId) throws IOException {
+    BackupInfo status = getBackupContext(backupId);
+    if (status == null) {
+      return false;
+    }
+    return status.getState() == BackupState.COMPLETE;
+  }
+
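+  /** Reads the {@link BackupInfo} for the given backup id from the backup system table. */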
+  private BackupInfo getBackupContext(String backupId) throws IOException {
+    try (BackupSystemTable table = new BackupSystemTable(util.getConnection())) {
+      return table.readBackupInfo(backupId);
+    }
+  }
+
+  /**
+   * Builds a restore request for the given backup image, source tables and target tables.
+   */
+  public RestoreRequest createRestoreRequest(String backupRootDir, String backupId, boolean check,
+      TableName[] fromTables, TableName[] toTables, boolean isOverwrite) {
+    RestoreRequest request = new RestoreRequest();
+    request.setBackupRootDir(backupRootDir).setBackupId(backupId).setCheck(check)
+        .setFromTables(fromTables).setToTables(toTables).setOverwrite(isOverwrite);
+    return request;
+  }
+
+  @Override
+  public void setUpCluster() throws Exception {
+    util = getTestingUtil(getConf());
+    LOG.debug("Initializing/checking cluster has " + regionServerCount + " servers");
+    util.initializeCluster(regionServerCount);
+    LOG.debug("Done initializing/checking cluster");
+  }
+
+  @Override
+  public int runTestFromCommandLine() throws Exception {
+    testBackupRestore();
+    return 0;
+  }
+
+  @Override
+  public TableName getTablename() {
+    // not used by this test
+    return null;
+  }
+
+  @Override
+  protected Set<String> getColumnFamilies() {
+    // not used by this test
+    return null;
+  }
+
+  @Override
+  protected MonkeyFactory getDefaultMonkeyFactory() {
+    // Run with no monkey
+    return MonkeyFactory.getFactory(MonkeyFactory.CALM);
+  }
+
+  @Override
+  protected void addOptions() {
+    addOptWithArg(REGIONSERVER_COUNT_KEY, "Total number of region servers. Default: '"
+        + DEFAULT_REGIONSERVER_COUNT + "'");
+    addOptWithArg(REGION_COUNT_KEY, "Total number of regions. Default: " + DEFAULT_REGION_COUNT);
+    addOptWithArg(NB_ROWS_IN_BATCH_KEY, "Total number of data rows to load per table and batch "
+        + "(two batches are loaded). Default: " + DEFAULT_NB_ROWS_IN_BATCH);
+  }
+
+  @Override
+  protected void processOptions(CommandLine cmd) {
+    regionCount = Integer.parseInt(cmd.getOptionValue(REGION_COUNT_KEY,
+        Integer.toString(DEFAULT_REGION_COUNT)));
+    regionServerCount = Integer.parseInt(cmd.getOptionValue(REGIONSERVER_COUNT_KEY,
+        Integer.toString(DEFAULT_REGIONSERVER_COUNT)));
+    rowsInBatch = Integer.parseInt(cmd.getOptionValue(NB_ROWS_IN_BATCH_KEY,
+        Integer.toString(DEFAULT_NB_ROWS_IN_BATCH)));
+
+    LOG.debug(Objects.toStringHelper("Parsed Options")
+        .add(REGION_COUNT_KEY, regionCount)
+        .add(REGIONSERVER_COUNT_KEY, regionServerCount)
+        .add(NB_ROWS_IN_BATCH_KEY, rowsInBatch)
+        .toString());
+  }
+
+  public static void main(String[] args) throws Exception {
+    Configuration conf = HBaseConfiguration.create();
+    IntegrationTestingUtility.setUseDistributedCluster(conf);
+    int status = ToolRunner.run(conf, new IntegrationTestBackupRestore(), args);
+    System.exit(status);
+  }
+}