From adf0a6606c78096b56b29a7330b9f387b681c4d9 Mon Sep 17 00:00:00 2001 From: Artem Ervits Date: Thu, 15 Nov 2018 15:33:16 -0500 Subject: [PATCH] Enable MOB in backup / restore test involving incremental backup --- .../hadoop/hbase/backup/TestIncrementalBackup.java | 237 ++++++++++----------- 1 file changed, 108 insertions(+), 129 deletions(-) diff --git hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackup.java hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackup.java index 48e2c5eb8b..d400d7b227 100644 --- hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackup.java +++ hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackup.java @@ -63,7 +63,7 @@ public class TestIncrementalBackup extends TestBackupBase { @Parameterized.Parameters public static Collection data() { provider = "multiwal"; - List params = new ArrayList(); + List params = new ArrayList<>(); params.add(new Object[] { Boolean.TRUE }); return params; } @@ -81,136 +81,115 @@ public class TestIncrementalBackup extends TestBackupBase { List tables = Lists.newArrayList(table1, table2); final byte[] fam3Name = Bytes.toBytes("f3"); + final byte[] mobName = Bytes.toBytes("mob"); table1Desc.addFamily(new HColumnDescriptor(fam3Name)); + HColumnDescriptor mobHcd = new HColumnDescriptor(mobName); + mobHcd.setMobEnabled(true); + mobHcd.setMobThreshold(0L); + table1Desc.addFamily(mobHcd); HBaseTestingUtility.modifyTableSync(TEST_UTIL.getAdmin(), table1Desc); - Connection conn = ConnectionFactory.createConnection(conf1); - int NB_ROWS_FAM3 = 6; - insertIntoTable(conn, table1, fam3Name, 3, NB_ROWS_FAM3).close(); - - HBaseAdmin admin = null; - admin = (HBaseAdmin) conn.getAdmin(); - BackupAdminImpl client = new BackupAdminImpl(conn); - - BackupRequest request = createBackupRequest(BackupType.FULL, tables, BACKUP_ROOT_DIR); - String backupIdFull = client.backupTables(request); - - 
assertTrue(checkSucceeded(backupIdFull)); - - // #2 - insert some data to table - HTable t1 = insertIntoTable(conn, table1, famName, 1, ADD_ROWS); - LOG.debug("writing " + ADD_ROWS + " rows to " + table1); - - Assert.assertEquals(TEST_UTIL.countRows(t1), NB_ROWS_IN_BATCH + ADD_ROWS + NB_ROWS_FAM3); - t1.close(); - LOG.debug("written " + ADD_ROWS + " rows to " + table1); - - HTable t2 = (HTable) conn.getTable(table2); - Put p2; - for (int i = 0; i < 5; i++) { - p2 = new Put(Bytes.toBytes("row-t2" + i)); - p2.addColumn(famName, qualName, Bytes.toBytes("val" + i)); - t2.put(p2); - } - - Assert.assertEquals(TEST_UTIL.countRows(t2), NB_ROWS_IN_BATCH + 5); - t2.close(); - LOG.debug("written " + 5 + " rows to " + table2); - // split table1 - MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster(); - List regions = cluster.getRegions(table1); - - byte[] name = regions.get(0).getRegionInfo().getRegionName(); - long startSplitTime = EnvironmentEdgeManager.currentTime(); - try { - admin.splitRegion(name); - } catch (IOException e) { - //although split fail, this may not affect following check - //In old split without AM2, if region's best split key is not found, - //there are not exception thrown. But in current API, exception - //will be thrown. 
- LOG.debug("region is not splittable, because " + e); - } - - while (!admin.isTableAvailable(table1)) { - Thread.sleep(100); + try (Connection conn = ConnectionFactory.createConnection(conf1)) { + int NB_ROWS_FAM3 = 6; + insertIntoTable(conn, table1, fam3Name, 3, NB_ROWS_FAM3).close(); + insertIntoTable(conn, table1, mobName, 3, NB_ROWS_FAM3).close(); + HBaseAdmin admin = null; + admin = (HBaseAdmin) conn.getAdmin(); + BackupAdminImpl client = new BackupAdminImpl(conn); + BackupRequest request = createBackupRequest(BackupType.FULL, tables, BACKUP_ROOT_DIR); + String backupIdFull = client.backupTables(request); + assertTrue(checkSucceeded(backupIdFull)); + // #2 - insert some data to table + HTable t1 = insertIntoTable(conn, table1, famName, 1, ADD_ROWS); + LOG.debug("writing " + ADD_ROWS + " rows to " + table1); + Assert.assertEquals(HBaseTestingUtility.countRows(t1), + NB_ROWS_IN_BATCH + ADD_ROWS + NB_ROWS_FAM3); + t1.close(); + LOG.debug("written " + ADD_ROWS + " rows to " + table1); + HTable t2 = (HTable) conn.getTable(table2); + Put p2; + for (int i = 0; i < 5; i++) { + p2 = new Put(Bytes.toBytes("row-t2" + i)); + p2.addColumn(famName, qualName, Bytes.toBytes("val" + i)); + t2.put(p2); + } + Assert.assertEquals(NB_ROWS_IN_BATCH + 5, HBaseTestingUtility.countRows(t2)); + t2.close(); + LOG.debug("written " + 5 + " rows to " + table2); + // split table1 + MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster(); + List regions = cluster.getRegions(table1); + byte[] name = regions.get(0).getRegionInfo().getRegionName(); + long startSplitTime = EnvironmentEdgeManager.currentTime(); + try { + admin.splitRegion(name); + } catch (IOException e) { + //although the split fails, this may not affect the following check + //In the old split without AM2, if the region's best split key is not found, + //no exception was thrown. But in the current API, an exception + //will be thrown. 
+ LOG.debug("region is not splittable, because " + e); + } + while (!admin.isTableAvailable(table1)) { + Thread.sleep(100); + } + long endSplitTime = EnvironmentEdgeManager.currentTime(); + // split finished + LOG.debug("split finished in =" + (endSplitTime - startSplitTime)); + // #3 - incremental backup for multiple tables + tables = Lists.newArrayList(table1, table2); + request = createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR); + String backupIdIncMultiple = client.backupTables(request); + assertTrue(checkSucceeded(backupIdIncMultiple)); + // add column family f2 to table1 + final byte[] fam2Name = Bytes.toBytes("f2"); + table1Desc.addFamily(new HColumnDescriptor(fam2Name)); + // drop column family f3 + table1Desc.removeFamily(fam3Name); + HBaseTestingUtility.modifyTableSync(TEST_UTIL.getAdmin(), table1Desc); + int NB_ROWS_FAM2 = 7; + HTable t3 = insertIntoTable(conn, table1, fam2Name, 2, NB_ROWS_FAM2); + t3.close(); + // Wait for 5 sec to make sure that old WALs were deleted + Thread.sleep(5000); + // #3 - incremental backup for multiple tables + request = createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR); + String backupIdIncMultiple2 = client.backupTables(request); + assertTrue(checkSucceeded(backupIdIncMultiple2)); + // #4 - restore full backup for all tables + TableName[] tablesRestoreFull = new TableName[] { table1, table2 }; + TableName[] tablesMapFull = new TableName[] { table1_restore, table2_restore }; + LOG.debug("Restoring full " + backupIdFull); + client.restore(BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, backupIdFull, false, + tablesRestoreFull, tablesMapFull, true)); + // #5.1 - check tables for full restore + HBaseAdmin hAdmin = TEST_UTIL.getHBaseAdmin(); + assertTrue(hAdmin.tableExists(table1_restore)); + assertTrue(hAdmin.tableExists(table2_restore)); + hAdmin.close(); + // #5.2 - checking row count of tables for full restore + HTable hTable = (HTable) conn.getTable(table1_restore); + 
Assert.assertEquals(HBaseTestingUtility.countRows(hTable), NB_ROWS_IN_BATCH + NB_ROWS_FAM3); + hTable.close(); + hTable = (HTable) conn.getTable(table2_restore); + Assert.assertEquals(NB_ROWS_IN_BATCH, HBaseTestingUtility.countRows(hTable)); + hTable.close(); + // #6 - restore incremental backup for multiple tables, with overwrite + TableName[] tablesRestoreIncMultiple = new TableName[] { table1, table2 }; + TableName[] tablesMapIncMultiple = new TableName[] { table1_restore, table2_restore }; + client.restore(BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, backupIdIncMultiple2, + false, tablesRestoreIncMultiple, tablesMapIncMultiple, true)); + hTable = (HTable) conn.getTable(table1_restore); + LOG.debug("After incremental restore: " + hTable.getDescriptor()); + LOG.debug("f1 has " + TEST_UTIL.countRows(hTable, famName) + " rows"); + Assert.assertEquals(TEST_UTIL.countRows(hTable, famName), NB_ROWS_IN_BATCH + ADD_ROWS); + LOG.debug("f2 has " + TEST_UTIL.countRows(hTable, fam2Name) + " rows"); + Assert.assertEquals(TEST_UTIL.countRows(hTable, fam2Name), NB_ROWS_FAM2); + hTable.close(); + hTable = (HTable) conn.getTable(table2_restore); + Assert.assertEquals(NB_ROWS_IN_BATCH + 5, HBaseTestingUtility.countRows(hTable)); + hTable.close(); + admin.close(); } - - long endSplitTime = EnvironmentEdgeManager.currentTime(); - - // split finished - LOG.debug("split finished in =" + (endSplitTime - startSplitTime)); - - // #3 - incremental backup for multiple tables - tables = Lists.newArrayList(table1, table2); - request = createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR); - String backupIdIncMultiple = client.backupTables(request); - assertTrue(checkSucceeded(backupIdIncMultiple)); - - // add column family f2 to table1 - final byte[] fam2Name = Bytes.toBytes("f2"); - table1Desc.addFamily(new HColumnDescriptor(fam2Name)); - // drop column family f3 - table1Desc.removeFamily(fam3Name); - HBaseTestingUtility.modifyTableSync(TEST_UTIL.getAdmin(), 
table1Desc); - - int NB_ROWS_FAM2 = 7; - HTable t3 = insertIntoTable(conn, table1, fam2Name, 2, NB_ROWS_FAM2); - t3.close(); - // Wait for 5 sec to make sure that old WALs were deleted - Thread.sleep(5000); - - // #3 - incremental backup for multiple tables - request = createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR); - String backupIdIncMultiple2 = client.backupTables(request); - assertTrue(checkSucceeded(backupIdIncMultiple2)); - - // #4 - restore full backup for all tables - TableName[] tablesRestoreFull = new TableName[] { table1, table2 }; - - TableName[] tablesMapFull = new TableName[] { table1_restore, table2_restore }; - - LOG.debug("Restoring full " + backupIdFull); - client.restore(BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, backupIdFull, false, - tablesRestoreFull, tablesMapFull, true)); - - // #5.1 - check tables for full restore - HBaseAdmin hAdmin = TEST_UTIL.getHBaseAdmin(); - assertTrue(hAdmin.tableExists(table1_restore)); - assertTrue(hAdmin.tableExists(table2_restore)); - - hAdmin.close(); - - // #5.2 - checking row count of tables for full restore - HTable hTable = (HTable) conn.getTable(table1_restore); - Assert.assertEquals(TEST_UTIL.countRows(hTable), NB_ROWS_IN_BATCH + NB_ROWS_FAM3); - hTable.close(); - - hTable = (HTable) conn.getTable(table2_restore); - Assert.assertEquals(TEST_UTIL.countRows(hTable), NB_ROWS_IN_BATCH); - hTable.close(); - - // #6 - restore incremental backup for multiple tables, with overwrite - TableName[] tablesRestoreIncMultiple = new TableName[] { table1, table2 }; - TableName[] tablesMapIncMultiple = new TableName[] { table1_restore, table2_restore }; - client.restore(BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, backupIdIncMultiple2, - false, tablesRestoreIncMultiple, tablesMapIncMultiple, true)); - - hTable = (HTable) conn.getTable(table1_restore); - LOG.debug("After incremental restore: " + hTable.getDescriptor()); - LOG.debug("f1 has " + TEST_UTIL.countRows(hTable, famName) + " 
rows"); - Assert.assertEquals(TEST_UTIL.countRows(hTable, famName), NB_ROWS_IN_BATCH + ADD_ROWS); - LOG.debug("f2 has " + TEST_UTIL.countRows(hTable, fam2Name) + " rows"); - Assert.assertEquals(TEST_UTIL.countRows(hTable, fam2Name), NB_ROWS_FAM2); - hTable.close(); - - hTable = (HTable) conn.getTable(table2_restore); - Assert.assertEquals(TEST_UTIL.countRows(hTable), NB_ROWS_IN_BATCH + 5); - hTable.close(); - - admin.close(); - conn.close(); - } - -} +} \ No newline at end of file -- 2.16.2