From 26f8f064254fe1b293827d466092ea8d51c81bd4 Mon Sep 17 00:00:00 2001
From: Vladimir Rodionov
Date: Mon, 25 Jun 2018 16:05:47 -0700
Subject: [PATCH] HBASE-20631: Backup Merge command enhancement
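
The merge command no longer takes a positional, comma-separated list of
backup ids. Two options are supported instead:

  -d <days>        merge all incremental backup images completed within
                   the last <days> days
  -l <backup_ids>  merge an explicit comma-separated list of backup
                   image ids

Example invocations (a sketch of the intended usage derived from this
patch; the backup ids are illustrative placeholders):

  # merge every incremental backup image completed in the last 3 days
  hbase backup merge -d 3

  # merge an explicit list of backup images
  hbase backup merge -l backup_1467823988425,backup_1467827588425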
---
 .../apache/hadoop/hbase/backup/BackupDriver.java   |   4 +
 .../hbase/backup/BackupRestoreConstants.java       |   2 +
 .../hadoop/hbase/backup/impl/BackupCommands.java   |  83 +++++++++--
 .../hadoop/hbase/backup/TestBackupMerge.java       |   4 +-
 .../hbase/backup/TestBackupMergeCommand.java       | 166 +++++++++++++++++++++
 5 files changed, 245 insertions(+), 14 deletions(-)
 create mode 100644 hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupMergeCommand.java

diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupDriver.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupDriver.java
index 6644d89812..a52e99e494 100644
--- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupDriver.java
+++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupDriver.java
@@ -25,6 +25,8 @@ import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_DEBUG
 import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_KEEP;
 import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_KEEP_DESC;
 import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_LIST;
+import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_MERGE_DAYS;
+import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_MERGE_DAYS_DESC;
 import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_PATH;
 import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_PATH_DESC;
 import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_RECORD_NUMBER;
@@ -161,6 +163,8 @@ public class BackupDriver extends AbstractHBaseTool {
     addOptWithArg(OPTION_SET, OPTION_SET_DESC);
     addOptWithArg(OPTION_PATH, OPTION_PATH_DESC);
     addOptWithArg(OPTION_KEEP, OPTION_KEEP_DESC);
+    addOptWithArg(OPTION_MERGE_DAYS, OPTION_MERGE_DAYS_DESC);
+
     addOptWithArg(OPTION_YARN_QUEUE_NAME, OPTION_YARN_QUEUE_NAME_DESC);
   }

diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupRestoreConstants.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupRestoreConstants.java
index 16ec3d2030..6e26f06299 100644
--- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupRestoreConstants.java
+++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupRestoreConstants.java
@@ -84,6 +84,8 @@ public interface BackupRestoreConstants {
   String OPTION_KEEP = "k";
   String OPTION_KEEP_DESC =
       "Specifies maximum age of backup (in days) to keep during bulk delete";
+  String OPTION_MERGE_DAYS = "d";
+  String OPTION_MERGE_DAYS_DESC = "Specifies period (in days) of incremental backups to merge.";
 
   String OPTION_TABLE_MAPPING = "m";
   String OPTION_TABLE_MAPPING_DESC =

diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupCommands.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupCommands.java
index fc2f1a239f..a85601388e 100644
--- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupCommands.java
+++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupCommands.java
@@ -26,6 +26,8 @@ import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_DEBUG
 import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_KEEP;
 import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_KEEP_DESC;
 import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_LIST;
+import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_MERGE_DAYS;
+import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_MERGE_DAYS_DESC;
 import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_PATH;
 import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_PATH_DESC;
 import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_RECORD_NUMBER;
@@ -120,8 +122,7 @@ public final class BackupCommands {
       + " remove     remove tables from a set\n"
       + " list       list all backup sets in the system\n"
       + " describe   describe set\n" + " delete     delete backup set\n";
-  public static final String MERGE_CMD_USAGE = "Usage: hbase backup merge [backup_ids]\n"
-      + "  backup_ids      Comma separated list of backup image ids.\n";
+  public static final String MERGE_CMD_USAGE = "Usage: hbase backup merge [options]\n";
 
   public static final String USAGE_FOOTER = "";
 
@@ -203,6 +204,15 @@ public final class BackupCommands {
     protected boolean requiresConsistentState() {
       return false;
     }
+
+    protected String[] convertToBackupIds(List<BackupInfo> history) {
+      String[] ids = new String[history.size()];
+      for (int i = 0; i < ids.length; i++) {
+        ids[i] = history.get(i).getBackupId();
+      }
+      return ids;
+    }
+
   }
 
   private BackupCommands() {
@@ -613,13 +623,6 @@ public final class BackupCommands {
       }
     }
 
-    private String[] convertToBackupIds(List<BackupInfo> history) {
-      String[] ids = new String[history.size()];
-      for (int i = 0; i < ids.length; i++) {
-        ids[i] = history.get(i).getBackupId();
-      }
-      return ids;
-    }
 
     private void executeDeleteListOfBackups(CommandLine cmdline) throws IOException {
       String value = cmdline.getOptionValue(OPTION_LIST);
@@ -811,17 +814,31 @@ public final class BackupCommands {
 
     @Override
     public void execute() throws IOException {
-      super.execute();
       String[] args = cmdline == null ? null : cmdline.getArgs();
-      if (args == null || (args.length != 2)) {
+      if (args == null || (args.length != 1)) {
         System.err.println("ERROR: wrong number of arguments: "
             + (args == null ? null : args.length));
         printUsage();
         throw new IOException(INCORRECT_USAGE);
       }
 
-      String[] backupIds = args[1].split(",");
+      if (!cmdline.hasOption(OPTION_MERGE_DAYS) && !cmdline.hasOption(OPTION_LIST)) {
+        printUsage();
+        throw new IOException(INCORRECT_USAGE);
+      }
+      super.execute();
+
+      if (cmdline.hasOption(OPTION_MERGE_DAYS)) {
+        executeMergeNewerThan(cmdline);
+      } else if (cmdline.hasOption(OPTION_LIST)) {
+        executeMergeList(cmdline);
+      }
+    }
+
+    private void executeMergeList(CommandLine cmdline) throws IOException {
+      String var = cmdline.getOptionValue(OPTION_LIST);
+      String[] backupIds = var.split(",");
       if (backupIds.length < 2) {
         String msg = "ERROR: can not merge a single backup image. "
             + "Number of images must be greater than 1.";
"+ "Number of images must be greater than 1."; @@ -836,9 +853,51 @@ public final class BackupCommands { } } + private void executeMergeNewerThan(CommandLine cmdline) throws IOException { + String value = cmdline.getOptionValue(OPTION_MERGE_DAYS); + int days = 0; + try { + days = Integer.parseInt(value); + } catch (NumberFormatException e) { + throw new IOException(value + " is not an integer number"); + } + final long fdays = days; + BackupInfo.Filter dateFilter = new BackupInfo.Filter() { + @Override + public boolean apply(BackupInfo info) { + long currentTime = EnvironmentEdgeManager.currentTime(); + long maxTsNotToMerge = currentTime - fdays * 24 * 3600 * 1000; + return info.getCompleteTs() >= maxTsNotToMerge; + } + }; + List history = null; + try (final BackupSystemTable sysTable = new BackupSystemTable(conn); + BackupAdminImpl admin = new BackupAdminImpl(conn)) { + history = sysTable.getBackupHistory(-1, dateFilter); + String[] backupIds = convertToBackupIds(history); + if (backupIds.length < 2) { + String msg = "ERROR: can not merge a single backup image. "+ + "Number of images must be greater than 1."; + System.err.println(msg); + throw new IOException(msg); + } + admin.mergeBackups(backupIds); + } + } + @Override protected void printUsage() { System.out.println(MERGE_CMD_USAGE); + Options options = new Options(); + options.addOption(OPTION_MERGE_DAYS, true, OPTION_MERGE_DAYS_DESC); + options.addOption(OPTION_LIST, true, OPTION_BACKUP_LIST_DESC); + + HelpFormatter helpFormatter = new HelpFormatter(); + helpFormatter.setLeftPadding(2); + helpFormatter.setDescPadding(8); + helpFormatter.setWidth(100); + helpFormatter.setSyntaxPrefix("Options:"); + helpFormatter.printHelp(" ", null, options, USAGE_FOOTER); } } diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupMerge.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupMerge.java index 8ead548110..93304599f2 100644 --- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupMerge.java +++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupMerge.java @@ -50,9 +50,8 @@ public class TestBackupMerge extends TestBackupBase { LoggerFactory.getLogger(TestBackupMerge.class); - @Test - public void TestIncBackupMergeRestore() throws Exception { + public void testIncBackupMergeRestore() throws Exception { int ADD_ROWS = 99; // #1 - create full backup for all tables LOG.info("create full backup image for all tables"); @@ -129,4 +128,5 @@ public class TestBackupMerge extends TestBackupBase { admin.close(); conn.close(); } + } diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupMergeCommand.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupMergeCommand.java new file mode 100644 index 0000000000..c9b4387258 --- /dev/null +++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupMergeCommand.java @@ -0,0 +1,166 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.backup;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+import java.util.List;
+
+import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.backup.impl.BackupAdminImpl;
+import org.apache.hadoop.hbase.backup.util.BackupUtils;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
+import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.testclassification.LargeTests;
+import org.apache.hadoop.hbase.util.EnvironmentEdge;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+import org.apache.hadoop.util.ToolRunner;
+import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
+import org.junit.Assert;
+import org.junit.ClassRule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@Category(LargeTests.class)
+public class TestBackupMergeCommand extends TestBackupBase {
+
+  @ClassRule
+  public static final HBaseClassTestRule CLASS_RULE =
+      HBaseClassTestRule.forClass(TestBackupMergeCommand.class);
+
+  private static final Logger LOG =
+      LoggerFactory.getLogger(TestBackupMergeCommand.class);
+
+  @Test
+  public void testIncBackupMergeRestoreCommand() throws Exception {
+    int ADD_ROWS = 99;
+    // #1 - create full backup for table3
+    LOG.info("create full backup image for table3");
+
+    List<TableName> tables = Lists.newArrayList(table3);
+
+    // Back-date the full backup by injecting a clock 4 days in the past
+    EnvironmentEdgeManager.injectEdge(new EnvironmentEdge() {
+      // time - 4 days
+      @Override
+      public long currentTime() {
+        return System.currentTimeMillis() - 4 * 24 * 3600 * 1000L;
+      }
+    });
+
+    Connection conn = ConnectionFactory.createConnection(conf1);
+
+    HTable t1 = insertIntoTable(conn, table3, famName, 1, ADD_ROWS);
+
+    HBaseAdmin admin = (HBaseAdmin) conn.getAdmin();
+    BackupAdminImpl client = new BackupAdminImpl(conn);
+    BackupRequest request = createBackupRequest(BackupType.FULL, tables, BACKUP_ROOT_DIR);
+    String backupIdFull = client.backupTables(request);
+    assertTrue(checkSucceeded(backupIdFull));
+
+    EnvironmentEdgeManager.injectEdge(new EnvironmentEdge() {
+      // time - 2 days
+      @Override
+      public long currentTime() {
+        return System.currentTimeMillis() - 2 * 24 * 3600 * 1000L;
+      }
+    });
+
+    // #2 - insert some data to table3
+    t1 = insertIntoTable(conn, table3, famName, 2, ADD_ROWS);
+    LOG.debug("writing " + ADD_ROWS + " rows to " + table3);
+
+    Assert.assertEquals(2 * ADD_ROWS, TEST_UTIL.countRows(t1));
+    t1.close();
+    LOG.debug("written " + ADD_ROWS + " rows to " + table3);
+
+    // #3 - first incremental backup for table3
+    tables = Lists.newArrayList(table3);
+    request = createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR);
+    String backupIdIncMultiple = client.backupTables(request);
+
+    assertTrue(checkSucceeded(backupIdIncMultiple));
+
+    EnvironmentEdgeManager.injectEdge(new EnvironmentEdge() {
+      // time - 1 day
+      @Override
+      public long currentTime() {
+        return System.currentTimeMillis() - 1 * 24 * 3600 * 1000L;
+      }
+    });
+
+    t1 = insertIntoTable(conn, table3, famName, 3, ADD_ROWS);
+    t1.close();
+
+    // #4 - second incremental backup for table3
+    request = createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR);
+    String backupIdIncMultiple2 = client.backupTables(request);
+    assertTrue(checkSucceeded(backupIdIncMultiple2));
+
+    EnvironmentEdgeManager.reset();
+
+    // #5 - merge the last 3 days (the two incremental backups)
+    String[] args = new String[] { "merge", "-d", "3" };
+    try {
+      int ret = ToolRunner.run(conf1, new BackupDriver(), args);
+      assertTrue(ret == 0);
+    } catch (Exception e) {
+      LOG.error("failed", e);
+      Assert.fail(e.getMessage());
+    }
+
+    // Get the id of the merged (most recent) backup image
+    List<BackupInfo> infos = client.getHistory(10);
+    assertEquals(2, infos.size());
+    BackupInfo bi = infos.get(0);
+    assertTrue(bi.getType() == BackupType.INCREMENTAL);
+    String backupId = bi.getBackupId();
+
+    // #6 - restore the merged incremental backup, with overwrite
+    TableName[] tablesRestoreIncMultiple = new TableName[] { table3 };
+    TableName[] tablesMapIncMultiple = new TableName[] { table3_restore };
+    client.restore(BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, backupId, false,
+      tablesRestoreIncMultiple, tablesMapIncMultiple, true));
+
+    Table hTable = conn.getTable(table3_restore);
+    LOG.debug("After incremental restore: " + hTable.getTableDescriptor());
+    int countRows = TEST_UTIL.countRows(hTable, famName);
+    LOG.debug("f1 has " + countRows + " rows");
+    Assert.assertEquals(3 * ADD_ROWS, countRows);
+
+    hTable.close();
+
+    hTable = conn.getTable(table3_restore);
+    Assert.assertEquals(3 * ADD_ROWS, TEST_UTIL.countRows(hTable));
+    hTable.close();
+
+    admin.close();
+    conn.close();
+  }
+
+}
-- 
2.14.3 (Apple Git-98)
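
The selection rule behind `merge -d <days>` is a simple timestamp cutoff
against BackupInfo.getCompleteTs(). The standalone Java sketch below is an
editorial illustration, not part of the patch: the class, method names, and
values are hypothetical, and only the arithmetic mirrors executeMergeNewerThan.

public class MergeCutoffSketch {
  static final long MS_PER_DAY = 24L * 3600 * 1000; // 86,400,000 ms in a day

  // Returns true if a backup that completed at completeTs (ms since epoch)
  // would be selected by `merge -d days` evaluated at time `now`.
  static boolean selectedForMerge(long completeTs, long now, int days) {
    // `days` is promoted to long by MS_PER_DAY, so there is no int overflow
    long cutoff = now - days * MS_PER_DAY;
    return completeTs >= cutoff;
  }

  public static void main(String[] args) {
    long now = System.currentTimeMillis();
    // An image completed 2 days ago falls inside a 3-day window: selected
    System.out.println(selectedForMerge(now - 2 * MS_PER_DAY, now, 3)); // true
    // An image completed 4 days ago falls outside it: not selected
    System.out.println(selectedForMerge(now - 4 * MS_PER_DAY, now, 3)); // false
  }
}

This is why the test above back-dates its backups with EnvironmentEdgeManager
edges at -4, -2, and -1 days: `merge -d 3` then picks up exactly the two
incremental images while leaving the 4-day-old full backup alone.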