diff --git hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupHFileCleaner.java hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupHFileCleaner.java index c6aa5f0098..e5ecdb4a62 100644 --- hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupHFileCleaner.java +++ hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupHFileCleaner.java @@ -32,7 +32,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Abortable; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.backup.impl.BackupSystemTable; +import org.apache.hadoop.hbase.backup.impl.BackupMetaTable; import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; @@ -77,7 +77,7 @@ public class BackupHFileCleaner extends BaseHFileCleanerDelegate implements Abor if (connection == null) { connection = ConnectionFactory.createConnection(conf); } - try (BackupSystemTable tbl = new BackupSystemTable(connection)) { + try (BackupMetaTable tbl = new BackupMetaTable(connection)) { Map>[] res = tbl.readBulkLoadedFiles(null, tableList); secondPrevReadFromBackupTbl = prevReadFromBackupTbl; @@ -99,7 +99,7 @@ public class BackupHFileCleaner extends BaseHFileCleanerDelegate implements Abor // so that we filter BulkLoad to be returned from server if (checkForFullyBackedUpTables) { if (connection == null) return files; - try (BackupSystemTable tbl = new BackupSystemTable(connection)) { + try (BackupMetaTable tbl = new BackupMetaTable(connection)) { fullyBackedUpTables = tbl.getTablesForBackupType(BackupType.FULL); } catch (IOException ioe) { LOG.error("Failed to get tables which have been fully backed up, skipping checking", ioe); diff --git hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupObserver.java hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupObserver.java index 1d8f780b38..dd633b056d 100644 --- hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupObserver.java +++ hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupObserver.java @@ -30,7 +30,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.backup.impl.BackupManager; -import org.apache.hadoop.hbase.backup.impl.BackupSystemTable; +import org.apache.hadoop.hbase.backup.impl.BackupMetaTable; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.RegionInfo; @@ -67,7 +67,7 @@ public class BackupObserver implements RegionCoprocessor, RegionObserver { return; } try (Connection connection = ConnectionFactory.createConnection(cfg); - BackupSystemTable tbl = new BackupSystemTable(connection)) { + BackupMetaTable tbl = new BackupMetaTable(connection)) { List fullyBackedUpTables = tbl.getTablesForBackupType(BackupType.FULL); RegionInfo info = ctx.getEnvironment().getRegionInfo(); TableName tableName = info.getTable(); @@ -91,7 +91,7 @@ public class BackupObserver implements RegionCoprocessor, RegionObserver { return; } try (Connection connection = ConnectionFactory.createConnection(cfg); - BackupSystemTable tbl = new BackupSystemTable(connection)) { + BackupMetaTable tbl = new BackupMetaTable(connection)) { List fullyBackedUpTables = tbl.getTablesForBackupType(BackupType.FULL); RegionInfo info = 
ctx.getEnvironment().getRegionInfo(); TableName tableName = info.getTable(); diff --git hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/RestoreDriver.java hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/RestoreDriver.java index c8e3474677..16244c9e8f 100644 --- hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/RestoreDriver.java +++ hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/RestoreDriver.java @@ -47,7 +47,7 @@ import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.backup.impl.BackupAdminImpl; import org.apache.hadoop.hbase.backup.impl.BackupManager; -import org.apache.hadoop.hbase.backup.impl.BackupSystemTable; +import org.apache.hadoop.hbase.backup.impl.BackupMetaTable; import org.apache.hadoop.hbase.backup.util.BackupUtils; import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.client.Connection; @@ -189,7 +189,7 @@ public class RestoreDriver extends AbstractHBaseTool { private String getTablesForSet(Connection conn, String name, Configuration conf) throws IOException { - try (final BackupSystemTable table = new BackupSystemTable(conn)) { + try (final BackupMetaTable table = new BackupMetaTable(conn)) { List tables = table.describeBackupSet(name); if (tables == null) return null; return StringUtils.join(tables, BackupRestoreConstants.TABLENAME_DELIMITER_IN_COMMAND); diff --git hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupAdminImpl.java hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupAdminImpl.java index 9a20b7b749..185362c5eb 100644 --- hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupAdminImpl.java +++ hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupAdminImpl.java @@ -46,11 +46,11 @@ import org.apache.hadoop.hbase.backup.HBackupFileSystem; import org.apache.hadoop.hbase.backup.RestoreRequest; import org.apache.hadoop.hbase.backup.util.BackupSet; import org.apache.hadoop.hbase.backup.util.BackupUtils; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.shaded.com.google.common.collect.Lists; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; +import org.apache.yetus.audience.InterfaceAudience; @InterfaceAudience.Private public class BackupAdminImpl implements BackupAdmin { @@ -72,7 +72,7 @@ public class BackupAdminImpl implements BackupAdmin { @Override public BackupInfo getBackupInfo(String backupId) throws IOException { BackupInfo backupInfo = null; - try (final BackupSystemTable table = new BackupSystemTable(conn)) { + try (final BackupMetaTable table = new BackupMetaTable(conn)) { if (backupId == null) { ArrayList recentSessions = table.getBackupInfos(BackupState.RUNNING); if (recentSessions.isEmpty()) { @@ -97,7 +97,7 @@ public class BackupAdminImpl implements BackupAdmin { boolean deleteSessionStarted = false; boolean snapshotDone = false; - try (final BackupSystemTable sysTable = new BackupSystemTable(conn)) { + try (final BackupMetaTable sysTable = new BackupMetaTable(conn)) { // Step 1: Make sure there is no active session // is running by using startBackupSession API @@ -123,8 +123,8 @@ public class BackupAdminImpl implements BackupAdmin { // Step 3: Record delete session sysTable.startDeleteOperation(backupIds); // Step 4: Snapshot backup system table - if (!BackupSystemTable.snapshotExists(conn)) { - 
BackupSystemTable.snapshot(conn); + if (!BackupMetaTable.snapshotExists(conn)) { + BackupMetaTable.snapshot(conn); } else { LOG.warn("Backup system table snapshot exists"); } @@ -147,15 +147,15 @@ public class BackupAdminImpl implements BackupAdmin { // Finish sysTable.finishDeleteOperation(); // delete snapshot - BackupSystemTable.deleteSnapshot(conn); + BackupMetaTable.deleteSnapshot(conn); } catch (IOException e) { // Fail delete operation // Step 1 if (snapshotDone) { - if (BackupSystemTable.snapshotExists(conn)) { - BackupSystemTable.restoreFromSnapshot(conn); + if (BackupMetaTable.snapshotExists(conn)) { + BackupMetaTable.restoreFromSnapshot(conn); // delete snapshot - BackupSystemTable.deleteSnapshot(conn); + BackupMetaTable.deleteSnapshot(conn); // We still have record with unfinished delete operation LOG.error("Delete operation failed, please run backup repair utility to restore " + "backup system integrity", e); @@ -181,7 +181,7 @@ public class BackupAdminImpl implements BackupAdmin { * @throws IOException */ - private void finalizeDelete(Map> tablesMap, BackupSystemTable table) + private void finalizeDelete(Map> tablesMap, BackupMetaTable table) throws IOException { for (String backupRoot : tablesMap.keySet()) { Set incrTableSet = table.getIncrementalBackupTableSet(backupRoot); @@ -220,7 +220,7 @@ public class BackupAdminImpl implements BackupAdmin { * @return total number of deleted backup images * @throws IOException */ - private int deleteBackup(String backupId, BackupSystemTable sysTable) throws IOException { + private int deleteBackup(String backupId, BackupMetaTable sysTable) throws IOException { BackupInfo backupInfo = sysTable.readBackupInfo(backupId); @@ -271,7 +271,7 @@ public class BackupAdminImpl implements BackupAdmin { LOG.debug(numDeleted + " bulk loaded files out of " + map.size() + " were deleted"); } if (success) { - sysTable.deleteBulkLoadedFiles(map); + sysTable.deleteBulkLoadedRows(new ArrayList(map.keySet())); } sysTable.deleteBackupInfo(backupInfo.getBackupId()); @@ -284,7 +284,7 @@ public class BackupAdminImpl implements BackupAdmin { } private void - removeTableFromBackupImage(BackupInfo info, TableName tn, BackupSystemTable sysTable) + removeTableFromBackupImage(BackupInfo info, TableName tn, BackupMetaTable sysTable) throws IOException { List tables = info.getTableNames(); LOG.debug("Remove " + tn + " from " + info.getBackupId() + " tables=" @@ -308,7 +308,7 @@ public class BackupAdminImpl implements BackupAdmin { } private List getAffectedBackupSessions(BackupInfo backupInfo, TableName tn, - BackupSystemTable table) throws IOException { + BackupMetaTable table) throws IOException { LOG.debug("GetAffectedBackupInfos for: " + backupInfo.getBackupId() + " table=" + tn); long ts = backupInfo.getStartTs(); List list = new ArrayList(); @@ -367,7 +367,7 @@ public class BackupAdminImpl implements BackupAdmin { } } - private boolean isLastBackupSession(BackupSystemTable table, TableName tn, long startTime) + private boolean isLastBackupSession(BackupMetaTable table, TableName tn, long startTime) throws IOException { List history = table.getBackupHistory(); for (BackupInfo info : history) { @@ -386,7 +386,7 @@ public class BackupAdminImpl implements BackupAdmin { @Override public List getHistory(int n) throws IOException { - try (final BackupSystemTable table = new BackupSystemTable(conn)) { + try (final BackupMetaTable table = new BackupMetaTable(conn)) { List history = table.getBackupHistory(); if (history.size() <= n) return history; List list = new 
ArrayList(); @@ -400,7 +400,7 @@ public class BackupAdminImpl implements BackupAdmin { @Override public List getHistory(int n, BackupInfo.Filter... filters) throws IOException { if (filters.length == 0) return getHistory(n); - try (final BackupSystemTable table = new BackupSystemTable(conn)) { + try (final BackupMetaTable table = new BackupMetaTable(conn)) { List history = table.getBackupHistory(); List result = new ArrayList(); for (BackupInfo bi : history) { @@ -422,7 +422,7 @@ public class BackupAdminImpl implements BackupAdmin { @Override public List listBackupSets() throws IOException { - try (final BackupSystemTable table = new BackupSystemTable(conn)) { + try (final BackupMetaTable table = new BackupMetaTable(conn)) { List list = table.listBackupSets(); List bslist = new ArrayList(); for (String s : list) { @@ -437,7 +437,7 @@ public class BackupAdminImpl implements BackupAdmin { @Override public BackupSet getBackupSet(String name) throws IOException { - try (final BackupSystemTable table = new BackupSystemTable(conn)) { + try (final BackupMetaTable table = new BackupMetaTable(conn)) { List list = table.describeBackupSet(name); if (list == null) return null; return new BackupSet(name, list); @@ -446,7 +446,7 @@ public class BackupAdminImpl implements BackupAdmin { @Override public boolean deleteBackupSet(String name) throws IOException { - try (final BackupSystemTable table = new BackupSystemTable(conn)) { + try (final BackupMetaTable table = new BackupMetaTable(conn)) { if (table.describeBackupSet(name) == null) { return false; } @@ -458,7 +458,7 @@ public class BackupAdminImpl implements BackupAdmin { @Override public void addToBackupSet(String name, TableName[] tables) throws IOException { String[] tableNames = new String[tables.length]; - try (final BackupSystemTable table = new BackupSystemTable(conn); + try (final BackupMetaTable table = new BackupMetaTable(conn); final Admin admin = conn.getAdmin()) { for (int i = 0; i < tables.length; i++) { tableNames[i] = tables[i].getNameAsString(); @@ -475,7 +475,7 @@ public class BackupAdminImpl implements BackupAdmin { @Override public void removeFromBackupSet(String name, TableName[] tables) throws IOException { LOG.info("Removing tables [" + StringUtils.join(tables, " ") + "] from '" + name + "'"); - try (final BackupSystemTable table = new BackupSystemTable(conn)) { + try (final BackupMetaTable table = new BackupMetaTable(conn)) { table.removeFromBackupSet(name, toStringArray(tables)); LOG.info("Removing tables [" + StringUtils.join(tables, " ") + "] from '" + name + "' completed."); @@ -523,7 +523,7 @@ public class BackupAdminImpl implements BackupAdmin { String backupId = BackupRestoreConstants.BACKUPID_PREFIX + EnvironmentEdgeManager.currentTime(); if (type == BackupType.INCREMENTAL) { Set incrTableSet = null; - try (BackupSystemTable table = new BackupSystemTable(conn)) { + try (BackupMetaTable table = new BackupMetaTable(conn)) { incrTableSet = table.getIncrementalBackupTableSet(targetRootDir); } @@ -615,7 +615,7 @@ public class BackupAdminImpl implements BackupAdmin { @Override public void mergeBackups(String[] backupIds) throws IOException { - try (final BackupSystemTable sysTable = new BackupSystemTable(conn)) { + try (final BackupMetaTable sysTable = new BackupMetaTable(conn)) { checkIfValidForMerge(backupIds, sysTable); BackupMergeJob job = BackupRestoreFactory.getBackupMergeJob(conn.getConfiguration()); job.run(backupIds); @@ -636,7 +636,7 @@ public class BackupAdminImpl implements BackupAdmin { * @param table backup 
system table * @throws IOException */ - private void checkIfValidForMerge(String[] backupIds, BackupSystemTable table) throws IOException { + private void checkIfValidForMerge(String[] backupIds, BackupMetaTable table) throws IOException { String backupRoot = null; final Set allTables = new HashSet(); diff --git hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupCommands.java hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupCommands.java index 456674542f..0ce27f61fd 100644 --- hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupCommands.java +++ hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupCommands.java @@ -144,7 +144,7 @@ public final class BackupCommands { conn = ConnectionFactory.createConnection(getConf()); if (requiresNoActiveSession()) { // Check active session - try (BackupSystemTable table = new BackupSystemTable(conn)) { + try (BackupMetaTable table = new BackupMetaTable(conn)) { List sessions = table.getBackupInfos(BackupState.RUNNING); if (sessions.size() > 0) { @@ -158,7 +158,7 @@ public final class BackupCommands { } if (requiresConsistentState()) { // Check failed delete - try (BackupSystemTable table = new BackupSystemTable(conn)) { + try (BackupMetaTable table = new BackupMetaTable(conn)) { String[] ids = table.getListOfBackupIdsFromDeleteOperation(); if (ids != null && ids.length > 0) { @@ -368,7 +368,7 @@ public final class BackupCommands { } private String getTablesForSet(String name, Configuration conf) throws IOException { - try (final BackupSystemTable table = new BackupSystemTable(conn)) { + try (final BackupMetaTable table = new BackupMetaTable(conn)) { List tables = table.describeBackupSet(name); if (tables == null) return null; return StringUtils.join(tables, BackupRestoreConstants.TABLENAME_DELIMITER_IN_COMMAND); @@ -470,7 +470,7 @@ public final class BackupCommands { super.execute(); String backupId = args[1]; - try (final BackupSystemTable sysTable = new BackupSystemTable(conn)) { + try (final BackupMetaTable sysTable = new BackupMetaTable(conn)) { BackupInfo info = sysTable.readBackupInfo(backupId); if (info == null) { System.out.println("ERROR: " + backupId + " does not exist"); @@ -511,7 +511,7 @@ public final class BackupCommands { super.execute(); String backupId = (args == null || args.length <= 1) ? null : args[1]; - try (final BackupSystemTable sysTable = new BackupSystemTable(conn)) { + try (final BackupMetaTable sysTable = new BackupMetaTable(conn)) { BackupInfo info = null; if (backupId != null) { @@ -605,7 +605,7 @@ public final class BackupCommands { Configuration conf = getConf() != null ? 
getConf() : HBaseConfiguration.create(); try (final Connection conn = ConnectionFactory.createConnection(conf); - final BackupSystemTable sysTable = new BackupSystemTable(conn)) { + final BackupMetaTable sysTable = new BackupMetaTable(conn)) { // Failed backup BackupInfo backupInfo; List list = sysTable.getBackupInfos(BackupState.RUNNING); @@ -641,19 +641,19 @@ public final class BackupCommands { } } - private void repairFailedBackupDeletionIfAny(Connection conn, BackupSystemTable sysTable) + private void repairFailedBackupDeletionIfAny(Connection conn, BackupMetaTable sysTable) throws IOException { String[] backupIds = sysTable.getListOfBackupIdsFromDeleteOperation(); if (backupIds == null || backupIds.length == 0) { System.out.println("No failed backup DELETE operation found"); // Delete backup table snapshot if exists - BackupSystemTable.deleteSnapshot(conn); + BackupMetaTable.deleteSnapshot(conn); return; } System.out.println("Found failed DELETE operation for: " + StringUtils.join(backupIds)); System.out.println("Running DELETE again ..."); // Restore table from snapshot - BackupSystemTable.restoreFromSnapshot(conn); + BackupMetaTable.restoreFromSnapshot(conn); // Finish previous failed session sysTable.finishBackupExclusiveOperation(); try (BackupAdmin admin = new BackupAdminImpl(conn)) { @@ -663,19 +663,19 @@ public final class BackupCommands { } - private void repairFailedBackupMergeIfAny(Connection conn, BackupSystemTable sysTable) + private void repairFailedBackupMergeIfAny(Connection conn, BackupMetaTable sysTable) throws IOException { String[] backupIds = sysTable.getListOfBackupIdsFromMergeOperation(); if (backupIds == null || backupIds.length == 0) { System.out.println("No failed backup MERGE operation found"); // Delete backup table snapshot if exists - BackupSystemTable.deleteSnapshot(conn); + BackupMetaTable.deleteSnapshot(conn); return; } System.out.println("Found failed MERGE operation for: " + StringUtils.join(backupIds)); System.out.println("Running MERGE again ..."); // Restore table from snapshot - BackupSystemTable.restoreFromSnapshot(conn); + BackupMetaTable.restoreFromSnapshot(conn); // Unlock backupo system sysTable.finishBackupExclusiveOperation(); // Finish previous failed session @@ -779,7 +779,7 @@ public final class BackupCommands { if (backupRootPath == null) { // Load from backup system table super.execute(); - try (final BackupSystemTable sysTable = new BackupSystemTable(conn)) { + try (final BackupMetaTable sysTable = new BackupMetaTable(conn)) { history = sysTable.getBackupHistory(n, tableNameFilter, tableSetFilter); } } else { @@ -919,7 +919,7 @@ public final class BackupCommands { super.execute(); String setName = args[2]; - try (final BackupSystemTable sysTable = new BackupSystemTable(conn)) { + try (final BackupMetaTable sysTable = new BackupMetaTable(conn)) { List tables = sysTable.describeBackupSet(setName); BackupSet set = tables == null ? 
null : new BackupSet(setName, tables); if (set == null) { diff --git hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManager.java hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManager.java index 7ac94d8e4a..6863548362 100644 --- hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManager.java +++ hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManager.java @@ -36,6 +36,7 @@ import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.backup.BackupInfo; import org.apache.hadoop.hbase.backup.BackupInfo.BackupState; +import org.apache.hadoop.hbase.backup.BackupObserver; import org.apache.hadoop.hbase.backup.BackupRestoreConstants; import org.apache.hadoop.hbase.backup.BackupType; import org.apache.hadoop.hbase.backup.HBackupFileSystem; @@ -43,13 +44,13 @@ import org.apache.hadoop.hbase.backup.impl.BackupManifest.BackupImage; import org.apache.hadoop.hbase.backup.master.BackupLogCleaner; import org.apache.hadoop.hbase.backup.master.LogRollMasterProcedureManager; import org.apache.hadoop.hbase.backup.regionserver.LogRollRegionServerProcedureManager; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.coprocessor.CoprocessorHost; import org.apache.hadoop.hbase.procedure.ProcedureManagerHost; -import org.apache.hadoop.hbase.util.Pair; - import org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.hbase.util.Pair; +import org.apache.yetus.audience.InterfaceAudience; /** * Handles backup requests, creates backup info records in backup system table to @@ -61,7 +62,7 @@ public class BackupManager implements Closeable { protected Configuration conf = null; protected BackupInfo backupInfo = null; - protected BackupSystemTable systemTable; + protected BackupMetaTable systemTable; protected final Connection conn; /** @@ -78,7 +79,7 @@ public class BackupManager implements Closeable { } this.conf = conf; this.conn = conn; - this.systemTable = new BackupSystemTable(conn); + this.systemTable = new BackupMetaTable(conn); } @@ -140,10 +141,14 @@ public class BackupManager implements Closeable { conf.set(ProcedureManagerHost.REGIONSERVER_PROCEDURE_CONF_KEY, classes + "," + regionProcedureClass); } + String coproc = conf.get(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY); + String regionObserverClass = BackupObserver.class.getName(); + conf.set(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, (coproc == null ? 
"" : coproc + ",") + + regionObserverClass); if (LOG.isDebugEnabled()) { - LOG.debug("Added region procedure manager: " + regionProcedureClass); + LOG.debug("Added region procedure manager: " + regionProcedureClass+"\nAdded region observer: " + + regionObserverClass); } - } public static boolean isBackupEnabled(Configuration conf) { @@ -208,7 +213,7 @@ public class BackupManager implements Closeable { tableList = new ArrayList<>(); for (HTableDescriptor hTableDescriptor : htds) { TableName tn = hTableDescriptor.getTableName(); - if (tn.equals(BackupSystemTable.getTableName(conf))) { + if (tn.equals(BackupMetaTable.getTableName(conf))) { // skip backup system table continue; } @@ -415,13 +420,8 @@ public class BackupManager implements Closeable { return systemTable.readBulkloadRows(tableList); } - public void removeBulkLoadedRows(List lst, List rows) throws IOException { - systemTable.removeBulkLoadedRows(lst, rows); - } - - public void writeBulkLoadedFiles(List sTableList, Map>[] maps) - throws IOException { - systemTable.writeBulkLoadedFiles(sTableList, maps, backupInfo.getBackupId()); + public void deleteBulkLoadedRows(List rows) throws IOException { + systemTable.deleteBulkLoadedRows(rows); } /** @@ -492,7 +492,7 @@ public class BackupManager implements Closeable { * @return WAL files iterator from backup system table * @throws IOException */ - public Iterator getWALFilesFromBackupSystem() throws IOException { + public Iterator getWALFilesFromBackupSystem() throws IOException { return systemTable.getWALFilesIterator(backupInfo.getBackupRootDir()); } diff --git hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupMetaTable.java similarity index 87% rename from hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java rename to hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupMetaTable.java index ebfc9f3333..80bb6e90c8 100644 --- hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java +++ hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupMetaTable.java @@ -42,8 +42,6 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.HBaseConfiguration; -import org.apache.hadoop.hbase.HColumnDescriptor; -import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.NamespaceDescriptor; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; @@ -53,6 +51,8 @@ import org.apache.hadoop.hbase.backup.BackupRestoreConstants; import org.apache.hadoop.hbase.backup.BackupType; import org.apache.hadoop.hbase.backup.util.BackupUtils; import org.apache.hadoop.hbase.client.Admin; +import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor; +import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.Delete; import org.apache.hadoop.hbase.client.Get; @@ -62,6 +62,8 @@ import org.apache.hadoop.hbase.client.ResultScanner; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.client.SnapshotDescription; import org.apache.hadoop.hbase.client.Table; +import org.apache.hadoop.hbase.client.TableDescriptor; +import org.apache.hadoop.hbase.client.TableDescriptorBuilder; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; 
import org.apache.hadoop.hbase.util.Pair; @@ -70,9 +72,9 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.BackupProtos; import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos; /** - * This class provides API to access backup system table
+ * This class provides API to access backup meta table<br>
  *
- * Backup system table schema:<br>
+ * Backup meta table schema:<br>
  * <p><ul>
  * <li>1. Backup sessions rowkey= "session:"+backupId; value =serialized BackupInfo</li>
  * <li>2. Backup start code rowkey = "startcode:"+backupRoot; value = startcode</li>
  • @@ -86,8 +88,8 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos; */ @InterfaceAudience.Private -public final class BackupSystemTable implements Closeable { - private static final Log LOG = LogFactory.getLog(BackupSystemTable.class); +public final class BackupMetaTable implements Closeable { + private static final Log LOG = LogFactory.getLog(BackupMetaTable.class); static class WALItem { String backupId; @@ -119,7 +121,21 @@ public final class BackupSystemTable implements Closeable { } + /** + * Backup system table (main) name + */ private TableName tableName; + + /** + * Backup System table name for bulk loaded files. + * We keep all bulk loaded file references in a separate table + * because we have to isolate general backup operations: create, merge etc + * from activity of RegionObserver, which controls process of a bulk loading + * {@link org.apache.hadoop.hbase.backup.BackupObserver} + */ + + private TableName bulkLoadTableName; + /** * Stores backup sessions (contexts) */ @@ -169,22 +185,31 @@ public final class BackupSystemTable implements Closeable { // Safe delimiter in a string private final static String NULL = "\u0000"; - public BackupSystemTable(Connection conn) throws IOException { + public BackupMetaTable(Connection conn) throws IOException { this.connection = conn; - tableName = BackupSystemTable.getTableName(conn.getConfiguration()); + Configuration conf = this.connection.getConfiguration(); + tableName = BackupMetaTable.getTableName(conf); + bulkLoadTableName = BackupMetaTable.getTableNameForBulkLoadedData(conf); checkSystemTable(); } private void checkSystemTable() throws IOException { try (Admin admin = connection.getAdmin()) { verifyNamespaceExists(admin); - + Configuration conf = connection.getConfiguration(); if (!admin.tableExists(tableName)) { - HTableDescriptor backupHTD = - BackupSystemTable.getSystemTableDescriptor(connection.getConfiguration()); + TableDescriptor backupHTD = + BackupMetaTable.getSystemTableDescriptor(conf); admin.createTable(backupHTD); } - waitForSystemTable(admin); + if (!admin.tableExists(bulkLoadTableName)) { + TableDescriptor blHTD = + BackupMetaTable.getSystemTableForBulkLoadedDataDescriptor(conf); + admin.createTable(blHTD); + } + waitForSystemTable(admin, tableName); + waitForSystemTable(admin, bulkLoadTableName); + } } @@ -204,7 +229,7 @@ public final class BackupSystemTable implements Closeable { } } - private void waitForSystemTable(Admin admin) throws IOException { + private void waitForSystemTable(Admin admin, TableName tableName) throws IOException { long TIMEOUT = 60000; long startTime = EnvironmentEdgeManager.currentTime(); while (!admin.tableExists(tableName) || !admin.isTableAvailable(tableName)) { @@ -213,10 +238,11 @@ public final class BackupSystemTable implements Closeable { } catch (InterruptedException e) { } if (EnvironmentEdgeManager.currentTime() - startTime > TIMEOUT) { - throw new IOException("Failed to create backup system table after " + TIMEOUT + "ms"); + throw new IOException("Failed to create backup system table "+ + tableName +" after " + TIMEOUT + "ms"); } } - LOG.debug("Backup table exists and available"); + LOG.debug("Backup table "+tableName+" exists and available"); } @@ -247,8 +273,8 @@ public final class BackupSystemTable implements Closeable { * @return Map of rows to path of bulk loaded hfile */ Map readBulkLoadedFiles(String backupId) throws IOException { - Scan scan = BackupSystemTable.createScanForBulkLoadedFiles(backupId); - try (Table table = 
connection.getTable(tableName); + Scan scan = BackupMetaTable.createScanForBulkLoadedFiles(backupId); + try (Table table = connection.getTable(bulkLoadTableName); ResultScanner scanner = table.getScanner(scan)) { Result res = null; Map map = new TreeMap<>(Bytes.BYTES_COMPARATOR); @@ -256,8 +282,8 @@ public final class BackupSystemTable implements Closeable { res.advance(); byte[] row = CellUtil.cloneRow(res.listCells().get(0)); for (Cell cell : res.listCells()) { - if (CellUtil.compareQualifiers(cell, BackupSystemTable.PATH_COL, 0, - BackupSystemTable.PATH_COL.length) == 0) { + if (CellUtil.compareQualifiers(cell, BackupMetaTable.PATH_COL, 0, + BackupMetaTable.PATH_COL.length) == 0) { map.put(row, Bytes.toString(CellUtil.cloneValue(cell))); } } @@ -274,9 +300,9 @@ public final class BackupSystemTable implements Closeable { */ public Map>[] readBulkLoadedFiles(String backupId, List sTableList) throws IOException { - Scan scan = BackupSystemTable.createScanForBulkLoadedFiles(backupId); + Scan scan = BackupMetaTable.createScanForBulkLoadedFiles(backupId); Map>[] mapForSrc = new Map[sTableList == null ? 1 : sTableList.size()]; - try (Table table = connection.getTable(tableName); + try (Table table = connection.getTable(bulkLoadTableName); ResultScanner scanner = table.getScanner(scan)) { Result res = null; while ((res = scanner.next()) != null) { @@ -285,14 +311,14 @@ public final class BackupSystemTable implements Closeable { byte[] fam = null; String path = null; for (Cell cell : res.listCells()) { - if (CellUtil.compareQualifiers(cell, BackupSystemTable.TBL_COL, 0, - BackupSystemTable.TBL_COL.length) == 0) { + if (CellUtil.compareQualifiers(cell, BackupMetaTable.TBL_COL, 0, + BackupMetaTable.TBL_COL.length) == 0) { tbl = TableName.valueOf(CellUtil.cloneValue(cell)); - } else if (CellUtil.compareQualifiers(cell, BackupSystemTable.FAM_COL, 0, - BackupSystemTable.FAM_COL.length) == 0) { + } else if (CellUtil.compareQualifiers(cell, BackupMetaTable.FAM_COL, 0, + BackupMetaTable.FAM_COL.length) == 0) { fam = CellUtil.cloneValue(cell); - } else if (CellUtil.compareQualifiers(cell, BackupSystemTable.PATH_COL, 0, - BackupSystemTable.PATH_COL.length) == 0) { + } else if (CellUtil.compareQualifiers(cell, BackupMetaTable.PATH_COL, 0, + BackupMetaTable.PATH_COL.length) == 0) { path = Bytes.toString(CellUtil.cloneValue(cell)); } } @@ -321,18 +347,6 @@ public final class BackupSystemTable implements Closeable { } } - /* - * @param map Map of row keys to path of bulk loaded hfile - */ - void deleteBulkLoadedFiles(Map map) throws IOException { - try (Table table = connection.getTable(tableName)) { - List dels = new ArrayList<>(); - for (byte[] row : map.keySet()) { - dels.add(new Delete(row).addFamily(BackupSystemTable.META_FAMILY)); - } - table.delete(dels); - } - } /** * Deletes backup status from backup system table table @@ -363,8 +377,8 @@ public final class BackupSystemTable implements Closeable { LOG.debug("write bulk load descriptor to backup " + tabName + " with " + finalPaths.size() + " entries"); } - try (Table table = connection.getTable(tableName)) { - List puts = BackupSystemTable.createPutForCommittedBulkload(tabName, region, finalPaths); + try (Table table = connection.getTable(bulkLoadTableName)) { + List puts = BackupMetaTable.createPutForCommittedBulkload(tabName, region, finalPaths); table.put(puts); LOG.debug("written " + puts.size() + " rows for bulk load of " + tabName); } @@ -383,9 +397,9 @@ public final class BackupSystemTable implements Closeable { LOG.debug("write bulk load 
descriptor to backup " + tabName + " with " + pairs.size() + " entries"); } - try (Table table = connection.getTable(tableName)) { + try (Table table = connection.getTable(bulkLoadTableName)) { List puts = - BackupSystemTable.createPutForPreparedBulkload(tabName, region, family, pairs); + BackupMetaTable.createPutForPreparedBulkload(tabName, region, family, pairs); table.put(puts); LOG.debug("written " + puts.size() + " rows for bulk load of " + tabName); } @@ -396,8 +410,8 @@ public final class BackupSystemTable implements Closeable { * @param lst list of table names * @param rows the rows to be deleted */ - public void removeBulkLoadedRows(List lst, List rows) throws IOException { - try (Table table = connection.getTable(tableName)) { + public void deleteBulkLoadedRows(List rows) throws IOException { + try (Table table = connection.getTable(bulkLoadTableName)) { List lstDels = new ArrayList<>(); for (byte[] row : rows) { Delete del = new Delete(row); @@ -405,7 +419,7 @@ public final class BackupSystemTable implements Closeable { LOG.debug("orig deleting the row: " + Bytes.toString(row)); } table.delete(lstDels); - LOG.debug("deleted " + rows.size() + " original bulkload rows for " + lst.size() + " tables"); + LOG.debug("deleted " + rows.size() + " original bulkload rows"); } } @@ -420,9 +434,9 @@ public final class BackupSystemTable implements Closeable { Map>>>> map = new HashMap<>(); List rows = new ArrayList<>(); for (TableName tTable : tableList) { - Scan scan = BackupSystemTable.createScanForOrigBulkLoadedFiles(tTable); + Scan scan = BackupMetaTable.createScanForOrigBulkLoadedFiles(tTable); Map>>> tblMap = map.get(tTable); - try (Table table = connection.getTable(tableName); + try (Table table = connection.getTable(bulkLoadTableName); ResultScanner scanner = table.getScanner(scan)) { Result res = null; while ((res = scanner.next()) != null) { @@ -436,17 +450,17 @@ public final class BackupSystemTable implements Closeable { row = CellUtil.cloneRow(cell); rows.add(row); String rowStr = Bytes.toString(row); - region = BackupSystemTable.getRegionNameFromOrigBulkLoadRow(rowStr); - if (CellUtil.compareQualifiers(cell, BackupSystemTable.FAM_COL, 0, - BackupSystemTable.FAM_COL.length) == 0) { + region = BackupMetaTable.getRegionNameFromOrigBulkLoadRow(rowStr); + if (CellUtil.compareQualifiers(cell, BackupMetaTable.FAM_COL, 0, + BackupMetaTable.FAM_COL.length) == 0) { fam = Bytes.toString(CellUtil.cloneValue(cell)); - } else if (CellUtil.compareQualifiers(cell, BackupSystemTable.PATH_COL, 0, - BackupSystemTable.PATH_COL.length) == 0) { + } else if (CellUtil.compareQualifiers(cell, BackupMetaTable.PATH_COL, 0, + BackupMetaTable.PATH_COL.length) == 0) { path = Bytes.toString(CellUtil.cloneValue(cell)); - } else if (CellUtil.compareQualifiers(cell, BackupSystemTable.STATE_COL, 0, - BackupSystemTable.STATE_COL.length) == 0) { + } else if (CellUtil.compareQualifiers(cell, BackupMetaTable.STATE_COL, 0, + BackupMetaTable.STATE_COL.length) == 0) { byte[] state = CellUtil.cloneValue(cell); - if (Bytes.equals(BackupSystemTable.BL_PREPARE, state)) { + if (Bytes.equals(BackupMetaTable.BL_PREPARE, state)) { raw = true; } else raw = false; } @@ -477,7 +491,7 @@ public final class BackupSystemTable implements Closeable { */ public void writeBulkLoadedFiles(List sTableList, Map>[] maps, String backupId) throws IOException { - try (Table table = connection.getTable(tableName)) { + try (Table table = connection.getTable(bulkLoadTableName)) { long ts = EnvironmentEdgeManager.currentTime(); int cnt = 0; List 
puts = new ArrayList<>(); @@ -490,7 +504,7 @@ public final class BackupSystemTable implements Closeable { List paths = entry.getValue(); for (Path p : paths) { Put put = - BackupSystemTable.createPutForBulkLoadedFile(tn, fam, p.toString(), backupId, ts, + BackupMetaTable.createPutForBulkLoadedFile(tn, fam, p.toString(), backupId, ts, cnt++); puts.add(put); } @@ -1267,21 +1281,28 @@ public final class BackupSystemTable implements Closeable { * Get backup system table descriptor * @return table's descriptor */ - public static HTableDescriptor getSystemTableDescriptor(Configuration conf) { + public static TableDescriptor getSystemTableDescriptor(Configuration conf) { + + TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(getTableName(conf)); + + ColumnFamilyDescriptorBuilder colBuilder = + ColumnFamilyDescriptorBuilder.newBuilder(SESSIONS_FAMILY); - HTableDescriptor tableDesc = new HTableDescriptor(getTableName(conf)); - HColumnDescriptor colSessionsDesc = new HColumnDescriptor(SESSIONS_FAMILY); - colSessionsDesc.setMaxVersions(1); - // Time to keep backup sessions (secs) + colBuilder.setMaxVersions(1); Configuration config = HBaseConfiguration.create(); int ttl = config.getInt(BackupRestoreConstants.BACKUP_SYSTEM_TTL_KEY, BackupRestoreConstants.BACKUP_SYSTEM_TTL_DEFAULT); - colSessionsDesc.setTimeToLive(ttl); - tableDesc.addFamily(colSessionsDesc); - HColumnDescriptor colMetaDesc = new HColumnDescriptor(META_FAMILY); - tableDesc.addFamily(colMetaDesc); - return tableDesc; + colBuilder.setTimeToLive(ttl); + + ColumnFamilyDescriptor colSessionsDesc = colBuilder.build(); + builder.addColumnFamily(colSessionsDesc); + + colBuilder = + ColumnFamilyDescriptorBuilder.newBuilder(META_FAMILY); + colBuilder.setTimeToLive(ttl); + builder.addColumnFamily(colBuilder.build()); + return builder.build(); } public static TableName getTableName(Configuration conf) { @@ -1300,6 +1321,38 @@ public final class BackupSystemTable implements Closeable { } /** + * Get backup system table descriptor + * @return table's descriptor + */ + public static TableDescriptor getSystemTableForBulkLoadedDataDescriptor(Configuration conf) { + + TableDescriptorBuilder builder = + TableDescriptorBuilder.newBuilder(getTableNameForBulkLoadedData(conf)); + + ColumnFamilyDescriptorBuilder colBuilder = + ColumnFamilyDescriptorBuilder.newBuilder(SESSIONS_FAMILY); + colBuilder.setMaxVersions(1); + Configuration config = HBaseConfiguration.create(); + int ttl = + config.getInt(BackupRestoreConstants.BACKUP_SYSTEM_TTL_KEY, + BackupRestoreConstants.BACKUP_SYSTEM_TTL_DEFAULT); + colBuilder.setTimeToLive(ttl); + ColumnFamilyDescriptor colSessionsDesc = colBuilder.build(); + builder.addColumnFamily(colSessionsDesc); + colBuilder = + ColumnFamilyDescriptorBuilder.newBuilder(META_FAMILY); + colBuilder.setTimeToLive(ttl); + builder.addColumnFamily(colBuilder.build()); + return builder.build(); + } + + public static TableName getTableNameForBulkLoadedData(Configuration conf) { + String name = + conf.get(BackupRestoreConstants.BACKUP_SYSTEM_TABLE_NAME_KEY, + BackupRestoreConstants.BACKUP_SYSTEM_TABLE_NAME_DEFAULT) + "_bulk"; + return TableName.valueOf(name); + } + /** * Creates Put operation for a given backup info object * @param context backup info * @return put operation @@ -1307,7 +1360,7 @@ public final class BackupSystemTable implements Closeable { */ private Put createPutForBackupInfo(BackupInfo context) throws IOException { Put put = new Put(rowkey(BACKUP_INFO_PREFIX, context.getBackupId())); - 
put.addColumn(BackupSystemTable.SESSIONS_FAMILY, Bytes.toBytes("context"), + put.addColumn(BackupMetaTable.SESSIONS_FAMILY, Bytes.toBytes("context"), context.toByteArray()); return put; } @@ -1320,7 +1373,7 @@ public final class BackupSystemTable implements Closeable { */ private Get createGetForBackupInfo(String backupId) throws IOException { Get get = new Get(rowkey(BACKUP_INFO_PREFIX, backupId)); - get.addFamily(BackupSystemTable.SESSIONS_FAMILY); + get.addFamily(BackupMetaTable.SESSIONS_FAMILY); get.setMaxVersions(1); return get; } @@ -1333,7 +1386,7 @@ public final class BackupSystemTable implements Closeable { */ private Delete createDeleteForBackupInfo(String backupId) { Delete del = new Delete(rowkey(BACKUP_INFO_PREFIX, backupId)); - del.addFamily(BackupSystemTable.SESSIONS_FAMILY); + del.addFamily(BackupMetaTable.SESSIONS_FAMILY); return del; } @@ -1356,7 +1409,7 @@ public final class BackupSystemTable implements Closeable { */ private Get createGetForStartCode(String rootPath) throws IOException { Get get = new Get(rowkey(START_CODE_ROW, rootPath)); - get.addFamily(BackupSystemTable.META_FAMILY); + get.addFamily(BackupMetaTable.META_FAMILY); get.setMaxVersions(1); return get; } @@ -1368,7 +1421,7 @@ public final class BackupSystemTable implements Closeable { */ private Put createPutForStartCode(String startCode, String rootPath) { Put put = new Put(rowkey(START_CODE_ROW, rootPath)); - put.addColumn(BackupSystemTable.META_FAMILY, Bytes.toBytes("startcode"), + put.addColumn(BackupMetaTable.META_FAMILY, Bytes.toBytes("startcode"), Bytes.toBytes(startCode)); return put; } @@ -1380,7 +1433,7 @@ public final class BackupSystemTable implements Closeable { */ private Get createGetForIncrBackupTableSet(String backupRoot) throws IOException { Get get = new Get(rowkey(INCR_BACKUP_SET, backupRoot)); - get.addFamily(BackupSystemTable.META_FAMILY); + get.addFamily(BackupMetaTable.META_FAMILY); get.setMaxVersions(1); return get; } @@ -1393,7 +1446,7 @@ public final class BackupSystemTable implements Closeable { private Put createPutForIncrBackupTableSet(Set tables, String backupRoot) { Put put = new Put(rowkey(INCR_BACKUP_SET, backupRoot)); for (TableName table : tables) { - put.addColumn(BackupSystemTable.META_FAMILY, Bytes.toBytes(table.getNameAsString()), + put.addColumn(BackupMetaTable.META_FAMILY, Bytes.toBytes(table.getNameAsString()), EMPTY_VALUE); } return put; @@ -1406,7 +1459,7 @@ public final class BackupSystemTable implements Closeable { */ private Delete createDeleteForIncrBackupTableSet(String backupRoot) { Delete delete = new Delete(rowkey(INCR_BACKUP_SET, backupRoot)); - delete.addFamily(BackupSystemTable.META_FAMILY); + delete.addFamily(BackupMetaTable.META_FAMILY); return delete; } @@ -1421,7 +1474,7 @@ public final class BackupSystemTable implements Closeable { stopRow[stopRow.length - 1] = (byte) (stopRow[stopRow.length - 1] + 1); scan.setStartRow(startRow); scan.setStopRow(stopRow); - scan.addFamily(BackupSystemTable.SESSIONS_FAMILY); + scan.addFamily(BackupMetaTable.SESSIONS_FAMILY); scan.setMaxVersions(1); return scan; } @@ -1446,7 +1499,7 @@ public final class BackupSystemTable implements Closeable { private Put createPutForWriteRegionServerLogTimestamp(TableName table, byte[] smap, String backupRoot) { Put put = new Put(rowkey(TABLE_RS_LOG_MAP_PREFIX, backupRoot, NULL, table.getNameAsString())); - put.addColumn(BackupSystemTable.META_FAMILY, Bytes.toBytes("log-roll-map"), smap); + put.addColumn(BackupMetaTable.META_FAMILY, Bytes.toBytes("log-roll-map"), smap); return 
put; } @@ -1461,7 +1514,7 @@ public final class BackupSystemTable implements Closeable { stopRow[stopRow.length - 1] = (byte) (stopRow[stopRow.length - 1] + 1); scan.setStartRow(startRow); scan.setStopRow(stopRow); - scan.addFamily(BackupSystemTable.META_FAMILY); + scan.addFamily(BackupMetaTable.META_FAMILY); return scan; } @@ -1486,7 +1539,7 @@ public final class BackupSystemTable implements Closeable { private Put createPutForRegionServerLastLogRollResult(String server, Long timestamp, String backupRoot) { Put put = new Put(rowkey(RS_LOG_TS_PREFIX, backupRoot, NULL, server)); - put.addColumn(BackupSystemTable.META_FAMILY, Bytes.toBytes("rs-log-ts"), + put.addColumn(BackupMetaTable.META_FAMILY, Bytes.toBytes("rs-log-ts"), Bytes.toBytes(timestamp)); return put; } @@ -1502,7 +1555,7 @@ public final class BackupSystemTable implements Closeable { stopRow[stopRow.length - 1] = (byte) (stopRow[stopRow.length - 1] + 1); scan.setStartRow(startRow); scan.setStopRow(stopRow); - scan.addFamily(BackupSystemTable.META_FAMILY); + scan.addFamily(BackupMetaTable.META_FAMILY); scan.setMaxVersions(1); return scan; @@ -1533,10 +1586,10 @@ public final class BackupSystemTable implements Closeable { Put put = new Put(rowkey(BULK_LOAD_PREFIX, table.toString(), BLK_LD_DELIM, Bytes.toString(region), BLK_LD_DELIM, filename)); - put.addColumn(BackupSystemTable.META_FAMILY, TBL_COL, table.getName()); - put.addColumn(BackupSystemTable.META_FAMILY, FAM_COL, entry.getKey()); - put.addColumn(BackupSystemTable.META_FAMILY, PATH_COL, file.getBytes()); - put.addColumn(BackupSystemTable.META_FAMILY, STATE_COL, BL_COMMIT); + put.addColumn(BackupMetaTable.META_FAMILY, TBL_COL, table.getName()); + put.addColumn(BackupMetaTable.META_FAMILY, FAM_COL, entry.getKey()); + put.addColumn(BackupMetaTable.META_FAMILY, PATH_COL, file.getBytes()); + put.addColumn(BackupMetaTable.META_FAMILY, STATE_COL, BL_COMMIT); puts.add(put); LOG.debug("writing done bulk path " + file + " for " + table + " " + Bytes.toString(region)); } @@ -1547,19 +1600,19 @@ public final class BackupSystemTable implements Closeable { public static void snapshot(Connection conn) throws IOException { try (Admin admin = conn.getAdmin()) { Configuration conf = conn.getConfiguration(); - admin.snapshot(BackupSystemTable.getSnapshotName(conf), BackupSystemTable.getTableName(conf)); + admin.snapshot(BackupMetaTable.getSnapshotName(conf), BackupMetaTable.getTableName(conf)); } } public static void restoreFromSnapshot(Connection conn) throws IOException { Configuration conf = conn.getConfiguration(); - LOG.debug("Restoring " + BackupSystemTable.getTableNameAsString(conf) + " from snapshot"); + LOG.debug("Restoring " + BackupMetaTable.getTableNameAsString(conf) + " from snapshot"); try (Admin admin = conn.getAdmin()) { - String snapshotName = BackupSystemTable.getSnapshotName(conf); + String snapshotName = BackupMetaTable.getSnapshotName(conf); if (snapshotExists(admin, snapshotName)) { - admin.disableTable(BackupSystemTable.getTableName(conf)); + admin.disableTable(BackupMetaTable.getTableName(conf)); admin.restoreSnapshot(snapshotName); - admin.enableTable(BackupSystemTable.getTableName(conf)); + admin.enableTable(BackupMetaTable.getTableName(conf)); LOG.debug("Done restoring backup system table"); } else { // Snapshot does not exists, i.e completeBackup failed after @@ -1587,9 +1640,9 @@ public final class BackupSystemTable implements Closeable { public static void deleteSnapshot(Connection conn) throws IOException { Configuration conf = conn.getConfiguration(); - 
LOG.debug("Deleting " + BackupSystemTable.getSnapshotName(conf) + " from the system"); + LOG.debug("Deleting " + BackupMetaTable.getSnapshotName(conf) + " from the system"); try (Admin admin = conn.getAdmin()) { - String snapshotName = BackupSystemTable.getSnapshotName(conf); + String snapshotName = BackupMetaTable.getSnapshotName(conf); if (snapshotExists(admin, snapshotName)) { admin.deleteSnapshot(snapshotName); LOG.debug("Done deleting backup system table snapshot"); @@ -1613,10 +1666,10 @@ public final class BackupSystemTable implements Closeable { Put put = new Put(rowkey(BULK_LOAD_PREFIX, table.toString(), BLK_LD_DELIM, Bytes.toString(region), BLK_LD_DELIM, filename)); - put.addColumn(BackupSystemTable.META_FAMILY, TBL_COL, table.getName()); - put.addColumn(BackupSystemTable.META_FAMILY, FAM_COL, family); - put.addColumn(BackupSystemTable.META_FAMILY, PATH_COL, file.getBytes()); - put.addColumn(BackupSystemTable.META_FAMILY, STATE_COL, BL_PREPARE); + put.addColumn(BackupMetaTable.META_FAMILY, TBL_COL, table.getName()); + put.addColumn(BackupMetaTable.META_FAMILY, FAM_COL, family); + put.addColumn(BackupMetaTable.META_FAMILY, PATH_COL, file.getBytes()); + put.addColumn(BackupMetaTable.META_FAMILY, STATE_COL, BL_PREPARE); puts.add(put); LOG.debug("writing raw bulk path " + file + " for " + table + " " + Bytes.toString(region)); } @@ -1627,7 +1680,7 @@ public final class BackupSystemTable implements Closeable { List lstDels = new ArrayList<>(lst.size()); for (TableName table : lst) { Delete del = new Delete(rowkey(BULK_LOAD_PREFIX, table.toString(), BLK_LD_DELIM)); - del.addFamily(BackupSystemTable.META_FAMILY); + del.addFamily(BackupMetaTable.META_FAMILY); lstDels.add(del); } return lstDels; @@ -1777,7 +1830,7 @@ public final class BackupSystemTable implements Closeable { stopRow[stopRow.length - 1] = (byte) (stopRow[stopRow.length - 1] + 1); scan.withStartRow(startRow); scan.withStopRow(stopRow); - scan.addFamily(BackupSystemTable.META_FAMILY); + scan.addFamily(BackupMetaTable.META_FAMILY); scan.setMaxVersions(1); return scan; } @@ -1814,7 +1867,7 @@ public final class BackupSystemTable implements Closeable { scan.setStartRow(startRow); scan.setStopRow(stopRow); // scan.setTimeRange(lower, Long.MAX_VALUE); - scan.addFamily(BackupSystemTable.META_FAMILY); + scan.addFamily(BackupMetaTable.META_FAMILY); scan.setMaxVersions(1); return scan; } @@ -1822,9 +1875,9 @@ public final class BackupSystemTable implements Closeable { static Put createPutForBulkLoadedFile(TableName tn, byte[] fam, String p, String backupId, long ts, int idx) { Put put = new Put(rowkey(BULK_LOAD_PREFIX, backupId + BLK_LD_DELIM + ts + BLK_LD_DELIM + idx)); - put.addColumn(BackupSystemTable.META_FAMILY, TBL_COL, tn.getName()); - put.addColumn(BackupSystemTable.META_FAMILY, FAM_COL, fam); - put.addColumn(BackupSystemTable.META_FAMILY, PATH_COL, p.getBytes()); + put.addColumn(BackupMetaTable.META_FAMILY, TBL_COL, tn.getName()); + put.addColumn(BackupMetaTable.META_FAMILY, FAM_COL, fam); + put.addColumn(BackupMetaTable.META_FAMILY, PATH_COL, p.getBytes()); return put; } @@ -1841,10 +1894,10 @@ public final class BackupSystemTable implements Closeable { List puts = new ArrayList(files.size()); for (String file : files) { Put put = new Put(rowkey(WALS_PREFIX, BackupUtils.getUniqueWALFileNamePart(file))); - put.addColumn(BackupSystemTable.META_FAMILY, Bytes.toBytes("backupId"), + put.addColumn(BackupMetaTable.META_FAMILY, Bytes.toBytes("backupId"), Bytes.toBytes(backupId)); - put.addColumn(BackupSystemTable.META_FAMILY, 
Bytes.toBytes("file"), Bytes.toBytes(file)); - put.addColumn(BackupSystemTable.META_FAMILY, Bytes.toBytes("root"), Bytes.toBytes(backupRoot)); + put.addColumn(BackupMetaTable.META_FAMILY, Bytes.toBytes("file"), Bytes.toBytes(file)); + put.addColumn(BackupMetaTable.META_FAMILY, Bytes.toBytes("root"), Bytes.toBytes(backupRoot)); puts.add(put); } return puts; @@ -1863,7 +1916,7 @@ public final class BackupSystemTable implements Closeable { stopRow[stopRow.length - 1] = (byte) (stopRow[stopRow.length - 1] + 1); scan.setStartRow(startRow); scan.setStopRow(stopRow); - scan.addFamily(BackupSystemTable.META_FAMILY); + scan.addFamily(BackupMetaTable.META_FAMILY); return scan; } @@ -1876,7 +1929,7 @@ public final class BackupSystemTable implements Closeable { private Get createGetForCheckWALFile(String file) throws IOException { Get get = new Get(rowkey(WALS_PREFIX, BackupUtils.getUniqueWALFileNamePart(file))); // add backup root column - get.addFamily(BackupSystemTable.META_FAMILY); + get.addFamily(BackupMetaTable.META_FAMILY); return get; } @@ -1891,7 +1944,7 @@ public final class BackupSystemTable implements Closeable { stopRow[stopRow.length - 1] = (byte) (stopRow[stopRow.length - 1] + 1); scan.setStartRow(startRow); scan.setStopRow(stopRow); - scan.addFamily(BackupSystemTable.META_FAMILY); + scan.addFamily(BackupMetaTable.META_FAMILY); return scan; } @@ -1901,7 +1954,7 @@ public final class BackupSystemTable implements Closeable { */ private Get createGetForBackupSet(String name) { Get get = new Get(rowkey(SET_KEY_PREFIX, name)); - get.addFamily(BackupSystemTable.META_FAMILY); + get.addFamily(BackupMetaTable.META_FAMILY); return get; } @@ -1912,7 +1965,7 @@ public final class BackupSystemTable implements Closeable { */ private Delete createDeleteForBackupSet(String name) { Delete del = new Delete(rowkey(SET_KEY_PREFIX, name)); - del.addFamily(BackupSystemTable.META_FAMILY); + del.addFamily(BackupMetaTable.META_FAMILY); return del; } @@ -1925,7 +1978,7 @@ public final class BackupSystemTable implements Closeable { private Put createPutForBackupSet(String name, String[] tables) { Put put = new Put(rowkey(SET_KEY_PREFIX, name)); byte[] value = convertToByteArray(tables); - put.addColumn(BackupSystemTable.META_FAMILY, Bytes.toBytes("tables"), value); + put.addColumn(BackupMetaTable.META_FAMILY, Bytes.toBytes("tables"), value); return put; } diff --git hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalBackupManager.java hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalBackupManager.java index 74408c31c3..a9ff4c7b0d 100644 --- hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalBackupManager.java +++ hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalBackupManager.java @@ -35,7 +35,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.PathFilter; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.backup.impl.BackupSystemTable.WALItem; +import org.apache.hadoop.hbase.backup.impl.BackupMetaTable.WALItem; import org.apache.hadoop.hbase.backup.master.LogRollMasterProcedureManager; import org.apache.hadoop.hbase.backup.util.BackupUtils; import org.apache.yetus.audience.InterfaceAudience; diff --git hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalTableBackupClient.java hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalTableBackupClient.java index 37c45e016b..cc71e425c0 100644 --- 
hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalTableBackupClient.java +++ hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalTableBackupClient.java @@ -110,6 +110,7 @@ public class IncrementalTableBackupClient extends TableBackupClient { * @param sTableList list of tables to be backed up * @return map of table to List of files */ + @SuppressWarnings("unchecked") protected Map>[] handleBulkLoad(List sTableList) throws IOException { Map>[] mapForSrc = new Map[sTableList.size()]; List activeFiles = new ArrayList(); @@ -126,6 +127,7 @@ public class IncrementalTableBackupClient extends TableBackupClient { } Path rootdir = FSUtils.getRootDir(conf); Path tgtRoot = new Path(new Path(backupInfo.getBackupRootDir()), backupId); + for (Map.Entry>>>> tblEntry : map.entrySet()) { TableName srcTable = tblEntry.getKey(); @@ -192,26 +194,47 @@ public class IncrementalTableBackupClient extends TableBackupClient { } copyBulkLoadedFiles(activeFiles, archiveFiles); - - backupManager.writeBulkLoadedFiles(sTableList, mapForSrc); - backupManager.removeBulkLoadedRows(sTableList, pair.getSecond()); + backupManager.deleteBulkLoadedRows(pair.getSecond()); return mapForSrc; } private void copyBulkLoadedFiles(List activeFiles, List archiveFiles) - throws IOException - { + throws IOException { try { // Enable special mode of BackupDistCp conf.setInt(MapReduceBackupCopyJob.NUMBER_OF_LEVELS_TO_PRESERVE_KEY, 5); // Copy active files String tgtDest = backupInfo.getBackupRootDir() + Path.SEPARATOR + backupInfo.getBackupId(); - if (activeFiles.size() > 0) { + int attempt = 1; + while (activeFiles.size() > 0) { + LOG.info("Copy "+ activeFiles.size() + + " active bulk loaded files. Attempt ="+ (attempt++)); String[] toCopy = new String[activeFiles.size()]; activeFiles.toArray(toCopy); - incrementalCopyHFiles(toCopy, tgtDest); + // Active file can be archived during copy operation, + // we need to handle this properly + try { + incrementalCopyHFiles(toCopy, tgtDest); + break; + } catch (IOException e) { + // Check if some files got archived + // Update active and archived lists + // When file is being moved from active to archive + // directory, the number of active files decreases + + int numOfActive = activeFiles.size(); + updateFileLists(activeFiles, archiveFiles); + if (activeFiles.size() < numOfActive) { + continue; + } + // if not - throw exception + throw e; + } } + // If incremental copy will fail for archived files + // we will have partially loaded files in backup destination (only files from active data + // directory). 
It is OK, because the backup will be marked as FAILED and data will be cleaned up if (archiveFiles.size() > 0) { String[] toCopy = new String[archiveFiles.size()]; archiveFiles.toArray(toCopy); @@ -224,6 +247,26 @@ public class IncrementalTableBackupClient extends TableBackupClient { } + private void updateFileLists(List activeFiles, List archiveFiles) + throws IOException { + FileSystem fs = FileSystem.get(conf); + List newlyArchived = new ArrayList(); + + for (String spath : activeFiles) { + if (!fs.exists(new Path(spath))) { + newlyArchived.add(spath); + } + } + + if (newlyArchived.size() > 0) { + activeFiles.removeAll(newlyArchived); + archiveFiles.addAll(newlyArchived); + } + + LOG.debug(newlyArchived.size() + " files have been archived."); + + } + @Override public void execute() throws IOException { diff --git hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/RestoreTablesClient.java hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/RestoreTablesClient.java index 099a70da39..6d178259e0 100644 --- hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/RestoreTablesClient.java +++ hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/RestoreTablesClient.java @@ -21,13 +21,10 @@ package org.apache.hadoop.hbase.backup.impl; import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.JOB_NAME_CONF_KEY; import java.io.IOException; -import java.nio.ByteBuffer; import java.util.ArrayList; -import java.util.Arrays; import java.util.HashMap; import java.util.HashSet; import java.util.List; -import java.util.Map; import java.util.Set; import java.util.TreeSet; @@ -35,19 +32,20 @@ import org.apache.commons.lang3.StringUtils; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.LocatedFileStatus; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.RemoteIterator; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.backup.BackupType; import org.apache.hadoop.hbase.backup.HBackupFileSystem; import org.apache.hadoop.hbase.backup.RestoreRequest; import org.apache.hadoop.hbase.backup.impl.BackupManifest.BackupImage; -import org.apache.hadoop.hbase.backup.util.BackupUtils; import org.apache.hadoop.hbase.backup.util.RestoreTool; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.Connection; -import org.apache.hadoop.hbase.tool.LoadIncrementalHFiles; -import org.apache.hadoop.hbase.tool.LoadIncrementalHFiles.LoadQueueItem; +import org.apache.hadoop.hbase.io.hfile.HFile; +import org.apache.yetus.audience.InterfaceAudience; /** * Restore table implementation @@ -171,8 +169,10 @@ public class RestoreTablesClient { for (int i = 1; i < images.length; i++) { BackupImage im = images[i]; String fileBackupDir = - HBackupFileSystem.getTableBackupDataDir(im.getRootDir(), im.getBackupId(), sTable); - dirList.add(new Path(fileBackupDir)); + HBackupFileSystem.getTableBackupDir(im.getRootDir(), im.getBackupId(), sTable); + List list = getFilesRecursively(fileBackupDir); + dirList.addAll(list); + } String dirs = StringUtils.join(dirList, ","); @@ -185,6 +185,22 @@ public class RestoreTablesClient { LOG.info(sTable + " has been successfully restored to " + tTable); } + private List getFilesRecursively(String fileBackupDir) throws IllegalArgumentException, + IOException + { + FileSystem fs = FileSystem.get((new
Path(fileBackupDir)).toUri(), + new Configuration()); + List list = new ArrayList(); + RemoteIterator it = fs.listFiles(new Path(fileBackupDir), true); + while (it.hasNext()) { + Path p = it.next().getPath(); + if (HFile.isHFileFormat(fs, p)) { + list.add(p); + } + } + return list; + } + /** * Restore operation. Stage 2: resolved Backup Image dependency * @param backupManifestMap : tableName, Manifest @@ -226,27 +242,6 @@ public class RestoreTablesClient { } } } - try (BackupSystemTable table = new BackupSystemTable(conn)) { - List sTableList = Arrays.asList(sTableArray); - for (String id : backupIdSet) { - LOG.debug("restoring bulk load for " + id); - Map>[] mapForSrc = table.readBulkLoadedFiles(id, sTableList); - Map loaderResult; - conf.setBoolean(LoadIncrementalHFiles.ALWAYS_COPY_FILES, true); - LoadIncrementalHFiles loader = BackupUtils.createLoader(conf); - for (int i = 0; i < sTableList.size(); i++) { - if (mapForSrc[i] != null && !mapForSrc[i].isEmpty()) { - loaderResult = loader.run(mapForSrc[i], tTableArray[i]); - LOG.debug("bulk loading " + sTableList.get(i) + " to " + tTableArray[i]); - if (loaderResult.isEmpty()) { - String msg = "Couldn't bulk load for " + sTableList.get(i) + " to " + tTableArray[i]; - LOG.error(msg); - throw new IOException(msg); - } - } - } - } - } LOG.debug("restoreStage finished"); } diff --git hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/TableBackupClient.java hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/TableBackupClient.java index 05fcec327f..40e73fb81e 100644 --- hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/TableBackupClient.java +++ hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/TableBackupClient.java @@ -37,13 +37,12 @@ import org.apache.hadoop.hbase.backup.BackupRestoreConstants; import org.apache.hadoop.hbase.backup.BackupType; import org.apache.hadoop.hbase.backup.HBackupFileSystem; import org.apache.hadoop.hbase.backup.impl.BackupManifest.BackupImage; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.FSUtils; - -import org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting; +import org.apache.yetus.audience.InterfaceAudience; /** * Base class for backup operation. 
Concrete implementation for @@ -108,7 +107,7 @@ public abstract class TableBackupClient { protected void beginBackup(BackupManager backupManager, BackupInfo backupInfo) throws IOException { - BackupSystemTable.snapshot(conn); + BackupMetaTable.snapshot(conn); backupManager.setBackupInfo(backupInfo); // set the start timestamp of the overall backup long startTs = EnvironmentEdgeManager.currentTime(); @@ -258,22 +257,21 @@ public abstract class TableBackupClient { } } - public static void cleanupAndRestoreBackupSystem (Connection conn, BackupInfo backupInfo, - Configuration conf) throws IOException - { + public static void cleanupAndRestoreBackupSystem(Connection conn, BackupInfo backupInfo, + Configuration conf) throws IOException { BackupType type = backupInfo.getType(); - // if full backup, then delete HBase snapshots if there already are snapshots taken - // and also clean up export snapshot log files if exist - if (type == BackupType.FULL) { - deleteSnapshots(conn, backupInfo, conf); - cleanupExportSnapshotLog(conf); - } - BackupSystemTable.restoreFromSnapshot(conn); - BackupSystemTable.deleteSnapshot(conn); - // clean up the uncompleted data at target directory if the ongoing backup has already entered - // the copy phase - // For incremental backup, DistCp logs will be cleaned with the targetDir. - cleanupTargetDir(backupInfo, conf); + // if full backup, then delete HBase snapshots if there already are snapshots taken + // and also clean up export snapshot log files if exist + if (type == BackupType.FULL) { + deleteSnapshots(conn, backupInfo, conf); + cleanupExportSnapshotLog(conf); + } + BackupMetaTable.restoreFromSnapshot(conn); + BackupMetaTable.deleteSnapshot(conn); + // clean up the uncompleted data at target directory if the ongoing backup has already entered + // the copy phase + // For incremental backup, DistCp logs will be cleaned with the targetDir. 
+ cleanupTargetDir(backupInfo, conf); } @@ -402,7 +400,7 @@ public abstract class TableBackupClient { } else if (type == BackupType.INCREMENTAL) { cleanupDistCpLog(backupInfo, conf); } - BackupSystemTable.deleteSnapshot(conn); + BackupMetaTable.deleteSnapshot(conn); backupManager.updateBackupInfo(backupInfo); // Finish active session diff --git hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceBackupMergeJob.java hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceBackupMergeJob.java index b93d911fae..836e87ce3a 100644 --- hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceBackupMergeJob.java +++ hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceBackupMergeJob.java @@ -38,7 +38,7 @@ import org.apache.hadoop.hbase.backup.BackupMergeJob; import org.apache.hadoop.hbase.backup.BackupRestoreConstants; import org.apache.hadoop.hbase.backup.HBackupFileSystem; import org.apache.hadoop.hbase.backup.impl.BackupManifest; -import org.apache.hadoop.hbase.backup.impl.BackupSystemTable; +import org.apache.hadoop.hbase.backup.impl.BackupMetaTable; import org.apache.hadoop.hbase.backup.util.BackupUtils; import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.client.Connection; @@ -90,7 +90,7 @@ public class MapReduceBackupMergeJob implements BackupMergeJob { List> processedTableList = new ArrayList>(); boolean finishedTables = false; Connection conn = ConnectionFactory.createConnection(getConf()); - BackupSystemTable table = new BackupSystemTable(conn); + BackupMetaTable table = new BackupMetaTable(conn); FileSystem fs = FileSystem.get(getConf()); try { @@ -223,7 +223,7 @@ public class MapReduceBackupMergeJob implements BackupMergeJob { String backupRoot) throws IOException { // Delete from backup system table - try (BackupSystemTable table = new BackupSystemTable(conn)) { + try (BackupMetaTable table = new BackupMetaTable(conn)) { for (String backupId : backupIds) { table.deleteBackupInfo(backupId); } @@ -286,7 +286,7 @@ public class MapReduceBackupMergeJob implements BackupMergeJob { Set allSet = new HashSet(); try (Connection conn = ConnectionFactory.createConnection(conf); - BackupSystemTable table = new BackupSystemTable(conn)) { + BackupMetaTable table = new BackupMetaTable(conn)) { for (String backupId : backupIds) { BackupInfo bInfo = table.readBackupInfo(backupId); diff --git hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceRestoreJob.java hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceRestoreJob.java index bed61ed321..4db828e1d5 100644 --- hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceRestoreJob.java +++ hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceRestoreJob.java @@ -31,9 +31,9 @@ import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.backup.BackupRestoreConstants; import org.apache.hadoop.hbase.backup.RestoreJob; import org.apache.hadoop.hbase.backup.util.BackupUtils; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.tool.LoadIncrementalHFiles; import org.apache.hadoop.util.Tool; +import org.apache.yetus.audience.InterfaceAudience; /** diff --git hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/master/BackupLogCleaner.java hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/master/BackupLogCleaner.java index 0f1722fd6c..3826c8f40c 100644 --- 
hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/master/BackupLogCleaner.java +++ hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/master/BackupLogCleaner.java @@ -31,7 +31,7 @@ import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.TableNotFoundException; import org.apache.hadoop.hbase.backup.BackupRestoreConstants; import org.apache.hadoop.hbase.backup.impl.BackupManager; -import org.apache.hadoop.hbase.backup.impl.BackupSystemTable; +import org.apache.hadoop.hbase.backup.impl.BackupMetaTable; import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; @@ -84,7 +84,7 @@ public class BackupLogCleaner extends BaseLogCleanerDelegate { } List list = new ArrayList(); - try (final BackupSystemTable table = new BackupSystemTable(conn)) { + try (final BackupMetaTable table = new BackupMetaTable(conn)) { // If we do not have recorded backup sessions try { if (!table.hasBackupSessions()) { diff --git hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/regionserver/LogRollBackupSubprocedure.java hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/regionserver/LogRollBackupSubprocedure.java index 183463dcfd..a182cfd8c5 100644 --- hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/regionserver/LogRollBackupSubprocedure.java +++ hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/regionserver/LogRollBackupSubprocedure.java @@ -24,7 +24,7 @@ import java.util.concurrent.Callable; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hbase.backup.impl.BackupSystemTable; +import org.apache.hadoop.hbase.backup.impl.BackupMetaTable; import org.apache.hadoop.hbase.backup.master.LogRollMasterProcedureManager; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.errorhandling.ForeignException; @@ -100,7 +100,7 @@ public class LogRollBackupSubprocedure extends Subprocedure { + " on " + rss.getServerName()); Connection connection = rss.getConnection(); - try (final BackupSystemTable table = new BackupSystemTable(connection)) { + try (final BackupMetaTable table = new BackupMetaTable(connection)) { // sanity check, good for testing HashMap serverTimestampMap = table.readRegionServerLastLogRollResult(backupRoot); diff --git hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java index 8d23c69746..6c5dc79729 100644 --- hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java +++ hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java @@ -44,7 +44,7 @@ import org.apache.hadoop.hbase.backup.BackupInfo.BackupPhase; import org.apache.hadoop.hbase.backup.BackupInfo.BackupState; import org.apache.hadoop.hbase.backup.impl.BackupAdminImpl; import org.apache.hadoop.hbase.backup.impl.BackupManager; -import org.apache.hadoop.hbase.backup.impl.BackupSystemTable; +import org.apache.hadoop.hbase.backup.impl.BackupMetaTable; import org.apache.hadoop.hbase.backup.impl.FullTableBackupClient; import org.apache.hadoop.hbase.backup.impl.IncrementalBackupManager; import org.apache.hadoop.hbase.backup.impl.IncrementalTableBackupClient; @@ -58,7 +58,6 @@ import org.apache.hadoop.hbase.client.HBaseAdmin; import org.apache.hadoop.hbase.client.HTable; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Table; -import 
org.apache.hadoop.hbase.coprocessor.CoprocessorHost; import org.apache.hadoop.hbase.security.HadoopSecurityEnabledUserProviderForTesting; import org.apache.hadoop.hbase.security.UserProvider; import org.apache.hadoop.hbase.security.access.SecureTestUtil; @@ -296,9 +295,6 @@ public class TestBackupBase { // setup configuration SecureTestUtil.enableSecurity(TEST_UTIL.getConfiguration()); } - String coproc = conf1.get(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY); - conf1.set(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, (coproc == null ? "" : coproc + ",") + - BackupObserver.class.getName()); conf1.setBoolean(BackupRestoreConstants.BACKUP_ENABLE_KEY, true); BackupManager.decorateMasterConfiguration(conf1); BackupManager.decorateRegionServerConfiguration(conf1); @@ -475,7 +471,7 @@ public class TestBackupBase { } private BackupInfo getBackupInfo(String backupId) throws IOException { - try (BackupSystemTable table = new BackupSystemTable(TEST_UTIL.getConnection())) { + try (BackupMetaTable table = new BackupMetaTable(TEST_UTIL.getConnection())) { BackupInfo status = table.readBackupInfo(backupId); return status; } diff --git hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDelete.java hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDelete.java index ebfc735513..f668b8e2b9 100644 --- hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDelete.java +++ hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDelete.java @@ -29,7 +29,7 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.backup.impl.BackupSystemTable; +import org.apache.hadoop.hbase.backup.impl.BackupMetaTable; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.util.ToolRunner; import org.junit.Test; @@ -55,7 +55,7 @@ public class TestBackupDelete extends TestBackupBase { assertTrue(checkSucceeded(backupId)); LOG.info("backup complete"); String[] backupIds = new String[] { backupId }; - BackupSystemTable table = new BackupSystemTable(TEST_UTIL.getConnection()); + BackupMetaTable table = new BackupMetaTable(TEST_UTIL.getConnection()); BackupInfo info = table.readBackupInfo(backupId); Path path = new Path(info.getBackupRootDir(), backupId); FileSystem fs = FileSystem.get(path.toUri(), conf1); diff --git hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDeleteWithFailures.java hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDeleteWithFailures.java index af8e90785f..d7e48ac2ec 100644 --- hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDeleteWithFailures.java +++ hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDeleteWithFailures.java @@ -32,7 +32,7 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.backup.impl.BackupSystemTable; +import org.apache.hadoop.hbase.backup.impl.BackupMetaTable; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.SnapshotDescription; @@ -145,7 +145,7 @@ public class TestBackupDeleteWithFailures extends TestBackupBase{ assertTrue(checkSucceeded(backupId)); LOG.info("backup complete"); String[] backupIds = new String[] { backupId }; - BackupSystemTable table = new 
BackupSystemTable(TEST_UTIL.getConnection()); + BackupMetaTable table = new BackupMetaTable(TEST_UTIL.getConnection()); BackupInfo info = table.readBackupInfo(backupId); Path path = new Path(info.getBackupRootDir(), backupId); FileSystem fs = FileSystem.get(path.toUri(), conf1); diff --git hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDescribe.java hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDescribe.java index 0672325865..b3f1ed299f 100644 --- hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDescribe.java +++ hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDescribe.java @@ -30,7 +30,7 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.backup.BackupInfo.BackupState; import org.apache.hadoop.hbase.backup.impl.BackupCommands; -import org.apache.hadoop.hbase.backup.impl.BackupSystemTable; +import org.apache.hadoop.hbase.backup.impl.BackupMetaTable; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.util.ToolRunner; import org.junit.Test; @@ -99,7 +99,7 @@ public class TestBackupDescribe extends TestBackupBase { assertTrue(response.indexOf(backupId) > 0); assertTrue(response.indexOf("COMPLETE") > 0); - BackupSystemTable table = new BackupSystemTable(TEST_UTIL.getConnection()); + BackupMetaTable table = new BackupMetaTable(TEST_UTIL.getConnection()); BackupInfo status = table.readBackupInfo(backupId); String desc = status.getShortDescription(); table.close(); diff --git hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupHFileCleaner.java hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupHFileCleaner.java index 6c1890b4a8..83ddeee748 100644 --- hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupHFileCleaner.java +++ hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupHFileCleaner.java @@ -34,7 +34,7 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.backup.impl.BackupSystemTable; +import org.apache.hadoop.hbase.backup.impl.BackupMetaTable; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.testclassification.MasterTests; @@ -119,7 +119,7 @@ public class TestBackupHFileCleaner { List list = new ArrayList<>(1); list.add(file); try (Connection conn = ConnectionFactory.createConnection(conf); - BackupSystemTable sysTbl = new BackupSystemTable(conn)) { + BackupMetaTable sysTbl = new BackupMetaTable(conn)) { List sTableList = new ArrayList<>(); sTableList.add(tableName); Map>[] maps = new Map[1]; diff --git hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupSystemTable.java hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupMetaTable.java similarity index 93% rename from hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupSystemTable.java rename to hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupMetaTable.java index ee5f9b9f18..16f8b7119b 100644 --- hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupSystemTable.java +++ hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupMetaTable.java @@ -41,7 +41,7 @@ import org.apache.hadoop.hbase.MiniHBaseCluster; import org.apache.hadoop.hbase.TableName; import 
org.apache.hadoop.hbase.backup.BackupInfo.BackupState; import org.apache.hadoop.hbase.backup.impl.BackupManager; -import org.apache.hadoop.hbase.backup.impl.BackupSystemTable; +import org.apache.hadoop.hbase.backup.impl.BackupMetaTable; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.testclassification.MediumTests; @@ -56,13 +56,13 @@ import org.junit.experimental.categories.Category; * Test cases for backup system table API */ @Category(MediumTests.class) -public class TestBackupSystemTable { +public class TestBackupMetaTable { private static final HBaseTestingUtility UTIL = new HBaseTestingUtility(); protected static Configuration conf = UTIL.getConfiguration(); protected static MiniHBaseCluster cluster; protected static Connection conn; - protected BackupSystemTable table; + protected BackupMetaTable table; @BeforeClass public static void setUp() throws Exception { @@ -75,7 +75,7 @@ public class TestBackupSystemTable { @Before public void before() throws IOException { - table = new BackupSystemTable(conn); + table = new BackupMetaTable(conn); } @After @@ -113,10 +113,10 @@ public class TestBackupSystemTable { private void cleanBackupTable() throws IOException { Admin admin = UTIL.getHBaseAdmin(); - admin.disableTable(BackupSystemTable.getTableName(conf)); - admin.truncateTable(BackupSystemTable.getTableName(conf), true); - if (admin.isTableDisabled(BackupSystemTable.getTableName(conf))) { - admin.enableTable(BackupSystemTable.getTableName(conf)); + admin.disableTable(BackupMetaTable.getTableName(conf)); + admin.truncateTable(BackupMetaTable.getTableName(conf), true); + if (admin.isTableDisabled(BackupMetaTable.getTableName(conf))) { + admin.enableTable(BackupMetaTable.getTableName(conf)); } } @@ -150,7 +150,7 @@ public class TestBackupSystemTable { @Test public void testBackupDelete() throws IOException { - try (BackupSystemTable table = new BackupSystemTable(conn)) { + try (BackupMetaTable table = new BackupMetaTable(conn)) { int n = 10; List list = createBackupInfoList(n); @@ -223,7 +223,7 @@ public class TestBackupSystemTable { tables2.add(TableName.valueOf("t5")); table.addIncrementalBackupTableSet(tables1, "root"); - BackupSystemTable table = new BackupSystemTable(conn); + BackupMetaTable table = new BackupMetaTable(conn); TreeSet res1 = (TreeSet) table.getIncrementalBackupTableSet("root"); assertTrue(tables1.size() == res1.size()); Iterator desc1 = tables1.descendingIterator(); @@ -344,7 +344,7 @@ public class TestBackupSystemTable { @Test public void testBackupSetAddNotExists() throws IOException { - try (BackupSystemTable table = new BackupSystemTable(conn)) { + try (BackupMetaTable table = new BackupMetaTable(conn)) { String[] tables = new String[] { "table1", "table2", "table3" }; String setName = "name"; @@ -362,7 +362,7 @@ public class TestBackupSystemTable { @Test public void testBackupSetAddExists() throws IOException { - try (BackupSystemTable table = new BackupSystemTable(conn)) { + try (BackupMetaTable table = new BackupMetaTable(conn)) { String[] tables = new String[] { "table1", "table2", "table3" }; String setName = "name"; @@ -385,7 +385,7 @@ public class TestBackupSystemTable { @Test public void testBackupSetAddExistsIntersects() throws IOException { - try (BackupSystemTable table = new BackupSystemTable(conn)) { + try (BackupMetaTable table = new BackupMetaTable(conn)) { String[] tables = new String[] { "table1", "table2", "table3" }; String setName = "name"; @@ -408,7 +408,7 @@ public 
class TestBackupSystemTable { @Test public void testBackupSetRemoveSomeNotExists() throws IOException { - try (BackupSystemTable table = new BackupSystemTable(conn)) { + try (BackupMetaTable table = new BackupMetaTable(conn)) { String[] tables = new String[] { "table1", "table2", "table3", "table4" }; String setName = "name"; @@ -430,7 +430,7 @@ public class TestBackupSystemTable { @Test public void testBackupSetRemove() throws IOException { - try (BackupSystemTable table = new BackupSystemTable(conn)) { + try (BackupMetaTable table = new BackupMetaTable(conn)) { String[] tables = new String[] { "table1", "table2", "table3", "table4" }; String setName = "name"; @@ -452,7 +452,7 @@ public class TestBackupSystemTable { @Test public void testBackupSetDelete() throws IOException { - try (BackupSystemTable table = new BackupSystemTable(conn)) { + try (BackupMetaTable table = new BackupMetaTable(conn)) { String[] tables = new String[] { "table1", "table2", "table3", "table4" }; String setName = "name"; @@ -467,7 +467,7 @@ public class TestBackupSystemTable { @Test public void testBackupSetList() throws IOException { - try (BackupSystemTable table = new BackupSystemTable(conn)) { + try (BackupMetaTable table = new BackupMetaTable(conn)) { String[] tables = new String[] { "table1", "table2", "table3", "table4" }; String setName1 = "name1"; diff --git hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupRepair.java hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupRepair.java index 4b1d84f086..e28e953646 100644 --- hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupRepair.java +++ hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupRepair.java @@ -26,7 +26,7 @@ import java.util.Set; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.backup.impl.BackupSystemTable; +import org.apache.hadoop.hbase.backup.impl.BackupMetaTable; import org.apache.hadoop.hbase.backup.impl.TableBackupClient; import org.apache.hadoop.hbase.backup.impl.TableBackupClient.Stage; import org.apache.hadoop.hbase.testclassification.LargeTests; @@ -59,7 +59,7 @@ public class TestBackupRepair extends TestBackupBase { public void runBackupAndFailAtStageWithRestore(int stage) throws Exception { conf1.setInt(FullTableBackupClientForTest.BACKUP_TEST_MODE_STAGE, stage); - try (BackupSystemTable table = new BackupSystemTable(TEST_UTIL.getConnection())) { + try (BackupMetaTable table = new BackupMetaTable(TEST_UTIL.getConnection())) { int before = table.getBackupHistory().size(); String[] args = new String[] { "create", "full", BACKUP_ROOT_DIR, "-t", diff --git hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestFullBackup.java hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestFullBackup.java index 919668e2b3..8e8d2e6b50 100644 --- hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestFullBackup.java +++ hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestFullBackup.java @@ -23,7 +23,7 @@ import java.util.List; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hbase.backup.impl.BackupSystemTable; +import org.apache.hadoop.hbase.backup.impl.BackupMetaTable; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.util.ToolRunner; import org.junit.Test; @@ -37,7 +37,7 @@ public class TestFullBackup extends TestBackupBase { @Test public void 
testFullBackupMultipleCommand() throws Exception { LOG.info("test full backup on a multiple tables with data: command-line"); - try (BackupSystemTable table = new BackupSystemTable(TEST_UTIL.getConnection())) { + try (BackupMetaTable table = new BackupMetaTable(TEST_UTIL.getConnection())) { int before = table.getBackupHistory().size(); String[] args = new String[] { "create", "full", BACKUP_ROOT_DIR, "-t", diff --git hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestFullBackupSet.java hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestFullBackupSet.java index c11597146c..41135db55b 100644 --- hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestFullBackupSet.java +++ hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestFullBackupSet.java @@ -27,7 +27,7 @@ import java.util.List; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.backup.impl.BackupSystemTable; +import org.apache.hadoop.hbase.backup.impl.BackupMetaTable; import org.apache.hadoop.hbase.client.HBaseAdmin; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.util.ToolRunner; @@ -49,7 +49,7 @@ public class TestFullBackupSet extends TestBackupBase { LOG.info("Test full backup, backup set exists"); // Create set - try (BackupSystemTable table = new BackupSystemTable(TEST_UTIL.getConnection())) { + try (BackupMetaTable table = new BackupMetaTable(TEST_UTIL.getConnection())) { String name = "name"; table.addToBackupSet(name, new String[] { table1.getNameAsString() }); List names = table.describeBackupSet(name); diff --git hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestFullBackupSetRestoreSet.java hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestFullBackupSetRestoreSet.java index 2ca13650c8..30e43edeec 100644 --- hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestFullBackupSetRestoreSet.java +++ hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestFullBackupSetRestoreSet.java @@ -27,7 +27,7 @@ import java.util.List; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.backup.impl.BackupSystemTable; +import org.apache.hadoop.hbase.backup.impl.BackupMetaTable; import org.apache.hadoop.hbase.client.HBaseAdmin; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.util.ToolRunner; @@ -45,7 +45,7 @@ public class TestFullBackupSetRestoreSet extends TestBackupBase { LOG.info("Test full restore set"); // Create set - try (BackupSystemTable table = new BackupSystemTable(TEST_UTIL.getConnection())) { + try (BackupMetaTable table = new BackupMetaTable(TEST_UTIL.getConnection())) { String name = "name"; table.addToBackupSet(name, new String[] { table1.getNameAsString() }); List names = table.describeBackupSet(name); @@ -88,7 +88,7 @@ public class TestFullBackupSetRestoreSet extends TestBackupBase { LOG.info("Test full restore set to same table"); // Create set - try (BackupSystemTable table = new BackupSystemTable(TEST_UTIL.getConnection())) { + try (BackupMetaTable table = new BackupMetaTable(TEST_UTIL.getConnection())) { String name = "name1"; table.addToBackupSet(name, new String[] { table1.getNameAsString() }); List names = table.describeBackupSet(name); diff --git hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestFullBackupWithFailures.java 
hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestFullBackupWithFailures.java index 32f40e29b9..0e0181cd6c 100644 --- hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestFullBackupWithFailures.java +++ hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestFullBackupWithFailures.java @@ -26,7 +26,7 @@ import java.util.Set; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.backup.impl.BackupSystemTable; +import org.apache.hadoop.hbase.backup.impl.BackupMetaTable; import org.apache.hadoop.hbase.backup.impl.TableBackupClient; import org.apache.hadoop.hbase.backup.impl.TableBackupClient.Stage; import org.apache.hadoop.hbase.testclassification.LargeTests; @@ -54,7 +54,7 @@ public class TestFullBackupWithFailures extends TestBackupBase { public void runBackupAndFailAtStage(int stage) throws Exception { conf1.setInt(FullTableBackupClientForTest.BACKUP_TEST_MODE_STAGE, stage); - try (BackupSystemTable table = new BackupSystemTable(TEST_UTIL.getConnection())) { + try (BackupMetaTable table = new BackupMetaTable(TEST_UTIL.getConnection())) { int before = table.getBackupHistory().size(); String[] args = new String[] { "create", "full", BACKUP_ROOT_DIR, "-t", diff --git hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupMergeWithFailures.java hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupMergeWithFailures.java index ebac1ea3f5..c6b1800c43 100644 --- hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupMergeWithFailures.java +++ hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupMergeWithFailures.java @@ -34,7 +34,7 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.backup.impl.BackupAdminImpl; -import org.apache.hadoop.hbase.backup.impl.BackupSystemTable; +import org.apache.hadoop.hbase.backup.impl.BackupMetaTable; import org.apache.hadoop.hbase.backup.mapreduce.MapReduceBackupMergeJob; import org.apache.hadoop.hbase.backup.mapreduce.MapReduceHFileSplitterJob; import org.apache.hadoop.hbase.backup.util.BackupUtils; @@ -98,7 +98,7 @@ public class TestIncrementalBackupMergeWithFailures extends TestBackupBase { List> processedTableList = new ArrayList>(); boolean finishedTables = false; Connection conn = ConnectionFactory.createConnection(getConf()); - BackupSystemTable table = new BackupSystemTable(conn); + BackupMetaTable table = new BackupMetaTable(conn); FileSystem fs = FileSystem.get(getConf()); try { @@ -272,7 +272,7 @@ public class TestIncrementalBackupMergeWithFailures extends TestBackupBase { bAdmin.mergeBackups(backups); Assert.fail("Expected IOException"); } catch (IOException e) { - BackupSystemTable table = new BackupSystemTable(conn); + BackupMetaTable table = new BackupMetaTable(conn); if(phase.ordinal() < FailurePhase.PHASE4.ordinal()) { // No need to repair: // Both Merge and backup exclusive operations are finished diff --git hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupWithBulkLoad.java hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupWithBulkLoad.java index f63bf298b6..fa4bb31058 100644 --- hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupWithBulkLoad.java +++ 
hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupWithBulkLoad.java @@ -29,7 +29,7 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.backup.impl.BackupAdminImpl; -import org.apache.hadoop.hbase.backup.impl.BackupSystemTable; +import org.apache.hadoop.hbase.backup.impl.BackupMetaTable; import org.apache.hadoop.hbase.backup.util.BackupUtils; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; @@ -113,22 +113,37 @@ public class TestIncrementalBackupWithBulkLoad extends TestBackupBase { request = createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR); String backupIdIncMultiple = client.backupTables(request); assertTrue(checkSucceeded(backupIdIncMultiple)); - + // #4 bulk load again + LOG.debug("bulk loading into " + testName); + int actual1 = TestLoadIncrementalHFiles.loadHFiles(testName, table1Desc, TEST_UTIL, famName, + qualName, false, null, new byte[][][] { + new byte[][]{ Bytes.toBytes("ppp"), Bytes.toBytes("qqq") }, + new byte[][]{ Bytes.toBytes("rrr"), Bytes.toBytes("sss") }, + }, true, false, true, NB_ROWS_IN_BATCH * 2 + actual, NB_ROWS2); + + // #5 - incremental backup for table1 + tables = Lists.newArrayList(table1); + request = createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR); + String backupIdIncMultiple1 = client.backupTables(request); + assertTrue(checkSucceeded(backupIdIncMultiple1)); + + // Delete all data in table1 + TEST_UTIL.deleteTableData(table1); // #5.1 - check tables for full restore */ HBaseAdmin hAdmin = TEST_UTIL.getHBaseAdmin(); // #6 - restore incremental backup for table1 TableName[] tablesRestoreIncMultiple = new TableName[] { table1 }; - TableName[] tablesMapIncMultiple = new TableName[] { table1_restore }; - client.restore(BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, backupIdIncMultiple, - false, tablesRestoreIncMultiple, tablesMapIncMultiple, true)); + //TableName[] tablesMapIncMultiple = new TableName[] { table1_restore }; + client.restore(BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, backupIdIncMultiple1, + false, tablesRestoreIncMultiple, tablesRestoreIncMultiple, true)); - HTable hTable = (HTable) conn.getTable(table1_restore); - Assert.assertEquals(TEST_UTIL.countRows(hTable), NB_ROWS_IN_BATCH * 2+actual); + HTable hTable = (HTable) conn.getTable(table1); + Assert.assertEquals(TEST_UTIL.countRows(hTable), NB_ROWS_IN_BATCH * 2 + actual + actual1); request = createBackupRequest(BackupType.FULL, tables, BACKUP_ROOT_DIR); backupIdFull = client.backupTables(request); - try (final BackupSystemTable table = new BackupSystemTable(conn)) { + try (final BackupMetaTable table = new BackupMetaTable(conn)) { Pair>>>>, List> pair = table.readBulkloadRows(tables); assertTrue("map still has " + pair.getSecond().size() + " entries", diff --git hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupWithFailures.java hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupWithFailures.java index 84a596eb5c..f706e80388 100644 --- hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupWithFailures.java +++
import org.apache.hadoop.hbase.backup.impl.BackupAdminImpl; -import org.apache.hadoop.hbase.backup.impl.BackupSystemTable; +import org.apache.hadoop.hbase.backup.impl.BackupMetaTable; import org.apache.hadoop.hbase.backup.impl.TableBackupClient; import org.apache.hadoop.hbase.backup.impl.TableBackupClient.Stage; import org.apache.hadoop.hbase.client.Connection; @@ -136,7 +136,7 @@ public class TestIncrementalBackupWithFailures extends TestBackupBase { private void runBackupAndFailAtStage(int stage) throws Exception { conf1.setInt(FullTableBackupClientForTest.BACKUP_TEST_MODE_STAGE, stage); - try (BackupSystemTable table = new BackupSystemTable(TEST_UTIL.getConnection())) { + try (BackupMetaTable table = new BackupMetaTable(TEST_UTIL.getConnection())) { int before = table.getBackupHistory().size(); String[] args = new String[] { "create", "incremental", BACKUP_ROOT_DIR, "-t", diff --git hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestRepairAfterFailedDelete.java hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestRepairAfterFailedDelete.java index 556521f7cb..4fc777e043 100644 --- hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestRepairAfterFailedDelete.java +++ hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestRepairAfterFailedDelete.java @@ -27,7 +27,7 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.backup.impl.BackupSystemTable; +import org.apache.hadoop.hbase.backup.impl.BackupMetaTable; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.testclassification.LargeTests; @@ -50,7 +50,7 @@ public class TestRepairAfterFailedDelete extends TestBackupBase { assertTrue(checkSucceeded(backupId)); LOG.info("backup complete"); String[] backupIds = new String[] { backupId }; - BackupSystemTable table = new BackupSystemTable(TEST_UTIL.getConnection()); + BackupMetaTable table = new BackupMetaTable(TEST_UTIL.getConnection()); BackupInfo info = table.readBackupInfo(backupId); Path path = new Path(info.getBackupRootDir(), backupId); FileSystem fs = FileSystem.get(path.toUri(), conf1); @@ -60,7 +60,7 @@ public class TestRepairAfterFailedDelete extends TestBackupBase { String snapshotName = "snapshot-backup"; Connection conn = TEST_UTIL.getConnection(); Admin admin = conn.getAdmin(); - admin.snapshot(snapshotName, BackupSystemTable.getTableName(conf1)); + admin.snapshot(snapshotName, BackupMetaTable.getTableName(conf1)); int deleted = getBackupAdmin().deleteBackups(backupIds); @@ -70,9 +70,9 @@ public class TestRepairAfterFailedDelete extends TestBackupBase { // Emulate delete failure // Restore backup system table - admin.disableTable(BackupSystemTable.getTableName(conf1)); + admin.disableTable(BackupMetaTable.getTableName(conf1)); admin.restoreSnapshot(snapshotName); - admin.enableTable(BackupSystemTable.getTableName(conf1)); + admin.enableTable(BackupMetaTable.getTableName(conf1)); // Start backup session table.startBackupExclusiveOperation(); // Start delete operation diff --git hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestSystemTableSnapshot.java hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestSystemTableSnapshot.java index 2a0c4b4640..f05bd7a277 100644 --- hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestSystemTableSnapshot.java +++ 
hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestSystemTableSnapshot.java @@ -21,7 +21,7 @@ package org.apache.hadoop.hbase.backup; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.backup.impl.BackupSystemTable; +import org.apache.hadoop.hbase.backup.impl.BackupMetaTable; import org.apache.hadoop.hbase.client.HBaseAdmin; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.junit.experimental.categories.Category; @@ -40,7 +40,7 @@ public class TestSystemTableSnapshot extends TestBackupBase { LOG.info("test snapshot system table"); - TableName backupSystem = BackupSystemTable.getTableName(conf1); + TableName backupSystem = BackupMetaTable.getTableName(conf1); HBaseAdmin hba = TEST_UTIL.getHBaseAdmin(); String snapshotName = "sysTable"; diff --git hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/master/TestBackupLogCleaner.java hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/master/TestBackupLogCleaner.java index 5f72f458b9..0a569a8df7 100644 --- hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/master/TestBackupLogCleaner.java +++ hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/master/TestBackupLogCleaner.java @@ -37,7 +37,7 @@ import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.backup.BackupType; import org.apache.hadoop.hbase.backup.TestBackupBase; -import org.apache.hadoop.hbase.backup.impl.BackupSystemTable; +import org.apache.hadoop.hbase.backup.impl.BackupMetaTable; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.HTable; @@ -66,7 +66,7 @@ public class TestBackupLogCleaner extends TestBackupBase { List tableSetFullList = Lists.newArrayList(table1, table2, table3, table4); - try (BackupSystemTable systemTable = new BackupSystemTable(TEST_UTIL.getConnection())) { + try (BackupMetaTable systemTable = new BackupMetaTable(TEST_UTIL.getConnection())) { // Verify that we have no backup sessions yet assertFalse(systemTable.hasBackupSessions()); diff --git hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestBackupRestore.java hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestBackupRestore.java index de307db0f8..b4471264e7 100644 --- hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestBackupRestore.java +++ hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestBackupRestore.java @@ -39,7 +39,7 @@ import org.apache.hadoop.hbase.backup.BackupType; import org.apache.hadoop.hbase.backup.RestoreRequest; import org.apache.hadoop.hbase.backup.impl.BackupAdminImpl; import org.apache.hadoop.hbase.backup.impl.BackupManager; -import org.apache.hadoop.hbase.backup.impl.BackupSystemTable; +import org.apache.hadoop.hbase.backup.impl.BackupMetaTable; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.HTable; @@ -229,7 +229,7 @@ public class IntegrationTestBackupRestore extends IntegrationTestBase { } private BackupInfo getBackupInfo(String backupId) throws IOException { - try (BackupSystemTable table = new BackupSystemTable(util.getConnection())) { + try (BackupMetaTable table = new BackupMetaTable(util.getConnection())) { return table.readBackupInfo(backupId); } }
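
Note on the retry loop added in IncrementalTableBackupClient#copyBulkLoadedFiles above: it tolerates HFiles that the archiver moves out of the active data directory while the copy job is running. The following is a minimal, self-contained sketch of that pattern, not the patch code itself; the FileCopier callback is a hypothetical stand-in for the DistCp-based incrementalCopyHFiles call, and the method names are illustrative only.

import java.io.IOException;
import java.util.Iterator;
import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class RetryOnArchiveSketch {

  /** Hypothetical abstraction over the real copy job (DistCp in the patch). */
  interface FileCopier {
    void copyFiles(String[] files, String targetDir) throws IOException;
  }

  /**
   * Copy the given active files, retrying whenever the failure can be explained by
   * files having been moved to the archive directory in the middle of the copy.
   */
  static void copyActiveFiles(Configuration conf, List<String> activeFiles,
      List<String> archiveFiles, String targetDir, FileCopier copier) throws IOException {
    FileSystem fs = FileSystem.get(conf);
    while (!activeFiles.isEmpty()) {
      String[] toCopy = activeFiles.toArray(new String[0]);
      try {
        copier.copyFiles(toCopy, targetDir);
        return; // all remaining active files copied
      } catch (IOException e) {
        // Move every file that no longer exists in the active location to the archive list.
        int before = activeFiles.size();
        for (Iterator<String> it = activeFiles.iterator(); it.hasNext();) {
          String path = it.next();
          if (!fs.exists(new Path(path))) {
            it.remove();
            archiveFiles.add(path);
          }
        }
        // If nothing was archived, the failure has some other cause: give up.
        if (activeFiles.size() == before) {
          throw e;
        }
        // Otherwise retry with the shrunken active list; archived files are copied separately.
      }
    }
  }
}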
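Note on RestoreTablesClient#getFilesRecursively above: restore input is switched from per-table backup directories to an explicit list of HFiles discovered by a recursive listing. A self-contained sketch with explicit generic types is below, assuming the standard FileSystem#listFiles and HFile#isHFileFormat APIs; the class and method names are illustrative only.

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocatedFileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.hbase.io.hfile.HFile;

public class HFileListingSketch {

  /** Recursively collect every HFile under the given backup directory. */
  static List<Path> listHFiles(Configuration conf, String backupDir) throws IOException {
    Path root = new Path(backupDir);
    FileSystem fs = FileSystem.get(root.toUri(), conf);
    List<Path> hfiles = new ArrayList<>();
    RemoteIterator<LocatedFileStatus> it = fs.listFiles(root, true); // true = recursive
    while (it.hasNext()) {
      Path p = it.next().getPath();
      // Keep only HFiles; manifests and other artifacts under the backup dir are skipped.
      if (HFile.isHFileFormat(fs, p)) {
        hfiles.add(p);
      }
    }
    return hfiles;
  }
}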