diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/backup/BackupInfo.java hbase-client/src/main/java/org/apache/hadoop/hbase/backup/BackupInfo.java index 4d6b2a7..df8860a 100644 --- hbase-client/src/main/java/org/apache/hadoop/hbase/backup/BackupInfo.java +++ hbase-client/src/main/java/org/apache/hadoop/hbase/backup/BackupInfo.java @@ -32,9 +32,7 @@ import java.util.Set; import org.apache.commons.lang.StringUtils; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.backup.BackupType; import org.apache.hadoop.hbase.backup.util.BackupClientUtil; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.classification.InterfaceStability; @@ -42,6 +40,7 @@ import org.apache.hadoop.hbase.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.protobuf.generated.BackupProtos; import org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo.Builder; import org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus; +import org.apache.hadoop.hbase.util.Bytes; /** @@ -51,11 +50,13 @@ import org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus @InterfaceStability.Evolving public class BackupInfo implements Comparable { private static final Log LOG = LogFactory.getLog(BackupInfo.class); + // backup status flag public static enum BackupState { WAITING, RUNNING, COMPLETE, FAILED, ANY; } - // backup phase + + // backup phase public static enum BackupPhase { SNAPSHOTCOPY, INCREMENTAL_COPY, STORE_MANIFEST; } @@ -102,16 +103,16 @@ public class BackupInfo implements Comparable { // backup progress in %% (0-100) private int progress; - + // distributed job id private String jobId; - + // Number of parallel workers. -1 - system defined private int workers = -1; - + // Bandwidth per worker in MB per sec. 
-1 - unlimited - private long bandwidth = -1; - + private long bandwidth = -1; + public BackupInfo() { backupStatusMap = new HashMap(); } @@ -121,8 +122,8 @@ public class BackupInfo implements Comparable { this.backupId = backupId; this.type = type; this.targetRootDir = targetRootDir; - if(LOG.isDebugEnabled()){ - LOG.debug("CreateBackupContext: " + tables.length+" "+tables[0] ); + if (LOG.isDebugEnabled()) { + LOG.debug("CreateBackupContext: " + tables.length + " " + tables[0]); } this.addTables(tables); @@ -165,8 +166,9 @@ public class BackupInfo implements Comparable { public HashMap> getTableSetTimestampMap() { return tableSetTimestampMap; } - - public void setTableSetTimestampMap(HashMap> tableSetTimestampMap) { + + public void + setTableSetTimestampMap(HashMap> tableSetTimestampMap) { this.tableSetTimestampMap = tableSetTimestampMap; } @@ -293,6 +295,14 @@ public class BackupInfo implements Comparable { } } + public void setTables(List tables) { + this.backupStatusMap.clear(); + for (TableName table : tables) { + BackupStatus backupStatus = new BackupStatus(table, this.targetRootDir, this.backupId); + this.backupStatusMap.put(table, backupStatus); + } + } + public String getTargetRootDir() { return targetRootDir; } @@ -317,8 +327,8 @@ public class BackupInfo implements Comparable { * Set the new region server log timestamps after distributed log roll * @param newTableSetTimestampMap table timestamp map */ - public void setIncrTimestampMap(HashMap> newTableSetTimestampMap) { + public void + setIncrTimestampMap(HashMap> newTableSetTimestampMap) { this.tableSetTimestampMap = newTableSetTimestampMap; } @@ -366,12 +376,27 @@ public class BackupInfo implements Comparable { return builder.build(); } + @Override + public boolean equals(Object obj) { + if (obj instanceof BackupInfo) { + BackupInfo other = (BackupInfo) obj; + try { + return Bytes.equals(toByteArray(), other.toByteArray()); + } catch (IOException e) { + LOG.error(e); + return false; + } + } else { + 
return false; + } + } + public byte[] toByteArray() throws IOException { return toProtosBackupInfo().toByteArray(); } private void setBackupStatusMap(Builder builder) { - for (Entry entry: backupStatusMap.entrySet()) { + for (Entry entry : backupStatusMap.entrySet()) { builder.addTableBackupStatus(entry.getValue().toProto()); } } @@ -379,7 +404,7 @@ public class BackupInfo implements Comparable { public static BackupInfo fromByteArray(byte[] data) throws IOException { return fromProto(BackupProtos.BackupInfo.parseFrom(data)); } - + public static BackupInfo fromStream(final InputStream stream) throws IOException { return fromProto(BackupProtos.BackupInfo.parseDelimitedFrom(stream)); } @@ -418,7 +443,7 @@ public class BackupInfo implements Comparable { private static Map toMap(List list) { HashMap map = new HashMap<>(); - for (TableBackupStatus tbs : list){ + for (TableBackupStatus tbs : list) { map.put(ProtobufUtil.toTableName(tbs.getTable()), BackupStatus.convert(tbs)); } return map; @@ -455,18 +480,16 @@ public class BackupInfo implements Comparable { .append(" progress: ").append(getProgress()); return sb.toString(); } - + public String getTableListAsString() { return StringUtils.join(backupStatusMap.keySet(), ","); } @Override public int compareTo(BackupInfo o) { - Long thisTS = - new Long(this.getBackupId().substring(this.getBackupId().lastIndexOf("_") + 1)); - Long otherTS = - new Long(o.getBackupId().substring(o.getBackupId().lastIndexOf("_") + 1)); - return thisTS.compareTo(otherTS); + Long thisTS = new Long(this.getBackupId().substring(this.getBackupId().lastIndexOf("_") + 1)); + Long otherTS = new Long(o.getBackupId().substring(o.getBackupId().lastIndexOf("_") + 1)); + return thisTS.compareTo(otherTS); } - + } diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java hbase-client/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java index ccb1894..ea00363 100644 --- 
hbase-client/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java +++ hbase-client/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java @@ -61,14 +61,13 @@ import org.apache.hadoop.hbase.protobuf.generated.BackupProtos; @InterfaceAudience.Private @InterfaceStability.Evolving public final class BackupSystemTable implements Closeable { - + static class WALItem { String backupId; String walFile; String backupRoot; - - WALItem(String backupId, String walFile, String backupRoot) - { + + WALItem(String backupId, String walFile, String backupRoot) { this.backupId = backupId; this.walFile = walFile; this.backupRoot = backupRoot; @@ -85,30 +84,29 @@ public final class BackupSystemTable implements Closeable { public String getBackupRoot() { return backupRoot; } - + public String toString() { - return "/"+ backupRoot + "/"+backupId + "/" + walFile; + return "/" + backupRoot + "/" + backupId + "/" + walFile; } - + } - + private static final Log LOG = LogFactory.getLog(BackupSystemTable.class); - private final static TableName tableName = TableName.BACKUP_TABLE_NAME; + private final static TableName tableName = TableName.BACKUP_TABLE_NAME; // Stores backup sessions (contexts) final static byte[] SESSIONS_FAMILY = "session".getBytes(); - // Stores other meta + // Stores other meta final static byte[] META_FAMILY = "meta".getBytes(); // Connection to HBase cluster, shared // among all instances private final Connection connection; - + public BackupSystemTable(Connection conn) throws IOException { this.connection = conn; } - public void close() { - // do nothing + // do nothing } /** @@ -120,7 +118,7 @@ public final class BackupSystemTable implements Closeable { if (LOG.isDebugEnabled()) { LOG.debug("update backup status in hbase:backup for: " + context.getBackupId() - + " set status=" + context.getState()); + + " set status=" + context.getState()); } try (Table table = connection.getTable(tableName)) { Put put = 
BackupSystemTableHelper.createPutForBackupContext(context); @@ -131,7 +129,7 @@ public final class BackupSystemTable implements Closeable { /** * Deletes backup status from hbase:backup table * @param backupId backup id - * @return true, if operation succeeded, false - otherwise + * @return true, if operation succeeded, false - otherwise * @throws IOException exception */ @@ -160,7 +158,7 @@ public final class BackupSystemTable implements Closeable { try (Table table = connection.getTable(tableName)) { Get get = BackupSystemTableHelper.createGetForBackupContext(backupId); Result res = table.get(get); - if(res.isEmpty()){ + if (res.isEmpty()) { return null; } return BackupSystemTableHelper.resultToBackupInfo(res); @@ -171,7 +169,7 @@ public final class BackupSystemTable implements Closeable { * Read the last backup start code (timestamp) of last successful backup. Will return null if * there is no start code stored on hbase or the value is of length 0. These two cases indicate * there is no successful backup completed so far. - * @param backupRoot root directory path to backup + * @param backupRoot root directory path to backup * @return the timestamp of last successful backup * @throws IOException exception */ @@ -187,7 +185,7 @@ public final class BackupSystemTable implements Closeable { } Cell cell = res.listCells().get(0); byte[] val = CellUtil.cloneValue(cell); - if (val.length == 0){ + if (val.length == 0) { return null; } return new String(val); @@ -197,7 +195,7 @@ public final class BackupSystemTable implements Closeable { /** * Write the start code (timestamp) to hbase:backup. If passed in null, then write 0 byte. 
* @param startCode start code - * @param backupRoot root directory path to backup + * @param backupRoot root directory path to backup * @throws IOException exception */ public void writeBackupStartCode(Long startCode, String backupRoot) throws IOException { @@ -212,7 +210,7 @@ public final class BackupSystemTable implements Closeable { /** * Get the Region Servers log information after the last log roll from hbase:backup. - * @param backupRoot root directory path to backup + * @param backupRoot root directory path to backup * @return RS log info * @throws IOException exception */ @@ -245,7 +243,7 @@ public final class BackupSystemTable implements Closeable { * Writes Region Server last roll log result (timestamp) to hbase:backup table * @param server - Region Server name * @param timestamp - last log timestamp - * @param backupRoot root directory path to backup + * @param backupRoot root directory path to backup * @throws IOException exception */ public void writeRegionServerLastLogRollResult(String server, Long ts, String backupRoot) @@ -255,7 +253,7 @@ public final class BackupSystemTable implements Closeable { } try (Table table = connection.getTable(tableName)) { Put put = - BackupSystemTableHelper.createPutForRegionServerLastLogRollResult(server,ts,backupRoot); + BackupSystemTableHelper.createPutForRegionServerLastLogRollResult(server, ts, backupRoot); table.put(put); } } @@ -270,29 +268,28 @@ public final class BackupSystemTable implements Closeable { if (LOG.isDebugEnabled()) { LOG.debug("get backup history from hbase:backup"); } - ArrayList list ; - BackupState state = onlyCompleted? BackupState.COMPLETE: BackupState.ANY; + ArrayList list; + BackupState state = onlyCompleted ? 
BackupState.COMPLETE : BackupState.ANY; list = getBackupContexts(state); - return BackupClientUtil.sortHistoryListDesc(list); + return BackupClientUtil.sortHistoryListDesc(list); } public ArrayList getBackupHistory() throws IOException { return getBackupHistory(false); } - - public ArrayList getBackupHistoryForTable(TableName table) throws IOException { - ArrayList history = getBackupHistory(); - ArrayList list = new ArrayList(); - - for(int i=0; i < history.size(); i++){ - BackupInfo info = history.get(i); - if(info.getTableNames().contains(table)){ - list.add(history.get(i)); + + public ArrayList getBackupHistoryForTable(TableName name) throws IOException { + ArrayList history = getBackupHistory(); + ArrayList tableHistory = new ArrayList(); + for (BackupInfo info : history) { + List tables = info.getTableNames(); + if (tables.contains(name)) { + tableHistory.add(info); } } - return list; + return tableHistory; } - + /** * Get all backup session with a given status (in desc order by time) * @param status status @@ -313,7 +310,7 @@ public final class BackupSystemTable implements Closeable { while ((res = scanner.next()) != null) { res.advance(); BackupInfo context = BackupSystemTableHelper.cellToBackupInfo(res.current()); - if (status != BackupState.ANY && context.getState() != status){ + if (status != BackupState.ANY && context.getState() != status) { continue; } list.add(context); @@ -323,26 +320,25 @@ public final class BackupSystemTable implements Closeable { } /** - * Write the current timestamps for each regionserver to hbase:backup - * after a successful full or incremental backup. The saved timestamp is of the last - * log file that was backed up already. + * Write the current timestamps for each regionserver to hbase:backup after a successful full or + * incremental backup. The saved timestamp is of the last log file that was backed up already. 
* @param tables tables * @param newTimestamps timestamps - * @param backupRoot root directory path to backup + * @param backupRoot root directory path to backup * @throws IOException exception */ public void writeRegionServerLogTimestamp(Set tables, HashMap newTimestamps, String backupRoot) throws IOException { if (LOG.isDebugEnabled()) { - LOG.debug("write RS log time stamps to hbase:backup for tables ["+ - StringUtils.join(tables, ",")+"]"); + LOG.debug("write RS log time stamps to hbase:backup for tables [" + + StringUtils.join(tables, ",") + "]"); } List puts = new ArrayList(); for (TableName table : tables) { byte[] smapData = toTableServerTimestampProto(table, newTimestamps).toByteArray(); - Put put = - BackupSystemTableHelper.createPutForWriteRegionServerLogTimestamp(table, - smapData, backupRoot); + Put put = + BackupSystemTableHelper.createPutForWriteRegionServerLogTimestamp(table, smapData, + backupRoot); puts.add(put); } try (Table table = connection.getTable(tableName)) { @@ -354,7 +350,7 @@ public final class BackupSystemTable implements Closeable { * Read the timestamp for each region server log after the last successful backup. Each table has * its own set of the timestamps. The info is stored for each table as a concatenated string of * rs->timestapmp - * @param backupRoot root directory path to backup + * @param backupRoot root directory path to backup * @return the timestamp for each region server. 
key: tableName value: * RegionServer,PreviousTimeStamp * @throws IOException exception @@ -362,7 +358,7 @@ public final class BackupSystemTable implements Closeable { public HashMap> readLogTimestampMap(String backupRoot) throws IOException { if (LOG.isDebugEnabled()) { - LOG.debug("read RS log ts from hbase:backup for root="+ backupRoot); + LOG.debug("read RS log ts from hbase:backup for root=" + backupRoot); } HashMap> tableTimestampMap = @@ -399,7 +395,7 @@ public final class BackupSystemTable implements Closeable { BackupProtos.TableServerTimestamp.newBuilder(); tstBuilder.setTable(ProtobufUtil.toProtoTableName(table)); - for(Entry entry: map.entrySet()) { + for (Entry entry : map.entrySet()) { BackupProtos.ServerTimestamp.Builder builder = BackupProtos.ServerTimestamp.newBuilder(); builder.setServer(entry.getKey()); builder.setTimestamp(entry.getValue()); @@ -411,9 +407,9 @@ public final class BackupSystemTable implements Closeable { private HashMap fromTableServerTimestampProto( BackupProtos.TableServerTimestamp proto) { - HashMap map = new HashMap (); + HashMap map = new HashMap(); List list = proto.getServerTimestampList(); - for(BackupProtos.ServerTimestamp st: list) { + for (BackupProtos.ServerTimestamp st : list) { map.put(st.getServer(), st.getTimestamp()); } return map; @@ -421,12 +417,11 @@ public final class BackupSystemTable implements Closeable { /** * Return the current tables covered by incremental backup. 
- * @param backupRoot root directory path to backup + * @param backupRoot root directory path to backup * @return set of tableNames * @throws IOException exception */ - public Set getIncrementalBackupTableSet(String backupRoot) - throws IOException { + public Set getIncrementalBackupTableSet(String backupRoot) throws IOException { if (LOG.isDebugEnabled()) { LOG.debug("get incr backup table set from hbase:backup"); } @@ -450,13 +445,14 @@ public final class BackupSystemTable implements Closeable { /** * Add tables to global incremental backup set * @param tables - set of tables - * @param backupRoot root directory path to backup + * @param backupRoot root directory path to backup * @throws IOException exception */ - public void addIncrementalBackupTableSet(Set tables, String backupRoot) throws IOException { + public void addIncrementalBackupTableSet(Set tables, String backupRoot) + throws IOException { if (LOG.isDebugEnabled()) { - LOG.debug("Add incremental backup table set to hbase:backup. ROOT="+backupRoot + - " tables ["+ StringUtils.join(tables, " ")+"]"); + LOG.debug("Add incremental backup table set to hbase:backup. ROOT=" + backupRoot + + " tables [" + StringUtils.join(tables, " ") + "]"); for (TableName table : tables) { LOG.debug(table); } @@ -468,23 +464,38 @@ public final class BackupSystemTable implements Closeable { } /** + * Removes incremental backup set + * @param backupRoot backup root + */ + + public void deleteIncrementalBackupTableSet(String backupRoot) throws IOException { + if (LOG.isDebugEnabled()) { + LOG.debug("Delete incremental backup table set to hbase:backup. 
ROOT=" + backupRoot); + } + try (Table table = connection.getTable(tableName)) { + Delete delete = BackupSystemTableHelper.createDeleteForIncrBackupTableSet(backupRoot); + table.delete(delete); + } + } + + /** * Register WAL files as eligible for deletion * @param files files * @param backupId backup id - * @param backupRoot root directory path to backup + * @param backupRoot root directory path to backup * @throws IOException exception */ - public void addWALFiles(List files, String backupId, - String backupRoot) throws IOException { + public void addWALFiles(List files, String backupId, String backupRoot) + throws IOException { if (LOG.isDebugEnabled()) { - LOG.debug("add WAL files to hbase:backup: "+backupId +" "+backupRoot+" files ["+ - StringUtils.join(files, ",")+"]"); - for(String f: files){ - LOG.debug("add :"+f); + LOG.debug("add WAL files to hbase:backup: " + backupId + " " + backupRoot + " files [" + + StringUtils.join(files, ",") + "]"); + for (String f : files) { + LOG.debug("add :" + f); } } try (Table table = connection.getTable(tableName)) { - List puts = + List puts = BackupSystemTableHelper.createPutsForAddWALFiles(files, backupId, backupRoot); table.put(puts); } @@ -492,7 +503,7 @@ public final class BackupSystemTable implements Closeable { /** * Register WAL files as eligible for deletion - * @param backupRoot root directory path to backup + * @param backupRoot root directory path to backup * @throws IOException exception */ public Iterator getWALFilesIterator(String backupRoot) throws IOException { @@ -535,7 +546,7 @@ public final class BackupSystemTable implements Closeable { buf = cells.get(2).getValueArray(); len = cells.get(2).getValueLength(); offset = cells.get(2).getValueOffset(); - String backupRoot = new String(buf, offset, len); + String backupRoot = new String(buf, offset, len); return new WALItem(backupId, walFile, backupRoot); } @@ -549,20 +560,19 @@ public final class BackupSystemTable implements Closeable { } /** - * Check if WAL 
file is eligible for deletion - * Future: to support all backup destinations + * Check if WAL file is eligible for deletion Future: to support all backup destinations * @param file file * @return true, if - yes. * @throws IOException exception */ public boolean isWALFileDeletable(String file) throws IOException { if (LOG.isDebugEnabled()) { - LOG.debug("Check if WAL file has been already backed up in hbase:backup "+ file); + LOG.debug("Check if WAL file has been already backed up in hbase:backup " + file); } try (Table table = connection.getTable(tableName)) { Get get = BackupSystemTableHelper.createGetForCheckWALFile(file); Result res = table.get(get); - if (res.isEmpty()){ + if (res.isEmpty()) { return false; } return true; @@ -590,11 +600,11 @@ public final class BackupSystemTable implements Closeable { return result; } } - + /** * BACKUP SETS */ - + /** * Get backup set list * @return backup set list @@ -613,115 +623,112 @@ public final class BackupSystemTable implements Closeable { scan.setMaxVersions(1); scanner = table.getScanner(scan); Result res = null; - while ((res = scanner.next()) != null) { - res.advance(); - list.add(BackupSystemTableHelper.cellKeyToBackupSetName(res.current())); - } - return list; - } finally { - if(scanner != null) { - scanner.close(); - } - if (table != null) { - table.close(); - } - } - } - - /** - * Get backup set description (list of tables) - * @param name - set's name - * @return list of tables in a backup set - * @throws IOException - */ - public List describeBackupSet(String name) throws IOException { - if (LOG.isDebugEnabled()) { - LOG.debug(" Backup set describe: "+name); - } - Table table = null; - try { - table = connection.getTable(tableName); - Get get = BackupSystemTableHelper.createGetForBackupSet(name); - Result res = table.get(get); - if(res.isEmpty()) return null; - res.advance(); - String[] tables = - BackupSystemTableHelper.cellValueToBackupSet(res.current()); - return toList(tables); - } finally { - if (table 
!= null) { - table.close(); - } - } - } - - private List toList(String[] tables) - { - List list = new ArrayList(tables.length); - for(String name: tables) { - list.add(TableName.valueOf(name)); - } - return list; - } - - /** - * Add backup set (list of tables) - * @param name - set name - * @param tables - list of tables, comma-separated - * @throws IOException - */ - public void addToBackupSet(String name, String[] newTables) throws IOException { - if (LOG.isDebugEnabled()) { - LOG.debug("Backup set add: "+name+" tables ["+ StringUtils.join(newTables, " ")+"]"); - } - Table table = null; - String[] union = null; - try { - table = connection.getTable(tableName); - Get get = BackupSystemTableHelper.createGetForBackupSet(name); - Result res = table.get(get); - if(res.isEmpty()) { - union = newTables; - } else { - res.advance(); - String[] tables = - BackupSystemTableHelper.cellValueToBackupSet(res.current()); - union = merge(tables, newTables); - } - Put put = BackupSystemTableHelper.createPutForBackupSet(name, union); - table.put(put); - } finally { - if (table != null) { - table.close(); - } - } - } - - private String[] merge(String[] tables, String[] newTables) { - List list = new ArrayList(); - // Add all from tables - for(String t: tables){ - list.add(t); - } - for(String nt: newTables){ - if(list.contains(nt)) continue; - list.add(nt); - } - String[] arr = new String[list.size()]; - list.toArray(arr); - return arr; - } - - /** - * Remove tables from backup set (list of tables) - * @param name - set name - * @param tables - list of tables, comma-separated - * @throws IOException - */ + while ((res = scanner.next()) != null) { + res.advance(); + list.add(BackupSystemTableHelper.cellKeyToBackupSetName(res.current())); + } + return list; + } finally { + if (scanner != null) { + scanner.close(); + } + if (table != null) { + table.close(); + } + } + } + + /** + * Get backup set description (list of tables) + * @param name - set's name + * @return list of tables in a 
backup set + * @throws IOException + */ + public List describeBackupSet(String name) throws IOException { + if (LOG.isDebugEnabled()) { + LOG.debug(" Backup set describe: " + name); + } + Table table = null; + try { + table = connection.getTable(tableName); + Get get = BackupSystemTableHelper.createGetForBackupSet(name); + Result res = table.get(get); + if (res.isEmpty()) return null; + res.advance(); + String[] tables = BackupSystemTableHelper.cellValueToBackupSet(res.current()); + return toList(tables); + } finally { + if (table != null) { + table.close(); + } + } + } + + private List toList(String[] tables) { + List list = new ArrayList(tables.length); + for (String name : tables) { + list.add(TableName.valueOf(name)); + } + return list; + } + + /** + * Add backup set (list of tables) + * @param name - set name + * @param tables - list of tables, comma-separated + * @throws IOException + */ + public void addToBackupSet(String name, String[] newTables) throws IOException { + if (LOG.isDebugEnabled()) { + LOG.debug("Backup set add: " + name + " tables [" + StringUtils.join(newTables, " ") + "]"); + } + Table table = null; + String[] union = null; + try { + table = connection.getTable(tableName); + Get get = BackupSystemTableHelper.createGetForBackupSet(name); + Result res = table.get(get); + if (res.isEmpty()) { + union = newTables; + } else { + res.advance(); + String[] tables = BackupSystemTableHelper.cellValueToBackupSet(res.current()); + union = merge(tables, newTables); + } + Put put = BackupSystemTableHelper.createPutForBackupSet(name, union); + table.put(put); + } finally { + if (table != null) { + table.close(); + } + } + } + + private String[] merge(String[] tables, String[] newTables) { + List list = new ArrayList(); + // Add all from tables + for (String t : tables) { + list.add(t); + } + for (String nt : newTables) { + if (list.contains(nt)) continue; + list.add(nt); + } + String[] arr = new String[list.size()]; + list.toArray(arr); + return arr; + } + 
+ /** + * Remove tables from backup set (list of tables) + * @param name - set name + * @param tables - list of tables, comma-separated + * @throws IOException + */ public void removeFromBackupSet(String name, String[] toRemove) throws IOException { if (LOG.isDebugEnabled()) { - LOG.debug(" Backup set remove from : " + name+" tables ["+ - StringUtils.join(toRemove, " ")+"]"); + LOG.debug(" Backup set remove from : " + name + " tables [" + StringUtils.join(toRemove, " ") + + "]"); } Table table = null; String[] disjoint = null; @@ -730,7 +737,7 @@ public final class BackupSystemTable implements Closeable { Get get = BackupSystemTableHelper.createGetForBackupSet(name); Result res = table.get(get); if (res.isEmpty()) { - LOG.warn("Backup set '"+ name+"' not found."); + LOG.warn("Backup set '" + name + "' not found."); return; } else { res.advance(); @@ -742,9 +749,9 @@ public final class BackupSystemTable implements Closeable { table.put(put); } else { // Delete - //describeBackupSet(name); - LOG.warn("Backup set '"+ name+"' does not contain tables ["+ - StringUtils.join(toRemove, " ")+"]"); + // describeBackupSet(name); + LOG.warn("Backup set '" + name + "' does not contain tables [" + + StringUtils.join(toRemove, " ") + "]"); } } finally { if (table != null) { @@ -769,11 +776,11 @@ public final class BackupSystemTable implements Closeable { return arr; } - /** - * Delete backup set - * @param name set's name - * @throws IOException - */ + /** + * Delete backup set + * @param name set's name + * @throws IOException + */ public void deleteBackupSet(String name) throws IOException { if (LOG.isDebugEnabled()) { LOG.debug(" Backup set delete: " + name); @@ -800,12 +807,11 @@ public final class BackupSystemTable implements Closeable { colSessionsDesc.setMaxVersions(1); // Time to keep backup sessions (secs) Configuration config = HBaseConfiguration.create(); - int ttl = - config.getInt(HConstants.BACKUP_SYSTEM_TTL_KEY, HConstants.BACKUP_SYSTEM_TTL_DEFAULT); + int ttl = 
config.getInt(HConstants.BACKUP_SYSTEM_TTL_KEY, HConstants.BACKUP_SYSTEM_TTL_DEFAULT); colSessionsDesc.setTimeToLive(ttl); tableDesc.addFamily(colSessionsDesc); HColumnDescriptor colMetaDesc = new HColumnDescriptor(META_FAMILY); - //colDesc.setMaxVersions(1); + // colDesc.setMaxVersions(1); tableDesc.addFamily(colMetaDesc); return tableDesc; } @@ -813,7 +819,7 @@ public final class BackupSystemTable implements Closeable { public static String getTableNameAsString() { return tableName.getNameAsString(); } - + public static TableName getTableName() { return tableName; } diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTableHelper.java hbase-client/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTableHelper.java index 5eeb128..37f29f8 100644 --- hbase-client/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTableHelper.java +++ hbase-client/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTableHelper.java @@ -48,13 +48,12 @@ import org.apache.hadoop.hbase.util.Bytes; public final class BackupSystemTableHelper { /** - * hbase:backup schema: - * 1. Backup sessions rowkey= "session:" + backupId; value = serialized - * BackupContext - * 2. Backup start code rowkey = "startcode:" + backupRoot; value = startcode - * 3. Incremental backup set rowkey="incrbackupset:" + backupRoot; value=[list of tables] - * 4. Table-RS-timestamp map rowkey="trslm:"+ backupRoot+table_name; value = map[RS-> - * last WAL timestamp] + * hbase:backup schema: + * 1. Backup sessions rowkey= "session:" + backupId; value = serialized BackupContext + * 2. Backup start code rowkey = "startcode:" + backupRoot; value = startcode + * 3. Incremental backup set rowkey="incrbackupset:" + backupRoot; value=[list of tables] + * 4. Table-RS-timestamp map rowkey="trslm:"+ backupRoot+table_name; value = map[RS-> last WAL + * timestamp] * 5. RS - WAL ts map rowkey="rslogts:"+backupRoot +server; value = last WAL timestamp * 6. 
WALs recorded rowkey="wals:"+WAL unique file name; value = backupId and full WAL file name */ @@ -68,7 +67,7 @@ public final class BackupSystemTableHelper { private final static String SET_KEY_PREFIX = "backupset:"; private final static byte[] EMPTY_VALUE = new byte[] {}; - + // Safe delimiter in a string private final static String NULL = "\u0000"; @@ -130,7 +129,7 @@ public final class BackupSystemTableHelper { * @return get operation * @throws IOException exception */ - static Get createGetForStartCode(String rootPath) throws IOException { + static Get createGetForStartCode(String rootPath) throws IOException { Get get = new Get(rowkey(START_CODE_ROW, rootPath)); get.addFamily(BackupSystemTable.META_FAMILY); get.setMaxVersions(1); @@ -175,6 +174,17 @@ public final class BackupSystemTableHelper { } /** + * Creates Delete for incremental backup table set + * @param backupRoot backup root + * @return delete operation + */ + static Delete createDeleteForIncrBackupTableSet(String backupRoot) { + Delete delete = new Delete(rowkey(INCR_BACKUP_SET, backupRoot)); + delete.addFamily(BackupSystemTable.META_FAMILY); + return delete; + } + + /** * Creates Scan operation to load backup history * @return scan operation */ @@ -207,8 +217,8 @@ public final class BackupSystemTableHelper { * @param smap - map, containing RS:ts * @return put operation */ - static Put createPutForWriteRegionServerLogTimestamp(TableName table, byte[] smap, - String backupRoot) { + static Put createPutForWriteRegionServerLogTimestamp(TableName table, byte[] smap, + String backupRoot) { Put put = new Put(rowkey(TABLE_RS_LOG_MAP_PREFIX, backupRoot, NULL, table.getNameAsString())); put.addColumn(BackupSystemTable.META_FAMILY, "log-roll-map".getBytes(), smap); return put; @@ -237,8 +247,8 @@ public final class BackupSystemTableHelper { */ static String getTableNameForReadLogTimestampMap(byte[] cloneRow) { String s = new String(cloneRow); - int index = s.lastIndexOf(NULL); - return s.substring(index +1); + 
int index = s.lastIndexOf(NULL); + return s.substring(index + 1); } /** @@ -247,11 +257,11 @@ public final class BackupSystemTableHelper { * @param timestamp - log roll result (timestamp) * @return put operation */ - static Put createPutForRegionServerLastLogRollResult(String server, - Long timestamp, String backupRoot ) { + static Put createPutForRegionServerLastLogRollResult(String server, Long timestamp, + String backupRoot) { Put put = new Put(rowkey(RS_LOG_TS_PREFIX, backupRoot, NULL, server)); - put.addColumn(BackupSystemTable.META_FAMILY, "rs-log-ts".getBytes(), - timestamp.toString().getBytes()); + put.addColumn(BackupSystemTable.META_FAMILY, "rs-log-ts".getBytes(), timestamp.toString() + .getBytes()); return put; } @@ -280,7 +290,7 @@ public final class BackupSystemTableHelper { static String getServerNameForReadRegionServerLastLogRollResult(byte[] row) { String s = new String(row); int index = s.lastIndexOf(NULL); - return s.substring(index +1); + return s.substring(index + 1); } /** @@ -290,9 +300,8 @@ public final class BackupSystemTableHelper { * @return put list * @throws IOException exception */ - public static List createPutsForAddWALFiles(List files, - String backupId, String backupRoot) - throws IOException { + public static List createPutsForAddWALFiles(List files, String backupId, + String backupRoot) throws IOException { List puts = new ArrayList(); for (String file : files) { @@ -306,9 +315,8 @@ public final class BackupSystemTableHelper { } /** - * Creates Scan operation to load WALs - * TODO: support for backupRoot - * @param backupRoot - path to backup destination + * Creates Scan operation to load WALs TODO: support for backupRoot + * @param backupRoot - path to backup destination * @return scan operation */ public static Scan createScanForGetWALs(String backupRoot) { @@ -321,9 +329,9 @@ public final class BackupSystemTableHelper { scan.addFamily(BackupSystemTable.META_FAMILY); return scan; } + /** - * Creates Get operation for a given wal 
file name - * TODO: support for backup destination + * Creates Get operation for a given wal file name TODO: support for backup destination * @param file file * @return get operation * @throws IOException exception @@ -335,94 +343,91 @@ public final class BackupSystemTableHelper { return get; } - - /** - * Creates Scan operation to load backup set list - * @return scan operation - */ - static Scan createScanForBackupSetList() { - Scan scan = new Scan(); - byte[] startRow = SET_KEY_PREFIX.getBytes(); - byte[] stopRow = Arrays.copyOf(startRow, startRow.length); - stopRow[stopRow.length - 1] = (byte) (stopRow[stopRow.length - 1] + 1); - scan.setStartRow(startRow); - scan.setStopRow(stopRow); - scan.addFamily(BackupSystemTable.META_FAMILY); - return scan; - } - - /** - * Creates Get operation to load backup set content - * @return get operation - */ - static Get createGetForBackupSet(String name) { - Get get = new Get(rowkey(SET_KEY_PREFIX, name)); - get.addFamily(BackupSystemTable.META_FAMILY); - return get; - } - - /** - * Creates Delete operation to delete backup set content - * @param name - backup set's name - * @return delete operation - */ - static Delete createDeleteForBackupSet(String name) { - Delete del = new Delete(rowkey(SET_KEY_PREFIX, name)); - del.addFamily(BackupSystemTable.META_FAMILY); - return del; - } - - - /** - * Creates Put operation to update backup set content - * @param name - backup set's name - * @param tables - list of tables - * @return put operation - */ - static Put createPutForBackupSet(String name, String[] tables) { - Put put = new Put(rowkey(SET_KEY_PREFIX, name)); - byte[] value = convertToByteArray(tables); - put.addColumn(BackupSystemTable.META_FAMILY, "tables".getBytes(), value); - return put; - } - - private static byte[] convertToByteArray(String[] tables) { - return StringUtils.join(tables, ",").getBytes(); - } - - - /** - * Converts cell to backup set list. 
- * @param current - cell - * @return backup set - * @throws IOException - */ - static String[] cellValueToBackupSet(Cell current) throws IOException { - byte[] data = CellUtil.cloneValue(current); - if( data != null && data.length > 0){ - return new String(data).split(","); - } else{ - return new String[0]; - } - } - - /** - * Converts cell key to backup set name. - * @param current - cell - * @return backup set name - * @throws IOException - */ - static String cellKeyToBackupSetName(Cell current) throws IOException { - byte[] data = CellUtil.cloneRow(current); - return new String(data).substring(SET_KEY_PREFIX.length()); - } - - static byte[] rowkey(String s, String ... other){ - StringBuilder sb = new StringBuilder(s); - for(String ss: other){ - sb.append(ss); - } - return sb.toString().getBytes(); - } - + /** + * Creates Scan operation to load backup set list + * @return scan operation + */ + static Scan createScanForBackupSetList() { + Scan scan = new Scan(); + byte[] startRow = SET_KEY_PREFIX.getBytes(); + byte[] stopRow = Arrays.copyOf(startRow, startRow.length); + stopRow[stopRow.length - 1] = (byte) (stopRow[stopRow.length - 1] + 1); + scan.setStartRow(startRow); + scan.setStopRow(stopRow); + scan.addFamily(BackupSystemTable.META_FAMILY); + return scan; + } + + /** + * Creates Get operation to load backup set content + * @return get operation + */ + static Get createGetForBackupSet(String name) { + Get get = new Get(rowkey(SET_KEY_PREFIX, name)); + get.addFamily(BackupSystemTable.META_FAMILY); + return get; + } + + /** + * Creates Delete operation to delete backup set content + * @param name - backup set's name + * @return delete operation + */ + static Delete createDeleteForBackupSet(String name) { + Delete del = new Delete(rowkey(SET_KEY_PREFIX, name)); + del.addFamily(BackupSystemTable.META_FAMILY); + return del; + } + + /** + * Creates Put operation to update backup set content + * @param name - backup set's name + * @param tables - list of tables + * 
@return put operation + */ + static Put createPutForBackupSet(String name, String[] tables) { + Put put = new Put(rowkey(SET_KEY_PREFIX, name)); + byte[] value = convertToByteArray(tables); + put.addColumn(BackupSystemTable.META_FAMILY, "tables".getBytes(), value); + return put; + } + + private static byte[] convertToByteArray(String[] tables) { + return StringUtils.join(tables, ",").getBytes(); + } + + /** + * Converts cell to backup set list. + * @param current - cell + * @return backup set + * @throws IOException + */ + static String[] cellValueToBackupSet(Cell current) throws IOException { + byte[] data = CellUtil.cloneValue(current); + if (data != null && data.length > 0) { + return new String(data).split(","); + } else { + return new String[0]; + } + } + + /** + * Converts cell key to backup set name. + * @param current - cell + * @return backup set name + * @throws IOException + */ + static String cellKeyToBackupSetName(Cell current) throws IOException { + byte[] data = CellUtil.cloneRow(current); + return new String(data).substring(SET_KEY_PREFIX.length()); + } + + static byte[] rowkey(String s, String... 
other) { + StringBuilder sb = new StringBuilder(s); + for (String ss : other) { + sb.append(ss); + } + return sb.toString().getBytes(); + } + } diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/backup/util/BackupClientUtil.java hbase-client/src/main/java/org/apache/hadoop/hbase/backup/util/BackupClientUtil.java index 28629b3..5255434 100644 --- hbase-client/src/main/java/org/apache/hadoop/hbase/backup/util/BackupClientUtil.java +++ hbase-client/src/main/java/org/apache/hadoop/hbase/backup/util/BackupClientUtil.java @@ -56,7 +56,7 @@ public final class BackupClientUtil { protected static final Log LOG = LogFactory.getLog(BackupClientUtil.class); public static final String LOGNAME_SEPARATOR = "."; - private BackupClientUtil(){ + private BackupClientUtil() { throw new AssertionError("Instantiating utility class..."); } @@ -67,8 +67,7 @@ public final class BackupClientUtil { * @return Yes if path exists * @throws IOException exception */ - public static boolean checkPathExist(String backupStr, Configuration conf) - throws IOException { + public static boolean checkPathExist(String backupStr, Configuration conf) throws IOException { boolean isExist = false; Path backupPath = new Path(backupStr); FileSystem fileSys = backupPath.getFileSystem(conf); @@ -125,7 +124,7 @@ public final class BackupClientUtil { } /** - * Parses host name:port from archived WAL path + * Parses host name:port from archived WAL path * @param p path * @return host name * @throws IOException exception @@ -158,7 +157,7 @@ public final class BackupClientUtil { } public static List getFiles(FileSystem fs, Path rootDir, List files, - PathFilter filter) throws FileNotFoundException, IOException { + PathFilter filter) throws FileNotFoundException, IOException { RemoteIterator it = fs.listFiles(rootDir, true); while (it.hasNext()) { @@ -173,10 +172,8 @@ public final class BackupClientUtil { } return files; } - - public static void cleanupBackupData(BackupInfo context, Configuration conf) - 
throws IOException - { + + public static void cleanupBackupData(BackupInfo context, Configuration conf) throws IOException { cleanupHLogDir(context, conf); cleanupTargetDir(context, conf); } @@ -209,23 +206,22 @@ public final class BackupClientUtil { /** * Clean up the data at target directory */ - private static void cleanupTargetDir(BackupInfo backupContext, Configuration conf) { + private static void cleanupTargetDir(BackupInfo backupInfo, Configuration conf) { try { // clean up the data at target directory - LOG.debug("Trying to cleanup up target dir : " + backupContext.getBackupId()); - String targetDir = backupContext.getTargetRootDir(); + LOG.debug("Trying to cleanup up target dir : " + backupInfo.getBackupId()); + String targetDir = backupInfo.getTargetRootDir(); if (targetDir == null) { - LOG.warn("No target directory specified for " + backupContext.getBackupId()); + LOG.warn("No target directory specified for " + backupInfo.getBackupId()); return; } - FileSystem outputFs = - FileSystem.get(new Path(backupContext.getTargetRootDir()).toUri(), conf); + FileSystem outputFs = FileSystem.get(new Path(backupInfo.getTargetRootDir()).toUri(), conf); - for (TableName table : backupContext.getTables()) { + for (TableName table : backupInfo.getTables()) { Path targetDirPath = - new Path(getTableBackupDir(backupContext.getTargetRootDir(), - backupContext.getBackupId(), table)); + new Path(getTableBackupDir(backupInfo.getTargetRootDir(), backupInfo.getBackupId(), + table)); if (outputFs.delete(targetDirPath, true)) { LOG.info("Cleaning up backup data at " + targetDirPath.toString() + " done."); } else { @@ -239,10 +235,10 @@ public final class BackupClientUtil { LOG.debug(tableDir.toString() + " is empty, remove it."); } } - outputFs.delete(new Path(targetDir, backupContext.getBackupId()), true); + outputFs.delete(new Path(targetDir, backupInfo.getBackupId()), true); } catch (IOException e1) { - LOG.error("Cleaning up backup data of " + backupContext.getBackupId() + " 
at " - + backupContext.getTargetRootDir() + " failed due to " + e1.getMessage() + "."); + LOG.error("Cleaning up backup data of " + backupInfo.getBackupId() + " at " + + backupInfo.getTargetRootDir() + " failed due to " + e1.getMessage() + "."); } } @@ -251,17 +247,17 @@ public final class BackupClientUtil { * which is also where the backup manifest file is. return value look like: * "hdfs://backup.hbase.org:9000/user/biadmin/backup1/backup_1396650096738/default/t1_dn/" * @param backupRootDir backup root directory - * @param backupId backup id + * @param backupId backup id * @param table table name * @return backupPath String for the particular table */ - public static String getTableBackupDir(String backupRootDir, String backupId, - TableName tableName) { - return backupRootDir + Path.SEPARATOR+ backupId + Path.SEPARATOR + - tableName.getNamespaceAsString() + Path.SEPARATOR - + tableName.getQualifierAsString() + Path.SEPARATOR ; - } - + public static String + getTableBackupDir(String backupRootDir, String backupId, TableName tableName) { + return backupRootDir + Path.SEPARATOR + backupId + Path.SEPARATOR + + tableName.getNamespaceAsString() + Path.SEPARATOR + tableName.getQualifierAsString() + + Path.SEPARATOR; + } + public static TableName[] parseTableNames(String tables) { if (tables == null) { return null; @@ -280,8 +276,7 @@ public final class BackupClientUtil { * @param historyList history list * @return sorted list of BackupCompleteData */ - public static ArrayList sortHistoryListDesc( - ArrayList historyList) { + public static ArrayList sortHistoryListDesc(ArrayList historyList) { ArrayList list = new ArrayList(); TreeMap map = new TreeMap(); for (BackupInfo h : historyList) { @@ -314,21 +309,19 @@ public final class BackupClientUtil { public static String getUniqueWALFileNamePart(Path p) throws IOException { return p.getName(); } - + /** - * Calls fs.listStatus() and treats FileNotFoundException as non-fatal - * This accommodates differences between hadoop 
versions, where hadoop 1 - * does not throw a FileNotFoundException, and return an empty FileStatus[] - * while Hadoop 2 will throw FileNotFoundException. - * + * Calls fs.listStatus() and treats FileNotFoundException as non-fatal This accommodates + * differences between hadoop versions, where hadoop 1 does not throw a FileNotFoundException, and + * return an empty FileStatus[] while Hadoop 2 will throw FileNotFoundException. * @param fs file system * @param dir directory * @param filter path filter * @return null if dir is empty or doesn't exist, otherwise FileStatus array */ - public static FileStatus [] listStatus(final FileSystem fs, - final Path dir, final PathFilter filter) throws IOException { - FileStatus [] status = null; + public static FileStatus[] + listStatus(final FileSystem fs, final Path dir, final PathFilter filter) throws IOException { + FileStatus[] status = null; try { status = filter == null ? fs.listStatus(dir) : fs.listStatus(dir, filter); } catch (FileNotFoundException fnfe) { @@ -339,22 +332,21 @@ public final class BackupClientUtil { } if (status == null || status.length < 1) return null; return status; - } - + } + /** - * Return the 'path' component of a Path. In Hadoop, Path is an URI. This - * method returns the 'path' component of a Path's URI: e.g. If a Path is - * hdfs://example.org:9000/hbase_trunk/TestTable/compaction.dir, - * this method returns /hbase_trunk/TestTable/compaction.dir. - * This method is useful if you want to print out a Path without qualifying - * Filesystem instance. + * Return the 'path' component of a Path. In Hadoop, Path is an URI. This method returns the + * 'path' component of a Path's URI: e.g. If a Path is + * hdfs://example.org:9000/hbase_trunk/TestTable/compaction.dir, this method returns + * /hbase_trunk/TestTable/compaction.dir. This method is useful if you want to print + * out a Path without qualifying Filesystem instance. * @param p Filesystem Path whose 'path' component we are to return. 
* @return Path portion of the Filesystem */ public static String getPath(Path p) { return p.toUri().getPath(); } - + /** * Given the backup root dir and the backup id, return the log file location for an incremental * backup. @@ -363,23 +355,23 @@ public final class BackupClientUtil { * @return logBackupDir: ".../user/biadmin/backup1/WALs/backup_1396650096738" */ public static String getLogBackupDir(String backupRootDir, String backupId) { - return backupRootDir + Path.SEPARATOR + backupId+ Path.SEPARATOR + return backupRootDir + Path.SEPARATOR + backupId + Path.SEPARATOR + HConstants.HREGION_LOGDIR_NAME; } - public static List getHistory(Configuration conf, Path backupRootPath) + public static List getHistory(Configuration conf, Path backupRootPath) throws IOException { // Get all (n) history from backup root destination FileSystem fs = FileSystem.get(conf); RemoteIterator it = fs.listLocatedStatus(backupRootPath); List infos = new ArrayList(); - while( it.hasNext()) { - LocatedFileStatus lfs = it.next(); - if(!lfs.isDirectory()) continue; - if(!isBackupDirectory(lfs)) continue; + while (it.hasNext()) { + LocatedFileStatus lfs = it.next(); + if (!lfs.isDirectory()) continue; + if (!isBackupDirectory(lfs)) continue; String backupId = lfs.getPath().getName(); - infos.add(loadBackupInfo(backupRootPath, backupId, fs)); + infos.add(loadBackupInfo(backupRootPath, backupId, fs)); } // Sort Collections.sort(infos, new Comparator() { @@ -388,10 +380,10 @@ public final class BackupClientUtil { public int compare(BackupInfo o1, BackupInfo o2) { long ts1 = getTimestamp(o1.getBackupId()); long ts2 = getTimestamp(o2.getBackupId()); - if(ts1 == ts2) return 0; - return ts1< ts2 ? 1: -1 ; + if (ts1 == ts2) return 0; + return ts1 < ts2 ? 
1 : -1; } - + private long getTimestamp(String backupId) { String[] split = backupId.split("_"); return Long.parseLong(split[1]); @@ -400,47 +392,46 @@ public final class BackupClientUtil { return infos; } - public static List getHistory(Configuration conf, int n, - TableName name, Path backupRootPath) throws IOException - { + public static List getHistory(Configuration conf, int n, TableName name, + Path backupRootPath) throws IOException { List infos = getHistory(conf, backupRootPath); - if (name == null) { - if(infos.size() <= n) return infos; + if (name == null) { + if (infos.size() <= n) return infos; return infos.subList(0, n); } else { List ret = new ArrayList(); int count = 0; - for(BackupInfo info: infos) { + for (BackupInfo info : infos) { List names = info.getTableNames(); - if(names.contains(name)) { - ret.add(info); - if(++count == n) { + if (names.contains(name)) { + ret.add(info); + if (++count == n) { break; } } - } + } return ret; - } - } - + } + } + private static boolean isBackupDirectory(LocatedFileStatus lfs) { return lfs.getPath().getName().startsWith(BackupRestoreConstants.BACKUPID_PREFIX); } - - public static BackupInfo loadBackupInfo(Path backupRootPath, String backupId, - FileSystem fs) throws IOException { + + public static BackupInfo loadBackupInfo(Path backupRootPath, String backupId, FileSystem fs) + throws IOException { Path backupPath = new Path(backupRootPath, backupId); - + RemoteIterator it = fs.listFiles(backupPath, true); - while(it.hasNext()) { + while (it.hasNext()) { LocatedFileStatus lfs = it.next(); - if(lfs.getPath().getName().equals(BackupManifest.MANIFEST_FILE_NAME)) { + if (lfs.getPath().getName().equals(BackupManifest.MANIFEST_FILE_NAME)) { // Load BackupManifest BackupManifest manifest = new BackupManifest(fs, lfs.getPath().getParent()); - BackupInfo info = manifest.toBackupInfo(); + BackupInfo info = manifest.toBackupInfo(); return info; - } - } + } + } return null; } } diff --git 
hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseBackupAdmin.java hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseBackupAdmin.java index 6d11e69..e122274 100644 --- hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseBackupAdmin.java +++ hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseBackupAdmin.java @@ -19,18 +19,25 @@ package org.apache.hadoop.hbase.client; import java.io.IOException; import java.util.ArrayList; +import java.util.HashMap; +import java.util.HashSet; import java.util.List; +import java.util.Map; +import java.util.Set; import java.util.concurrent.Future; import org.apache.commons.lang.StringUtils; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.backup.BackupInfo; import org.apache.hadoop.hbase.backup.BackupInfo.BackupState; import org.apache.hadoop.hbase.backup.BackupRequest; import org.apache.hadoop.hbase.backup.BackupRestoreClientFactory; +import org.apache.hadoop.hbase.backup.BackupType; import org.apache.hadoop.hbase.backup.RestoreClient; import org.apache.hadoop.hbase.backup.RestoreRequest; import org.apache.hadoop.hbase.backup.impl.BackupSystemTable; @@ -56,24 +63,23 @@ public class HBaseBackupAdmin implements BackupAdmin { private final HBaseAdmin admin; private final Connection conn; - + HBaseBackupAdmin(HBaseAdmin admin) { this.admin = admin; this.conn = admin.getConnection(); } - - + @Override public void close() throws IOException { } @Override - public BackupInfo getBackupInfo(String backupId) throws IOException { + public BackupInfo getBackupInfo(String backupId) throws IOException { BackupInfo backupInfo = null; try (final BackupSystemTable table = new BackupSystemTable(conn)) { backupInfo = table.readBackupInfo(backupId); return backupInfo; - } + } } 
@Override @@ -81,8 +87,7 @@ public class HBaseBackupAdmin implements BackupAdmin { BackupInfo backupInfo = null; try (final BackupSystemTable table = new BackupSystemTable(conn)) { if (backupId == null) { - ArrayList recentSessions = - table.getBackupContexts(BackupState.RUNNING); + ArrayList recentSessions = table.getBackupContexts(BackupState.RUNNING); if (recentSessions.isEmpty()) { LOG.warn("No ongoing sessions found."); return -1; @@ -100,55 +105,264 @@ public class HBaseBackupAdmin implements BackupAdmin { return -1; } } - } + } } @Override public int deleteBackups(String[] backupIds) throws IOException { - BackupInfo backupInfo = null; - String backupId = null; + // TODO: requires FT, failure will leave system + // in non-consistent state + // see HBASE-15227 + int totalDeleted = 0; - try (final BackupSystemTable table = new BackupSystemTable(conn)) { + Map> allTablesMap = new HashMap>(); + + try (final BackupSystemTable sysTable = new BackupSystemTable(conn)) { for (int i = 0; i < backupIds.length; i++) { - backupId = backupIds[i]; - LOG.info("Deleting backup for backupID=" + backupId + " ..."); - backupInfo = table.readBackupInfo(backupId); - if (backupInfo != null) { - BackupClientUtil.cleanupBackupData(backupInfo, admin.getConfiguration()); - table.deleteBackupInfo(backupInfo.getBackupId()); - LOG.info("Delete backup for backupID=" + backupId + " completed."); - totalDeleted++; - } else { - LOG.warn("Delete backup failed: no information found for backupID=" + backupId); + BackupInfo info = sysTable.readBackupInfo(backupIds[i]); + if (info != null) { + String rootDir = info.getTargetRootDir(); + HashSet allTables = allTablesMap.get(rootDir); + if (allTables == null) { + allTables = new HashSet(); + allTablesMap.put(rootDir, allTables); + } + allTables.addAll(info.getTableNames()); + totalDeleted += deleteBackup(backupIds[i], sysTable); } } + finalizeDelete(allTablesMap, sysTable); } return totalDeleted; } + /** + * Updates incremental backup set for every 
backupRoot + * @param tablesMap - Map [backupRoot: Set] + * @param table - backup system table + * @throws IOException + */ + private void finalizeDelete(Map> tablesMap, BackupSystemTable table) + throws IOException { + + for (String backupRoot : tablesMap.keySet()) { + Set incrTableSet = table.getIncrementalBackupTableSet(backupRoot); + for (TableName name : tablesMap.get(backupRoot)) { + ArrayList history = table.getBackupHistoryForTable(name); + if (history.isEmpty()) { + // No more backups for a table + incrTableSet.remove(name); + } + } + if (!incrTableSet.isEmpty()) { + table.addIncrementalBackupTableSet(incrTableSet, backupRoot); + } else { // empty + table.deleteIncrementalBackupTableSet(backupRoot); + } + } + } + + /** + * Delete single backup and all related backups + * Algorithm: + * + * Backup type: FULL or INCREMENTAL + * Is this last backup session for table T: YES or NO + * For every table T from table list 'tables': + * if(FULL, YES) deletes only physical data (PD) + * if(FULL, NO), deletes PD, scans all newer backups and removes T from backupInfo, until + * we either reach the most recent backup for T in the system or FULL backup which + * includes T + * if(INCREMENTAL, YES) deletes only physical data (PD) + * if(INCREMENTAL, NO) deletes physical data and for table T scans all backup images + * between last FULL backup, which is older than the backup being deleted and the next + * FULL backup (if exists) or last one for a particular table T and removes T from list + * of backup tables. 
+ * @param backupId - backup id + * @param sysTable - backup system table + * @return total - number of deleted backup images + * @throws IOException + */ + private int deleteBackup(String backupId, BackupSystemTable sysTable) throws IOException { + + BackupInfo backupInfo = sysTable.readBackupInfo(backupId); + + int totalDeleted = 0; + if (backupInfo != null) { + LOG.info("Deleting backup " + backupInfo.getBackupId() + " ..."); + BackupClientUtil.cleanupBackupData(backupInfo, admin.getConfiguration()); + // List of tables in this backup; + List tables = backupInfo.getTableNames(); + long startTime = backupInfo.getStartTs(); + for (TableName tn : tables) { + boolean isLastBackupSession = isLastBackupSession(sysTable, tn, startTime); + if (isLastBackupSession) { + continue; + } + // else + List affectedBackups = getAffectedBackupInfos(backupInfo, tn, sysTable); + for (BackupInfo info : affectedBackups) { + if (info.equals(backupInfo)) { + continue; + } + removeTableFromBackupImage(info, tn, sysTable); + } + } + LOG.debug("Delete backup info "+ backupInfo.getBackupId()); + + sysTable.deleteBackupInfo(backupInfo.getBackupId()); + LOG.info("Delete backup " + backupInfo.getBackupId() + " completed."); + totalDeleted++; + } else { + LOG.warn("Delete backup failed: no information found for backupID=" + backupId); + } + return totalDeleted; + } + + private void removeTableFromBackupImage(BackupInfo info, TableName tn, BackupSystemTable sysTable) + throws IOException { + List tables = info.getTableNames(); + LOG.debug("Remove "+ tn +" from " + info.getBackupId() + " tables=" + + info.getTableListAsString()); + if (tables.contains(tn)) { + tables.remove(tn); + + if (tables.isEmpty()) { + LOG.debug("Delete backup info "+ info.getBackupId()); + + sysTable.deleteBackupInfo(info.getBackupId()); + BackupClientUtil.cleanupBackupData(info, conn.getConfiguration()); + } else { + info.setTables(tables); + sysTable.updateBackupInfo(info); + // Now, clean up directory for table + 
cleanupBackupDir(info, tn, conn.getConfiguration()); + } + } + } + + private List getAffectedBackupInfos(BackupInfo backupInfo, TableName tn, + BackupSystemTable table) throws IOException { + LOG.debug("GetAffectedBackupInfos for: "+ backupInfo.getBackupId()+" table="+ tn); + long ts = backupInfo.getStartTs(); + BackupType type = backupInfo.getType(); + List list = new ArrayList(); + List history = table.getBackupHistory(); + if (type == BackupType.FULL) { + // Scan from most recent to backupInfo + // break when backupInfo reached + for (BackupInfo info : history) { + if (info.getStartTs() == ts) { + break; + } + List tables = info.getTableNames(); + if (tables.contains(tn)) { + BackupType bt = info.getType(); + if (bt == BackupType.FULL) { + list.clear(); + } else { + LOG.debug("GetAffectedBackupInfos for: "+ backupInfo.getBackupId()+" table="+ tn+" added "+ info.getBackupId()+ + " tables="+info.getTableListAsString()); + list.add(info); + } + } + } + } else { + // Find first FULL backup image which contains + // 'tn' and which is older than 'backupInfo' + // + // it can return null? 
+ for (BackupInfo info : history) { + List tables = info.getTableNames(); + if (info.getStartTs() == ts) { + break; + } + if (tables.contains(tn)) { + BackupType bt = info.getType(); + if (bt == BackupType.FULL) { + list.clear(); + } else { + LOG.debug("GetAffectedBackupInfos for: "+ backupInfo.getBackupId()+" table="+ tn+" added "+ info.getBackupId()+ + " tables="+info.getTableListAsString()); + list.add(info); + } + } + } + } + return list; + } + + + + /** + * Clean up the data at target directory + */ + private void cleanupBackupDir(BackupInfo backupInfo, TableName table, Configuration conf) { + try { + // clean up the data at target directory + String targetDir = backupInfo.getTargetRootDir(); + if (targetDir == null) { + LOG.warn("No target directory specified for " + backupInfo.getBackupId()); + return; + } + + FileSystem outputFs = FileSystem.get(new Path(backupInfo.getTargetRootDir()).toUri(), conf); + + Path targetDirPath = + new Path(BackupClientUtil.getTableBackupDir(backupInfo.getTargetRootDir(), + backupInfo.getBackupId(), table)); + if (outputFs.delete(targetDirPath, true)) { + LOG.info("Cleaning up backup data at " + targetDirPath.toString() + " done."); + } else { + LOG.info("No data has been found in " + targetDirPath.toString() + "."); + } + + } catch (IOException e1) { + LOG.error("Cleaning up backup data of " + backupInfo.getBackupId() + " for table " + table + + "at " + backupInfo.getTargetRootDir() + " failed due to " + e1.getMessage() + "."); + } + } + + private boolean isLastBackupSession(BackupSystemTable table, TableName tn, long startTime) + throws IOException { + ArrayList history = table.getBackupHistory(); + for (BackupInfo info : history) { + List tables = info.getTableNames(); + if (!tables.contains(tn)) { + continue; + } + if (info.getStartTs() <= startTime) { + return true; + } else { + return false; + } + } + return false; + } + @Override public List getHistory(int n) throws IOException { try (final BackupSystemTable table = new 
BackupSystemTable(conn)) { List history = table.getBackupHistory(); - if( history.size() <= n) return history; + if (history.size() <= n) return history; List list = new ArrayList(); - for(int i=0; i < n; i++){ + for (int i = 0; i < n; i++) { list.add(history.get(i)); } return list; - } + } } @Override public List getHistory(int n, TableName name) throws IOException { - if(name == null) return getHistory(n); + if (name == null) return getHistory(n); try (final BackupSystemTable table = new BackupSystemTable(conn)) { - List history = table.getBackupHistoryForTable(name); - n = Math.min(n, history.size()); + List history = table.getBackupHistoryForTable(name); + n = Math.min(n, history.size()); return history.subList(0, n); - } + } } - + @Override public List listBackupSets() throws IOException { try (final BackupSystemTable table = new BackupSystemTable(conn)) { @@ -156,8 +370,8 @@ public class HBaseBackupAdmin implements BackupAdmin { List bslist = new ArrayList(); for (String s : list) { List tables = table.describeBackupSet(s); - if(tables != null){ - bslist.add( new BackupSet(s, tables)); + if (tables != null) { + bslist.add(new BackupSet(s, tables)); } } return bslist; @@ -168,70 +382,64 @@ public class HBaseBackupAdmin implements BackupAdmin { public BackupSet getBackupSet(String name) throws IOException { try (final BackupSystemTable table = new BackupSystemTable(conn)) { List list = table.describeBackupSet(name); - if(list == null) return null; + if (list == null) return null; return new BackupSet(name, list); - } + } } @Override public boolean deleteBackupSet(String name) throws IOException { try (final BackupSystemTable table = new BackupSystemTable(conn)) { - if(table.describeBackupSet(name) == null) { + if (table.describeBackupSet(name) == null) { return false; } table.deleteBackupSet(name); return true; - } + } } @Override public void addToBackupSet(String name, TableName[] tables) throws IOException { String[] tableNames = new String[tables.length]; - 
for(int i = 0; i < tables.length; i++){ + for (int i = 0; i < tables.length; i++) { tableNames[i] = tables[i].getNameAsString(); if (!admin.tableExists(TableName.valueOf(tableNames[i]))) { throw new IOException("Cannot add " + tableNames[i] + " because it doesn't exist"); } } try (final BackupSystemTable table = new BackupSystemTable(conn)) { - table.addToBackupSet(name, tableNames); - LOG.info("Added tables ["+StringUtils.join(tableNames, " ")+"] to '" + name + "' backup set"); - } + table.addToBackupSet(name, tableNames); + LOG.info("Added tables [" + StringUtils.join(tableNames, " ") + "] to '" + name + + "' backup set"); + } } @Override public void removeFromBackupSet(String name, String[] tables) throws IOException { - LOG.info("Removing tables ["+ StringUtils.join(tables, " ")+"] from '" + name + "'"); - try (final BackupSystemTable table = new BackupSystemTable(conn)) { + LOG.info("Removing tables [" + StringUtils.join(tables, " ") + "] from '" + name + "'"); + try (final BackupSystemTable table = new BackupSystemTable(conn)) { table.removeFromBackupSet(name, tables); - LOG.info("Removing tables ["+ StringUtils.join(tables, " ")+"] from '" + name + "' completed."); - } + LOG.info("Removing tables [" + StringUtils.join(tables, " ") + "] from '" + name + + "' completed."); + } } @Override public void restore(RestoreRequest request) throws IOException { RestoreClient client = BackupRestoreClientFactory.getRestoreClient(admin.getConfiguration()); - client.restore(request.getBackupRootDir(), - request.getBackupId(), - request.isCheck(), - request.getFromTables(), - request.getToTables(), - request.isOverwrite()); - + client.restore(request.getBackupRootDir(), request.getBackupId(), request.isCheck(), + request.getFromTables(), request.getToTables(), request.isOverwrite()); + } @Override - public String backupTables(final BackupRequest userRequest) - throws IOException { - return admin.backupTables(userRequest); + public String backupTables(final BackupRequest 
userRequest) throws IOException { + return admin.backupTables(userRequest); } - - + @Override - public Future backupTablesAsync(final BackupRequest userRequest) - throws IOException { + public Future backupTablesAsync(final BackupRequest userRequest) throws IOException { return admin.backupTablesAsync(userRequest); } - - + } diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupMultipleDeletes.java hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupMultipleDeletes.java new file mode 100644 index 0000000..5fc671d --- /dev/null +++ hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupMultipleDeletes.java @@ -0,0 +1,195 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hbase.backup; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + +import java.util.List; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.client.ConnectionFactory; +import org.apache.hadoop.hbase.client.HBaseAdmin; +import org.apache.hadoop.hbase.client.HTable; +import org.apache.hadoop.hbase.client.Put; +import org.apache.hadoop.hbase.testclassification.LargeTests; +import org.apache.hadoop.hbase.util.Bytes; +import org.hamcrest.CoreMatchers; +import org.junit.Assert; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +import com.google.common.collect.Lists; + + +/** + * Create multiple backups for two tables: table1, table2 + * then perform 3 deletes + */ +@Category(LargeTests.class) +public class TestBackupMultipleDeletes extends TestBackupBase { + private static final Log LOG = LogFactory.getLog(TestBackupMultipleDeletes.class); + @Test + public void testBackupMultipleDeletes() throws Exception { + // #1 - create full backup for all tables + LOG.info("create full backup image for all tables"); + + List tables = Lists.newArrayList(table1, table2); + HBaseAdmin admin = null; + Connection conn = ConnectionFactory.createConnection(conf1); + admin = (HBaseAdmin) conn.getAdmin(); + + BackupRequest request = new BackupRequest(); + request.setBackupType(BackupType.FULL).setTableList(tables).setTargetRootDir(BACKUP_ROOT_DIR); + String backupIdFull = admin.getBackupAdmin().backupTables(request); + + assertTrue(checkSucceeded(backupIdFull)); + + // #2 - insert some data to table table1 + HTable t1 = (HTable) conn.getTable(table1); + Put p1; + for (int i = 0; i < NB_ROWS_IN_BATCH; i++) { + p1 = new Put(Bytes.toBytes("row-t1" + i)); + p1.addColumn(famName, qualName, Bytes.toBytes("val" + i)); + t1.put(p1); + 
} + + Assert.assertThat(TEST_UTIL.countRows(t1), CoreMatchers.equalTo(NB_ROWS_IN_BATCH * 2)); + t1.close(); + + + // #3 - incremental backup for table1, table2 + tables = Lists.newArrayList(table1, table2); + request = new BackupRequest(); + request.setBackupType(BackupType.INCREMENTAL).setTableList(tables) + .setTargetRootDir(BACKUP_ROOT_DIR); + String backupIdInc1 = admin.getBackupAdmin().backupTables(request); + assertTrue(checkSucceeded(backupIdInc1)); + + // #4 - insert some data to table table2 + HTable t2 = (HTable) conn.getTable(table2); + Put p2 = null; + for (int i = 0; i < NB_ROWS_IN_BATCH; i++) { + p2 = new Put(Bytes.toBytes("row-t2" + i)); + p2.addColumn(famName, qualName, Bytes.toBytes("val" + i)); + t2.put(p2); + } + + + // #5 - incremental backup for table1, table2 + tables = Lists.newArrayList(table1, table2); + request = new BackupRequest(); + request.setBackupType(BackupType.INCREMENTAL).setTableList(tables) + .setTargetRootDir(BACKUP_ROOT_DIR); + String backupIdInc2 = admin.getBackupAdmin().backupTables(request); + assertTrue(checkSucceeded(backupIdInc2)); + + // #6 - insert some data to table table1 + t1 = (HTable) conn.getTable(table1); + for (int i = NB_ROWS_IN_BATCH; i < 2*NB_ROWS_IN_BATCH; i++) { + p1 = new Put(Bytes.toBytes("row-t1" + i)); + p1.addColumn(famName, qualName, Bytes.toBytes("val" + i)); + t1.put(p1); + } + + + // #7 - incremental backup for table1, table2 + tables = Lists.newArrayList(table1, table2); + request = new BackupRequest(); + request.setBackupType(BackupType.INCREMENTAL).setTableList(tables) + .setTargetRootDir(BACKUP_ROOT_DIR); + String backupIdInc3 = admin.getBackupAdmin().backupTables(request); + assertTrue(checkSucceeded(backupIdInc3)); + + // #8 - insert some data to table table2 + t2 = (HTable) conn.getTable(table2); + for (int i = NB_ROWS_IN_BATCH; i < 2*NB_ROWS_IN_BATCH; i++) { + p2 = new Put(Bytes.toBytes("row-t2" + i)); + p2.addColumn(famName, qualName, Bytes.toBytes("val" + i)); + t2.put(p2); + } + + // #9
 - incremental backup for table1, table2 + tables = Lists.newArrayList(table1, table2); + request = new BackupRequest(); + request.setBackupType(BackupType.INCREMENTAL).setTableList(tables) + .setTargetRootDir(BACKUP_ROOT_DIR); + String backupIdInc4 = admin.getBackupAdmin().backupTables(request); + assertTrue(checkSucceeded(backupIdInc4)); + + + // #10 - insert some data to table table1 + t1 = (HTable) conn.getTable(table1); + for (int i = 2*NB_ROWS_IN_BATCH; i < 3*NB_ROWS_IN_BATCH; i++) { + p1 = new Put(Bytes.toBytes("row-t1" + i)); + p1.addColumn(famName, qualName, Bytes.toBytes("val" + i)); + t1.put(p1); + } + + // #11- incremental backup for table1, table2 + tables = Lists.newArrayList(table1, table2); + request = new BackupRequest(); + request.setBackupType(BackupType.INCREMENTAL).setTableList(tables) + .setTargetRootDir(BACKUP_ROOT_DIR); + String backupIdInc5 = admin.getBackupAdmin().backupTables(request); + assertTrue(checkSucceeded(backupIdInc5)); + + // #12 - insert some data to table table2 + t2 = (HTable) conn.getTable(table2); + for (int i = 2*NB_ROWS_IN_BATCH; i < 3*NB_ROWS_IN_BATCH; i++) { + p2 = new Put(Bytes.toBytes("row-t2" + i)); + p2.addColumn(famName, qualName, Bytes.toBytes("val" + i)); + t2.put(p2); + } + + // #13- incremental backup for table1, table2 + tables = Lists.newArrayList(table1, table2); + request = new BackupRequest(); + request.setBackupType(BackupType.INCREMENTAL).setTableList(tables) + .setTargetRootDir(BACKUP_ROOT_DIR); + String backupIdInc6 = admin.getBackupAdmin().backupTables(request); + assertTrue(checkSucceeded(backupIdInc6)); + + int totalBackups = admin.getBackupAdmin().getHistory(100).size(); + + LOG.error("Delete backupIdInc6"); + admin.getBackupAdmin().deleteBackups( new String[]{backupIdInc6}); + LOG.error("Delete backupIdInc6 done"); + int backups = admin.getBackupAdmin().getHistory(100).size(); + assertEquals(totalBackups -1, backups); + LOG.error("Delete backupIdInc3"); + admin.getBackupAdmin().deleteBackups( new
String[]{backupIdInc3}); + LOG.error("Delete backupIdInc3 done"); + backups = admin.getBackupAdmin().getHistory(100).size(); + assertEquals(totalBackups - 4, backups); + LOG.error("Delete backupIdFull"); + admin.getBackupAdmin().deleteBackups( new String[]{backupIdFull}); + LOG.error("Delete backupIdFull done"); + backups = admin.getBackupAdmin().getHistory(100).size(); + + assertEquals(totalBackups - 7, backups); + + admin.close(); + conn.close(); + } + +} diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupMultipleDeletesEnchanced.java hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupMultipleDeletesEnchanced.java new file mode 100644 index 0000000..8f4ad30 --- /dev/null +++ hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupMultipleDeletesEnchanced.java @@ -0,0 +1,187 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hbase.backup; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + +import java.util.HashSet; +import java.util.List; +import java.util.Set; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.client.ConnectionFactory; +import org.apache.hadoop.hbase.client.HBaseAdmin; +import org.apache.hadoop.hbase.client.HTable; +import org.apache.hadoop.hbase.client.Put; +import org.apache.hadoop.hbase.testclassification.LargeTests; +import org.apache.hadoop.hbase.util.Bytes; +import org.hamcrest.CoreMatchers; +import org.junit.Assert; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +import com.google.common.collect.Lists; + + +/** + * Create multiple backups for two tables: table1, table2 + * then perform 3 deletes + */ +@Category(LargeTests.class) +public class TestBackupMultipleDeletesEnchanced extends TestBackupBase { + private static final Log LOG = LogFactory.getLog(TestBackupMultipleDeletesEnchanced.class); + @Test + public void testBackupMultipleDeletes() throws Exception { + // #1 - create full backup for all tables + LOG.info("create full backup image for all tables"); + + List tables = Lists.newArrayList(table1, table2); + HBaseAdmin admin = null; + Connection conn = ConnectionFactory.createConnection(conf1); + admin = (HBaseAdmin) conn.getAdmin(); + + BackupRequest request = new BackupRequest(); + request.setBackupType(BackupType.FULL).setTableList(tables).setTargetRootDir(BACKUP_ROOT_DIR); + String backupIdFull = admin.getBackupAdmin().backupTables(request); + + assertTrue(checkSucceeded(backupIdFull)); + + // #2 - insert some data to table table1 + HTable t1 = (HTable) conn.getTable(table1); + Put p1; + for (int i = 0; i < NB_ROWS_IN_BATCH; i++) { + p1 = new Put(Bytes.toBytes("row-t1" + i)); + 
 p1.addColumn(famName, qualName, Bytes.toBytes("val" + i)); + t1.put(p1); + } + + Assert.assertThat(TEST_UTIL.countRows(t1), CoreMatchers.equalTo(NB_ROWS_IN_BATCH * 2)); + t1.close(); + + + // #3 - incremental backup for table1 + tables = Lists.newArrayList(table1); + request = new BackupRequest(); + request.setBackupType(BackupType.INCREMENTAL).setTableList(tables) + .setTargetRootDir(BACKUP_ROOT_DIR); + String backupIdInc1 = admin.getBackupAdmin().backupTables(request); + assertTrue(checkSucceeded(backupIdInc1)); + + // #4 - insert some data to table table2 + HTable t2 = (HTable) conn.getTable(table2); + Put p2 = null; + for (int i = 0; i < NB_ROWS_IN_BATCH; i++) { + p2 = new Put(Bytes.toBytes("row-t2" + i)); + p2.addColumn(famName, qualName, Bytes.toBytes("val" + i)); + t2.put(p2); + } + + + // #5 - incremental backup for table1, table2 + tables = Lists.newArrayList(table1, table2); + request = new BackupRequest(); + request.setBackupType(BackupType.INCREMENTAL).setTableList(tables) + .setTargetRootDir(BACKUP_ROOT_DIR); + String backupIdInc2 = admin.getBackupAdmin().backupTables(request); + assertTrue(checkSucceeded(backupIdInc2)); + + // #6 - insert some data to table table1 + t1 = (HTable) conn.getTable(table1); + for (int i = NB_ROWS_IN_BATCH; i < 2*NB_ROWS_IN_BATCH; i++) { + p1 = new Put(Bytes.toBytes("row-t1" + i)); + p1.addColumn(famName, qualName, Bytes.toBytes("val" + i)); + t1.put(p1); + } + + + // #7 - incremental backup for table1 + tables = Lists.newArrayList(table1); + request = new BackupRequest(); + request.setBackupType(BackupType.INCREMENTAL).setTableList(tables) + .setTargetRootDir(BACKUP_ROOT_DIR); + String backupIdInc3 = admin.getBackupAdmin().backupTables(request); + assertTrue(checkSucceeded(backupIdInc3)); + + // #8 - insert some data to table table2 + t2 = (HTable) conn.getTable(table2); + for (int i = NB_ROWS_IN_BATCH; i < 2*NB_ROWS_IN_BATCH; i++) { + p2 = new Put(Bytes.toBytes("row-t2" + i)); + p2.addColumn(famName, qualName,
Bytes.toBytes("val" + i)); + t2.put(p2); + } + + // #9 - incremental backup for table1, table2 + tables = Lists.newArrayList(table1, table2); + request = new BackupRequest(); + request.setBackupType(BackupType.INCREMENTAL).setTableList(tables) + .setTargetRootDir(BACKUP_ROOT_DIR); + String backupIdInc4 = admin.getBackupAdmin().backupTables(request); + assertTrue(checkSucceeded(backupIdInc4)); + // #10 full backup for table3 + tables = Lists.newArrayList(table3); + + request = new BackupRequest(); + request.setBackupType(BackupType.FULL).setTableList(tables).setTargetRootDir(BACKUP_ROOT_DIR); + String backupIdFull2 = admin.getBackupAdmin().backupTables(request); + assertTrue(checkSucceeded(backupIdFull2)); + // #11 - incremental backup for table3 + tables = Lists.newArrayList(table3); + request = new BackupRequest(); + request.setBackupType(BackupType.INCREMENTAL).setTableList(tables) + .setTargetRootDir(BACKUP_ROOT_DIR); + String backupIdInc5 = admin.getBackupAdmin().backupTables(request); + assertTrue(checkSucceeded(backupIdInc5)); + LOG.error("Delete backupIdInc2"); + admin.getBackupAdmin().deleteBackups( new String[]{backupIdInc2}); + LOG.error("Delete backupIdInc2 done"); + List list = admin.getBackupAdmin().getHistory(100); + // First check number of backup images before and after + assertEquals(4, list.size()); + // then verify that no backupIdInc2,3,4 + Set ids = new HashSet(); + ids.add(backupIdInc2); + ids.add(backupIdInc3); + ids.add(backupIdInc4); + + for(BackupInfo info: list) { + String backupId = info.getBackupId(); + if(ids.contains(backupId)) { + assertTrue(false); + } + } + // Verify that backupInc5 contains only table3 + boolean found = false; + for(BackupInfo info: list) { + String backupId = info.getBackupId(); + if(backupId.equals(backupIdInc5)) { + assertTrue(info.getTables().size() == 1); + assertEquals(table3, info.getTableNames().get(0)); + found = true; + } + } + assertTrue(found); + admin.close(); + conn.close(); + } + +}