diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/backup/BackupInfo.java hbase-client/src/main/java/org/apache/hadoop/hbase/backup/BackupInfo.java index d44ba4e..4d6b2a7 100644 --- hbase-client/src/main/java/org/apache/hadoop/hbase/backup/BackupInfo.java +++ hbase-client/src/main/java/org/apache/hadoop/hbase/backup/BackupInfo.java @@ -32,6 +32,7 @@ import java.util.Set; import org.apache.commons.lang.StringUtils; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.backup.BackupType; import org.apache.hadoop.hbase.backup.util.BackupClientUtil; @@ -112,11 +113,11 @@ public class BackupInfo implements Comparable { private long bandwidth = -1; public BackupInfo() { + backupStatusMap = new HashMap(); } public BackupInfo(String backupId, BackupType type, TableName[] tables, String targetRootDir) { - backupStatusMap = new HashMap(); - + this(); this.backupId = backupId; this.type = type; this.targetRootDir = targetRootDir; @@ -467,6 +468,5 @@ public class BackupInfo implements Comparable { new Long(o.getBackupId().substring(o.getBackupId().lastIndexOf("_") + 1)); return thisTS.compareTo(otherTS); } - - + } diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupCommands.java hbase-client/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupCommands.java index 63f7a55..d92614c 100644 --- hbase-client/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupCommands.java +++ hbase-client/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupCommands.java @@ -25,6 +25,7 @@ import org.apache.commons.cli.CommandLine; import org.apache.commons.lang.StringUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configured; +import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.backup.BackupInfo; @@ -61,10 +62,11 @@ public final class BackupCommands { + "Enter \'help COMMAND\' to see help message for each command\n"; private static final String CREATE_CMD_USAGE = - "Usage: hbase backup create [tables] [-s name] [-convert] " - + "[-silent] [-w workers][-b bandwith]\n" + " type \"full\" to create a full backup image;\n" + "Usage: hbase backup create [tables] [-s name] [-convert] " + + "[-silent] [-w workers] [-b bandwidth]\n" + + " type \"full\" to create a full backup image;\n" + " \"incremental\" to create an incremental backup image\n" - + " backup_root_path The full root path to store the backup image,\n" + + " BACKUP_ROOT The full root path to store the backup image,\n" + " the prefix can be hdfs, webhdfs or gpfs\n" + " Options:\n" + " tables If no tables (\"\") are specified, all tables are backed up. 
" + "Otherwise it is a\n" + " comma separated list of tables.\n" @@ -78,8 +80,12 @@ public final class BackupCommands { private static final String DESCRIBE_CMD_USAGE = "Usage: hbase backup decsribe \n" + " backupId backup image id\n"; - private static final String HISTORY_CMD_USAGE = "Usage: hbase backup history [-n N]\n" - + " -n N show up to N last backup sessions, default - 10;\n"; + private static final String HISTORY_CMD_USAGE = + "Usage: hbase backup history [-path BACKUP_ROOT] [-n N] [-t table]\n" + + " -n N show up to N last backup sessions, default - 10;\n" + + " -path backup root path;\n" + + " -t table name; "; + private static final String DELETE_CMD_USAGE = "Usage: hbase backup delete \n" + " backupId backup image id;\n"; @@ -396,14 +402,40 @@ public final class BackupCommands { public void execute() throws IOException { int n = parseHistoryLength(); + TableName tableName = getTableName(); + Path backupRootPath = getBackupRootPath(); + List history = null; Configuration conf = getConf() != null? getConf(): HBaseConfiguration.create(); - try(final Connection conn = ConnectionFactory.createConnection(conf); + if(backupRootPath == null) { + // Load from hbase:backup + try(final Connection conn = ConnectionFactory.createConnection(conf); final BackupAdmin admin = conn.getAdmin().getBackupAdmin();){ - List history = admin.getHistory(n); - for(BackupInfo info: history){ - System.out.println(info.getShortDescription()); - } - } + history = admin.getHistory(n, tableName); + } + } else { + // load from backup FS + history = BackupClientUtil.getHistory(conf, n, tableName, backupRootPath); + } + for(BackupInfo info: history){ + System.out.println(info.getShortDescription()); + } + } + + private Path getBackupRootPath() { + String value = cmdline.getOptionValue("t"); + if (value == null) return null; + return new Path(value); + } + + private TableName getTableName() { + String value = cmdline.getOptionValue("t"); + if (value == null) return null; + try{ + return TableName.valueOf(value); + } catch (IllegalArgumentException e){ + System.out.println("Illegal argument: "+ value); + return null; + } } private int parseHistoryLength() { diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManifest.java hbase-client/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManifest.java new file mode 100644 index 0000000..19236b6 --- /dev/null +++ hbase-client/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManifest.java @@ -0,0 +1,783 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hbase.backup.impl; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Map.Entry; +import java.util.TreeMap; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FSDataInputStream; +import org.apache.hadoop.fs.FSDataOutputStream; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.backup.BackupInfo; +import org.apache.hadoop.hbase.backup.BackupType; +import org.apache.hadoop.hbase.backup.util.BackupClientUtil; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; +import org.apache.hadoop.hbase.exceptions.DeserializationException; +import org.apache.hadoop.hbase.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.protobuf.generated.BackupProtos; +import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos; + +import com.google.protobuf.InvalidProtocolBufferException; + + +/** + * Backup manifest contains all the metadata of a backup image. The manifest info is bundled + * as a manifest file together with the data, so that each backup image contains all the info + * needed for restore. + */ +@InterfaceAudience.Private +@InterfaceStability.Evolving +public class BackupManifest { + + private static final Log LOG = LogFactory.getLog(BackupManifest.class); + + // manifest file name + public static final String MANIFEST_FILE_NAME = ".backup.manifest"; + + // manifest file version, current is 1.0 + public static final String MANIFEST_VERSION = "1.0"; + + // backup image, the dependency graph is made up of a series of backup images + + public static class BackupImage implements Comparable { + + private String backupId; + private BackupType type; + private String rootDir; + private List tableList; + private long startTs; + private long completeTs; + private ArrayList ancestors; + + public BackupImage() { + super(); + } + + public BackupImage(String backupId, BackupType type, String rootDir, + List tableList, long startTs, long completeTs) { + this.backupId = backupId; + this.type = type; + this.rootDir = rootDir; + this.tableList = tableList; + this.startTs = startTs; + this.completeTs = completeTs; + } + + static BackupImage fromProto(BackupProtos.BackupImage im) { + String backupId = im.getBackupId(); + String rootDir = im.getRootDir(); + long startTs = im.getStartTs(); + long completeTs = im.getCompleteTs(); + List tableListList = im.getTableListList(); + List tableList = new ArrayList(); + for(HBaseProtos.TableName tn : tableListList) { + tableList.add(ProtobufUtil.toTableName(tn)); + } + + List ancestorList = im.getAncestorsList(); + + BackupType type = + im.getBackupType() == BackupProtos.BackupType.FULL ? 
BackupType.FULL: + BackupType.INCREMENTAL; + + BackupImage image = new BackupImage(backupId, type, rootDir, tableList, startTs, completeTs); + for(BackupProtos.BackupImage img: ancestorList) { + image.addAncestor(fromProto(img)); + } + return image; + } + + BackupProtos.BackupImage toProto() { + BackupProtos.BackupImage.Builder builder = BackupProtos.BackupImage.newBuilder(); + builder.setBackupId(backupId); + builder.setCompleteTs(completeTs); + builder.setStartTs(startTs); + builder.setRootDir(rootDir); + if (type == BackupType.FULL) { + builder.setBackupType(BackupProtos.BackupType.FULL); + } else{ + builder.setBackupType(BackupProtos.BackupType.INCREMENTAL); + } + + for (TableName name: tableList) { + builder.addTableList(ProtobufUtil.toProtoTableName(name)); + } + + if (ancestors != null){ + for (BackupImage im: ancestors){ + builder.addAncestors(im.toProto()); + } + } + + return builder.build(); + } + + public String getBackupId() { + return backupId; + } + + public void setBackupId(String backupId) { + this.backupId = backupId; + } + + public BackupType getType() { + return type; + } + + public void setType(BackupType type) { + this.type = type; + } + + public String getRootDir() { + return rootDir; + } + + public void setRootDir(String rootDir) { + this.rootDir = rootDir; + } + + public List getTableNames() { + return tableList; + } + + public void setTableList(List tableList) { + this.tableList = tableList; + } + + public long getStartTs() { + return startTs; + } + + public void setStartTs(long startTs) { + this.startTs = startTs; + } + + public long getCompleteTs() { + return completeTs; + } + + public void setCompleteTs(long completeTs) { + this.completeTs = completeTs; + } + + public ArrayList getAncestors() { + if (this.ancestors == null) { + this.ancestors = new ArrayList(); + } + return this.ancestors; + } + + public void addAncestor(BackupImage backupImage) { + this.getAncestors().add(backupImage); + } + + public boolean hasAncestor(String token) { + for (BackupImage image : this.getAncestors()) { + if (image.getBackupId().equals(token)) { + return true; + } + } + return false; + } + + public boolean hasTable(TableName table) { + for (TableName t : tableList) { + if (t.equals(table)) { + return true; + } + } + return false; + } + + @Override + public int compareTo(BackupImage other) { + String thisBackupId = this.getBackupId(); + String otherBackupId = other.getBackupId(); + Long thisTS = new Long(thisBackupId.substring(thisBackupId.lastIndexOf("_") + 1)); + Long otherTS = new Long(otherBackupId.substring(otherBackupId.lastIndexOf("_") + 1)); + return thisTS.compareTo(otherTS); + } + } + + // manifest version + private String version = MANIFEST_VERSION; + + // hadoop hbase configuration + protected Configuration config = null; + + // backup root directory + private String rootDir = null; + + // backup image directory + private String tableBackupDir = null; + + // backup log directory if this is an incremental backup + private String logBackupDir = null; + + // backup token + private String backupId; + + // backup type, full or incremental + private BackupType type; + + // the table list for the backup + private ArrayList tableList; + + // actual start timestamp of the backup process + private long startTs; + + // actual complete timestamp of the backup process + private long completeTs; + + // the region server timestamp for tables: + // > + private Map> incrTimeRanges; + + // dependency of this backup, including all the dependent images to do PIT recovery + private Map 
dependency; + + /** + * Construct manifest for an ongoing backup. + * @param backupCtx The ongoing backup context + */ + public BackupManifest(BackupInfo backupCtx) { + this.backupId = backupCtx.getBackupId(); + this.type = backupCtx.getType(); + this.rootDir = backupCtx.getTargetRootDir(); + if (this.type == BackupType.INCREMENTAL) { + this.logBackupDir = backupCtx.getHLogTargetDir(); + } + this.startTs = backupCtx.getStartTs(); + this.completeTs = backupCtx.getEndTs(); + this.loadTableList(backupCtx.getTableNames()); + } + + + /** + * Construct a table level manifest for a backup of the named table. + * @param backupCtx The ongoing backup context + */ + public BackupManifest(BackupInfo backupCtx, TableName table) { + this.backupId = backupCtx.getBackupId(); + this.type = backupCtx.getType(); + this.rootDir = backupCtx.getTargetRootDir(); + this.tableBackupDir = backupCtx.getBackupStatus(table).getTargetDir(); + if (this.type == BackupType.INCREMENTAL) { + this.logBackupDir = backupCtx.getHLogTargetDir(); + } + this.startTs = backupCtx.getStartTs(); + this.completeTs = backupCtx.getEndTs(); + List tables = new ArrayList(); + tables.add(table); + this.loadTableList(tables); + } + + /** + * Construct manifest from a backup directory. + * @param conf configuration + * @param backupPath backup path + * @throws IOException + */ + + public BackupManifest(Configuration conf, Path backupPath) throws IOException { + this(backupPath.getFileSystem(conf), backupPath); + } + + /** + * Construct manifest from a backup directory. + * @param fs the filesystem holding the backup path + * @param backupPath backup path + * @throws BackupException exception + */ + + public BackupManifest(FileSystem fs, Path backupPath) throws BackupException { + if (LOG.isDebugEnabled()) { + LOG.debug("Loading manifest from: " + backupPath.toString()); + } + // The input backupDir may not exactly be the backup table dir. + // It could be the backup log dir where there is also a manifest file stored. + // This variable's purpose is to keep the correct and original location so + // that we can store/persist it. + this.tableBackupDir = backupPath.toString(); + this.config = fs.getConf(); + try { + + FileStatus[] subFiles = BackupClientUtil.listStatus(fs, backupPath, null); + if (subFiles == null) { + String errorMsg = backupPath.toString() + " does not exist"; + LOG.error(errorMsg); + throw new IOException(errorMsg); + } + for (FileStatus subFile : subFiles) { + if (subFile.getPath().getName().equals(MANIFEST_FILE_NAME)) { + + // load and set manifest field from file content + FSDataInputStream in = fs.open(subFile.getPath()); + long len = subFile.getLen(); + byte[] pbBytes = new byte[(int) len]; + in.readFully(pbBytes); + BackupProtos.BackupManifest proto = null; + try{ + proto = parseFrom(pbBytes); + } catch(Exception e){ + throw new BackupException(e); + } + this.version = proto.getVersion(); + this.backupId = proto.getBackupId(); + this.type = BackupType.valueOf(proto.getType().name()); + // Here the parameter backupDir is where the manifest file is. 
+ // There should always be a manifest file under: + // backupRootDir/namespace/table/backupId/.backup.manifest + this.rootDir = backupPath.getParent().getParent().getParent().toString(); + + Path p = backupPath.getParent(); + if (p.getName().equals(HConstants.HREGION_LOGDIR_NAME)) { + this.rootDir = p.getParent().toString(); + } else { + this.rootDir = p.getParent().getParent().toString(); + } + + loadTableList(proto); + this.startTs = proto.getStartTs(); + this.completeTs = proto.getCompleteTs(); + loadIncrementalTimestampMap(proto); + loadDependency(proto); + //TODO: merge will be implemented by future jira + LOG.debug("Loaded manifest instance from manifest file: " + + BackupClientUtil.getPath(subFile.getPath())); + return; + } + } + String errorMsg = "No manifest file found in: " + backupPath.toString(); + throw new IOException(errorMsg); + + } catch (IOException e) { + throw new BackupException(e); + } + } + + private void loadIncrementalTimestampMap(BackupProtos.BackupManifest proto) { + List list = proto.getTstMapList(); + if(list == null || list.size() == 0) return; + this.incrTimeRanges = new HashMap>(); + for(BackupProtos.TableServerTimestamp tst: list){ + TableName tn = ProtobufUtil.toTableName(tst.getTable()); + HashMap map = this.incrTimeRanges.get(tn); + if(map == null){ + map = new HashMap(); + this.incrTimeRanges.put(tn, map); + } + List listSt = tst.getServerTimestampList(); + for(BackupProtos.ServerTimestamp stm: listSt) { + map.put(stm.getServer(), stm.getTimestamp()); + } + } + } + + private void loadDependency(BackupProtos.BackupManifest proto) { + if(LOG.isDebugEnabled()) { + LOG.debug("load dependency for: "+proto.getBackupId()); + } + + dependency = new HashMap(); + List list = proto.getDependentBackupImageList(); + for (BackupProtos.BackupImage im : list) { + BackupImage bim = BackupImage.fromProto(im); + if(im.getBackupId() != null){ + dependency.put(im.getBackupId(), bim); + } else{ + LOG.warn("Load dependency for backup manifest: "+ backupId+ + ". Null backup id in dependent image"); + } + } + } + + private void loadTableList(BackupProtos.BackupManifest proto) { + this.tableList = new ArrayList(); + List list = proto.getTableListList(); + for (HBaseProtos.TableName name: list) { + this.tableList.add(ProtobufUtil.toTableName(name)); + } + } + + public BackupType getType() { + return type; + } + + public void setType(BackupType type) { + this.type = type; + } + + /** + * Loads table list. + * @param tableList Table list + */ + private void loadTableList(List tableList) { + + this.tableList = this.getTableList(); + if (this.tableList.size() > 0) { + this.tableList.clear(); + } + for (int i = 0; i < tableList.size(); i++) { + this.tableList.add(tableList.get(i)); + } + + LOG.debug(tableList.size() + " tables exist in table set."); + } + + /** + * Get the table set of this image. + * @return The table set list + */ + public ArrayList getTableList() { + if (this.tableList == null) { + this.tableList = new ArrayList(); + } + return this.tableList; + } + + /** + * Persist the manifest file. + * @throws BackupException if storing the manifest file fails. + */ + + public void store(Configuration conf) throws BackupException { + byte[] data = toByteArray(); + + // write the file, overwrite if it already exists + Path manifestFilePath = + new Path(new Path((this.tableBackupDir != null ? 
this.tableBackupDir : this.logBackupDir)) + ,MANIFEST_FILE_NAME); + try { + FSDataOutputStream out = + manifestFilePath.getFileSystem(conf).create(manifestFilePath, true); + out.write(data); + out.close(); + } catch (IOException e) { + throw new BackupException(e); + } + + LOG.info("Manifest file stored to " + manifestFilePath); + } + + /** + * Protobuf serialization + * @return The manifest serialized using pb + */ + public byte[] toByteArray() { + BackupProtos.BackupManifest.Builder builder = BackupProtos.BackupManifest.newBuilder(); + builder.setVersion(this.version); + builder.setBackupId(this.backupId); + builder.setType(BackupProtos.BackupType.valueOf(this.type.name())); + setTableList(builder); + builder.setStartTs(this.startTs); + builder.setCompleteTs(this.completeTs); + setIncrementalTimestampMap(builder); + setDependencyMap(builder); + return builder.build().toByteArray(); + } + + private void setIncrementalTimestampMap(BackupProtos.BackupManifest.Builder builder) { + if (this.incrTimeRanges == null) { + return; + } + for (Entry> entry: this.incrTimeRanges.entrySet()) { + TableName key = entry.getKey(); + HashMap value = entry.getValue(); + BackupProtos.TableServerTimestamp.Builder tstBuilder = + BackupProtos.TableServerTimestamp.newBuilder(); + tstBuilder.setTable(ProtobufUtil.toProtoTableName(key)); + + for (String s : value.keySet()) { + BackupProtos.ServerTimestamp.Builder stBuilder = BackupProtos.ServerTimestamp.newBuilder(); + stBuilder.setServer(s); + stBuilder.setTimestamp(value.get(s)); + tstBuilder.addServerTimestamp(stBuilder.build()); + } + builder.addTstMap(tstBuilder.build()); + } + } + + private void setDependencyMap(BackupProtos.BackupManifest.Builder builder) { + for (BackupImage image: getDependency().values()) { + builder.addDependentBackupImage(image.toProto()); + } + } + + private void setTableList(BackupProtos.BackupManifest.Builder builder) { + for(TableName name: tableList){ + builder.addTableList(ProtobufUtil.toProtoTableName(name)); + } + } + + /** + * Parse protobuf from byte array + * @param pbBytes A pb serialized BackupManifest instance + * @return An instance made from bytes + * @throws DeserializationException + */ + private static BackupProtos.BackupManifest parseFrom(final byte[] pbBytes) + throws DeserializationException { + BackupProtos.BackupManifest proto; + try { + proto = BackupProtos.BackupManifest.parseFrom(pbBytes); + } catch (InvalidProtocolBufferException e) { + throw new DeserializationException(e); + } + return proto; + } + + /** + * Get manifest file version + * @return version + */ + public String getVersion() { + return version; + } + + /** + * Get this backup image. + * @return the backup image. + */ + public BackupImage getBackupImage() { + return this.getDependency().get(this.backupId); + } + + /** + * Add dependent backup image for this backup. + * @param image The direct dependent backup image + */ + public void addDependentImage(BackupImage image) { + this.getDependency().get(this.backupId).addAncestor(image); + this.setDependencyMap(this.getDependency(), image); + } + + + + /** + * Get all dependent backup images. The image of this backup is also contained. 
+ * @return The dependent backup images map + */ + public Map getDependency() { + if (this.dependency == null) { + this.dependency = new HashMap(); + LOG.debug(this.rootDir + " " + this.backupId + " " + this.type); + this.dependency.put(this.backupId, + new BackupImage(this.backupId, this.type, this.rootDir, tableList, this.startTs, + this.completeTs)); + } + return this.dependency; + } + + /** + * Set the incremental timestamp map directly. + * @param incrTimestampMap timestamp map + */ + public void setIncrTimestampMap(HashMap> incrTimestampMap) { + this.incrTimeRanges = incrTimestampMap; + } + + + public Map> getIncrTimestampMap() { + if (this.incrTimeRanges == null) { + this.incrTimeRanges = new HashMap>(); + } + return this.incrTimeRanges; + } + + + /** + * Get the image list of this backup for restore in time order. + * @param reverse If true, then output in reverse order, otherwise in time order from old to new + * @return the backup image list for restore in time order + */ + public ArrayList getRestoreDependentList(boolean reverse) { + TreeMap restoreImages = new TreeMap(); + for (BackupImage image : this.getDependency().values()) { + restoreImages.put(Long.valueOf(image.startTs), image); + } + return new ArrayList(reverse ? (restoreImages.descendingMap().values()) + : (restoreImages.values())); + } + + /** + * Get the dependent image list for a specific table of this backup in time order from old to new, + * as needed to restore to this backup image level. + * @param table table + * @return the backup image list for a table in time order + */ + public ArrayList getDependentListByTable(TableName table) { + ArrayList tableImageList = new ArrayList(); + ArrayList imageList = getRestoreDependentList(true); + for (BackupImage image : imageList) { + if (image.hasTable(table)) { + tableImageList.add(image); + if (image.getType() == BackupType.FULL) { + break; + } + } + } + Collections.reverse(tableImageList); + return tableImageList; + } + + /** + * Get the full dependent image list in the whole dependency scope for a specific table of this + * backup in time order from old to new. + * @param table table + * @return the full backup image list for a table in time order in the whole scope of the + * dependency of this image + */ + public ArrayList getAllDependentListByTable(TableName table) { + ArrayList tableImageList = new ArrayList(); + ArrayList imageList = getRestoreDependentList(false); + for (BackupImage image : imageList) { + if (image.hasTable(table)) { + tableImageList.add(image); + } + } + return tableImageList; + } + + + /** + * Recursively set the dependency map of the backup images. + * @param map The dependency map + * @param image The backup image + */ + private void setDependencyMap(Map map, BackupImage image) { + if (image == null) { + return; + } else { + map.put(image.getBackupId(), image); + for (BackupImage img : image.getAncestors()) { + setDependencyMap(map, img); + } + } + } + + /** + * Check whether backup image1 could cover backup image2 or not. + * @param image1 backup image 1 + * @param image2 backup image 2 + * @return true if image1 can cover image2, otherwise false + */ + public static boolean canCoverImage(BackupImage image1, BackupImage image2) { + // image1 can cover image2 only when the following conditions are satisfied: + // - image1 must not be an incremental image; + // - image1 must be taken after image2 has been taken; + // - table set of image1 must cover the table set of image2. 
+ if (image1.getType() == BackupType.INCREMENTAL) { + return false; + } + if (image1.getStartTs() < image2.getStartTs()) { + return false; + } + List image1TableList = image1.getTableNames(); + List image2TableList = image2.getTableNames(); + boolean found = false; + for (int i = 0; i < image2TableList.size(); i++) { + found = false; + for (int j = 0; j < image1TableList.size(); j++) { + if (image2TableList.get(i).equals(image1TableList.get(j))) { + found = true; + break; + } + } + if (!found) { + return false; + } + } + + LOG.debug("Backup image " + image1.getBackupId() + " can cover " + image2.getBackupId()); + return true; + } + + /** + * Check whether backup image set could cover a backup image or not. + * @param fullImages The backup image set + * @param image The target backup image + * @return true if fullImages can cover image, otherwise false + */ + public static boolean canCoverImage(ArrayList fullImages, BackupImage image) { + // fullImages can cover image only when the following conditions are satisfied: + // - each image of fullImages must not be an incremental image; + // - each image of fullImages must be taken after image has been taken; + // - sum table set of fullImages must cover the table set of image. + for (BackupImage image1 : fullImages) { + if (image1.getType() == BackupType.INCREMENTAL) { + return false; + } + if (image1.getStartTs() < image.getStartTs()) { + return false; + } + } + + ArrayList image1TableList = new ArrayList(); + for (BackupImage image1 : fullImages) { + List tableList = image1.getTableNames(); + for (TableName table : tableList) { + image1TableList.add(table.getNameAsString()); + } + } + ArrayList image2TableList = new ArrayList(); + List tableList = image.getTableNames(); + for (TableName table : tableList) { + image2TableList.add(table.getNameAsString()); + } + + for (int i = 0; i < image2TableList.size(); i++) { + if (!image1TableList.contains(image2TableList.get(i))) { + return false; + } + } + + LOG.debug("Full image set can cover image " + image.getBackupId()); + return true; + } + + public BackupInfo toBackupInfo() + { + BackupInfo info = new BackupInfo(); + info.setType(type); + TableName[] tables = new TableName[tableList.size()]; + info.addTables(getTableList().toArray(tables)); + info.setBackupId(backupId); + info.setStartTs(startTs); + info.setTargetRootDir(rootDir); + if(type == BackupType.INCREMENTAL) { + info.setHlogTargetDir(logBackupDir); + } + return info; + } +} diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java hbase-client/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java index d6be98c..ccb1894 100644 --- hbase-client/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java +++ hbase-client/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java @@ -280,6 +280,19 @@ public final class BackupSystemTable implements Closeable { return getBackupHistory(false); } + public ArrayList getBackupHistoryForTable(TableName table) throws IOException { + ArrayList history = getBackupHistory(); + ArrayList list = new ArrayList(); + + for(int i=0; i < history.size(); i++){ + BackupInfo info = history.get(i); + if(info.getTableNames().contains(table)){ + list.add(info); + } + } + return list; + } + /** * Get all backup session with a given status (in desc order by time) * @param status status diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/backup/util/BackupClientUtil.java 
hbase-client/src/main/java/org/apache/hadoop/hbase/backup/util/BackupClientUtil.java index 20abba3..28629b3 100644 --- hbase-client/src/main/java/org/apache/hadoop/hbase/backup/util/BackupClientUtil.java +++ hbase-client/src/main/java/org/apache/hadoop/hbase/backup/util/BackupClientUtil.java @@ -23,6 +23,7 @@ import java.io.IOException; import java.net.URLDecoder; import java.util.ArrayList; import java.util.Collections; +import java.util.Comparator; import java.util.HashMap; import java.util.Iterator; import java.util.List; @@ -41,6 +42,7 @@ import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.backup.BackupInfo; +import org.apache.hadoop.hbase.backup.impl.BackupManifest; import org.apache.hadoop.hbase.backup.impl.BackupRestoreConstants; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.classification.InterfaceStability; @@ -365,4 +367,80 @@ public final class BackupClientUtil { + HConstants.HREGION_LOGDIR_NAME; } + public static List getHistory(Configuration conf, Path backupRootPath) + throws IOException { + // Get all (n) history from backup root destination + FileSystem fs = backupRootPath.getFileSystem(conf); + RemoteIterator it = fs.listLocatedStatus(backupRootPath); + + List infos = new ArrayList(); + while (it.hasNext()) { + LocatedFileStatus lfs = it.next(); + if(!lfs.isDirectory()) continue; + if(!isBackupDirectory(lfs)) continue; + String backupId = lfs.getPath().getName(); + infos.add(loadBackupInfo(backupRootPath, backupId, fs)); + } + // Sort in descending order by timestamp (newest first) + Collections.sort(infos, new Comparator() { + + @Override + public int compare(BackupInfo o1, BackupInfo o2) { + long ts1 = getTimestamp(o1.getBackupId()); + long ts2 = getTimestamp(o2.getBackupId()); + if (ts1 == ts2) return 0; + return ts1 < ts2 ? 
1 : -1; + } + + private long getTimestamp(String backupId) { + String[] split = backupId.split("_"); + return Long.parseLong(split[1]); + } + }); + return infos; + } + + public static List getHistory(Configuration conf, int n, + TableName name, Path backupRootPath) throws IOException + { + List infos = getHistory(conf, backupRootPath); + if (name == null) { + if(infos.size() <= n) return infos; + return infos.subList(0, n); + } else { + List ret = new ArrayList(); + int count = 0; + for(BackupInfo info: infos) { + List names = info.getTableNames(); + if(names.contains(name)) { + ret.add(info); + if(++count == n) { + break; + } + } + } + return ret; + } + } + + private static boolean isBackupDirectory(LocatedFileStatus lfs) { + return lfs.getPath().getName().startsWith(BackupRestoreConstants.BACKUPID_PREFIX); + } + + public static BackupInfo loadBackupInfo(Path backupRootPath, String backupId, + FileSystem fs) throws IOException { + Path backupPath = new Path(backupRootPath, backupId); + + RemoteIterator it = fs.listFiles(backupPath, true); + while(it.hasNext()) { + LocatedFileStatus lfs = it.next(); + if(lfs.getPath().getName().equals(BackupManifest.MANIFEST_FILE_NAME)) { + // Load BackupManifest + BackupManifest manifest = new BackupManifest(fs, lfs.getPath().getParent()); + BackupInfo info = manifest.toBackupInfo(); + return info; + } + } + return null; + } } diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/client/BackupAdmin.java hbase-client/src/main/java/org/apache/hadoop/hbase/client/BackupAdmin.java index 7a411cb..4134cc8 100644 --- hbase-client/src/main/java/org/apache/hadoop/hbase/client/BackupAdmin.java +++ hbase-client/src/main/java/org/apache/hadoop/hbase/client/BackupAdmin.java @@ -23,6 +23,7 @@ import java.io.IOException; import java.util.List; import java.util.concurrent.Future; +import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.backup.BackupInfo; import org.apache.hadoop.hbase.backup.BackupRequest; @@ -104,6 +105,17 @@ public interface BackupAdmin extends Closeable{ */ public List getHistory(int n) throws IOException; + + /** + * Show backup history for a table + * @param n - last n backup sessions + * @param name - table's name + * @return list of backup infos + * @throws IOException exception + */ + public List getHistory(int n, TableName name) throws IOException; + + /** + * Backup sets list command - list all backup sets. Backup set is + * a named group of tables. 
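For reference, a minimal usage sketch of the new table-filtered history API declared above, BackupAdmin.getHistory(int, TableName). This is not part of the patch: it assumes a reachable cluster via HBaseConfiguration.create(), and the table name "t1" is hypothetical. The Admin-to-BackupAdmin wiring mirrors what HistoryCommand does in BackupCommands.java.

import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.backup.BackupInfo;
import org.apache.hadoop.hbase.client.BackupAdmin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class BackupHistorySketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         BackupAdmin admin = conn.getAdmin().getBackupAdmin()) {
      // Last 10 backup sessions that include the hypothetical table "t1",
      // answered from the hbase:backup system table (the HBaseBackupAdmin path).
      List<BackupInfo> history = admin.getHistory(10, TableName.valueOf("t1"));
      for (BackupInfo info : history) {
        System.out.println(info.getShortDescription());
      }
    }
  }
}
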
diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseBackupAdmin.java hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseBackupAdmin.java index 81413c6..6d11e69 100644 --- hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseBackupAdmin.java +++ hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseBackupAdmin.java @@ -25,6 +25,7 @@ import java.util.concurrent.Future; import org.apache.commons.lang.StringUtils; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.backup.BackupInfo; import org.apache.hadoop.hbase.backup.BackupInfo.BackupState; @@ -139,6 +140,16 @@ public class HBaseBackupAdmin implements BackupAdmin { } @Override + public List getHistory(int n, TableName name) throws IOException { + if(name == null) return getHistory(n); + try (final BackupSystemTable table = new BackupSystemTable(conn)) { + List history = table.getBackupHistoryForTable(name); + n = Math.min(n, history.size()); + return history.subList(0, n); + } + } + + @Override public List listBackupSets() throws IOException { try (final BackupSystemTable table = new BackupSystemTable(conn)) { List list = table.listBackupSets(); diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupDriver.java hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupDriver.java index f692bd0..8eb2ff8 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupDriver.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupDriver.java @@ -56,6 +56,8 @@ public class BackupDriver extends AbstractHBaseTool { addOptWithArg("w", "Number of workers"); addOptWithArg("n", "History length"); addOptWithArg("set", "Backup set name"); + addOptWithArg("path", "Backup destination root directory path"); + // disable irrelevant loggers to avoid it mess up command output LogUtils.disableUselessLoggers(LOG); diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManifest.java hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManifest.java deleted file mode 100644 index 007b226..0000000 --- hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManifest.java +++ /dev/null @@ -1,758 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.hbase.backup.impl; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.Collections; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.Map.Entry; -import java.util.TreeMap; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.FSDataInputStream; -import org.apache.hadoop.fs.FSDataOutputStream; -import org.apache.hadoop.fs.FileStatus; -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.backup.BackupInfo; -import org.apache.hadoop.hbase.backup.BackupType; -import org.apache.hadoop.hbase.backup.util.BackupClientUtil; -import org.apache.hadoop.hbase.classification.InterfaceAudience; -import org.apache.hadoop.hbase.classification.InterfaceStability; -import org.apache.hadoop.hbase.exceptions.DeserializationException; -import org.apache.hadoop.hbase.protobuf.ProtobufUtil; -import org.apache.hadoop.hbase.protobuf.generated.BackupProtos; -import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos; - -import com.google.protobuf.InvalidProtocolBufferException; - - -/** - * Backup manifest Contains all the meta data of a backup image. The manifest info will be bundled - * as manifest file together with data. So that each backup image will contain all the info needed - * for restore. - */ -@InterfaceAudience.Private -@InterfaceStability.Evolving -public class BackupManifest { - - private static final Log LOG = LogFactory.getLog(BackupManifest.class); - - // manifest file name - public static final String MANIFEST_FILE_NAME = ".backup.manifest"; - - // manifest file version, current is 1.0 - public static final String MANIFEST_VERSION = "1.0"; - - // backup image, the dependency graph is made up by series of backup images - - public static class BackupImage implements Comparable { - - private String backupId; - private BackupType type; - private String rootDir; - private List tableList; - private long startTs; - private long completeTs; - private ArrayList ancestors; - - public BackupImage() { - super(); - } - - public BackupImage(String backupId, BackupType type, String rootDir, - List tableList, long startTs, long completeTs) { - this.backupId = backupId; - this.type = type; - this.rootDir = rootDir; - this.tableList = tableList; - this.startTs = startTs; - this.completeTs = completeTs; - } - - static BackupImage fromProto(BackupProtos.BackupImage im) { - String backupId = im.getBackupId(); - String rootDir = im.getRootDir(); - long startTs = im.getStartTs(); - long completeTs = im.getCompleteTs(); - List tableListList = im.getTableListList(); - List tableList = new ArrayList(); - for(HBaseProtos.TableName tn : tableListList) { - tableList.add(ProtobufUtil.toTableName(tn)); - } - - List ancestorList = im.getAncestorsList(); - - BackupType type = - im.getBackupType() == BackupProtos.BackupType.FULL ? 
BackupType.FULL: - BackupType.INCREMENTAL; - - BackupImage image = new BackupImage(backupId, type, rootDir, tableList, startTs, completeTs); - for(BackupProtos.BackupImage img: ancestorList) { - image.addAncestor(fromProto(img)); - } - return image; - } - - BackupProtos.BackupImage toProto() { - BackupProtos.BackupImage.Builder builder = BackupProtos.BackupImage.newBuilder(); - builder.setBackupId(backupId); - builder.setCompleteTs(completeTs); - builder.setStartTs(startTs); - builder.setRootDir(rootDir); - if (type == BackupType.FULL) { - builder.setBackupType(BackupProtos.BackupType.FULL); - } else{ - builder.setBackupType(BackupProtos.BackupType.INCREMENTAL); - } - - for (TableName name: tableList) { - builder.addTableList(ProtobufUtil.toProtoTableName(name)); - } - - if (ancestors != null){ - for (BackupImage im: ancestors){ - builder.addAncestors(im.toProto()); - } - } - - return builder.build(); - } - - public String getBackupId() { - return backupId; - } - - public void setBackupId(String backupId) { - this.backupId = backupId; - } - - public BackupType getType() { - return type; - } - - public void setType(BackupType type) { - this.type = type; - } - - public String getRootDir() { - return rootDir; - } - - public void setRootDir(String rootDir) { - this.rootDir = rootDir; - } - - public List getTableNames() { - return tableList; - } - - public void setTableList(List tableList) { - this.tableList = tableList; - } - - public long getStartTs() { - return startTs; - } - - public void setStartTs(long startTs) { - this.startTs = startTs; - } - - public long getCompleteTs() { - return completeTs; - } - - public void setCompleteTs(long completeTs) { - this.completeTs = completeTs; - } - - public ArrayList getAncestors() { - if (this.ancestors == null) { - this.ancestors = new ArrayList(); - } - return this.ancestors; - } - - public void addAncestor(BackupImage backupImage) { - this.getAncestors().add(backupImage); - } - - public boolean hasAncestor(String token) { - for (BackupImage image : this.getAncestors()) { - if (image.getBackupId().equals(token)) { - return true; - } - } - return false; - } - - public boolean hasTable(TableName table) { - for (TableName t : tableList) { - if (t.equals(table)) { - return true; - } - } - return false; - } - - @Override - public int compareTo(BackupImage other) { - String thisBackupId = this.getBackupId(); - String otherBackupId = other.getBackupId(); - Long thisTS = new Long(thisBackupId.substring(thisBackupId.lastIndexOf("_") + 1)); - Long otherTS = new Long(otherBackupId.substring(otherBackupId.lastIndexOf("_") + 1)); - return thisTS.compareTo(otherTS); - } - } - - // manifest version - private String version = MANIFEST_VERSION; - - // hadoop hbase configuration - protected Configuration config = null; - - // backup root directory - private String rootDir = null; - - // backup image directory - private String tableBackupDir = null; - - // backup log directory if this is an incremental backup - private String logBackupDir = null; - - // backup token - private String backupId; - - // backup type, full or incremental - private BackupType type; - - // the table list for the backup - private ArrayList tableList; - - // actual start timestamp of the backup process - private long startTs; - - // actual complete timestamp of the backup process - private long completeTs; - - // the region server timestamp for tables: - // > - private Map> incrTimeRanges; - - // dependency of this backup, including all the dependent images to do PIT recovery - private Map 
dependency; - - /** - * Construct manifest for a ongoing backup. - * @param backupCtx The ongoing backup context - */ - public BackupManifest(BackupInfo backupCtx) { - this.backupId = backupCtx.getBackupId(); - this.type = backupCtx.getType(); - this.rootDir = backupCtx.getTargetRootDir(); - if (this.type == BackupType.INCREMENTAL) { - this.logBackupDir = backupCtx.getHLogTargetDir(); - } - this.startTs = backupCtx.getStartTs(); - this.completeTs = backupCtx.getEndTs(); - this.loadTableList(backupCtx.getTableNames()); - } - - - /** - * Construct a table level manifest for a backup of the named table. - * @param backupCtx The ongoing backup context - */ - public BackupManifest(BackupInfo backupCtx, TableName table) { - this.backupId = backupCtx.getBackupId(); - this.type = backupCtx.getType(); - this.rootDir = backupCtx.getTargetRootDir(); - this.tableBackupDir = backupCtx.getBackupStatus(table).getTargetDir(); - if (this.type == BackupType.INCREMENTAL) { - this.logBackupDir = backupCtx.getHLogTargetDir(); - } - this.startTs = backupCtx.getStartTs(); - this.completeTs = backupCtx.getEndTs(); - List tables = new ArrayList(); - tables.add(table); - this.loadTableList(tables); - } - - /** - * Construct manifest from a backup directory. - * @param conf configuration - * @param backupPath backup path - * @throws BackupException exception - */ - - public BackupManifest(Configuration conf, Path backupPath) throws BackupException { - if (LOG.isDebugEnabled()) { - LOG.debug("Loading manifest from: " + backupPath.toString()); - } - // The input backupDir may not exactly be the backup table dir. - // It could be the backup log dir where there is also a manifest file stored. - // This variable's purpose is to keep the correct and original location so - // that we can store/persist it. - this.tableBackupDir = backupPath.toString(); - this.config = conf; - try { - - FileSystem fs = backupPath.getFileSystem(conf); - FileStatus[] subFiles = BackupClientUtil.listStatus(fs, backupPath, null); - if (subFiles == null) { - String errorMsg = backupPath.toString() + " does not exist"; - LOG.error(errorMsg); - throw new IOException(errorMsg); - } - for (FileStatus subFile : subFiles) { - if (subFile.getPath().getName().equals(MANIFEST_FILE_NAME)) { - - // load and set manifest field from file content - FSDataInputStream in = fs.open(subFile.getPath()); - long len = subFile.getLen(); - byte[] pbBytes = new byte[(int) len]; - in.readFully(pbBytes); - BackupProtos.BackupManifest proto = null; - try{ - proto = parseFrom(pbBytes); - } catch(Exception e){ - throw new BackupException(e); - } - this.version = proto.getVersion(); - this.backupId = proto.getBackupId(); - this.type = BackupType.valueOf(proto.getType().name()); - // Here the parameter backupDir is where the manifest file is. 
- // There should always be a manifest file under: - // backupRootDir/namespace/table/backupId/.backup.manifest - this.rootDir = backupPath.getParent().getParent().getParent().toString(); - - Path p = backupPath.getParent(); - if (p.getName().equals(HConstants.HREGION_LOGDIR_NAME)) { - this.rootDir = p.getParent().toString(); - } else { - this.rootDir = p.getParent().getParent().toString(); - } - - loadTableList(proto); - this.startTs = proto.getStartTs(); - this.completeTs = proto.getCompleteTs(); - loadIncrementalTimestampMap(proto); - loadDependency(proto); - //TODO: merge will be implemented by future jira - LOG.debug("Loaded manifest instance from manifest file: " - + BackupClientUtil.getPath(subFile.getPath())); - return; - } - } - String errorMsg = "No manifest file found in: " + backupPath.toString(); - throw new IOException(errorMsg); - - } catch (IOException e) { - throw new BackupException(e.getMessage()); - } - } - - private void loadIncrementalTimestampMap(BackupProtos.BackupManifest proto) { - List list = proto.getTstMapList(); - if(list == null || list.size() == 0) return; - this.incrTimeRanges = new HashMap>(); - for(BackupProtos.TableServerTimestamp tst: list){ - TableName tn = ProtobufUtil.toTableName(tst.getTable()); - HashMap map = this.incrTimeRanges.get(tn); - if(map == null){ - map = new HashMap(); - this.incrTimeRanges.put(tn, map); - } - List listSt = tst.getServerTimestampList(); - for(BackupProtos.ServerTimestamp stm: listSt) { - map.put(stm.getServer(), stm.getTimestamp()); - } - } - } - - private void loadDependency(BackupProtos.BackupManifest proto) { - if(LOG.isDebugEnabled()) { - LOG.debug("load dependency for: "+proto.getBackupId()); - } - - dependency = new HashMap(); - List list = proto.getDependentBackupImageList(); - for (BackupProtos.BackupImage im : list) { - BackupImage bim = BackupImage.fromProto(im); - if(im.getBackupId() != null){ - dependency.put(im.getBackupId(), bim); - } else{ - LOG.warn("Load dependency for backup manifest: "+ backupId+ - ". Null backup id in dependent image"); - } - } - } - - private void loadTableList(BackupProtos.BackupManifest proto) { - this.tableList = new ArrayList(); - List list = proto.getTableListList(); - for (HBaseProtos.TableName name: list) { - this.tableList.add(ProtobufUtil.toTableName(name)); - } - } - - public BackupType getType() { - return type; - } - - public void setType(BackupType type) { - this.type = type; - } - - /** - * Loads table list. - * @param tableList Table list - */ - private void loadTableList(List tableList) { - - this.tableList = this.getTableList(); - if (this.tableList.size() > 0) { - this.tableList.clear(); - } - for (int i = 0; i < tableList.size(); i++) { - this.tableList.add(tableList.get(i)); - } - - LOG.debug(tableList.size() + " tables exist in table set."); - } - - /** - * Get the table set of this image. - * @return The table set list - */ - public ArrayList getTableList() { - if (this.tableList == null) { - this.tableList = new ArrayList(); - } - return this.tableList; - } - - /** - * Persist the manifest file. - * @throws IOException IOException when storing the manifest file. - */ - - public void store(Configuration conf) throws BackupException { - byte[] data = toByteArray(); - - // write the file, overwrite if already exist - Path manifestFilePath = - new Path(new Path((this.tableBackupDir != null ? 
this.tableBackupDir : this.logBackupDir)) - ,MANIFEST_FILE_NAME); - try { - FSDataOutputStream out = - manifestFilePath.getFileSystem(conf).create(manifestFilePath, true); - out.write(data); - out.close(); - } catch (IOException e) { - throw new BackupException(e.getMessage()); - } - - LOG.info("Manifest file stored to " + manifestFilePath); - } - - /** - * Protobuf serialization - * @return The filter serialized using pb - */ - public byte[] toByteArray() { - BackupProtos.BackupManifest.Builder builder = BackupProtos.BackupManifest.newBuilder(); - builder.setVersion(this.version); - builder.setBackupId(this.backupId); - builder.setType(BackupProtos.BackupType.valueOf(this.type.name())); - setTableList(builder); - builder.setStartTs(this.startTs); - builder.setCompleteTs(this.completeTs); - setIncrementalTimestampMap(builder); - setDependencyMap(builder); - return builder.build().toByteArray(); - } - - private void setIncrementalTimestampMap(BackupProtos.BackupManifest.Builder builder) { - if (this.incrTimeRanges == null) { - return; - } - for (Entry> entry: this.incrTimeRanges.entrySet()) { - TableName key = entry.getKey(); - HashMap value = entry.getValue(); - BackupProtos.TableServerTimestamp.Builder tstBuilder = - BackupProtos.TableServerTimestamp.newBuilder(); - tstBuilder.setTable(ProtobufUtil.toProtoTableName(key)); - - for (String s : value.keySet()) { - BackupProtos.ServerTimestamp.Builder stBuilder = BackupProtos.ServerTimestamp.newBuilder(); - stBuilder.setServer(s); - stBuilder.setTimestamp(value.get(s)); - tstBuilder.addServerTimestamp(stBuilder.build()); - } - builder.addTstMap(tstBuilder.build()); - } - } - - private void setDependencyMap(BackupProtos.BackupManifest.Builder builder) { - for (BackupImage image: getDependency().values()) { - builder.addDependentBackupImage(image.toProto()); - } - } - - private void setTableList(BackupProtos.BackupManifest.Builder builder) { - for(TableName name: tableList){ - builder.addTableList(ProtobufUtil.toProtoTableName(name)); - } - } - - /** - * Parse protobuf from byte array - * @param pbBytes A pb serialized BackupManifest instance - * @return An instance of made from bytes - * @throws DeserializationException - */ - private static BackupProtos.BackupManifest parseFrom(final byte[] pbBytes) - throws DeserializationException { - BackupProtos.BackupManifest proto; - try { - proto = BackupProtos.BackupManifest.parseFrom(pbBytes); - } catch (InvalidProtocolBufferException e) { - throw new DeserializationException(e); - } - return proto; - } - - /** - * Get manifest file version - * @return version - */ - public String getVersion() { - return version; - } - - /** - * Get this backup image. - * @return the backup image. - */ - public BackupImage getBackupImage() { - return this.getDependency().get(this.backupId); - } - - /** - * Add dependent backup image for this backup. - * @param image The direct dependent backup image - */ - public void addDependentImage(BackupImage image) { - this.getDependency().get(this.backupId).addAncestor(image); - this.setDependencyMap(this.getDependency(), image); - } - - - - /** - * Get all dependent backup images. The image of this backup is also contained. 
- * @return The dependent backup images map - */ - public Map getDependency() { - if (this.dependency == null) { - this.dependency = new HashMap(); - LOG.debug(this.rootDir + " " + this.backupId + " " + this.type); - this.dependency.put(this.backupId, - new BackupImage(this.backupId, this.type, this.rootDir, tableList, this.startTs, - this.completeTs)); - } - return this.dependency; - } - - /** - * Set the incremental timestamp map directly. - * @param incrTimestampMap timestamp map - */ - public void setIncrTimestampMap(HashMap> incrTimestampMap) { - this.incrTimeRanges = incrTimestampMap; - } - - - public Map> getIncrTimestampMap() { - if (this.incrTimeRanges == null) { - this.incrTimeRanges = new HashMap>(); - } - return this.incrTimeRanges; - } - - - /** - * Get the image list of this backup for restore in time order. - * @param reverse If true, then output in reverse order, otherwise in time order from old to new - * @return the backup image list for restore in time order - */ - public ArrayList getRestoreDependentList(boolean reverse) { - TreeMap restoreImages = new TreeMap(); - for (BackupImage image : this.getDependency().values()) { - restoreImages.put(Long.valueOf(image.startTs), image); - } - return new ArrayList(reverse ? (restoreImages.descendingMap().values()) - : (restoreImages.values())); - } - - /** - * Get the dependent image list for a specific table of this backup in time order from old to new - * if want to restore to this backup image level. - * @param table table - * @return the backup image list for a table in time order - */ - public ArrayList getDependentListByTable(TableName table) { - ArrayList tableImageList = new ArrayList(); - ArrayList imageList = getRestoreDependentList(true); - for (BackupImage image : imageList) { - if (image.hasTable(table)) { - tableImageList.add(image); - if (image.getType() == BackupType.FULL) { - break; - } - } - } - Collections.reverse(tableImageList); - return tableImageList; - } - - /** - * Get the full dependent image list in the whole dependency scope for a specific table of this - * backup in time order from old to new. - * @param table table - * @return the full backup image list for a table in time order in the whole scope of the - * dependency of this image - */ - public ArrayList getAllDependentListByTable(TableName table) { - ArrayList tableImageList = new ArrayList(); - ArrayList imageList = getRestoreDependentList(false); - for (BackupImage image : imageList) { - if (image.hasTable(table)) { - tableImageList.add(image); - } - } - return tableImageList; - } - - - /** - * Recursively set the dependency map of the backup images. - * @param map The dependency map - * @param image The backup image - */ - private void setDependencyMap(Map map, BackupImage image) { - if (image == null) { - return; - } else { - map.put(image.getBackupId(), image); - for (BackupImage img : image.getAncestors()) { - setDependencyMap(map, img); - } - } - } - - /** - * Check whether backup image1 could cover backup image2 or not. - * @param image1 backup image 1 - * @param image2 backup image 2 - * @return true if image1 can cover image2, otherwise false - */ - public static boolean canCoverImage(BackupImage image1, BackupImage image2) { - // image1 can cover image2 only when the following conditions are satisfied: - // - image1 must not be an incremental image; - // - image1 must be taken after image2 has been taken; - // - table set of image1 must cover the table set of image2. 
- if (image1.getType() == BackupType.INCREMENTAL) { - return false; - } - if (image1.getStartTs() < image2.getStartTs()) { - return false; - } - List image1TableList = image1.getTableNames(); - List image2TableList = image2.getTableNames(); - boolean found = false; - for (int i = 0; i < image2TableList.size(); i++) { - found = false; - for (int j = 0; j < image1TableList.size(); j++) { - if (image2TableList.get(i).equals(image1TableList.get(j))) { - found = true; - break; - } - } - if (!found) { - return false; - } - } - - LOG.debug("Backup image " + image1.getBackupId() + " can cover " + image2.getBackupId()); - return true; - } - - /** - * Check whether backup image set could cover a backup image or not. - * @param fullImages The backup image set - * @param image The target backup image - * @return true if fullImages can cover image, otherwise false - */ - public static boolean canCoverImage(ArrayList fullImages, BackupImage image) { - // fullImages can cover image only when the following conditions are satisfied: - // - each image of fullImages must not be an incremental image; - // - each image of fullImages must be taken after image has been taken; - // - sum table set of fullImages must cover the table set of image. - for (BackupImage image1 : fullImages) { - if (image1.getType() == BackupType.INCREMENTAL) { - return false; - } - if (image1.getStartTs() < image.getStartTs()) { - return false; - } - } - - ArrayList image1TableList = new ArrayList(); - for (BackupImage image1 : fullImages) { - List tableList = image1.getTableNames(); - for (TableName table : tableList) { - image1TableList.add(table.getNameAsString()); - } - } - ArrayList image2TableList = new ArrayList(); - List tableList = image.getTableNames(); - for (TableName table : tableList) { - image2TableList.add(table.getNameAsString()); - } - - for (int i = 0; i < image2TableList.size(); i++) { - if (image1TableList.contains(image2TableList.get(i)) == false) { - return false; - } - } - - LOG.debug("Full image set can cover image " + image.getBackupId()); - return true; - } -} diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/backup/master/FullTableBackupProcedure.java hbase-server/src/main/java/org/apache/hadoop/hbase/backup/master/FullTableBackupProcedure.java index a7cfd8a..7c832e4 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/backup/master/FullTableBackupProcedure.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/backup/master/FullTableBackupProcedure.java @@ -33,7 +33,9 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.LocatedFileStatus; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.RemoteIterator; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.NotServingRegionException; import org.apache.hadoop.hbase.TableName; @@ -321,8 +323,7 @@ public class FullTableBackupProcedure throw new IOException("Failed of exporting snapshot " + args[1] + " to " + args[3] + " with reason code " + res); } - - LOG.info("Snapshot copy " + args[1] + " finished."); + LOG.info("Snapshot copy " + args[1] + " finished."); } } @@ -575,7 +576,7 @@ public class FullTableBackupProcedure // do snapshot copy LOG.debug("snapshot copy for " + backupId); try { - this.snapshotCopy(backupContext); + this.snapshotCopy(backupContext); } catch (Exception e) { setFailure("Failure in full-backup: snapshot copy phase" + backupId, e); // 
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java
index da1c6c1..492a0f2 100644
--- hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java
@@ -84,10 +84,6 @@ public class TestBackupBase {
   protected static String BACKUP_ROOT_DIR = "/backupUT";
   protected static String BACKUP_REMOTE_ROOT_DIR = "/backupUT";
 
-  protected static final String BACKUP_ZNODE = "/backup/hbase";
-  protected static final String BACKUP_SUCCEED_NODE = "complete";
-  protected static final String BACKUP_FAILED_NODE = "failed";
-
   /**
    * @throws java.lang.Exception
    */
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupShowHistory.java hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupShowHistory.java
index a7d2750..145a060 100644
--- hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupShowHistory.java
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupShowHistory.java
@@ -89,4 +89,34 @@ public class TestBackupShowHistory extends TestBackupBase {
     LOG.info(baos.toString());
     assertTrue(output.indexOf(backupId) > 0);
   }
+
+  @Test
+  public void testBackupHistoryOneTable() throws Exception {
+    LOG.info("test backup history on a single table with data");
+
+    List<TableName> tableList = Lists.newArrayList(table1);
+    String backupId1 = fullTableBackup(tableList);
+    assertTrue(checkSucceeded(backupId1));
+    LOG.info("backup complete: " + table1);
+
+    tableList = Lists.newArrayList(table2);
+    String backupId2 = fullTableBackup(tableList);
+    assertTrue(checkSucceeded(backupId2));
+    LOG.info("backup complete: " + table2);
+
+    List<BackupInfo> history = getBackupAdmin().getHistory(10, table1);
+    assertTrue(history.size() > 0);
+    boolean success = true;
+    for (BackupInfo info : history) {
+      if (!info.getTableNames().contains(table1)) {
+        success = false;
+        break;
+      }
+    }
+    assertTrue(success);
+    LOG.info("show_history");
+  }
+}
\ No newline at end of file
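Note on testBackupHistoryOneTable above: the assertion loop is effectively a per-table filter over the returned records. Written out as a hypothetical helper (not part of this patch), the contract that getHistory(n, table) is expected to honor looks like this:

import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.backup.BackupInfo;

final class HistoryFilterSketch {
  // Keep only backup records whose table set includes the given table; the
  // test above asserts that getHistory(10, table1) returns nothing outside
  // this filter.
  static List<BackupInfo> filterByTable(List<BackupInfo> history, TableName table) {
    List<BackupInfo> filtered = new ArrayList<BackupInfo>();
    for (BackupInfo info : history) {
      if (info.getTableNames().contains(table)) {
        filtered.add(info);
      }
    }
    return filtered;
  }
}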
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupShowHistoryFromBackupDestination.java hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupShowHistoryFromBackupDestination.java
new file mode 100644
index 0000000..512e737
--- /dev/null
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupShowHistoryFromBackupDestination.java
@@ -0,0 +1,126 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.backup;
+
+import static org.junit.Assert.assertTrue;
+
+import java.io.ByteArrayOutputStream;
+import java.io.PrintStream;
+import java.util.List;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.backup.util.BackupClientUtil;
+import org.apache.hadoop.hbase.testclassification.LargeTests;
+import org.apache.hadoop.util.ToolRunner;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import com.google.common.collect.Lists;
+
+@Category(LargeTests.class)
+public class TestBackupShowHistoryFromBackupDestination extends TestBackupBase {
+
+  private static final Log LOG =
+      LogFactory.getLog(TestBackupShowHistoryFromBackupDestination.class);
+
+  /**
+   * Verify that a full backup is created on a single table with data correctly,
+   * and that history works as expected.
+   * @throws Exception
+   */
+  @Test
+  public void testBackupHistory() throws Exception {
+    LOG.info("test backup history on a single table with data");
+
+    List<TableName> tableList = Lists.newArrayList(table1);
+    String backupId = fullTableBackup(tableList);
+    assertTrue(checkSucceeded(backupId));
+    LOG.info("backup complete");
+
+    List<BackupInfo> history = BackupClientUtil.getHistory(conf1, 10, null,
+        new Path(BACKUP_ROOT_DIR));
+    assertTrue(history.size() > 0);
+    boolean success = false;
+    for (BackupInfo info : history) {
+      if (info.getBackupId().equals(backupId)) {
+        success = true;
+        break;
+      }
+    }
+    assertTrue(success);
+    LOG.info("show_history");
+  }
+
+  @Test
+  public void testBackupHistoryCommand() throws Exception {
+    LOG.info("test backup history on a single table with data: command-line");
+
+    List<TableName> tableList = Lists.newArrayList(table1);
+    String backupId = fullTableBackup(tableList);
+    assertTrue(checkSucceeded(backupId));
+    LOG.info("backup complete");
+
+    ByteArrayOutputStream baos = new ByteArrayOutputStream();
+    System.setOut(new PrintStream(baos));
+
+    String[] args = new String[] { "history", "-n", "10", "-path", BACKUP_ROOT_DIR };
+    // Run backup
+    int ret = ToolRunner.run(conf1, new BackupDriver(), args);
+    assertTrue(ret == 0);
+    LOG.info("show_history");
+    String output = baos.toString();
+    LOG.info(baos.toString());
+    assertTrue(output.indexOf(backupId) > 0);
+  }
+
+  @Test
+  public void testBackupHistoryOneTable() throws Exception {
+    LOG.info("test backup history on a single table with data");
+
+    List<TableName> tableList = Lists.newArrayList(table1);
+    String backupId1 = fullTableBackup(tableList);
+    assertTrue(checkSucceeded(backupId1));
+    LOG.info("backup complete: " + table1);
+
+    tableList = Lists.newArrayList(table2);
+    String backupId2 = fullTableBackup(tableList);
+    assertTrue(checkSucceeded(backupId2));
+    LOG.info("backup complete: " + table2);
+
+    List<BackupInfo> history =
+        BackupClientUtil.getHistory(conf1, 10, table1, new Path(BACKUP_ROOT_DIR));
+    assertTrue(history.size() > 0);
+    boolean success = true;
+    for (BackupInfo info : history) {
+      if (!info.getTableNames().contains(table1)) {
+        success = false;
+        break;
+      }
+    }
+    assertTrue(success);
+    LOG.info("show_history");
+  }
+
+}
\ No newline at end of file
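For reference, a minimal standalone driver for the destination-based history lookup that the new test class exercises. This is a sketch only: the root path "/backupUT" and table name "t1" are placeholder values, and passing null as the table lists sessions for all tables, as the tests above do:

import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.backup.BackupInfo;
import org.apache.hadoop.hbase.backup.util.BackupClientUtil;

public class ShowHistoryFromDestination {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Read up to 10 sessions for table "t1" directly from the backup
    // destination, without contacting the hbase:backup system table.
    List<BackupInfo> history = BackupClientUtil.getHistory(
        conf, 10, TableName.valueOf("t1"), new Path("/backupUT"));
    for (BackupInfo info : history) {
      System.out.println(info.getShortDescription());
    }
  }
}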