diff --git a/bin/hbase b/bin/hbase index 7faaa26..83b91c0 100755 --- a/bin/hbase +++ b/bin/hbase @@ -101,6 +101,8 @@ if [ $# = 0 ]; then echo " ltt Run LoadTestTool" echo " canary Run the Canary tool" echo " version Print the version" + echo " backup backup tables for recovery" + echo " restore restore tables from existing backup image" echo " CLASSNAME Run the class named CLASSNAME" exit 1 fi @@ -313,6 +315,10 @@ elif [ "$COMMAND" = "hfile" ] ; then CLASS='org.apache.hadoop.hbase.io.hfile.HFilePrettyPrinter' elif [ "$COMMAND" = "zkcli" ] ; then CLASS="org.apache.hadoop.hbase.zookeeper.ZooKeeperMainServer" +elif [ "$COMMAND" = "backup" ] ; then + CLASS='org.apache.hadoop.hbase.backup.BackupDriver' +elif [ "$COMMAND" = "restore" ] ; then + CLASS='org.apache.hadoop.hbase.backup.RestoreDriver' elif [ "$COMMAND" = "upgrade" ] ; then echo "This command was used to upgrade to HBase 0.96, it was removed in HBase 2.0.0." echo "Please follow the documentation at http://hbase.apache.org/book.html#upgrading." diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java index 0c6244f..9ce8de3 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java @@ -1262,6 +1262,14 @@ public final class HConstants { public static final String DEFAULT_TEMPORARY_HDFS_DIRECTORY = "/user/" + System.getProperty("user.name") + "/hbase-staging"; + /** + * Backup/Restore constants + */ + public final static String BACKUP_ENABLE_KEY = "hbase.backup.enable"; + public final static boolean BACKUP_ENABLE_DEFAULT = true; + public final static String BACKUP_SYSTEM_TTL_KEY = "hbase.backup.system.ttl"; + public final static int BACKUP_SYSTEM_TTL_DEFAULT = FOREVER; + private HConstants() { // Can't be instantiated with this ctor. 
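The new HConstants entries give backup code a single on/off switch and a TTL setting to consult. Below is a minimal sketch of how a component might read them through a standard Hadoop Configuration; the surrounding BackupManager wiring is not shown here and the class name is illustrative only.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;

public class BackupConfigCheck {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // hbase.backup.enable defaults to true per BACKUP_ENABLE_DEFAULT
    boolean backupEnabled =
        conf.getBoolean(HConstants.BACKUP_ENABLE_KEY, HConstants.BACKUP_ENABLE_DEFAULT);
    // hbase.backup.system.ttl defaults to FOREVER for the backup system table
    int systemTtl =
        conf.getInt(HConstants.BACKUP_SYSTEM_TTL_KEY, HConstants.BACKUP_SYSTEM_TTL_DEFAULT);
    System.out.println("backup enabled=" + backupEnabled + ", system ttl=" + systemTtl);
  }
}
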
} diff --git a/hbase-protocol/pom.xml b/hbase-protocol/pom.xml index 8034576..9098944 100644 --- a/hbase-protocol/pom.xml +++ b/hbase-protocol/pom.xml @@ -171,6 +171,7 @@ Admin.proto Aggregate.proto Authentication.proto + Backup.proto Cell.proto Client.proto ClusterId.proto diff --git a/hbase-server/pom.xml b/hbase-server/pom.xml index d5f1e30..e4b296a 100644 --- a/hbase-server/pom.xml +++ b/hbase-server/pom.xml @@ -394,6 +394,11 @@ ${project.version} true + + org.apache.hadoop + hadoop-distcp + ${hadoop-two.version} + commons-httpclient commons-httpclient @@ -407,6 +412,11 @@ commons-collections + org.apache.hadoop + hadoop-distcp + ${hadoop-two.version} + + org.apache.hbase hbase-hadoop-compat diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/BaseCoordinatedStateManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/BaseCoordinatedStateManager.java index ae36f08..3342743 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/BaseCoordinatedStateManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/BaseCoordinatedStateManager.java @@ -17,7 +17,11 @@ */ package org.apache.hadoop.hbase.coordination; +import java.io.IOException; + import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.procedure.ProcedureCoordinatorRpcs; +import org.apache.hadoop.hbase.procedure.ProcedureMemberRpcs; import org.apache.hadoop.hbase.CoordinatedStateManager; import org.apache.hadoop.hbase.Server; @@ -51,8 +55,21 @@ public abstract class BaseCoordinatedStateManager implements CoordinatedStateMan * Method to retrieve coordination for split log worker */ public abstract SplitLogWorkerCoordination getSplitLogWorkerCoordination(); + /** * Method to retrieve coordination for split log manager */ public abstract SplitLogManagerCoordination getSplitLogManagerCoordination(); + /** + * Method to retrieve {@link org.apache.hadoop.hbase.procedure.ProcedureCoordinatorRpcs} + */ + public abstract ProcedureCoordinatorRpcs + getProcedureCoordinatorRpcs(String procType, String coordNode) throws IOException; + + /** + * Method to retrieve {@link org.apache.hadoop.hbase.procedure.ProcedureMemberRpc} + */ + public abstract ProcedureMemberRpcs + getProcedureMemberRpcs(String procType) throws IOException; + } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkCoordinatedStateManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkCoordinatedStateManager.java index 3e89be7..7cf4aab 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkCoordinatedStateManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkCoordinatedStateManager.java @@ -17,9 +17,15 @@ */ package org.apache.hadoop.hbase.coordination; +import java.io.IOException; + import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.Server; +import org.apache.hadoop.hbase.procedure.ProcedureCoordinatorRpcs; +import org.apache.hadoop.hbase.procedure.ProcedureMemberRpcs; +import org.apache.hadoop.hbase.procedure.ZKProcedureCoordinatorRpcs; +import org.apache.hadoop.hbase.procedure.ZKProcedureMemberRpcs; import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; /** @@ -49,9 +55,21 @@ public class ZkCoordinatedStateManager extends BaseCoordinatedStateManager { @Override public SplitLogWorkerCoordination getSplitLogWorkerCoordination() { return 
splitLogWorkerCoordination; - } + } + @Override public SplitLogManagerCoordination getSplitLogManagerCoordination() { return splitLogManagerCoordination; } + + @Override + public ProcedureCoordinatorRpcs getProcedureCoordinatorRpcs(String procType, String coordNode) + throws IOException { + return new ZKProcedureCoordinatorRpcs(watcher, procType, coordNode); + } + + @Override + public ProcedureMemberRpcs getProcedureMemberRpcs(String procType) throws IOException { + return new ZKProcedureMemberRpcs(watcher, procType); + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/WALPlayer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/WALPlayer.java index 9d9cee0..2ceeda5 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/WALPlayer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/WALPlayer.java @@ -85,6 +85,9 @@ public class WALPlayer extends Configured implements Tool { private final static String JOB_NAME_CONF_KEY = "mapreduce.job.name"; + public WALPlayer(){ + } + protected WALPlayer(final Configuration c) { super(c); } @@ -94,7 +97,7 @@ public class WALPlayer extends Configured implements Tool { * This one can be used together with {@link KeyValueSortReducer} */ static class WALKeyValueMapper - extends Mapper { + extends Mapper { private byte[] table; @Override @@ -106,7 +109,9 @@ public class WALPlayer extends Configured implements Tool { if (Bytes.equals(table, key.getTablename().getName())) { for (Cell cell : value.getCells()) { KeyValue kv = KeyValueUtil.ensureKeyValue(cell); - if (WALEdit.isMetaEditFamily(kv)) continue; + if (WALEdit.isMetaEditFamily(kv)) { + continue; + } context.write(new ImmutableBytesWritable(CellUtil.cloneRow(kv)), kv); } } @@ -132,7 +137,7 @@ public class WALPlayer extends Configured implements Tool { * a running HBase instance. */ protected static class WALMapper - extends Mapper { + extends Mapper { private Map tables = new TreeMap(); @Override @@ -149,7 +154,9 @@ public class WALPlayer extends Configured implements Tool { Cell lastCell = null; for (Cell cell : value.getCells()) { // filtering WAL meta entries - if (WALEdit.isMetaEditFamily(cell)) continue; + if (WALEdit.isMetaEditFamily(cell)) { + continue; + } // Allow a subclass filter out this cell. if (filter(context, cell)) { @@ -160,8 +167,12 @@ public class WALPlayer extends Configured implements Tool { if (lastCell == null || lastCell.getTypeByte() != cell.getTypeByte() || !CellUtil.matchingRow(lastCell, cell)) { // row or type changed, write out aggregate KVs. - if (put != null) context.write(tableOut, put); - if (del != null) context.write(tableOut, del); + if (put != null) { + context.write(tableOut, put); + } + if (del != null) { + context.write(tableOut, del); + } if (CellUtil.isDelete(cell)) { del = new Delete(CellUtil.cloneRow(cell)); } else { @@ -177,8 +188,12 @@ public class WALPlayer extends Configured implements Tool { lastCell = cell; } // write residual KVs - if (put != null) context.write(tableOut, put); - if (del != null) context.write(tableOut, del); + if (put != null) { + context.write(tableOut, put); + } + if (del != null) { + context.write(tableOut, del); + } } } catch (InterruptedException e) { e.printStackTrace(); @@ -186,7 +201,8 @@ public class WALPlayer extends Configured implements Tool { } /** - * @param cell + * Filter cell + * @param cell cell * @return Return true if we are to emit this cell. 
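With the two new abstract methods on BaseCoordinatedStateManager, procedure-based features such as backup can obtain their coordinator and member RPC endpoints from the coordination layer instead of constructing the ZooKeeper classes directly. The following sketch shows a hypothetical caller; the procedure signature string and server wiring are assumptions for illustration, and the checked IOException reflects the new signatures above.

import java.io.IOException;
import org.apache.hadoop.hbase.coordination.BaseCoordinatedStateManager;
import org.apache.hadoop.hbase.procedure.ProcedureCoordinatorRpcs;
import org.apache.hadoop.hbase.procedure.ProcedureMemberRpcs;

public class ProcedureRpcWiring {
  // Illustrative only: the signature string is a placeholder, not a value from this patch.
  private static final String PROC_SIGNATURE = "example-proc";

  ProcedureCoordinatorRpcs wireCoordinator(BaseCoordinatedStateManager csm, String serverName)
      throws IOException {
    // On the master side, ask the coordination layer for the coordinator endpoint.
    return csm.getProcedureCoordinatorRpcs(PROC_SIGNATURE, serverName);
  }

  ProcedureMemberRpcs wireMember(BaseCoordinatedStateManager csm) throws IOException {
    // On a region server, ask for the member endpoint; ZK errors now surface as IOException.
    return csm.getProcedureMemberRpcs(PROC_SIGNATURE);
  }
}
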
*/ protected boolean filter(Context context, final Cell cell) { @@ -197,9 +213,7 @@ public class WALPlayer extends Configured implements Tool { public void setup(Context context) throws IOException { String[] tableMap = context.getConfiguration().getStrings(TABLE_MAP_KEY); String[] tablesToUse = context.getConfiguration().getStrings(TABLES_KEY); - if (tablesToUse == null && tableMap == null) { - // Then user wants all tables. - } else if (tablesToUse == null || tableMap == null || tablesToUse.length != tableMap.length) { + if (tablesToUse == null || tableMap == null || tablesToUse.length != tableMap.length) { // this can only happen when WALMapper is used directly by a class other than WALPlayer throw new IOException("No tables or incorrect table mapping specified."); } @@ -215,7 +229,9 @@ public class WALPlayer extends Configured implements Tool { void setupTime(Configuration conf, String option) throws IOException { String val = conf.get(option); - if (null == val) return; + if (null == val) { + return; + } long ms; try { // first try to parse in user friendly form @@ -295,7 +311,8 @@ public class WALPlayer extends Configured implements Tool { return job; } - /* + /** + * Print usage * @param errorMsg Error message. Can be null. */ private void usage(final String errorMsg) { @@ -305,7 +322,8 @@ public class WALPlayer extends Configured implements Tool { System.err.println("Usage: " + NAME + " [options] []"); System.err.println("Read all WAL entries for ."); System.err.println("If no tables (\"\") are specific, all tables are imported."); - System.err.println("(Careful, even -ROOT- and hbase:meta entries will be imported in that case.)"); + System.err.println("(Careful, even -ROOT- and hbase:meta entries will be imported"+ + " in that case.)"); System.err.println("Otherwise is a comma separated list of tables.\n"); System.err.println("The WAL entries can be mapped to new set of tables via ."); System.err.println(" is a command separated list of targettables."); @@ -318,10 +336,10 @@ public class WALPlayer extends Configured implements Tool { System.err.println(" -D" + WALInputFormat.START_TIME_KEY + "=[date|ms]"); System.err.println(" -D" + WALInputFormat.END_TIME_KEY + "=[date|ms]"); System.err.println(" -D " + JOB_NAME_CONF_KEY - + "=jobName - use the specified mapreduce job name for the wal player"); + + "=jobName - use the specified mapreduce job name for the wal player"); System.err.println("For performance also consider the following options:\n" - + " -Dmapreduce.map.speculative=false\n" - + " -Dmapreduce.reduce.speculative=false"); + + " -Dmapreduce.map.speculative=false\n" + + " -Dmapreduce.reduce.speculative=false"); } /** diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java index b4bffb4..89cfd18 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java @@ -75,6 +75,7 @@ import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableNotDisabledException; import org.apache.hadoop.hbase.TableNotFoundException; import org.apache.hadoop.hbase.UnknownRegionException; +import org.apache.hadoop.hbase.backup.impl.BackupManager; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.RegionReplicaUtil; @@ -390,6 +391,7 @@ public class HMaster extends HRegionServer implements 
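The new public no-argument WALPlayer constructor makes it easier to launch the tool programmatically through ToolRunner, which supplies the Configuration afterwards. A minimal sketch follows; the WAL input directory and table names are placeholders, not values from this patch.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.mapreduce.WALPlayer;
import org.apache.hadoop.util.ToolRunner;

public class WALPlayerLauncher {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Replay WALs from an input directory into table t1, mapping it onto t1_restored.
    // The path and table names below are placeholders for illustration only.
    String[] playerArgs = new String[] { "/backup/wals", "t1", "t1_restored" };
    int exitCode = ToolRunner.run(conf, new WALPlayer(), playerArgs);
    System.exit(exitCode);
  }
}
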
MasterServices { this.conf.setBoolean(HConstants.USE_META_REPLICAS, false); Replication.decorateMasterConfiguration(this.conf); + BackupManager.decorateMasterConfiguration(this.conf); // Hack! Maps DFSClient => Master for logs. HDFS made this // config param for task trackers, but we can piggyback off of it. diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/RegionServerProcedureManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/RegionServerProcedureManager.java index 95c3ffe..b6e11ea 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/RegionServerProcedureManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/RegionServerProcedureManager.java @@ -37,7 +37,7 @@ public abstract class RegionServerProcedureManager extends ProcedureManager { * @param rss Region Server service interface * @throws KeeperException */ - public abstract void initialize(RegionServerServices rss) throws KeeperException; + public abstract void initialize(RegionServerServices rss) throws IOException; /** * Start accepting procedure requests. diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/RegionServerProcedureManagerHost.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/RegionServerProcedureManagerHost.java index 0f4ea64..adb3604 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/RegionServerProcedureManagerHost.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/RegionServerProcedureManagerHost.java @@ -25,7 +25,6 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.procedure.flush.RegionServerFlushTableProcedureManager; import org.apache.hadoop.hbase.regionserver.RegionServerServices; import org.apache.hadoop.hbase.regionserver.snapshot.RegionServerSnapshotManager; -import org.apache.zookeeper.KeeperException; /** * Provides the globally barriered procedure framework and environment @@ -39,7 +38,7 @@ public class RegionServerProcedureManagerHost extends private static final Log LOG = LogFactory .getLog(RegionServerProcedureManagerHost.class); - public void initialize(RegionServerServices rss) throws KeeperException { + public void initialize(RegionServerServices rss) throws IOException { for (RegionServerProcedureManager proc : procedures) { LOG.debug("Procedure " + proc.getProcedureSignature() + " is initializing"); proc.initialize(rss); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ZKProcedureCoordinatorRpcs.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ZKProcedureCoordinatorRpcs.java index 085d642..3865ba9 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ZKProcedureCoordinatorRpcs.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ZKProcedureCoordinatorRpcs.java @@ -54,7 +54,7 @@ public class ZKProcedureCoordinatorRpcs implements ProcedureCoordinatorRpcs { * @throws KeeperException if an unexpected zk error occurs */ public ZKProcedureCoordinatorRpcs(ZooKeeperWatcher watcher, - String procedureClass, String coordName) throws KeeperException { + String procedureClass, String coordName) throws IOException { this.watcher = watcher; this.procedureType = procedureClass; this.coordName = coordName; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ZKProcedureMemberRpcs.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ZKProcedureMemberRpcs.java index 2e03a60..9b491fd 100644 --- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ZKProcedureMemberRpcs.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ZKProcedureMemberRpcs.java @@ -68,49 +68,53 @@ public class ZKProcedureMemberRpcs implements ProcedureMemberRpcs { * @throws KeeperException if we can't reach zookeeper */ public ZKProcedureMemberRpcs(final ZooKeeperWatcher watcher, final String procType) - throws KeeperException { - this.zkController = new ZKProcedureUtil(watcher, procType) { - @Override - public void nodeCreated(String path) { - if (!isInProcedurePath(path)) { - return; - } + throws IOException { + try { + this.zkController = new ZKProcedureUtil(watcher, procType) { + @Override + public void nodeCreated(String path) { + if (!isInProcedurePath(path)) { + return; + } - LOG.info("Received created event:" + path); - // if it is a simple start/end/abort then we just rewatch the node - if (isAcquiredNode(path)) { - waitForNewProcedures(); - return; - } else if (isAbortNode(path)) { - watchForAbortedProcedures(); - return; + LOG.info("Received created event:" + path); + // if it is a simple start/end/abort then we just rewatch the node + if (isAcquiredNode(path)) { + waitForNewProcedures(); + return; + } else if (isAbortNode(path)) { + watchForAbortedProcedures(); + return; + } + String parent = ZKUtil.getParent(path); + // if its the end barrier, the procedure can be completed + if (isReachedNode(parent)) { + receivedReachedGlobalBarrier(path); + return; + } else if (isAbortNode(parent)) { + abort(path); + return; + } else if (isAcquiredNode(parent)) { + startNewSubprocedure(path); + } else { + LOG.debug("Ignoring created notification for node:" + path); + } } - String parent = ZKUtil.getParent(path); - // if its the end barrier, the procedure can be completed - if (isReachedNode(parent)) { - receivedReachedGlobalBarrier(path); - return; - } else if (isAbortNode(parent)) { - abort(path); - return; - } else if (isAcquiredNode(parent)) { - startNewSubprocedure(path); - } else { - LOG.debug("Ignoring created notification for node:" + path); - } - } - @Override - public void nodeChildrenChanged(String path) { - if (path.equals(this.acquiredZnode)) { - LOG.info("Received procedure start children changed event: " + path); - waitForNewProcedures(); - } else if (path.equals(this.abortZnode)) { - LOG.info("Received procedure abort children changed event: " + path); - watchForAbortedProcedures(); + @Override + public void nodeChildrenChanged(String path) { + if (path.equals(this.acquiredZnode)) { + LOG.info("Received procedure start children changed event: " + path); + waitForNewProcedures(); + } else if (path.equals(this.abortZnode)) { + LOG.info("Received procedure abort children changed event: " + path); + watchForAbortedProcedures(); + } } - } - }; + }; + } catch (KeeperException e) { + throw new IOException(e); + } } public ZKProcedureUtil getZkController() { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/flush/RegionServerFlushTableProcedureManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/flush/RegionServerFlushTableProcedureManager.java index 1aa959c..bd65cc7 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/flush/RegionServerFlushTableProcedureManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/flush/RegionServerFlushTableProcedureManager.java @@ -317,7 +317,7 @@ public class RegionServerFlushTableProcedureManager extends RegionServerProcedur * @throws KeeperException if 
the zookeeper cannot be reached */ @Override - public void initialize(RegionServerServices rss) throws KeeperException { + public void initialize(RegionServerServices rss) throws IOException { this.rss = rss; ZooKeeperWatcher zkw = rss.getZooKeeper(); this.memberRpcs = new ZKProcedureMemberRpcs(zkw, diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java index 4ab2693..0ce8ee4 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java @@ -828,8 +828,8 @@ public class HRegionServer extends HasThread implements rspmHost = new RegionServerProcedureManagerHost(); rspmHost.loadProcedures(conf); rspmHost.initialize(this); - } catch (KeeperException e) { - this.abort("Failed to reach zk cluster when creating procedure handler.", e); + } catch (IOException e) { + this.abort("Failed to reach coordination cluster when creating procedure handler.", e); } // register watcher for recovering regions this.recoveringRegionWatcher = new RecoveringRegionWatcher(this.zooKeeper, this); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/snapshot/RegionServerSnapshotManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/snapshot/RegionServerSnapshotManager.java index 537329a..e56dd28 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/snapshot/RegionServerSnapshotManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/snapshot/RegionServerSnapshotManager.java @@ -390,7 +390,7 @@ public class RegionServerSnapshotManager extends RegionServerProcedureManager { * @throws KeeperException if the zookeeper cluster cannot be reached */ @Override - public void initialize(RegionServerServices rss) throws KeeperException { + public void initialize(RegionServerServices rss) throws IOException { this.rss = rss; ZooKeeperWatcher zkw = rss.getZooKeeper(); this.memberRpcs = new ZKProcedureMemberRpcs(zkw, diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java index f3f869c..31f05c2 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java @@ -96,6 +96,8 @@ import com.lmax.disruptor.TimeoutException; import com.lmax.disruptor.dsl.Disruptor; import com.lmax.disruptor.dsl.ProducerType; + + /** * Implementation of {@link WAL} to go against {@link FileSystem}; i.e. keep WALs in HDFS. * Only one WAL is ever being written at a time. When a WAL hits a configured maximum size, @@ -356,7 +358,9 @@ public class FSHLog implements WAL { public int compare(Path o1, Path o2) { long t1 = getFileNumFromFileName(o1); long t2 = getFileNumFromFileName(o2); - if (t1 == t2) return 0; + if (t1 == t2) { + return 0; + } return (t1 > t2) ? 
1 : -1; } }; @@ -399,7 +403,7 @@ public class FSHLog implements WAL { * @param root path for stored and archived wals * @param logDir dir where wals are stored * @param conf configuration to use - * @throws IOException + * @throws IOException exception */ public FSHLog(final FileSystem fs, final Path root, final String logDir, final Configuration conf) throws IOException { @@ -407,7 +411,7 @@ public class FSHLog implements WAL { } /** - * Create an edit log at the given dir location. + * Create an edit log at the given directory location. * * You should never have to load an existing log. If there is a log at * startup, it should have already been processed and deleted by the time the @@ -422,13 +426,13 @@ public class FSHLog implements WAL { * be registered before we do anything else; e.g. the * Constructor {@link #rollWriter()}. * @param failIfWALExists If true IOException will be thrown if files related to this wal - * already exist. + * already exist. * @param prefix should always be hostname and port in distributed env and - * it will be URL encoded before being used. - * If prefix is null, "wal" will be used + * it will be URL encoded before being used. + * If prefix is null, "wal" will be used * @param suffix will be url encoded. null is treated as empty. non-empty must start with - * {@link DefaultWALProvider#WAL_FILE_NAME_DELIMITER} - * @throws IOException + * {@link DefaultWALProvider#WAL_FILE_NAME_DELIMITER} + * @throws IOException exception */ public FSHLog(final FileSystem fs, final Path rootDir, final String logDir, final String archiveDir, final Configuration conf, @@ -590,7 +594,9 @@ public class FSHLog implements WAL { @VisibleForTesting OutputStream getOutputStream() { FSDataOutputStream fsdos = this.hdfs_out; - if (fsdos == null) return null; + if (fsdos == null) { + return null; + } return fsdos.getWrappedStream(); } @@ -625,7 +631,7 @@ public class FSHLog implements WAL { /** * Tell listeners about pre log roll. - * @throws IOException + * @throws IOException exception */ private void tellListenersAboutPreLogRoll(final Path oldPath, final Path newPath) throws IOException { @@ -638,7 +644,7 @@ public class FSHLog implements WAL { /** * Tell listeners about post log roll. - * @throws IOException + * @throws IOException exception */ private void tellListenersAboutPostLogRoll(final Path oldPath, final Path newPath) throws IOException { @@ -651,8 +657,7 @@ public class FSHLog implements WAL { /** * Run a sync after opening to set up the pipeline. - * @param nextWriter - * @param startTimeNanos + * @param nextWriter next writer */ private void preemptiveSync(final ProtobufLogWriter nextWriter) { long startTimeNanos = System.nanoTime(); @@ -670,7 +675,9 @@ public class FSHLog implements WAL { rollWriterLock.lock(); try { // Return if nothing to flush. - if (!force && (this.writer != null && this.numEntries.get() <= 0)) return null; + if (!force && (this.writer != null && this.numEntries.get() <= 0)) { + return null; + } byte [][] regionsToFlush = null; if (this.closed) { LOG.debug("WAL closed. Skipping rolling of writer"); @@ -725,7 +732,7 @@ public class FSHLog implements WAL { /** * Archive old logs. A WAL is eligible for archiving if all its WALEdits have been flushed. 
- * @throws IOException + * @throws IOException exception */ private void cleanOldLogs() throws IOException { List logsToArchive = null; @@ -735,9 +742,13 @@ public class FSHLog implements WAL { Path log = e.getKey(); Map sequenceNums = e.getValue(); if (this.sequenceIdAccounting.areAllLower(sequenceNums)) { - if (logsToArchive == null) logsToArchive = new ArrayList(); + if (logsToArchive == null) { + logsToArchive = new ArrayList(); + } logsToArchive.add(log); - if (LOG.isTraceEnabled()) LOG.trace("WAL file ready for archiving " + log); + if (LOG.isTraceEnabled()) { + LOG.trace("WAL file ready for archiving " + log); + } } } if (logsToArchive != null) { @@ -767,7 +778,9 @@ public class FSHLog implements WAL { if (regions != null) { StringBuilder sb = new StringBuilder(); for (int i = 0; i < regions.length; i++) { - if (i > 0) sb.append(", "); + if (i > 0) { + sb.append(", "); + } sb.append(Bytes.toStringBinary(regions[i])); } LOG.info("Too many WALs; count=" + logCount + ", max=" + this.maxLogs + @@ -833,7 +846,9 @@ public class FSHLog implements WAL { } } catch (FailedSyncBeforeLogCloseException e) { // If unflushed/unsynced entries on close, it is reason to abort. - if (isUnflushedEntries()) throw e; + if (isUnflushedEntries()) { + throw e; + } LOG.warn("Failed sync-before-close but no outstanding appends; closing WAL: " + e.getMessage()); } @@ -894,7 +909,9 @@ public class FSHLog implements WAL { try { blockOnSync(syncFuture); } catch (IOException ioe) { - if (LOG.isTraceEnabled()) LOG.trace("Stale sync exception", ioe); + if (LOG.isTraceEnabled()) { + LOG.trace("Stale sync exception", ioe); + } } } } @@ -965,7 +982,15 @@ public class FSHLog implements WAL { public Path getCurrentFileName() { return computeFilename(this.filenum.get()); } - + + /** + * To support old API compatibility + * @return current file number (timestamp) + */ + public long getFilenum() { + return filenum.get(); + } + @Override public String toString() { return "FSHLog " + logFilePrefix + ":" + logFileSuffix + "(num " + filenum + ")"; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/DefaultWALProvider.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/DefaultWALProvider.java index 027e7a2..dd4d337 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/DefaultWALProvider.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/DefaultWALProvider.java @@ -209,13 +209,18 @@ public class DefaultWALProvider implements WALProvider { @VisibleForTesting public static long extractFileNumFromWAL(final WAL wal) { final Path walName = ((FSHLog)wal).getCurrentFileName(); + return extractFileNumFromWAL(walName); + } + + @VisibleForTesting + public static long extractFileNumFromWAL(final Path walName) { if (walName == null) { throw new IllegalArgumentException("The WAL path couldn't be null"); } final String[] walPathStrs = walName.toString().split("\\" + WAL_FILE_NAME_DELIMITER); return Long.parseLong(walPathStrs[walPathStrs.length - (isMetaFile(walName) ? 2:1)]); } - + /** * Pattern used to validate a WAL file name * see {@link #validateWALFilename(String)} for description. 
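The new Path-based overload of extractFileNumFromWAL, together with FSHLog.getFilenum(), lets backup code read a WAL's file number (its creation timestamp) without holding an open WAL handle. A small sketch is below; the WAL path is chosen only for illustration.

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.wal.DefaultWALProvider;

public class WalFileNum {
  public static void main(String[] args) {
    // WAL file names end with the file number; the host/port prefix here is a placeholder.
    Path walPath = new Path("/hbase/WALs/rs1%2C16020%2C1/rs1%2C16020%2C1.1470000000000");
    long fileNum = DefaultWALProvider.extractFileNumFromWAL(walPath);
    System.out.println("WAL file number: " + fileNum);
  }
}
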
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/SimpleRSProcedureManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/SimpleRSProcedureManager.java index 7620bbb..cd2efad 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/SimpleRSProcedureManager.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/SimpleRSProcedureManager.java @@ -49,7 +49,7 @@ public class SimpleRSProcedureManager extends RegionServerProcedureManager { private ProcedureMember member; @Override - public void initialize(RegionServerServices rss) throws KeeperException { + public void initialize(RegionServerServices rss) throws IOException { this.rss = rss; ZooKeeperWatcher zkw = rss.getZooKeeper(); this.memberRpcs = new ZKProcedureMemberRpcs(zkw, getProcedureSignature()); diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/BackupProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/BackupProtos.java new file mode 100644 index 0000000..1a7a1ba --- /dev/null +++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/BackupProtos.java @@ -0,0 +1,9143 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: Backup.proto + +package org.apache.hadoop.hbase.protobuf.generated; + +public final class BackupProtos { + private BackupProtos() {} + public static void registerAllExtensions( + com.google.protobuf.ExtensionRegistry registry) { + } + /** + * Protobuf enum {@code hbase.pb.BackupType} + */ + public enum BackupType + implements com.google.protobuf.ProtocolMessageEnum { + /** + * FULL = 0; + */ + FULL(0, 0), + /** + * INCREMENTAL = 1; + */ + INCREMENTAL(1, 1), + ; + + /** + * FULL = 0; + */ + public static final int FULL_VALUE = 0; + /** + * INCREMENTAL = 1; + */ + public static final int INCREMENTAL_VALUE = 1; + + + public final int getNumber() { return value; } + + public static BackupType valueOf(int value) { + switch (value) { + case 0: return FULL; + case 1: return INCREMENTAL; + default: return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap + internalGetValueMap() { + return internalValueMap; + } + private static com.google.protobuf.Internal.EnumLiteMap + internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public BackupType findValueByNumber(int number) { + return BackupType.valueOf(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor + getValueDescriptor() { + return getDescriptor().getValues().get(index); + } + public final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptorForType() { + return getDescriptor(); + } + public static final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.getDescriptor().getEnumTypes().get(0); + } + + private static final BackupType[] VALUES = values(); + + public static BackupType valueOf( + com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "EnumValueDescriptor is not for this type."); + } + return VALUES[desc.getIndex()]; + } + + private final int index; + private final int value; + + private BackupType(int index, int value) { + this.index = index; + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:hbase.pb.BackupType) + } + + public interface BackupImageOrBuilder + extends 
com.google.protobuf.MessageOrBuilder { + + // required string backup_id = 1; + /** + * required string backup_id = 1; + */ + boolean hasBackupId(); + /** + * required string backup_id = 1; + */ + java.lang.String getBackupId(); + /** + * required string backup_id = 1; + */ + com.google.protobuf.ByteString + getBackupIdBytes(); + + // required .hbase.pb.BackupType backup_type = 2; + /** + * required .hbase.pb.BackupType backup_type = 2; + */ + boolean hasBackupType(); + /** + * required .hbase.pb.BackupType backup_type = 2; + */ + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType getBackupType(); + + // required string root_dir = 3; + /** + * required string root_dir = 3; + */ + boolean hasRootDir(); + /** + * required string root_dir = 3; + */ + java.lang.String getRootDir(); + /** + * required string root_dir = 3; + */ + com.google.protobuf.ByteString + getRootDirBytes(); + + // repeated .hbase.pb.TableName table_list = 4; + /** + * repeated .hbase.pb.TableName table_list = 4; + */ + java.util.List + getTableListList(); + /** + * repeated .hbase.pb.TableName table_list = 4; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableList(int index); + /** + * repeated .hbase.pb.TableName table_list = 4; + */ + int getTableListCount(); + /** + * repeated .hbase.pb.TableName table_list = 4; + */ + java.util.List + getTableListOrBuilderList(); + /** + * repeated .hbase.pb.TableName table_list = 4; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableListOrBuilder( + int index); + + // required uint64 start_ts = 5; + /** + * required uint64 start_ts = 5; + */ + boolean hasStartTs(); + /** + * required uint64 start_ts = 5; + */ + long getStartTs(); + + // required uint64 complete_ts = 6; + /** + * required uint64 complete_ts = 6; + */ + boolean hasCompleteTs(); + /** + * required uint64 complete_ts = 6; + */ + long getCompleteTs(); + + // repeated .hbase.pb.BackupImage ancestors = 7; + /** + * repeated .hbase.pb.BackupImage ancestors = 7; + */ + java.util.List + getAncestorsList(); + /** + * repeated .hbase.pb.BackupImage ancestors = 7; + */ + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage getAncestors(int index); + /** + * repeated .hbase.pb.BackupImage ancestors = 7; + */ + int getAncestorsCount(); + /** + * repeated .hbase.pb.BackupImage ancestors = 7; + */ + java.util.List + getAncestorsOrBuilderList(); + /** + * repeated .hbase.pb.BackupImage ancestors = 7; + */ + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImageOrBuilder getAncestorsOrBuilder( + int index); + } + /** + * Protobuf type {@code hbase.pb.BackupImage} + */ + public static final class BackupImage extends + com.google.protobuf.GeneratedMessage + implements BackupImageOrBuilder { + // Use BackupImage.newBuilder() to construct. 
+ private BackupImage(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private BackupImage(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final BackupImage defaultInstance; + public static BackupImage getDefaultInstance() { + return defaultInstance; + } + + public BackupImage getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private BackupImage( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + bitField0_ |= 0x00000001; + backupId_ = input.readBytes(); + break; + } + case 16: { + int rawValue = input.readEnum(); + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType value = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType.valueOf(rawValue); + if (value == null) { + unknownFields.mergeVarintField(2, rawValue); + } else { + bitField0_ |= 0x00000002; + backupType_ = value; + } + break; + } + case 26: { + bitField0_ |= 0x00000004; + rootDir_ = input.readBytes(); + break; + } + case 34: { + if (!((mutable_bitField0_ & 0x00000008) == 0x00000008)) { + tableList_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000008; + } + tableList_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.PARSER, extensionRegistry)); + break; + } + case 40: { + bitField0_ |= 0x00000008; + startTs_ = input.readUInt64(); + break; + } + case 48: { + bitField0_ |= 0x00000010; + completeTs_ = input.readUInt64(); + break; + } + case 58: { + if (!((mutable_bitField0_ & 0x00000040) == 0x00000040)) { + ancestors_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000040; + } + ancestors_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage.PARSER, extensionRegistry)); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000008) == 0x00000008)) { + tableList_ = java.util.Collections.unmodifiableList(tableList_); + } + if (((mutable_bitField0_ & 0x00000040) == 0x00000040)) { + ancestors_ = java.util.Collections.unmodifiableList(ancestors_); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_BackupImage_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return 
org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_BackupImage_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage.class, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public BackupImage parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new BackupImage(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required string backup_id = 1; + public static final int BACKUP_ID_FIELD_NUMBER = 1; + private java.lang.Object backupId_; + /** + * required string backup_id = 1; + */ + public boolean hasBackupId() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string backup_id = 1; + */ + public java.lang.String getBackupId() { + java.lang.Object ref = backupId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + backupId_ = s; + } + return s; + } + } + /** + * required string backup_id = 1; + */ + public com.google.protobuf.ByteString + getBackupIdBytes() { + java.lang.Object ref = backupId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + backupId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + // required .hbase.pb.BackupType backup_type = 2; + public static final int BACKUP_TYPE_FIELD_NUMBER = 2; + private org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType backupType_; + /** + * required .hbase.pb.BackupType backup_type = 2; + */ + public boolean hasBackupType() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required .hbase.pb.BackupType backup_type = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType getBackupType() { + return backupType_; + } + + // required string root_dir = 3; + public static final int ROOT_DIR_FIELD_NUMBER = 3; + private java.lang.Object rootDir_; + /** + * required string root_dir = 3; + */ + public boolean hasRootDir() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * required string root_dir = 3; + */ + public java.lang.String getRootDir() { + java.lang.Object ref = rootDir_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + rootDir_ = s; + } + return s; + } + } + /** + * required string root_dir = 3; + */ + public com.google.protobuf.ByteString + getRootDirBytes() { + java.lang.Object ref = rootDir_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + rootDir_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + // repeated .hbase.pb.TableName table_list = 4; + public static final int TABLE_LIST_FIELD_NUMBER = 4; + 
private java.util.List tableList_; + /** + * repeated .hbase.pb.TableName table_list = 4; + */ + public java.util.List getTableListList() { + return tableList_; + } + /** + * repeated .hbase.pb.TableName table_list = 4; + */ + public java.util.List + getTableListOrBuilderList() { + return tableList_; + } + /** + * repeated .hbase.pb.TableName table_list = 4; + */ + public int getTableListCount() { + return tableList_.size(); + } + /** + * repeated .hbase.pb.TableName table_list = 4; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableList(int index) { + return tableList_.get(index); + } + /** + * repeated .hbase.pb.TableName table_list = 4; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableListOrBuilder( + int index) { + return tableList_.get(index); + } + + // required uint64 start_ts = 5; + public static final int START_TS_FIELD_NUMBER = 5; + private long startTs_; + /** + * required uint64 start_ts = 5; + */ + public boolean hasStartTs() { + return ((bitField0_ & 0x00000008) == 0x00000008); + } + /** + * required uint64 start_ts = 5; + */ + public long getStartTs() { + return startTs_; + } + + // required uint64 complete_ts = 6; + public static final int COMPLETE_TS_FIELD_NUMBER = 6; + private long completeTs_; + /** + * required uint64 complete_ts = 6; + */ + public boolean hasCompleteTs() { + return ((bitField0_ & 0x00000010) == 0x00000010); + } + /** + * required uint64 complete_ts = 6; + */ + public long getCompleteTs() { + return completeTs_; + } + + // repeated .hbase.pb.BackupImage ancestors = 7; + public static final int ANCESTORS_FIELD_NUMBER = 7; + private java.util.List ancestors_; + /** + * repeated .hbase.pb.BackupImage ancestors = 7; + */ + public java.util.List getAncestorsList() { + return ancestors_; + } + /** + * repeated .hbase.pb.BackupImage ancestors = 7; + */ + public java.util.List + getAncestorsOrBuilderList() { + return ancestors_; + } + /** + * repeated .hbase.pb.BackupImage ancestors = 7; + */ + public int getAncestorsCount() { + return ancestors_.size(); + } + /** + * repeated .hbase.pb.BackupImage ancestors = 7; + */ + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage getAncestors(int index) { + return ancestors_.get(index); + } + /** + * repeated .hbase.pb.BackupImage ancestors = 7; + */ + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImageOrBuilder getAncestorsOrBuilder( + int index) { + return ancestors_.get(index); + } + + private void initFields() { + backupId_ = ""; + backupType_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType.FULL; + rootDir_ = ""; + tableList_ = java.util.Collections.emptyList(); + startTs_ = 0L; + completeTs_ = 0L; + ancestors_ = java.util.Collections.emptyList(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasBackupId()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasBackupType()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasRootDir()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasStartTs()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasCompleteTs()) { + memoizedIsInitialized = 0; + return false; + } + for (int i = 0; i < getTableListCount(); i++) { + if (!getTableList(i).isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + for (int i = 0; i < 
getAncestorsCount(); i++) { + if (!getAncestors(i).isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(1, getBackupIdBytes()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeEnum(2, backupType_.getNumber()); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + output.writeBytes(3, getRootDirBytes()); + } + for (int i = 0; i < tableList_.size(); i++) { + output.writeMessage(4, tableList_.get(i)); + } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + output.writeUInt64(5, startTs_); + } + if (((bitField0_ & 0x00000010) == 0x00000010)) { + output.writeUInt64(6, completeTs_); + } + for (int i = 0; i < ancestors_.size(); i++) { + output.writeMessage(7, ancestors_.get(i)); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(1, getBackupIdBytes()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeEnumSize(2, backupType_.getNumber()); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(3, getRootDirBytes()); + } + for (int i = 0; i < tableList_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(4, tableList_.get(i)); + } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt64Size(5, startTs_); + } + if (((bitField0_ & 0x00000010) == 0x00000010)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt64Size(6, completeTs_); + } + for (int i = 0; i < ancestors_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(7, ancestors_.get(i)); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage other = (org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage) obj; + + boolean result = true; + result = result && (hasBackupId() == other.hasBackupId()); + if (hasBackupId()) { + result = result && getBackupId() + .equals(other.getBackupId()); + } + result = result && (hasBackupType() == other.hasBackupType()); + if (hasBackupType()) { + result = result && + (getBackupType() == other.getBackupType()); + } + result = result && (hasRootDir() == other.hasRootDir()); + if (hasRootDir()) { + result = result && getRootDir() + .equals(other.getRootDir()); + } + result = result && getTableListList() + .equals(other.getTableListList()); + result = result && (hasStartTs() == other.hasStartTs()); + if (hasStartTs()) { + result = result 
&& (getStartTs() + == other.getStartTs()); + } + result = result && (hasCompleteTs() == other.hasCompleteTs()); + if (hasCompleteTs()) { + result = result && (getCompleteTs() + == other.getCompleteTs()); + } + result = result && getAncestorsList() + .equals(other.getAncestorsList()); + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasBackupId()) { + hash = (37 * hash) + BACKUP_ID_FIELD_NUMBER; + hash = (53 * hash) + getBackupId().hashCode(); + } + if (hasBackupType()) { + hash = (37 * hash) + BACKUP_TYPE_FIELD_NUMBER; + hash = (53 * hash) + hashEnum(getBackupType()); + } + if (hasRootDir()) { + hash = (37 * hash) + ROOT_DIR_FIELD_NUMBER; + hash = (53 * hash) + getRootDir().hashCode(); + } + if (getTableListCount() > 0) { + hash = (37 * hash) + TABLE_LIST_FIELD_NUMBER; + hash = (53 * hash) + getTableListList().hashCode(); + } + if (hasStartTs()) { + hash = (37 * hash) + START_TS_FIELD_NUMBER; + hash = (53 * hash) + hashLong(getStartTs()); + } + if (hasCompleteTs()) { + hash = (37 * hash) + COMPLETE_TS_FIELD_NUMBER; + hash = (53 * hash) + hashLong(getCompleteTs()); + } + if (getAncestorsCount() > 0) { + hash = (37 * hash) + ANCESTORS_FIELD_NUMBER; + hash = (53 * hash) + getAncestorsList().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + 
return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code hbase.pb.BackupImage} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImageOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_BackupImage_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_BackupImage_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage.class, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getTableListFieldBuilder(); + getAncestorsFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + backupId_ = ""; + bitField0_ = (bitField0_ & ~0x00000001); + backupType_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType.FULL; + bitField0_ = (bitField0_ & ~0x00000002); + rootDir_ = ""; + bitField0_ = (bitField0_ & ~0x00000004); + if (tableListBuilder_ == null) { + tableList_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000008); + } else { + tableListBuilder_.clear(); + } + startTs_ = 0L; + bitField0_ = (bitField0_ & ~0x00000010); + completeTs_ = 0L; + bitField0_ = (bitField0_ & ~0x00000020); + if (ancestorsBuilder_ == null) { + ancestors_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000040); + } else { + ancestorsBuilder_.clear(); + } + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return 
org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_BackupImage_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage build() { + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage result = new org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.backupId_ = backupId_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.backupType_ = backupType_; + if (((from_bitField0_ & 0x00000004) == 0x00000004)) { + to_bitField0_ |= 0x00000004; + } + result.rootDir_ = rootDir_; + if (tableListBuilder_ == null) { + if (((bitField0_ & 0x00000008) == 0x00000008)) { + tableList_ = java.util.Collections.unmodifiableList(tableList_); + bitField0_ = (bitField0_ & ~0x00000008); + } + result.tableList_ = tableList_; + } else { + result.tableList_ = tableListBuilder_.build(); + } + if (((from_bitField0_ & 0x00000010) == 0x00000010)) { + to_bitField0_ |= 0x00000008; + } + result.startTs_ = startTs_; + if (((from_bitField0_ & 0x00000020) == 0x00000020)) { + to_bitField0_ |= 0x00000010; + } + result.completeTs_ = completeTs_; + if (ancestorsBuilder_ == null) { + if (((bitField0_ & 0x00000040) == 0x00000040)) { + ancestors_ = java.util.Collections.unmodifiableList(ancestors_); + bitField0_ = (bitField0_ & ~0x00000040); + } + result.ancestors_ = ancestors_; + } else { + result.ancestors_ = ancestorsBuilder_.build(); + } + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage.getDefaultInstance()) return this; + if (other.hasBackupId()) { + bitField0_ |= 0x00000001; + backupId_ = other.backupId_; + onChanged(); + } + if (other.hasBackupType()) { + setBackupType(other.getBackupType()); + } + if (other.hasRootDir()) { + bitField0_ |= 0x00000004; + rootDir_ = other.rootDir_; + onChanged(); + } + if (tableListBuilder_ == null) { + if (!other.tableList_.isEmpty()) { + if (tableList_.isEmpty()) { + tableList_ = other.tableList_; + bitField0_ = (bitField0_ & ~0x00000008); + } else { + ensureTableListIsMutable(); + tableList_.addAll(other.tableList_); + } + onChanged(); + } + } else { + if (!other.tableList_.isEmpty()) { + if (tableListBuilder_.isEmpty()) { + tableListBuilder_.dispose(); + tableListBuilder_ = null; + tableList_ = other.tableList_; + bitField0_ = (bitField0_ & ~0x00000008); + 
tableListBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? + getTableListFieldBuilder() : null; + } else { + tableListBuilder_.addAllMessages(other.tableList_); + } + } + } + if (other.hasStartTs()) { + setStartTs(other.getStartTs()); + } + if (other.hasCompleteTs()) { + setCompleteTs(other.getCompleteTs()); + } + if (ancestorsBuilder_ == null) { + if (!other.ancestors_.isEmpty()) { + if (ancestors_.isEmpty()) { + ancestors_ = other.ancestors_; + bitField0_ = (bitField0_ & ~0x00000040); + } else { + ensureAncestorsIsMutable(); + ancestors_.addAll(other.ancestors_); + } + onChanged(); + } + } else { + if (!other.ancestors_.isEmpty()) { + if (ancestorsBuilder_.isEmpty()) { + ancestorsBuilder_.dispose(); + ancestorsBuilder_ = null; + ancestors_ = other.ancestors_; + bitField0_ = (bitField0_ & ~0x00000040); + ancestorsBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? + getAncestorsFieldBuilder() : null; + } else { + ancestorsBuilder_.addAllMessages(other.ancestors_); + } + } + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasBackupId()) { + + return false; + } + if (!hasBackupType()) { + + return false; + } + if (!hasRootDir()) { + + return false; + } + if (!hasStartTs()) { + + return false; + } + if (!hasCompleteTs()) { + + return false; + } + for (int i = 0; i < getTableListCount(); i++) { + if (!getTableList(i).isInitialized()) { + + return false; + } + } + for (int i = 0; i < getAncestorsCount(); i++) { + if (!getAncestors(i).isInitialized()) { + + return false; + } + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required string backup_id = 1; + private java.lang.Object backupId_ = ""; + /** + * required string backup_id = 1; + */ + public boolean hasBackupId() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string backup_id = 1; + */ + public java.lang.String getBackupId() { + java.lang.Object ref = backupId_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + backupId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * required string backup_id = 1; + */ + public com.google.protobuf.ByteString + getBackupIdBytes() { + java.lang.Object ref = backupId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + backupId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * required string backup_id = 1; + */ + public Builder setBackupId( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + backupId_ = value; + onChanged(); + return this; + } + /** + * required string backup_id = 1; + */ + public Builder 
clearBackupId() { + bitField0_ = (bitField0_ & ~0x00000001); + backupId_ = getDefaultInstance().getBackupId(); + onChanged(); + return this; + } + /** + * required string backup_id = 1; + */ + public Builder setBackupIdBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + backupId_ = value; + onChanged(); + return this; + } + + // required .hbase.pb.BackupType backup_type = 2; + private org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType backupType_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType.FULL; + /** + * required .hbase.pb.BackupType backup_type = 2; + */ + public boolean hasBackupType() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required .hbase.pb.BackupType backup_type = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType getBackupType() { + return backupType_; + } + /** + * required .hbase.pb.BackupType backup_type = 2; + */ + public Builder setBackupType(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000002; + backupType_ = value; + onChanged(); + return this; + } + /** + * required .hbase.pb.BackupType backup_type = 2; + */ + public Builder clearBackupType() { + bitField0_ = (bitField0_ & ~0x00000002); + backupType_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType.FULL; + onChanged(); + return this; + } + + // required string root_dir = 3; + private java.lang.Object rootDir_ = ""; + /** + * required string root_dir = 3; + */ + public boolean hasRootDir() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * required string root_dir = 3; + */ + public java.lang.String getRootDir() { + java.lang.Object ref = rootDir_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + rootDir_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * required string root_dir = 3; + */ + public com.google.protobuf.ByteString + getRootDirBytes() { + java.lang.Object ref = rootDir_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + rootDir_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * required string root_dir = 3; + */ + public Builder setRootDir( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000004; + rootDir_ = value; + onChanged(); + return this; + } + /** + * required string root_dir = 3; + */ + public Builder clearRootDir() { + bitField0_ = (bitField0_ & ~0x00000004); + rootDir_ = getDefaultInstance().getRootDir(); + onChanged(); + return this; + } + /** + * required string root_dir = 3; + */ + public Builder setRootDirBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000004; + rootDir_ = value; + onChanged(); + return this; + } + + // repeated .hbase.pb.TableName table_list = 4; + private java.util.List tableList_ = + java.util.Collections.emptyList(); + private void ensureTableListIsMutable() { + if (!((bitField0_ & 0x00000008) == 0x00000008)) { + tableList_ = new java.util.ArrayList(tableList_); + bitField0_ |= 0x00000008; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> tableListBuilder_; + + /** + * repeated .hbase.pb.TableName table_list = 4; + */ + public java.util.List getTableListList() { + if (tableListBuilder_ == null) { + return java.util.Collections.unmodifiableList(tableList_); + } else { + return tableListBuilder_.getMessageList(); + } + } + /** + * repeated .hbase.pb.TableName table_list = 4; + */ + public int getTableListCount() { + if (tableListBuilder_ == null) { + return tableList_.size(); + } else { + return tableListBuilder_.getCount(); + } + } + /** + * repeated .hbase.pb.TableName table_list = 4; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableList(int index) { + if (tableListBuilder_ == null) { + return tableList_.get(index); + } else { + return tableListBuilder_.getMessage(index); + } + } + /** + * repeated .hbase.pb.TableName table_list = 4; + */ + public Builder setTableList( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { + if (tableListBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureTableListIsMutable(); + tableList_.set(index, value); + onChanged(); + } else { + tableListBuilder_.setMessage(index, value); + } + return this; + } + /** + * repeated .hbase.pb.TableName table_list = 4; + */ + public Builder setTableList( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) { + if (tableListBuilder_ == null) { + ensureTableListIsMutable(); + tableList_.set(index, builderForValue.build()); + onChanged(); + } else { + tableListBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.TableName table_list = 4; + */ + public Builder addTableList(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { + if (tableListBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureTableListIsMutable(); + tableList_.add(value); + onChanged(); + } else { + tableListBuilder_.addMessage(value); + } + return this; + } + /** + * repeated .hbase.pb.TableName table_list = 4; + */ + public Builder addTableList( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { + if (tableListBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureTableListIsMutable(); + tableList_.add(index, value); + onChanged(); + } else { + tableListBuilder_.addMessage(index, value); + } + return this; + } + /** + * repeated .hbase.pb.TableName table_list = 4; + */ + public Builder addTableList( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) { + if (tableListBuilder_ == null) { + ensureTableListIsMutable(); + tableList_.add(builderForValue.build()); + onChanged(); + } else { + tableListBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.TableName table_list = 4; + */ + public Builder addTableList( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) { + if (tableListBuilder_ == null) { + ensureTableListIsMutable(); + tableList_.add(index, builderForValue.build()); + onChanged(); + } else { + tableListBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated 
.hbase.pb.TableName table_list = 4; + */ + public Builder addAllTableList( + java.lang.Iterable values) { + if (tableListBuilder_ == null) { + ensureTableListIsMutable(); + super.addAll(values, tableList_); + onChanged(); + } else { + tableListBuilder_.addAllMessages(values); + } + return this; + } + /** + * repeated .hbase.pb.TableName table_list = 4; + */ + public Builder clearTableList() { + if (tableListBuilder_ == null) { + tableList_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000008); + onChanged(); + } else { + tableListBuilder_.clear(); + } + return this; + } + /** + * repeated .hbase.pb.TableName table_list = 4; + */ + public Builder removeTableList(int index) { + if (tableListBuilder_ == null) { + ensureTableListIsMutable(); + tableList_.remove(index); + onChanged(); + } else { + tableListBuilder_.remove(index); + } + return this; + } + /** + * repeated .hbase.pb.TableName table_list = 4; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder getTableListBuilder( + int index) { + return getTableListFieldBuilder().getBuilder(index); + } + /** + * repeated .hbase.pb.TableName table_list = 4; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableListOrBuilder( + int index) { + if (tableListBuilder_ == null) { + return tableList_.get(index); } else { + return tableListBuilder_.getMessageOrBuilder(index); + } + } + /** + * repeated .hbase.pb.TableName table_list = 4; + */ + public java.util.List + getTableListOrBuilderList() { + if (tableListBuilder_ != null) { + return tableListBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(tableList_); + } + } + /** + * repeated .hbase.pb.TableName table_list = 4; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder addTableListBuilder() { + return getTableListFieldBuilder().addBuilder( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance()); + } + /** + * repeated .hbase.pb.TableName table_list = 4; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder addTableListBuilder( + int index) { + return getTableListFieldBuilder().addBuilder( + index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance()); + } + /** + * repeated .hbase.pb.TableName table_list = 4; + */ + public java.util.List + getTableListBuilderList() { + return getTableListFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> + getTableListFieldBuilder() { + if (tableListBuilder_ == null) { + tableListBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder>( + tableList_, + ((bitField0_ & 0x00000008) == 0x00000008), + getParentForChildren(), + isClean()); + tableList_ = null; + } + return tableListBuilder_; + } + + // required uint64 start_ts = 5; + private long startTs_ ; + /** + * required uint64 start_ts = 5; + */ + public boolean hasStartTs() { + return ((bitField0_ & 0x00000010) == 0x00000010); + } + /** + * required uint64 start_ts = 5; + */ + 
public long getStartTs() { + return startTs_; + } + /** + * required uint64 start_ts = 5; + */ + public Builder setStartTs(long value) { + bitField0_ |= 0x00000010; + startTs_ = value; + onChanged(); + return this; + } + /** + * required uint64 start_ts = 5; + */ + public Builder clearStartTs() { + bitField0_ = (bitField0_ & ~0x00000010); + startTs_ = 0L; + onChanged(); + return this; + } + + // required uint64 complete_ts = 6; + private long completeTs_ ; + /** + * required uint64 complete_ts = 6; + */ + public boolean hasCompleteTs() { + return ((bitField0_ & 0x00000020) == 0x00000020); + } + /** + * required uint64 complete_ts = 6; + */ + public long getCompleteTs() { + return completeTs_; + } + /** + * required uint64 complete_ts = 6; + */ + public Builder setCompleteTs(long value) { + bitField0_ |= 0x00000020; + completeTs_ = value; + onChanged(); + return this; + } + /** + * required uint64 complete_ts = 6; + */ + public Builder clearCompleteTs() { + bitField0_ = (bitField0_ & ~0x00000020); + completeTs_ = 0L; + onChanged(); + return this; + } + + // repeated .hbase.pb.BackupImage ancestors = 7; + private java.util.List ancestors_ = + java.util.Collections.emptyList(); + private void ensureAncestorsIsMutable() { + if (!((bitField0_ & 0x00000040) == 0x00000040)) { + ancestors_ = new java.util.ArrayList(ancestors_); + bitField0_ |= 0x00000040; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage.Builder, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImageOrBuilder> ancestorsBuilder_; + + /** + * repeated .hbase.pb.BackupImage ancestors = 7; + */ + public java.util.List getAncestorsList() { + if (ancestorsBuilder_ == null) { + return java.util.Collections.unmodifiableList(ancestors_); + } else { + return ancestorsBuilder_.getMessageList(); + } + } + /** + * repeated .hbase.pb.BackupImage ancestors = 7; + */ + public int getAncestorsCount() { + if (ancestorsBuilder_ == null) { + return ancestors_.size(); + } else { + return ancestorsBuilder_.getCount(); + } + } + /** + * repeated .hbase.pb.BackupImage ancestors = 7; + */ + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage getAncestors(int index) { + if (ancestorsBuilder_ == null) { + return ancestors_.get(index); + } else { + return ancestorsBuilder_.getMessage(index); + } + } + /** + * repeated .hbase.pb.BackupImage ancestors = 7; + */ + public Builder setAncestors( + int index, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage value) { + if (ancestorsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureAncestorsIsMutable(); + ancestors_.set(index, value); + onChanged(); + } else { + ancestorsBuilder_.setMessage(index, value); + } + return this; + } + /** + * repeated .hbase.pb.BackupImage ancestors = 7; + */ + public Builder setAncestors( + int index, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage.Builder builderForValue) { + if (ancestorsBuilder_ == null) { + ensureAncestorsIsMutable(); + ancestors_.set(index, builderForValue.build()); + onChanged(); + } else { + ancestorsBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.BackupImage ancestors = 7; + */ + public Builder addAncestors(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage value) { + if (ancestorsBuilder_ == null) { + if (value == null) { + 
throw new NullPointerException(); + } + ensureAncestorsIsMutable(); + ancestors_.add(value); + onChanged(); + } else { + ancestorsBuilder_.addMessage(value); + } + return this; + } + /** + * repeated .hbase.pb.BackupImage ancestors = 7; + */ + public Builder addAncestors( + int index, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage value) { + if (ancestorsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureAncestorsIsMutable(); + ancestors_.add(index, value); + onChanged(); + } else { + ancestorsBuilder_.addMessage(index, value); + } + return this; + } + /** + * repeated .hbase.pb.BackupImage ancestors = 7; + */ + public Builder addAncestors( + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage.Builder builderForValue) { + if (ancestorsBuilder_ == null) { + ensureAncestorsIsMutable(); + ancestors_.add(builderForValue.build()); + onChanged(); + } else { + ancestorsBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.BackupImage ancestors = 7; + */ + public Builder addAncestors( + int index, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage.Builder builderForValue) { + if (ancestorsBuilder_ == null) { + ensureAncestorsIsMutable(); + ancestors_.add(index, builderForValue.build()); + onChanged(); + } else { + ancestorsBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.BackupImage ancestors = 7; + */ + public Builder addAllAncestors( + java.lang.Iterable values) { + if (ancestorsBuilder_ == null) { + ensureAncestorsIsMutable(); + super.addAll(values, ancestors_); + onChanged(); + } else { + ancestorsBuilder_.addAllMessages(values); + } + return this; + } + /** + * repeated .hbase.pb.BackupImage ancestors = 7; + */ + public Builder clearAncestors() { + if (ancestorsBuilder_ == null) { + ancestors_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000040); + onChanged(); + } else { + ancestorsBuilder_.clear(); + } + return this; + } + /** + * repeated .hbase.pb.BackupImage ancestors = 7; + */ + public Builder removeAncestors(int index) { + if (ancestorsBuilder_ == null) { + ensureAncestorsIsMutable(); + ancestors_.remove(index); + onChanged(); + } else { + ancestorsBuilder_.remove(index); + } + return this; + } + /** + * repeated .hbase.pb.BackupImage ancestors = 7; + */ + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage.Builder getAncestorsBuilder( + int index) { + return getAncestorsFieldBuilder().getBuilder(index); + } + /** + * repeated .hbase.pb.BackupImage ancestors = 7; + */ + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImageOrBuilder getAncestorsOrBuilder( + int index) { + if (ancestorsBuilder_ == null) { + return ancestors_.get(index); } else { + return ancestorsBuilder_.getMessageOrBuilder(index); + } + } + /** + * repeated .hbase.pb.BackupImage ancestors = 7; + */ + public java.util.List + getAncestorsOrBuilderList() { + if (ancestorsBuilder_ != null) { + return ancestorsBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(ancestors_); + } + } + /** + * repeated .hbase.pb.BackupImage ancestors = 7; + */ + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage.Builder addAncestorsBuilder() { + return getAncestorsFieldBuilder().addBuilder( + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage.getDefaultInstance()); + } + /** + * repeated 
.hbase.pb.BackupImage ancestors = 7; + */ + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage.Builder addAncestorsBuilder( + int index) { + return getAncestorsFieldBuilder().addBuilder( + index, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage.getDefaultInstance()); + } + /** + * repeated .hbase.pb.BackupImage ancestors = 7; + */ + public java.util.List + getAncestorsBuilderList() { + return getAncestorsFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage.Builder, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImageOrBuilder> + getAncestorsFieldBuilder() { + if (ancestorsBuilder_ == null) { + ancestorsBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage.Builder, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImageOrBuilder>( + ancestors_, + ((bitField0_ & 0x00000040) == 0x00000040), + getParentForChildren(), + isClean()); + ancestors_ = null; + } + return ancestorsBuilder_; + } + + // @@protoc_insertion_point(builder_scope:hbase.pb.BackupImage) + } + + static { + defaultInstance = new BackupImage(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:hbase.pb.BackupImage) + } + + public interface ServerTimestampOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required string server = 1; + /** + * required string server = 1; + */ + boolean hasServer(); + /** + * required string server = 1; + */ + java.lang.String getServer(); + /** + * required string server = 1; + */ + com.google.protobuf.ByteString + getServerBytes(); + + // required uint64 timestamp = 2; + /** + * required uint64 timestamp = 2; + */ + boolean hasTimestamp(); + /** + * required uint64 timestamp = 2; + */ + long getTimestamp(); + } + /** + * Protobuf type {@code hbase.pb.ServerTimestamp} + */ + public static final class ServerTimestamp extends + com.google.protobuf.GeneratedMessage + implements ServerTimestampOrBuilder { + // Use ServerTimestamp.newBuilder() to construct. 
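The BackupImage builder generated above follows the standard protobuf generated-builder pattern: setters for the five required fields (backup_id, backup_type, root_dir, start_ts, complete_ts) and add/addAll accessors for the repeated table_list and ancestors fields. A minimal usage sketch; the HBaseProtos.TableName setters (setNamespace/setQualifier taking UTF-8 bytes) are assumed from the existing TableName message, which is not part of this patch:

    org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName table =
        org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.newBuilder()
            .setNamespace(com.google.protobuf.ByteString.copyFromUtf8("default"))  // assumed bytes setter
            .setQualifier(com.google.protobuf.ByteString.copyFromUtf8("t1"))       // assumed bytes setter
            .build();
    // build() throws if any required field is missing, per Builder.isInitialized() above.
    org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage image =
        org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage.newBuilder()
            .setBackupId("backup_001")                      // illustrative backup id
            .setBackupType(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType.FULL)
            .setRootDir("hdfs://host:8020/backup")          // illustrative backup root
            .addTableList(table)
            .setStartTs(System.currentTimeMillis())
            .setCompleteTs(System.currentTimeMillis())
            .build();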
+ private ServerTimestamp(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private ServerTimestamp(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final ServerTimestamp defaultInstance; + public static ServerTimestamp getDefaultInstance() { + return defaultInstance; + } + + public ServerTimestamp getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private ServerTimestamp( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + bitField0_ |= 0x00000001; + server_ = input.readBytes(); + break; + } + case 16: { + bitField0_ |= 0x00000002; + timestamp_ = input.readUInt64(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_ServerTimestamp_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_ServerTimestamp_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp.class, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public ServerTimestamp parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new ServerTimestamp(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required string server = 1; + public static final int SERVER_FIELD_NUMBER = 1; + private java.lang.Object server_; + /** + * required string server = 1; + */ + public boolean hasServer() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string server = 1; + */ + public java.lang.String getServer() { + java.lang.Object ref = server_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = 
bs.toStringUtf8(); + if (bs.isValidUtf8()) { + server_ = s; + } + return s; + } + } + /** + * required string server = 1; + */ + public com.google.protobuf.ByteString + getServerBytes() { + java.lang.Object ref = server_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + server_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + // required uint64 timestamp = 2; + public static final int TIMESTAMP_FIELD_NUMBER = 2; + private long timestamp_; + /** + * required uint64 timestamp = 2; + */ + public boolean hasTimestamp() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required uint64 timestamp = 2; + */ + public long getTimestamp() { + return timestamp_; + } + + private void initFields() { + server_ = ""; + timestamp_ = 0L; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasServer()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasTimestamp()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(1, getServerBytes()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeUInt64(2, timestamp_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(1, getServerBytes()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt64Size(2, timestamp_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp other = (org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp) obj; + + boolean result = true; + result = result && (hasServer() == other.hasServer()); + if (hasServer()) { + result = result && getServer() + .equals(other.getServer()); + } + result = result && (hasTimestamp() == other.hasTimestamp()); + if (hasTimestamp()) { + result = result && (getTimestamp() + == other.getTimestamp()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasServer()) { + hash = (37 * hash) + SERVER_FIELD_NUMBER; + hash = (53 * hash) + getServer().hashCode(); + } + if 
(hasTimestamp()) { + hash = (37 * hash) + TIMESTAMP_FIELD_NUMBER; + hash = (53 * hash) + hashLong(getTimestamp()); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code hbase.pb.ServerTimestamp} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements 
org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestampOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_ServerTimestamp_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_ServerTimestamp_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp.class, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + server_ = ""; + bitField0_ = (bitField0_ & ~0x00000001); + timestamp_ = 0L; + bitField0_ = (bitField0_ & ~0x00000002); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_ServerTimestamp_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp build() { + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp result = new org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.server_ = server_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.timestamp_ = timestamp_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp.getDefaultInstance()) return this; + if (other.hasServer()) { + bitField0_ |= 0x00000001; + server_ = other.server_; + onChanged(); + } + if (other.hasTimestamp()) { + 
setTimestamp(other.getTimestamp()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasServer()) { + + return false; + } + if (!hasTimestamp()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required string server = 1; + private java.lang.Object server_ = ""; + /** + * required string server = 1; + */ + public boolean hasServer() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string server = 1; + */ + public java.lang.String getServer() { + java.lang.Object ref = server_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + server_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * required string server = 1; + */ + public com.google.protobuf.ByteString + getServerBytes() { + java.lang.Object ref = server_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + server_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * required string server = 1; + */ + public Builder setServer( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + server_ = value; + onChanged(); + return this; + } + /** + * required string server = 1; + */ + public Builder clearServer() { + bitField0_ = (bitField0_ & ~0x00000001); + server_ = getDefaultInstance().getServer(); + onChanged(); + return this; + } + /** + * required string server = 1; + */ + public Builder setServerBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + server_ = value; + onChanged(); + return this; + } + + // required uint64 timestamp = 2; + private long timestamp_ ; + /** + * required uint64 timestamp = 2; + */ + public boolean hasTimestamp() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required uint64 timestamp = 2; + */ + public long getTimestamp() { + return timestamp_; + } + /** + * required uint64 timestamp = 2; + */ + public Builder setTimestamp(long value) { + bitField0_ |= 0x00000002; + timestamp_ = value; + onChanged(); + return this; + } + /** + * required uint64 timestamp = 2; + */ + public Builder clearTimestamp() { + bitField0_ = (bitField0_ & ~0x00000002); + timestamp_ = 0L; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:hbase.pb.ServerTimestamp) + } + + static { + defaultInstance = new ServerTimestamp(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:hbase.pb.ServerTimestamp) + } + + public interface TableServerTimestampOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required 
.hbase.pb.TableName table = 1; + /** + * required .hbase.pb.TableName table = 1; + */ + boolean hasTable(); + /** + * required .hbase.pb.TableName table = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTable(); + /** + * required .hbase.pb.TableName table = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableOrBuilder(); + + // repeated .hbase.pb.ServerTimestamp server_timestamp = 2; + /** + * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; + */ + java.util.List + getServerTimestampList(); + /** + * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; + */ + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp getServerTimestamp(int index); + /** + * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; + */ + int getServerTimestampCount(); + /** + * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; + */ + java.util.List + getServerTimestampOrBuilderList(); + /** + * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; + */ + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestampOrBuilder getServerTimestampOrBuilder( + int index); + } + /** + * Protobuf type {@code hbase.pb.TableServerTimestamp} + */ + public static final class TableServerTimestamp extends + com.google.protobuf.GeneratedMessage + implements TableServerTimestampOrBuilder { + // Use TableServerTimestamp.newBuilder() to construct. + private TableServerTimestamp(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private TableServerTimestamp(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final TableServerTimestamp defaultInstance; + public static TableServerTimestamp getDefaultInstance() { + return defaultInstance; + } + + public TableServerTimestamp getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private TableServerTimestamp( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder subBuilder = null; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + subBuilder = table_.toBuilder(); + } + table_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(table_); + table_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000001; + break; + } + case 18: { + if (!((mutable_bitField0_ & 0x00000002) == 0x00000002)) { + serverTimestamp_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000002; + } + serverTimestamp_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp.PARSER, 
extensionRegistry)); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000002) == 0x00000002)) { + serverTimestamp_ = java.util.Collections.unmodifiableList(serverTimestamp_); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_TableServerTimestamp_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_TableServerTimestamp_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp.class, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public TableServerTimestamp parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new TableServerTimestamp(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required .hbase.pb.TableName table = 1; + public static final int TABLE_FIELD_NUMBER = 1; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName table_; + /** + * required .hbase.pb.TableName table = 1; + */ + public boolean hasTable() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .hbase.pb.TableName table = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTable() { + return table_; + } + /** + * required .hbase.pb.TableName table = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableOrBuilder() { + return table_; + } + + // repeated .hbase.pb.ServerTimestamp server_timestamp = 2; + public static final int SERVER_TIMESTAMP_FIELD_NUMBER = 2; + private java.util.List serverTimestamp_; + /** + * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; + */ + public java.util.List getServerTimestampList() { + return serverTimestamp_; + } + /** + * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; + */ + public java.util.List + getServerTimestampOrBuilderList() { + return serverTimestamp_; + } + /** + * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; + */ + public int getServerTimestampCount() { + return serverTimestamp_.size(); + } + /** + * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp getServerTimestamp(int index) { + return serverTimestamp_.get(index); + } + /** + * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestampOrBuilder getServerTimestampOrBuilder( + int index) { + return serverTimestamp_.get(index); + } + + private void initFields() { + table_ = 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); + serverTimestamp_ = java.util.Collections.emptyList(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasTable()) { + memoizedIsInitialized = 0; + return false; + } + if (!getTable().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + for (int i = 0; i < getServerTimestampCount(); i++) { + if (!getServerTimestamp(i).isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeMessage(1, table_); + } + for (int i = 0; i < serverTimestamp_.size(); i++) { + output.writeMessage(2, serverTimestamp_.get(i)); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(1, table_); + } + for (int i = 0; i < serverTimestamp_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(2, serverTimestamp_.get(i)); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp other = (org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp) obj; + + boolean result = true; + result = result && (hasTable() == other.hasTable()); + if (hasTable()) { + result = result && getTable() + .equals(other.getTable()); + } + result = result && getServerTimestampList() + .equals(other.getServerTimestampList()); + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasTable()) { + hash = (37 * hash) + TABLE_FIELD_NUMBER; + hash = (53 * hash) + getTable().hashCode(); + } + if (getServerTimestampCount() > 0) { + hash = (37 * hash) + SERVER_TIMESTAMP_FIELD_NUMBER; + hash = (53 * hash) + getServerTimestampList().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp parseFrom( + 
com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code hbase.pb.TableServerTimestamp} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestampOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_TableServerTimestamp_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_TableServerTimestamp_fieldAccessorTable + .ensureFieldAccessorsInitialized( + 
org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp.class, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getTableFieldBuilder(); + getServerTimestampFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + if (tableBuilder_ == null) { + table_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); + } else { + tableBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + if (serverTimestampBuilder_ == null) { + serverTimestamp_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000002); + } else { + serverTimestampBuilder_.clear(); + } + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_TableServerTimestamp_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp build() { + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp result = new org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + if (tableBuilder_ == null) { + result.table_ = table_; + } else { + result.table_ = tableBuilder_.build(); + } + if (serverTimestampBuilder_ == null) { + if (((bitField0_ & 0x00000002) == 0x00000002)) { + serverTimestamp_ = java.util.Collections.unmodifiableList(serverTimestamp_); + bitField0_ = (bitField0_ & ~0x00000002); + } + result.serverTimestamp_ = serverTimestamp_; + } else { + result.serverTimestamp_ = serverTimestampBuilder_.build(); + } + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp other) { + if (other == 
org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp.getDefaultInstance()) return this; + if (other.hasTable()) { + mergeTable(other.getTable()); + } + if (serverTimestampBuilder_ == null) { + if (!other.serverTimestamp_.isEmpty()) { + if (serverTimestamp_.isEmpty()) { + serverTimestamp_ = other.serverTimestamp_; + bitField0_ = (bitField0_ & ~0x00000002); + } else { + ensureServerTimestampIsMutable(); + serverTimestamp_.addAll(other.serverTimestamp_); + } + onChanged(); + } + } else { + if (!other.serverTimestamp_.isEmpty()) { + if (serverTimestampBuilder_.isEmpty()) { + serverTimestampBuilder_.dispose(); + serverTimestampBuilder_ = null; + serverTimestamp_ = other.serverTimestamp_; + bitField0_ = (bitField0_ & ~0x00000002); + serverTimestampBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? + getServerTimestampFieldBuilder() : null; + } else { + serverTimestampBuilder_.addAllMessages(other.serverTimestamp_); + } + } + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasTable()) { + + return false; + } + if (!getTable().isInitialized()) { + + return false; + } + for (int i = 0; i < getServerTimestampCount(); i++) { + if (!getServerTimestamp(i).isInitialized()) { + + return false; + } + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required .hbase.pb.TableName table = 1; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName table_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> tableBuilder_; + /** + * required .hbase.pb.TableName table = 1; + */ + public boolean hasTable() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .hbase.pb.TableName table = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTable() { + if (tableBuilder_ == null) { + return table_; + } else { + return tableBuilder_.getMessage(); + } + } + /** + * required .hbase.pb.TableName table = 1; + */ + public Builder setTable(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { + if (tableBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + table_ = value; + onChanged(); + } else { + tableBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .hbase.pb.TableName table = 1; + */ + public Builder setTable( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) { + if (tableBuilder_ == null) { + table_ = builderForValue.build(); + onChanged(); + } 
else { + tableBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .hbase.pb.TableName table = 1; + */ + public Builder mergeTable(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { + if (tableBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001) && + table_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance()) { + table_ = + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.newBuilder(table_).mergeFrom(value).buildPartial(); + } else { + table_ = value; + } + onChanged(); + } else { + tableBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .hbase.pb.TableName table = 1; + */ + public Builder clearTable() { + if (tableBuilder_ == null) { + table_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); + onChanged(); + } else { + tableBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + /** + * required .hbase.pb.TableName table = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder getTableBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return getTableFieldBuilder().getBuilder(); + } + /** + * required .hbase.pb.TableName table = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableOrBuilder() { + if (tableBuilder_ != null) { + return tableBuilder_.getMessageOrBuilder(); + } else { + return table_; + } + } + /** + * required .hbase.pb.TableName table = 1; + */ + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> + getTableFieldBuilder() { + if (tableBuilder_ == null) { + tableBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder>( + table_, + getParentForChildren(), + isClean()); + table_ = null; + } + return tableBuilder_; + } + + // repeated .hbase.pb.ServerTimestamp server_timestamp = 2; + private java.util.List serverTimestamp_ = + java.util.Collections.emptyList(); + private void ensureServerTimestampIsMutable() { + if (!((bitField0_ & 0x00000002) == 0x00000002)) { + serverTimestamp_ = new java.util.ArrayList(serverTimestamp_); + bitField0_ |= 0x00000002; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp.Builder, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestampOrBuilder> serverTimestampBuilder_; + + /** + * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; + */ + public java.util.List getServerTimestampList() { + if (serverTimestampBuilder_ == null) { + return java.util.Collections.unmodifiableList(serverTimestamp_); + } else { + return serverTimestampBuilder_.getMessageList(); + } + } + /** + * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; + */ + public int getServerTimestampCount() { + if (serverTimestampBuilder_ == null) { + return serverTimestamp_.size(); + } else { + return serverTimestampBuilder_.getCount(); + } 
+ } + /** + * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp getServerTimestamp(int index) { + if (serverTimestampBuilder_ == null) { + return serverTimestamp_.get(index); + } else { + return serverTimestampBuilder_.getMessage(index); + } + } + /** + * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; + */ + public Builder setServerTimestamp( + int index, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp value) { + if (serverTimestampBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureServerTimestampIsMutable(); + serverTimestamp_.set(index, value); + onChanged(); + } else { + serverTimestampBuilder_.setMessage(index, value); + } + return this; + } + /** + * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; + */ + public Builder setServerTimestamp( + int index, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp.Builder builderForValue) { + if (serverTimestampBuilder_ == null) { + ensureServerTimestampIsMutable(); + serverTimestamp_.set(index, builderForValue.build()); + onChanged(); + } else { + serverTimestampBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; + */ + public Builder addServerTimestamp(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp value) { + if (serverTimestampBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureServerTimestampIsMutable(); + serverTimestamp_.add(value); + onChanged(); + } else { + serverTimestampBuilder_.addMessage(value); + } + return this; + } + /** + * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; + */ + public Builder addServerTimestamp( + int index, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp value) { + if (serverTimestampBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureServerTimestampIsMutable(); + serverTimestamp_.add(index, value); + onChanged(); + } else { + serverTimestampBuilder_.addMessage(index, value); + } + return this; + } + /** + * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; + */ + public Builder addServerTimestamp( + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp.Builder builderForValue) { + if (serverTimestampBuilder_ == null) { + ensureServerTimestampIsMutable(); + serverTimestamp_.add(builderForValue.build()); + onChanged(); + } else { + serverTimestampBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; + */ + public Builder addServerTimestamp( + int index, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp.Builder builderForValue) { + if (serverTimestampBuilder_ == null) { + ensureServerTimestampIsMutable(); + serverTimestamp_.add(index, builderForValue.build()); + onChanged(); + } else { + serverTimestampBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; + */ + public Builder addAllServerTimestamp( + java.lang.Iterable values) { + if (serverTimestampBuilder_ == null) { + ensureServerTimestampIsMutable(); + super.addAll(values, serverTimestamp_); + onChanged(); + } else { + serverTimestampBuilder_.addAllMessages(values); + } + return this; + } + /** + * repeated 
.hbase.pb.ServerTimestamp server_timestamp = 2; + */ + public Builder clearServerTimestamp() { + if (serverTimestampBuilder_ == null) { + serverTimestamp_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + } else { + serverTimestampBuilder_.clear(); + } + return this; + } + /** + * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; + */ + public Builder removeServerTimestamp(int index) { + if (serverTimestampBuilder_ == null) { + ensureServerTimestampIsMutable(); + serverTimestamp_.remove(index); + onChanged(); + } else { + serverTimestampBuilder_.remove(index); + } + return this; + } + /** + * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp.Builder getServerTimestampBuilder( + int index) { + return getServerTimestampFieldBuilder().getBuilder(index); + } + /** + * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestampOrBuilder getServerTimestampOrBuilder( + int index) { + if (serverTimestampBuilder_ == null) { + return serverTimestamp_.get(index); } else { + return serverTimestampBuilder_.getMessageOrBuilder(index); + } + } + /** + * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; + */ + public java.util.List + getServerTimestampOrBuilderList() { + if (serverTimestampBuilder_ != null) { + return serverTimestampBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(serverTimestamp_); + } + } + /** + * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp.Builder addServerTimestampBuilder() { + return getServerTimestampFieldBuilder().addBuilder( + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp.getDefaultInstance()); + } + /** + * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp.Builder addServerTimestampBuilder( + int index) { + return getServerTimestampFieldBuilder().addBuilder( + index, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp.getDefaultInstance()); + } + /** + * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; + */ + public java.util.List + getServerTimestampBuilderList() { + return getServerTimestampFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp.Builder, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestampOrBuilder> + getServerTimestampFieldBuilder() { + if (serverTimestampBuilder_ == null) { + serverTimestampBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp.Builder, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestampOrBuilder>( + serverTimestamp_, + ((bitField0_ & 0x00000002) == 0x00000002), + getParentForChildren(), + isClean()); + serverTimestamp_ = null; + } + return serverTimestampBuilder_; + } + + // @@protoc_insertion_point(builder_scope:hbase.pb.TableServerTimestamp) + } + + static { + defaultInstance = new TableServerTimestamp(true); + defaultInstance.initFields(); + } + + // 
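For context only, here is a minimal sketch (not part of the generated file or of this patch) of how caller code might round-trip the TableServerTimestamp message whose Builder is defined above. It assumes the `table` and `st` arguments are fully-initialized HBaseProtos.TableName and BackupProtos.ServerTimestamp instances built elsewhere; only methods visible in the generated API above, plus the standard protobuf toByteArray(), are used.

  import com.google.protobuf.InvalidProtocolBufferException;
  import org.apache.hadoop.hbase.protobuf.generated.BackupProtos;
  import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;

  // Build, serialize and re-parse a TableServerTimestamp; both arguments are
  // assumed to already satisfy their own required fields.
  static BackupProtos.TableServerTimestamp roundTrip(
      HBaseProtos.TableName table, BackupProtos.ServerTimestamp st)
      throws InvalidProtocolBufferException {
    BackupProtos.TableServerTimestamp tst = BackupProtos.TableServerTimestamp.newBuilder()
        .setTable(table)            // required .hbase.pb.TableName table = 1
        .addServerTimestamp(st)     // repeated .hbase.pb.ServerTimestamp server_timestamp = 2
        .build();                   // throws if a required field is missing
    byte[] bytes = tst.toByteArray();
    return BackupProtos.TableServerTimestamp.parseFrom(bytes);
  }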
@@protoc_insertion_point(class_scope:hbase.pb.TableServerTimestamp) + } + + public interface BackupManifestOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required string version = 1; + /** + * required string version = 1; + */ + boolean hasVersion(); + /** + * required string version = 1; + */ + java.lang.String getVersion(); + /** + * required string version = 1; + */ + com.google.protobuf.ByteString + getVersionBytes(); + + // required string backup_id = 2; + /** + * required string backup_id = 2; + */ + boolean hasBackupId(); + /** + * required string backup_id = 2; + */ + java.lang.String getBackupId(); + /** + * required string backup_id = 2; + */ + com.google.protobuf.ByteString + getBackupIdBytes(); + + // required .hbase.pb.BackupType type = 3; + /** + * required .hbase.pb.BackupType type = 3; + */ + boolean hasType(); + /** + * required .hbase.pb.BackupType type = 3; + */ + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType getType(); + + // repeated .hbase.pb.TableName table_list = 4; + /** + * repeated .hbase.pb.TableName table_list = 4; + */ + java.util.List + getTableListList(); + /** + * repeated .hbase.pb.TableName table_list = 4; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableList(int index); + /** + * repeated .hbase.pb.TableName table_list = 4; + */ + int getTableListCount(); + /** + * repeated .hbase.pb.TableName table_list = 4; + */ + java.util.List + getTableListOrBuilderList(); + /** + * repeated .hbase.pb.TableName table_list = 4; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableListOrBuilder( + int index); + + // required uint64 start_ts = 5; + /** + * required uint64 start_ts = 5; + */ + boolean hasStartTs(); + /** + * required uint64 start_ts = 5; + */ + long getStartTs(); + + // required uint64 complete_ts = 6; + /** + * required uint64 complete_ts = 6; + */ + boolean hasCompleteTs(); + /** + * required uint64 complete_ts = 6; + */ + long getCompleteTs(); + + // required int64 total_bytes = 7; + /** + * required int64 total_bytes = 7; + */ + boolean hasTotalBytes(); + /** + * required int64 total_bytes = 7; + */ + long getTotalBytes(); + + // optional int64 log_bytes = 8; + /** + * optional int64 log_bytes = 8; + */ + boolean hasLogBytes(); + /** + * optional int64 log_bytes = 8; + */ + long getLogBytes(); + + // repeated .hbase.pb.TableServerTimestamp tst_map = 9; + /** + * repeated .hbase.pb.TableServerTimestamp tst_map = 9; + */ + java.util.List + getTstMapList(); + /** + * repeated .hbase.pb.TableServerTimestamp tst_map = 9; + */ + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp getTstMap(int index); + /** + * repeated .hbase.pb.TableServerTimestamp tst_map = 9; + */ + int getTstMapCount(); + /** + * repeated .hbase.pb.TableServerTimestamp tst_map = 9; + */ + java.util.List + getTstMapOrBuilderList(); + /** + * repeated .hbase.pb.TableServerTimestamp tst_map = 9; + */ + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestampOrBuilder getTstMapOrBuilder( + int index); + + // repeated .hbase.pb.BackupImage dependent_backup_image = 10; + /** + * repeated .hbase.pb.BackupImage dependent_backup_image = 10; + */ + java.util.List + getDependentBackupImageList(); + /** + * repeated .hbase.pb.BackupImage dependent_backup_image = 10; + */ + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage getDependentBackupImage(int index); + /** + * repeated .hbase.pb.BackupImage dependent_backup_image = 10; + 
*/ + int getDependentBackupImageCount(); + /** + * repeated .hbase.pb.BackupImage dependent_backup_image = 10; + */ + java.util.List + getDependentBackupImageOrBuilderList(); + /** + * repeated .hbase.pb.BackupImage dependent_backup_image = 10; + */ + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImageOrBuilder getDependentBackupImageOrBuilder( + int index); + + // required bool compacted = 11; + /** + * required bool compacted = 11; + */ + boolean hasCompacted(); + /** + * required bool compacted = 11; + */ + boolean getCompacted(); + } + /** + * Protobuf type {@code hbase.pb.BackupManifest} + */ + public static final class BackupManifest extends + com.google.protobuf.GeneratedMessage + implements BackupManifestOrBuilder { + // Use BackupManifest.newBuilder() to construct. + private BackupManifest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private BackupManifest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final BackupManifest defaultInstance; + public static BackupManifest getDefaultInstance() { + return defaultInstance; + } + + public BackupManifest getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private BackupManifest( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + bitField0_ |= 0x00000001; + version_ = input.readBytes(); + break; + } + case 18: { + bitField0_ |= 0x00000002; + backupId_ = input.readBytes(); + break; + } + case 24: { + int rawValue = input.readEnum(); + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType value = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType.valueOf(rawValue); + if (value == null) { + unknownFields.mergeVarintField(3, rawValue); + } else { + bitField0_ |= 0x00000004; + type_ = value; + } + break; + } + case 34: { + if (!((mutable_bitField0_ & 0x00000008) == 0x00000008)) { + tableList_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000008; + } + tableList_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.PARSER, extensionRegistry)); + break; + } + case 40: { + bitField0_ |= 0x00000008; + startTs_ = input.readUInt64(); + break; + } + case 48: { + bitField0_ |= 0x00000010; + completeTs_ = input.readUInt64(); + break; + } + case 56: { + bitField0_ |= 0x00000020; + totalBytes_ = input.readInt64(); + break; + } + case 64: { + bitField0_ |= 0x00000040; + logBytes_ = input.readInt64(); + break; + } + case 74: { + if (!((mutable_bitField0_ & 0x00000100) == 0x00000100)) { + tstMap_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000100; + } + 
tstMap_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp.PARSER, extensionRegistry)); + break; + } + case 82: { + if (!((mutable_bitField0_ & 0x00000200) == 0x00000200)) { + dependentBackupImage_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000200; + } + dependentBackupImage_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage.PARSER, extensionRegistry)); + break; + } + case 88: { + bitField0_ |= 0x00000080; + compacted_ = input.readBool(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000008) == 0x00000008)) { + tableList_ = java.util.Collections.unmodifiableList(tableList_); + } + if (((mutable_bitField0_ & 0x00000100) == 0x00000100)) { + tstMap_ = java.util.Collections.unmodifiableList(tstMap_); + } + if (((mutable_bitField0_ & 0x00000200) == 0x00000200)) { + dependentBackupImage_ = java.util.Collections.unmodifiableList(dependentBackupImage_); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_BackupManifest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_BackupManifest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest.class, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public BackupManifest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new BackupManifest(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required string version = 1; + public static final int VERSION_FIELD_NUMBER = 1; + private java.lang.Object version_; + /** + * required string version = 1; + */ + public boolean hasVersion() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string version = 1; + */ + public java.lang.String getVersion() { + java.lang.Object ref = version_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + version_ = s; + } + return s; + } + } + /** + * required string version = 1; + */ + public com.google.protobuf.ByteString + getVersionBytes() { + java.lang.Object ref = version_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + version_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + // required string backup_id = 2; + public 
static final int BACKUP_ID_FIELD_NUMBER = 2; + private java.lang.Object backupId_; + /** + * required string backup_id = 2; + */ + public boolean hasBackupId() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required string backup_id = 2; + */ + public java.lang.String getBackupId() { + java.lang.Object ref = backupId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + backupId_ = s; + } + return s; + } + } + /** + * required string backup_id = 2; + */ + public com.google.protobuf.ByteString + getBackupIdBytes() { + java.lang.Object ref = backupId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + backupId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + // required .hbase.pb.BackupType type = 3; + public static final int TYPE_FIELD_NUMBER = 3; + private org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType type_; + /** + * required .hbase.pb.BackupType type = 3; + */ + public boolean hasType() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * required .hbase.pb.BackupType type = 3; + */ + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType getType() { + return type_; + } + + // repeated .hbase.pb.TableName table_list = 4; + public static final int TABLE_LIST_FIELD_NUMBER = 4; + private java.util.List tableList_; + /** + * repeated .hbase.pb.TableName table_list = 4; + */ + public java.util.List getTableListList() { + return tableList_; + } + /** + * repeated .hbase.pb.TableName table_list = 4; + */ + public java.util.List + getTableListOrBuilderList() { + return tableList_; + } + /** + * repeated .hbase.pb.TableName table_list = 4; + */ + public int getTableListCount() { + return tableList_.size(); + } + /** + * repeated .hbase.pb.TableName table_list = 4; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableList(int index) { + return tableList_.get(index); + } + /** + * repeated .hbase.pb.TableName table_list = 4; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableListOrBuilder( + int index) { + return tableList_.get(index); + } + + // required uint64 start_ts = 5; + public static final int START_TS_FIELD_NUMBER = 5; + private long startTs_; + /** + * required uint64 start_ts = 5; + */ + public boolean hasStartTs() { + return ((bitField0_ & 0x00000008) == 0x00000008); + } + /** + * required uint64 start_ts = 5; + */ + public long getStartTs() { + return startTs_; + } + + // required uint64 complete_ts = 6; + public static final int COMPLETE_TS_FIELD_NUMBER = 6; + private long completeTs_; + /** + * required uint64 complete_ts = 6; + */ + public boolean hasCompleteTs() { + return ((bitField0_ & 0x00000010) == 0x00000010); + } + /** + * required uint64 complete_ts = 6; + */ + public long getCompleteTs() { + return completeTs_; + } + + // required int64 total_bytes = 7; + public static final int TOTAL_BYTES_FIELD_NUMBER = 7; + private long totalBytes_; + /** + * required int64 total_bytes = 7; + */ + public boolean hasTotalBytes() { + return ((bitField0_ & 0x00000020) == 0x00000020); + } + /** + * required int64 total_bytes = 7; + */ + public long getTotalBytes() { + return totalBytes_; + } + + // optional int64 
log_bytes = 8; + public static final int LOG_BYTES_FIELD_NUMBER = 8; + private long logBytes_; + /** + * optional int64 log_bytes = 8; + */ + public boolean hasLogBytes() { + return ((bitField0_ & 0x00000040) == 0x00000040); + } + /** + * optional int64 log_bytes = 8; + */ + public long getLogBytes() { + return logBytes_; + } + + // repeated .hbase.pb.TableServerTimestamp tst_map = 9; + public static final int TST_MAP_FIELD_NUMBER = 9; + private java.util.List tstMap_; + /** + * repeated .hbase.pb.TableServerTimestamp tst_map = 9; + */ + public java.util.List getTstMapList() { + return tstMap_; + } + /** + * repeated .hbase.pb.TableServerTimestamp tst_map = 9; + */ + public java.util.List + getTstMapOrBuilderList() { + return tstMap_; + } + /** + * repeated .hbase.pb.TableServerTimestamp tst_map = 9; + */ + public int getTstMapCount() { + return tstMap_.size(); + } + /** + * repeated .hbase.pb.TableServerTimestamp tst_map = 9; + */ + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp getTstMap(int index) { + return tstMap_.get(index); + } + /** + * repeated .hbase.pb.TableServerTimestamp tst_map = 9; + */ + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestampOrBuilder getTstMapOrBuilder( + int index) { + return tstMap_.get(index); + } + + // repeated .hbase.pb.BackupImage dependent_backup_image = 10; + public static final int DEPENDENT_BACKUP_IMAGE_FIELD_NUMBER = 10; + private java.util.List dependentBackupImage_; + /** + * repeated .hbase.pb.BackupImage dependent_backup_image = 10; + */ + public java.util.List getDependentBackupImageList() { + return dependentBackupImage_; + } + /** + * repeated .hbase.pb.BackupImage dependent_backup_image = 10; + */ + public java.util.List + getDependentBackupImageOrBuilderList() { + return dependentBackupImage_; + } + /** + * repeated .hbase.pb.BackupImage dependent_backup_image = 10; + */ + public int getDependentBackupImageCount() { + return dependentBackupImage_.size(); + } + /** + * repeated .hbase.pb.BackupImage dependent_backup_image = 10; + */ + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage getDependentBackupImage(int index) { + return dependentBackupImage_.get(index); + } + /** + * repeated .hbase.pb.BackupImage dependent_backup_image = 10; + */ + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImageOrBuilder getDependentBackupImageOrBuilder( + int index) { + return dependentBackupImage_.get(index); + } + + // required bool compacted = 11; + public static final int COMPACTED_FIELD_NUMBER = 11; + private boolean compacted_; + /** + * required bool compacted = 11; + */ + public boolean hasCompacted() { + return ((bitField0_ & 0x00000080) == 0x00000080); + } + /** + * required bool compacted = 11; + */ + public boolean getCompacted() { + return compacted_; + } + + private void initFields() { + version_ = ""; + backupId_ = ""; + type_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType.FULL; + tableList_ = java.util.Collections.emptyList(); + startTs_ = 0L; + completeTs_ = 0L; + totalBytes_ = 0L; + logBytes_ = 0L; + tstMap_ = java.util.Collections.emptyList(); + dependentBackupImage_ = java.util.Collections.emptyList(); + compacted_ = false; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasVersion()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasBackupId()) 
{ + memoizedIsInitialized = 0; + return false; + } + if (!hasType()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasStartTs()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasCompleteTs()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasTotalBytes()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasCompacted()) { + memoizedIsInitialized = 0; + return false; + } + for (int i = 0; i < getTableListCount(); i++) { + if (!getTableList(i).isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + for (int i = 0; i < getTstMapCount(); i++) { + if (!getTstMap(i).isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + for (int i = 0; i < getDependentBackupImageCount(); i++) { + if (!getDependentBackupImage(i).isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(1, getVersionBytes()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeBytes(2, getBackupIdBytes()); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + output.writeEnum(3, type_.getNumber()); + } + for (int i = 0; i < tableList_.size(); i++) { + output.writeMessage(4, tableList_.get(i)); + } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + output.writeUInt64(5, startTs_); + } + if (((bitField0_ & 0x00000010) == 0x00000010)) { + output.writeUInt64(6, completeTs_); + } + if (((bitField0_ & 0x00000020) == 0x00000020)) { + output.writeInt64(7, totalBytes_); + } + if (((bitField0_ & 0x00000040) == 0x00000040)) { + output.writeInt64(8, logBytes_); + } + for (int i = 0; i < tstMap_.size(); i++) { + output.writeMessage(9, tstMap_.get(i)); + } + for (int i = 0; i < dependentBackupImage_.size(); i++) { + output.writeMessage(10, dependentBackupImage_.get(i)); + } + if (((bitField0_ & 0x00000080) == 0x00000080)) { + output.writeBool(11, compacted_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(1, getVersionBytes()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(2, getBackupIdBytes()); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + size += com.google.protobuf.CodedOutputStream + .computeEnumSize(3, type_.getNumber()); + } + for (int i = 0; i < tableList_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(4, tableList_.get(i)); + } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt64Size(5, startTs_); + } + if (((bitField0_ & 0x00000010) == 0x00000010)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt64Size(6, completeTs_); + } + if (((bitField0_ & 0x00000020) == 0x00000020)) { + size += com.google.protobuf.CodedOutputStream + .computeInt64Size(7, totalBytes_); + } + if (((bitField0_ & 0x00000040) == 0x00000040)) { + size += com.google.protobuf.CodedOutputStream + .computeInt64Size(8, logBytes_); + } + for (int i = 0; i < tstMap_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + 
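The isInitialized() check above spells out which BackupManifest fields are required: version, backup_id, type, start_ts, complete_ts, total_bytes and compacted (plus full initialization of any nested table_list, tst_map and dependent_backup_image entries). As an illustrative sketch only, a minimal valid manifest could be assembled with the generated Builder as below; the literal values are placeholders, not values the patch itself uses.

  // assumes: import org.apache.hadoop.hbase.protobuf.generated.BackupProtos;
  // Minimal manifest: every required scalar field set, repeated fields left empty.
  BackupProtos.BackupManifest manifest = BackupProtos.BackupManifest.newBuilder()
      .setVersion("1.0")                          // placeholder version string
      .setBackupId("backup_1462000000000")        // placeholder backup id
      .setType(BackupProtos.BackupType.FULL)
      .setStartTs(1462000000000L)                 // placeholder timestamps/sizes
      .setCompleteTs(1462000360000L)
      .setTotalBytes(0L)
      .setCompacted(false)
      .build();                                   // passes the required-field check above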
.computeMessageSize(9, tstMap_.get(i)); + } + for (int i = 0; i < dependentBackupImage_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(10, dependentBackupImage_.get(i)); + } + if (((bitField0_ & 0x00000080) == 0x00000080)) { + size += com.google.protobuf.CodedOutputStream + .computeBoolSize(11, compacted_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest other = (org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest) obj; + + boolean result = true; + result = result && (hasVersion() == other.hasVersion()); + if (hasVersion()) { + result = result && getVersion() + .equals(other.getVersion()); + } + result = result && (hasBackupId() == other.hasBackupId()); + if (hasBackupId()) { + result = result && getBackupId() + .equals(other.getBackupId()); + } + result = result && (hasType() == other.hasType()); + if (hasType()) { + result = result && + (getType() == other.getType()); + } + result = result && getTableListList() + .equals(other.getTableListList()); + result = result && (hasStartTs() == other.hasStartTs()); + if (hasStartTs()) { + result = result && (getStartTs() + == other.getStartTs()); + } + result = result && (hasCompleteTs() == other.hasCompleteTs()); + if (hasCompleteTs()) { + result = result && (getCompleteTs() + == other.getCompleteTs()); + } + result = result && (hasTotalBytes() == other.hasTotalBytes()); + if (hasTotalBytes()) { + result = result && (getTotalBytes() + == other.getTotalBytes()); + } + result = result && (hasLogBytes() == other.hasLogBytes()); + if (hasLogBytes()) { + result = result && (getLogBytes() + == other.getLogBytes()); + } + result = result && getTstMapList() + .equals(other.getTstMapList()); + result = result && getDependentBackupImageList() + .equals(other.getDependentBackupImageList()); + result = result && (hasCompacted() == other.hasCompacted()); + if (hasCompacted()) { + result = result && (getCompacted() + == other.getCompacted()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasVersion()) { + hash = (37 * hash) + VERSION_FIELD_NUMBER; + hash = (53 * hash) + getVersion().hashCode(); + } + if (hasBackupId()) { + hash = (37 * hash) + BACKUP_ID_FIELD_NUMBER; + hash = (53 * hash) + getBackupId().hashCode(); + } + if (hasType()) { + hash = (37 * hash) + TYPE_FIELD_NUMBER; + hash = (53 * hash) + hashEnum(getType()); + } + if (getTableListCount() > 0) { + hash = (37 * hash) + TABLE_LIST_FIELD_NUMBER; + hash = (53 * hash) + getTableListList().hashCode(); + } + if (hasStartTs()) { + hash = (37 * hash) + START_TS_FIELD_NUMBER; + hash = (53 * hash) + hashLong(getStartTs()); + } + if (hasCompleteTs()) { + hash = (37 * hash) + COMPLETE_TS_FIELD_NUMBER; + hash = 
(53 * hash) + hashLong(getCompleteTs()); + } + if (hasTotalBytes()) { + hash = (37 * hash) + TOTAL_BYTES_FIELD_NUMBER; + hash = (53 * hash) + hashLong(getTotalBytes()); + } + if (hasLogBytes()) { + hash = (37 * hash) + LOG_BYTES_FIELD_NUMBER; + hash = (53 * hash) + hashLong(getLogBytes()); + } + if (getTstMapCount() > 0) { + hash = (37 * hash) + TST_MAP_FIELD_NUMBER; + hash = (53 * hash) + getTstMapList().hashCode(); + } + if (getDependentBackupImageCount() > 0) { + hash = (37 * hash) + DEPENDENT_BACKUP_IMAGE_FIELD_NUMBER; + hash = (53 * hash) + getDependentBackupImageList().hashCode(); + } + if (hasCompacted()) { + hash = (37 * hash) + COMPACTED_FIELD_NUMBER; + hash = (53 * hash) + hashBoolean(getCompacted()); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder 
newBuilder(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code hbase.pb.BackupManifest} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_BackupManifest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_BackupManifest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest.class, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getTableListFieldBuilder(); + getTstMapFieldBuilder(); + getDependentBackupImageFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + version_ = ""; + bitField0_ = (bitField0_ & ~0x00000001); + backupId_ = ""; + bitField0_ = (bitField0_ & ~0x00000002); + type_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType.FULL; + bitField0_ = (bitField0_ & ~0x00000004); + if (tableListBuilder_ == null) { + tableList_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000008); + } else { + tableListBuilder_.clear(); + } + startTs_ = 0L; + bitField0_ = (bitField0_ & ~0x00000010); + completeTs_ = 0L; + bitField0_ = (bitField0_ & ~0x00000020); + totalBytes_ = 0L; + bitField0_ = (bitField0_ & ~0x00000040); + logBytes_ = 0L; + bitField0_ = (bitField0_ & ~0x00000080); + if (tstMapBuilder_ == null) { + tstMap_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000100); + } else { + tstMapBuilder_.clear(); + } + if (dependentBackupImageBuilder_ == null) { + dependentBackupImage_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000200); + } else { + dependentBackupImageBuilder_.clear(); + } + compacted_ = false; + bitField0_ = (bitField0_ & ~0x00000400); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_BackupManifest_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest.getDefaultInstance(); + } + + public 
org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest build() { + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest result = new org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.version_ = version_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.backupId_ = backupId_; + if (((from_bitField0_ & 0x00000004) == 0x00000004)) { + to_bitField0_ |= 0x00000004; + } + result.type_ = type_; + if (tableListBuilder_ == null) { + if (((bitField0_ & 0x00000008) == 0x00000008)) { + tableList_ = java.util.Collections.unmodifiableList(tableList_); + bitField0_ = (bitField0_ & ~0x00000008); + } + result.tableList_ = tableList_; + } else { + result.tableList_ = tableListBuilder_.build(); + } + if (((from_bitField0_ & 0x00000010) == 0x00000010)) { + to_bitField0_ |= 0x00000008; + } + result.startTs_ = startTs_; + if (((from_bitField0_ & 0x00000020) == 0x00000020)) { + to_bitField0_ |= 0x00000010; + } + result.completeTs_ = completeTs_; + if (((from_bitField0_ & 0x00000040) == 0x00000040)) { + to_bitField0_ |= 0x00000020; + } + result.totalBytes_ = totalBytes_; + if (((from_bitField0_ & 0x00000080) == 0x00000080)) { + to_bitField0_ |= 0x00000040; + } + result.logBytes_ = logBytes_; + if (tstMapBuilder_ == null) { + if (((bitField0_ & 0x00000100) == 0x00000100)) { + tstMap_ = java.util.Collections.unmodifiableList(tstMap_); + bitField0_ = (bitField0_ & ~0x00000100); + } + result.tstMap_ = tstMap_; + } else { + result.tstMap_ = tstMapBuilder_.build(); + } + if (dependentBackupImageBuilder_ == null) { + if (((bitField0_ & 0x00000200) == 0x00000200)) { + dependentBackupImage_ = java.util.Collections.unmodifiableList(dependentBackupImage_); + bitField0_ = (bitField0_ & ~0x00000200); + } + result.dependentBackupImage_ = dependentBackupImage_; + } else { + result.dependentBackupImage_ = dependentBackupImageBuilder_.build(); + } + if (((from_bitField0_ & 0x00000400) == 0x00000400)) { + to_bitField0_ |= 0x00000080; + } + result.compacted_ = compacted_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest.getDefaultInstance()) return this; + if (other.hasVersion()) { + bitField0_ |= 0x00000001; + version_ = other.version_; + onChanged(); + } + if (other.hasBackupId()) { + bitField0_ |= 0x00000002; + backupId_ = other.backupId_; + onChanged(); + } + if (other.hasType()) { + setType(other.getType()); + } + if (tableListBuilder_ == null) { + if (!other.tableList_.isEmpty()) { + if (tableList_.isEmpty()) { + tableList_ = 
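The build()/buildPartial() pair above differs only in the required-field check: build() throws the exception produced by newUninitializedMessageException() when the result is not initialized, while buildPartial() returns whatever has been set so far. A short, illustrative-only sketch of the distinction, using placeholder values:

  // assumes: import org.apache.hadoop.hbase.protobuf.generated.BackupProtos;
  BackupProtos.BackupManifest.Builder b = BackupProtos.BackupManifest.newBuilder()
      .setVersion("1.0");                          // remaining required fields not yet set
  BackupProtos.BackupManifest partial = b.buildPartial(); // no required-field check
  assert !partial.isInitialized();                 // backup_id, type, start_ts, ... still missing
  // b.build() here would throw until those required fields are also populated.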
other.tableList_; + bitField0_ = (bitField0_ & ~0x00000008); + } else { + ensureTableListIsMutable(); + tableList_.addAll(other.tableList_); + } + onChanged(); + } + } else { + if (!other.tableList_.isEmpty()) { + if (tableListBuilder_.isEmpty()) { + tableListBuilder_.dispose(); + tableListBuilder_ = null; + tableList_ = other.tableList_; + bitField0_ = (bitField0_ & ~0x00000008); + tableListBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? + getTableListFieldBuilder() : null; + } else { + tableListBuilder_.addAllMessages(other.tableList_); + } + } + } + if (other.hasStartTs()) { + setStartTs(other.getStartTs()); + } + if (other.hasCompleteTs()) { + setCompleteTs(other.getCompleteTs()); + } + if (other.hasTotalBytes()) { + setTotalBytes(other.getTotalBytes()); + } + if (other.hasLogBytes()) { + setLogBytes(other.getLogBytes()); + } + if (tstMapBuilder_ == null) { + if (!other.tstMap_.isEmpty()) { + if (tstMap_.isEmpty()) { + tstMap_ = other.tstMap_; + bitField0_ = (bitField0_ & ~0x00000100); + } else { + ensureTstMapIsMutable(); + tstMap_.addAll(other.tstMap_); + } + onChanged(); + } + } else { + if (!other.tstMap_.isEmpty()) { + if (tstMapBuilder_.isEmpty()) { + tstMapBuilder_.dispose(); + tstMapBuilder_ = null; + tstMap_ = other.tstMap_; + bitField0_ = (bitField0_ & ~0x00000100); + tstMapBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? + getTstMapFieldBuilder() : null; + } else { + tstMapBuilder_.addAllMessages(other.tstMap_); + } + } + } + if (dependentBackupImageBuilder_ == null) { + if (!other.dependentBackupImage_.isEmpty()) { + if (dependentBackupImage_.isEmpty()) { + dependentBackupImage_ = other.dependentBackupImage_; + bitField0_ = (bitField0_ & ~0x00000200); + } else { + ensureDependentBackupImageIsMutable(); + dependentBackupImage_.addAll(other.dependentBackupImage_); + } + onChanged(); + } + } else { + if (!other.dependentBackupImage_.isEmpty()) { + if (dependentBackupImageBuilder_.isEmpty()) { + dependentBackupImageBuilder_.dispose(); + dependentBackupImageBuilder_ = null; + dependentBackupImage_ = other.dependentBackupImage_; + bitField0_ = (bitField0_ & ~0x00000200); + dependentBackupImageBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? 
+ getDependentBackupImageFieldBuilder() : null; + } else { + dependentBackupImageBuilder_.addAllMessages(other.dependentBackupImage_); + } + } + } + if (other.hasCompacted()) { + setCompacted(other.getCompacted()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasVersion()) { + + return false; + } + if (!hasBackupId()) { + + return false; + } + if (!hasType()) { + + return false; + } + if (!hasStartTs()) { + + return false; + } + if (!hasCompleteTs()) { + + return false; + } + if (!hasTotalBytes()) { + + return false; + } + if (!hasCompacted()) { + + return false; + } + for (int i = 0; i < getTableListCount(); i++) { + if (!getTableList(i).isInitialized()) { + + return false; + } + } + for (int i = 0; i < getTstMapCount(); i++) { + if (!getTstMap(i).isInitialized()) { + + return false; + } + } + for (int i = 0; i < getDependentBackupImageCount(); i++) { + if (!getDependentBackupImage(i).isInitialized()) { + + return false; + } + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required string version = 1; + private java.lang.Object version_ = ""; + /** + * required string version = 1; + */ + public boolean hasVersion() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string version = 1; + */ + public java.lang.String getVersion() { + java.lang.Object ref = version_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + version_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * required string version = 1; + */ + public com.google.protobuf.ByteString + getVersionBytes() { + java.lang.Object ref = version_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + version_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * required string version = 1; + */ + public Builder setVersion( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + version_ = value; + onChanged(); + return this; + } + /** + * required string version = 1; + */ + public Builder clearVersion() { + bitField0_ = (bitField0_ & ~0x00000001); + version_ = getDefaultInstance().getVersion(); + onChanged(); + return this; + } + /** + * required string version = 1; + */ + public Builder setVersionBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + version_ = value; + onChanged(); + return this; + } + + // required string backup_id = 2; + private java.lang.Object backupId_ = ""; + /** + * required string backup_id = 2; + */ + public boolean hasBackupId() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** 
+ * required string backup_id = 2; + */ + public java.lang.String getBackupId() { + java.lang.Object ref = backupId_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + backupId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * required string backup_id = 2; + */ + public com.google.protobuf.ByteString + getBackupIdBytes() { + java.lang.Object ref = backupId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + backupId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * required string backup_id = 2; + */ + public Builder setBackupId( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000002; + backupId_ = value; + onChanged(); + return this; + } + /** + * required string backup_id = 2; + */ + public Builder clearBackupId() { + bitField0_ = (bitField0_ & ~0x00000002); + backupId_ = getDefaultInstance().getBackupId(); + onChanged(); + return this; + } + /** + * required string backup_id = 2; + */ + public Builder setBackupIdBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000002; + backupId_ = value; + onChanged(); + return this; + } + + // required .hbase.pb.BackupType type = 3; + private org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType type_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType.FULL; + /** + * required .hbase.pb.BackupType type = 3; + */ + public boolean hasType() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * required .hbase.pb.BackupType type = 3; + */ + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType getType() { + return type_; + } + /** + * required .hbase.pb.BackupType type = 3; + */ + public Builder setType(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000004; + type_ = value; + onChanged(); + return this; + } + /** + * required .hbase.pb.BackupType type = 3; + */ + public Builder clearType() { + bitField0_ = (bitField0_ & ~0x00000004); + type_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType.FULL; + onChanged(); + return this; + } + + // repeated .hbase.pb.TableName table_list = 4; + private java.util.List tableList_ = + java.util.Collections.emptyList(); + private void ensureTableListIsMutable() { + if (!((bitField0_ & 0x00000008) == 0x00000008)) { + tableList_ = new java.util.ArrayList(tableList_); + bitField0_ |= 0x00000008; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> tableListBuilder_; + + /** + * repeated .hbase.pb.TableName table_list = 4; + */ + public java.util.List getTableListList() { + if (tableListBuilder_ == null) { + return java.util.Collections.unmodifiableList(tableList_); + } else { + return tableListBuilder_.getMessageList(); + } + } + /** + * repeated .hbase.pb.TableName table_list = 4; + */ + public int getTableListCount() { + if (tableListBuilder_ == null) { + return tableList_.size(); + } else { + return 
tableListBuilder_.getCount(); + } + } + /** + * repeated .hbase.pb.TableName table_list = 4; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableList(int index) { + if (tableListBuilder_ == null) { + return tableList_.get(index); + } else { + return tableListBuilder_.getMessage(index); + } + } + /** + * repeated .hbase.pb.TableName table_list = 4; + */ + public Builder setTableList( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { + if (tableListBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureTableListIsMutable(); + tableList_.set(index, value); + onChanged(); + } else { + tableListBuilder_.setMessage(index, value); + } + return this; + } + /** + * repeated .hbase.pb.TableName table_list = 4; + */ + public Builder setTableList( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) { + if (tableListBuilder_ == null) { + ensureTableListIsMutable(); + tableList_.set(index, builderForValue.build()); + onChanged(); + } else { + tableListBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.TableName table_list = 4; + */ + public Builder addTableList(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { + if (tableListBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureTableListIsMutable(); + tableList_.add(value); + onChanged(); + } else { + tableListBuilder_.addMessage(value); + } + return this; + } + /** + * repeated .hbase.pb.TableName table_list = 4; + */ + public Builder addTableList( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { + if (tableListBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureTableListIsMutable(); + tableList_.add(index, value); + onChanged(); + } else { + tableListBuilder_.addMessage(index, value); + } + return this; + } + /** + * repeated .hbase.pb.TableName table_list = 4; + */ + public Builder addTableList( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) { + if (tableListBuilder_ == null) { + ensureTableListIsMutable(); + tableList_.add(builderForValue.build()); + onChanged(); + } else { + tableListBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.TableName table_list = 4; + */ + public Builder addTableList( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) { + if (tableListBuilder_ == null) { + ensureTableListIsMutable(); + tableList_.add(index, builderForValue.build()); + onChanged(); + } else { + tableListBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.TableName table_list = 4; + */ + public Builder addAllTableList( + java.lang.Iterable values) { + if (tableListBuilder_ == null) { + ensureTableListIsMutable(); + super.addAll(values, tableList_); + onChanged(); + } else { + tableListBuilder_.addAllMessages(values); + } + return this; + } + /** + * repeated .hbase.pb.TableName table_list = 4; + */ + public Builder clearTableList() { + if (tableListBuilder_ == null) { + tableList_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000008); + onChanged(); + } else { + tableListBuilder_.clear(); + } + return this; + } + /** + * repeated .hbase.pb.TableName table_list = 4; + */ + public Builder 
removeTableList(int index) { + if (tableListBuilder_ == null) { + ensureTableListIsMutable(); + tableList_.remove(index); + onChanged(); + } else { + tableListBuilder_.remove(index); + } + return this; + } + /** + * repeated .hbase.pb.TableName table_list = 4; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder getTableListBuilder( + int index) { + return getTableListFieldBuilder().getBuilder(index); + } + /** + * repeated .hbase.pb.TableName table_list = 4; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableListOrBuilder( + int index) { + if (tableListBuilder_ == null) { + return tableList_.get(index); } else { + return tableListBuilder_.getMessageOrBuilder(index); + } + } + /** + * repeated .hbase.pb.TableName table_list = 4; + */ + public java.util.List + getTableListOrBuilderList() { + if (tableListBuilder_ != null) { + return tableListBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(tableList_); + } + } + /** + * repeated .hbase.pb.TableName table_list = 4; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder addTableListBuilder() { + return getTableListFieldBuilder().addBuilder( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance()); + } + /** + * repeated .hbase.pb.TableName table_list = 4; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder addTableListBuilder( + int index) { + return getTableListFieldBuilder().addBuilder( + index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance()); + } + /** + * repeated .hbase.pb.TableName table_list = 4; + */ + public java.util.List + getTableListBuilderList() { + return getTableListFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> + getTableListFieldBuilder() { + if (tableListBuilder_ == null) { + tableListBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder>( + tableList_, + ((bitField0_ & 0x00000008) == 0x00000008), + getParentForChildren(), + isClean()); + tableList_ = null; + } + return tableListBuilder_; + } + + // required uint64 start_ts = 5; + private long startTs_ ; + /** + * required uint64 start_ts = 5; + */ + public boolean hasStartTs() { + return ((bitField0_ & 0x00000010) == 0x00000010); + } + /** + * required uint64 start_ts = 5; + */ + public long getStartTs() { + return startTs_; + } + /** + * required uint64 start_ts = 5; + */ + public Builder setStartTs(long value) { + bitField0_ |= 0x00000010; + startTs_ = value; + onChanged(); + return this; + } + /** + * required uint64 start_ts = 5; + */ + public Builder clearStartTs() { + bitField0_ = (bitField0_ & ~0x00000010); + startTs_ = 0L; + onChanged(); + return this; + } + + // required uint64 complete_ts = 6; + private long completeTs_ ; + /** + * required uint64 complete_ts = 6; + */ + public boolean hasCompleteTs() { + return ((bitField0_ & 0x00000020) == 0x00000020); + } + /** + * required uint64 complete_ts = 6; + */ + public long getCompleteTs() { 
+ return completeTs_; + } + /** + * required uint64 complete_ts = 6; + */ + public Builder setCompleteTs(long value) { + bitField0_ |= 0x00000020; + completeTs_ = value; + onChanged(); + return this; + } + /** + * required uint64 complete_ts = 6; + */ + public Builder clearCompleteTs() { + bitField0_ = (bitField0_ & ~0x00000020); + completeTs_ = 0L; + onChanged(); + return this; + } + + // required int64 total_bytes = 7; + private long totalBytes_ ; + /** + * required int64 total_bytes = 7; + */ + public boolean hasTotalBytes() { + return ((bitField0_ & 0x00000040) == 0x00000040); + } + /** + * required int64 total_bytes = 7; + */ + public long getTotalBytes() { + return totalBytes_; + } + /** + * required int64 total_bytes = 7; + */ + public Builder setTotalBytes(long value) { + bitField0_ |= 0x00000040; + totalBytes_ = value; + onChanged(); + return this; + } + /** + * required int64 total_bytes = 7; + */ + public Builder clearTotalBytes() { + bitField0_ = (bitField0_ & ~0x00000040); + totalBytes_ = 0L; + onChanged(); + return this; + } + + // optional int64 log_bytes = 8; + private long logBytes_ ; + /** + * optional int64 log_bytes = 8; + */ + public boolean hasLogBytes() { + return ((bitField0_ & 0x00000080) == 0x00000080); + } + /** + * optional int64 log_bytes = 8; + */ + public long getLogBytes() { + return logBytes_; + } + /** + * optional int64 log_bytes = 8; + */ + public Builder setLogBytes(long value) { + bitField0_ |= 0x00000080; + logBytes_ = value; + onChanged(); + return this; + } + /** + * optional int64 log_bytes = 8; + */ + public Builder clearLogBytes() { + bitField0_ = (bitField0_ & ~0x00000080); + logBytes_ = 0L; + onChanged(); + return this; + } + + // repeated .hbase.pb.TableServerTimestamp tst_map = 9; + private java.util.List tstMap_ = + java.util.Collections.emptyList(); + private void ensureTstMapIsMutable() { + if (!((bitField0_ & 0x00000100) == 0x00000100)) { + tstMap_ = new java.util.ArrayList(tstMap_); + bitField0_ |= 0x00000100; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp.Builder, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestampOrBuilder> tstMapBuilder_; + + /** + * repeated .hbase.pb.TableServerTimestamp tst_map = 9; + */ + public java.util.List getTstMapList() { + if (tstMapBuilder_ == null) { + return java.util.Collections.unmodifiableList(tstMap_); + } else { + return tstMapBuilder_.getMessageList(); + } + } + /** + * repeated .hbase.pb.TableServerTimestamp tst_map = 9; + */ + public int getTstMapCount() { + if (tstMapBuilder_ == null) { + return tstMap_.size(); + } else { + return tstMapBuilder_.getCount(); + } + } + /** + * repeated .hbase.pb.TableServerTimestamp tst_map = 9; + */ + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp getTstMap(int index) { + if (tstMapBuilder_ == null) { + return tstMap_.get(index); + } else { + return tstMapBuilder_.getMessage(index); + } + } + /** + * repeated .hbase.pb.TableServerTimestamp tst_map = 9; + */ + public Builder setTstMap( + int index, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp value) { + if (tstMapBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureTstMapIsMutable(); + tstMap_.set(index, value); + onChanged(); + } else { + tstMapBuilder_.setMessage(index, value); + } + return this; + } + /** + * 
repeated .hbase.pb.TableServerTimestamp tst_map = 9; + */ + public Builder setTstMap( + int index, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp.Builder builderForValue) { + if (tstMapBuilder_ == null) { + ensureTstMapIsMutable(); + tstMap_.set(index, builderForValue.build()); + onChanged(); + } else { + tstMapBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.TableServerTimestamp tst_map = 9; + */ + public Builder addTstMap(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp value) { + if (tstMapBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureTstMapIsMutable(); + tstMap_.add(value); + onChanged(); + } else { + tstMapBuilder_.addMessage(value); + } + return this; + } + /** + * repeated .hbase.pb.TableServerTimestamp tst_map = 9; + */ + public Builder addTstMap( + int index, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp value) { + if (tstMapBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureTstMapIsMutable(); + tstMap_.add(index, value); + onChanged(); + } else { + tstMapBuilder_.addMessage(index, value); + } + return this; + } + /** + * repeated .hbase.pb.TableServerTimestamp tst_map = 9; + */ + public Builder addTstMap( + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp.Builder builderForValue) { + if (tstMapBuilder_ == null) { + ensureTstMapIsMutable(); + tstMap_.add(builderForValue.build()); + onChanged(); + } else { + tstMapBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.TableServerTimestamp tst_map = 9; + */ + public Builder addTstMap( + int index, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp.Builder builderForValue) { + if (tstMapBuilder_ == null) { + ensureTstMapIsMutable(); + tstMap_.add(index, builderForValue.build()); + onChanged(); + } else { + tstMapBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.TableServerTimestamp tst_map = 9; + */ + public Builder addAllTstMap( + java.lang.Iterable values) { + if (tstMapBuilder_ == null) { + ensureTstMapIsMutable(); + super.addAll(values, tstMap_); + onChanged(); + } else { + tstMapBuilder_.addAllMessages(values); + } + return this; + } + /** + * repeated .hbase.pb.TableServerTimestamp tst_map = 9; + */ + public Builder clearTstMap() { + if (tstMapBuilder_ == null) { + tstMap_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000100); + onChanged(); + } else { + tstMapBuilder_.clear(); + } + return this; + } + /** + * repeated .hbase.pb.TableServerTimestamp tst_map = 9; + */ + public Builder removeTstMap(int index) { + if (tstMapBuilder_ == null) { + ensureTstMapIsMutable(); + tstMap_.remove(index); + onChanged(); + } else { + tstMapBuilder_.remove(index); + } + return this; + } + /** + * repeated .hbase.pb.TableServerTimestamp tst_map = 9; + */ + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp.Builder getTstMapBuilder( + int index) { + return getTstMapFieldBuilder().getBuilder(index); + } + /** + * repeated .hbase.pb.TableServerTimestamp tst_map = 9; + */ + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestampOrBuilder getTstMapOrBuilder( + int index) { + if (tstMapBuilder_ == null) { + return tstMap_.get(index); } else { + return 
tstMapBuilder_.getMessageOrBuilder(index); + } + } + /** + * repeated .hbase.pb.TableServerTimestamp tst_map = 9; + */ + public java.util.List + getTstMapOrBuilderList() { + if (tstMapBuilder_ != null) { + return tstMapBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(tstMap_); + } + } + /** + * repeated .hbase.pb.TableServerTimestamp tst_map = 9; + */ + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp.Builder addTstMapBuilder() { + return getTstMapFieldBuilder().addBuilder( + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp.getDefaultInstance()); + } + /** + * repeated .hbase.pb.TableServerTimestamp tst_map = 9; + */ + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp.Builder addTstMapBuilder( + int index) { + return getTstMapFieldBuilder().addBuilder( + index, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp.getDefaultInstance()); + } + /** + * repeated .hbase.pb.TableServerTimestamp tst_map = 9; + */ + public java.util.List + getTstMapBuilderList() { + return getTstMapFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp.Builder, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestampOrBuilder> + getTstMapFieldBuilder() { + if (tstMapBuilder_ == null) { + tstMapBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp.Builder, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestampOrBuilder>( + tstMap_, + ((bitField0_ & 0x00000100) == 0x00000100), + getParentForChildren(), + isClean()); + tstMap_ = null; + } + return tstMapBuilder_; + } + + // repeated .hbase.pb.BackupImage dependent_backup_image = 10; + private java.util.List dependentBackupImage_ = + java.util.Collections.emptyList(); + private void ensureDependentBackupImageIsMutable() { + if (!((bitField0_ & 0x00000200) == 0x00000200)) { + dependentBackupImage_ = new java.util.ArrayList(dependentBackupImage_); + bitField0_ |= 0x00000200; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage.Builder, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImageOrBuilder> dependentBackupImageBuilder_; + + /** + * repeated .hbase.pb.BackupImage dependent_backup_image = 10; + */ + public java.util.List getDependentBackupImageList() { + if (dependentBackupImageBuilder_ == null) { + return java.util.Collections.unmodifiableList(dependentBackupImage_); + } else { + return dependentBackupImageBuilder_.getMessageList(); + } + } + /** + * repeated .hbase.pb.BackupImage dependent_backup_image = 10; + */ + public int getDependentBackupImageCount() { + if (dependentBackupImageBuilder_ == null) { + return dependentBackupImage_.size(); + } else { + return dependentBackupImageBuilder_.getCount(); + } + } + /** + * repeated .hbase.pb.BackupImage dependent_backup_image = 10; + */ + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage getDependentBackupImage(int index) { + if (dependentBackupImageBuilder_ == null) { + return 
dependentBackupImage_.get(index); + } else { + return dependentBackupImageBuilder_.getMessage(index); + } + } + /** + * repeated .hbase.pb.BackupImage dependent_backup_image = 10; + */ + public Builder setDependentBackupImage( + int index, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage value) { + if (dependentBackupImageBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureDependentBackupImageIsMutable(); + dependentBackupImage_.set(index, value); + onChanged(); + } else { + dependentBackupImageBuilder_.setMessage(index, value); + } + return this; + } + /** + * repeated .hbase.pb.BackupImage dependent_backup_image = 10; + */ + public Builder setDependentBackupImage( + int index, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage.Builder builderForValue) { + if (dependentBackupImageBuilder_ == null) { + ensureDependentBackupImageIsMutable(); + dependentBackupImage_.set(index, builderForValue.build()); + onChanged(); + } else { + dependentBackupImageBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.BackupImage dependent_backup_image = 10; + */ + public Builder addDependentBackupImage(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage value) { + if (dependentBackupImageBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureDependentBackupImageIsMutable(); + dependentBackupImage_.add(value); + onChanged(); + } else { + dependentBackupImageBuilder_.addMessage(value); + } + return this; + } + /** + * repeated .hbase.pb.BackupImage dependent_backup_image = 10; + */ + public Builder addDependentBackupImage( + int index, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage value) { + if (dependentBackupImageBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureDependentBackupImageIsMutable(); + dependentBackupImage_.add(index, value); + onChanged(); + } else { + dependentBackupImageBuilder_.addMessage(index, value); + } + return this; + } + /** + * repeated .hbase.pb.BackupImage dependent_backup_image = 10; + */ + public Builder addDependentBackupImage( + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage.Builder builderForValue) { + if (dependentBackupImageBuilder_ == null) { + ensureDependentBackupImageIsMutable(); + dependentBackupImage_.add(builderForValue.build()); + onChanged(); + } else { + dependentBackupImageBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.BackupImage dependent_backup_image = 10; + */ + public Builder addDependentBackupImage( + int index, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage.Builder builderForValue) { + if (dependentBackupImageBuilder_ == null) { + ensureDependentBackupImageIsMutable(); + dependentBackupImage_.add(index, builderForValue.build()); + onChanged(); + } else { + dependentBackupImageBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.BackupImage dependent_backup_image = 10; + */ + public Builder addAllDependentBackupImage( + java.lang.Iterable values) { + if (dependentBackupImageBuilder_ == null) { + ensureDependentBackupImageIsMutable(); + super.addAll(values, dependentBackupImage_); + onChanged(); + } else { + dependentBackupImageBuilder_.addAllMessages(values); + } + return this; + } + /** + * repeated .hbase.pb.BackupImage dependent_backup_image = 10; + */ + public Builder 
clearDependentBackupImage() { + if (dependentBackupImageBuilder_ == null) { + dependentBackupImage_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000200); + onChanged(); + } else { + dependentBackupImageBuilder_.clear(); + } + return this; + } + /** + * repeated .hbase.pb.BackupImage dependent_backup_image = 10; + */ + public Builder removeDependentBackupImage(int index) { + if (dependentBackupImageBuilder_ == null) { + ensureDependentBackupImageIsMutable(); + dependentBackupImage_.remove(index); + onChanged(); + } else { + dependentBackupImageBuilder_.remove(index); + } + return this; + } + /** + * repeated .hbase.pb.BackupImage dependent_backup_image = 10; + */ + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage.Builder getDependentBackupImageBuilder( + int index) { + return getDependentBackupImageFieldBuilder().getBuilder(index); + } + /** + * repeated .hbase.pb.BackupImage dependent_backup_image = 10; + */ + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImageOrBuilder getDependentBackupImageOrBuilder( + int index) { + if (dependentBackupImageBuilder_ == null) { + return dependentBackupImage_.get(index); } else { + return dependentBackupImageBuilder_.getMessageOrBuilder(index); + } + } + /** + * repeated .hbase.pb.BackupImage dependent_backup_image = 10; + */ + public java.util.List + getDependentBackupImageOrBuilderList() { + if (dependentBackupImageBuilder_ != null) { + return dependentBackupImageBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(dependentBackupImage_); + } + } + /** + * repeated .hbase.pb.BackupImage dependent_backup_image = 10; + */ + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage.Builder addDependentBackupImageBuilder() { + return getDependentBackupImageFieldBuilder().addBuilder( + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage.getDefaultInstance()); + } + /** + * repeated .hbase.pb.BackupImage dependent_backup_image = 10; + */ + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage.Builder addDependentBackupImageBuilder( + int index) { + return getDependentBackupImageFieldBuilder().addBuilder( + index, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage.getDefaultInstance()); + } + /** + * repeated .hbase.pb.BackupImage dependent_backup_image = 10; + */ + public java.util.List + getDependentBackupImageBuilderList() { + return getDependentBackupImageFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage.Builder, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImageOrBuilder> + getDependentBackupImageFieldBuilder() { + if (dependentBackupImageBuilder_ == null) { + dependentBackupImageBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage.Builder, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImageOrBuilder>( + dependentBackupImage_, + ((bitField0_ & 0x00000200) == 0x00000200), + getParentForChildren(), + isClean()); + dependentBackupImage_ = null; + } + return dependentBackupImageBuilder_; + } + + // required bool compacted = 11; + private boolean compacted_ ; + /** + * required bool compacted = 11; + */ + public boolean 
hasCompacted() { + return ((bitField0_ & 0x00000400) == 0x00000400); + } + /** + * required bool compacted = 11; + */ + public boolean getCompacted() { + return compacted_; + } + /** + * required bool compacted = 11; + */ + public Builder setCompacted(boolean value) { + bitField0_ |= 0x00000400; + compacted_ = value; + onChanged(); + return this; + } + /** + * required bool compacted = 11; + */ + public Builder clearCompacted() { + bitField0_ = (bitField0_ & ~0x00000400); + compacted_ = false; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:hbase.pb.BackupManifest) + } + + static { + defaultInstance = new BackupManifest(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:hbase.pb.BackupManifest) + } + + public interface TableBackupStatusOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required .hbase.pb.TableName table = 1; + /** + * required .hbase.pb.TableName table = 1; + */ + boolean hasTable(); + /** + * required .hbase.pb.TableName table = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTable(); + /** + * required .hbase.pb.TableName table = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableOrBuilder(); + + // required string target_dir = 2; + /** + * required string target_dir = 2; + */ + boolean hasTargetDir(); + /** + * required string target_dir = 2; + */ + java.lang.String getTargetDir(); + /** + * required string target_dir = 2; + */ + com.google.protobuf.ByteString + getTargetDirBytes(); + + // optional string snapshot = 3; + /** + * optional string snapshot = 3; + */ + boolean hasSnapshot(); + /** + * optional string snapshot = 3; + */ + java.lang.String getSnapshot(); + /** + * optional string snapshot = 3; + */ + com.google.protobuf.ByteString + getSnapshotBytes(); + } + /** + * Protobuf type {@code hbase.pb.TableBackupStatus} + */ + public static final class TableBackupStatus extends + com.google.protobuf.GeneratedMessage + implements TableBackupStatusOrBuilder { + // Use TableBackupStatus.newBuilder() to construct. 
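(Illustrative aside, not part of the patch.) The generated builder for hbase.pb.BackupManifest shown above exposes setters for each field and enforces the ones marked "required". Below is a minimal usage sketch in Java, assuming the generated BackupProtos and HBaseProtos classes from this patch are on the classpath and that HBaseProtos.TableName carries the usual namespace/qualifier bytes fields; the backup id, table name and timestamps are placeholder values only.

    import com.google.protobuf.ByteString;
    import org.apache.hadoop.hbase.protobuf.generated.BackupProtos;
    import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;

    public class BackupManifestExample {
      public static void main(String[] args) {
        // Placeholder table name; namespace/qualifier values are illustrative.
        HBaseProtos.TableName table = HBaseProtos.TableName.newBuilder()
            .setNamespace(ByteString.copyFromUtf8("default"))
            .setQualifier(ByteString.copyFromUtf8("t1"))
            .build();

        // All fields marked "required" in the generated code above must be set
        // before build(); otherwise isInitialized() is false and build() throws.
        BackupProtos.BackupManifest manifest = BackupProtos.BackupManifest.newBuilder()
            .setVersion("1.0")                       // required string version = 1
            .setBackupId("backup_1462387600000")     // required string backup_id = 2 (placeholder id)
            .setType(BackupProtos.BackupType.FULL)   // required .hbase.pb.BackupType type = 3
            .addTableList(table)                     // repeated .hbase.pb.TableName table_list = 4
            .setStartTs(1462387600000L)              // required uint64 start_ts = 5
            .setCompleteTs(1462387900000L)           // required uint64 complete_ts = 6
            .setTotalBytes(1024L)                    // required int64 total_bytes = 7
            .setCompacted(false)                     // required bool compacted = 11
            .build();

        System.out.println(manifest.getBackupId());
      }
    }

As with the other generated messages in this file, build() throws an uninitialized-message exception if any required field (version, backup_id, type, start_ts, complete_ts, total_bytes, compacted) is left unset, matching the isInitialized() checks in the builder above.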
+ private TableBackupStatus(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private TableBackupStatus(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final TableBackupStatus defaultInstance; + public static TableBackupStatus getDefaultInstance() { + return defaultInstance; + } + + public TableBackupStatus getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private TableBackupStatus( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder subBuilder = null; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + subBuilder = table_.toBuilder(); + } + table_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(table_); + table_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000001; + break; + } + case 18: { + bitField0_ |= 0x00000002; + targetDir_ = input.readBytes(); + break; + } + case 26: { + bitField0_ |= 0x00000004; + snapshot_ = input.readBytes(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_TableBackupStatus_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_TableBackupStatus_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus.class, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public TableBackupStatus parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new TableBackupStatus(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required .hbase.pb.TableName table = 1; + public static 
final int TABLE_FIELD_NUMBER = 1; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName table_; + /** + * required .hbase.pb.TableName table = 1; + */ + public boolean hasTable() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .hbase.pb.TableName table = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTable() { + return table_; + } + /** + * required .hbase.pb.TableName table = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableOrBuilder() { + return table_; + } + + // required string target_dir = 2; + public static final int TARGET_DIR_FIELD_NUMBER = 2; + private java.lang.Object targetDir_; + /** + * required string target_dir = 2; + */ + public boolean hasTargetDir() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required string target_dir = 2; + */ + public java.lang.String getTargetDir() { + java.lang.Object ref = targetDir_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + targetDir_ = s; + } + return s; + } + } + /** + * required string target_dir = 2; + */ + public com.google.protobuf.ByteString + getTargetDirBytes() { + java.lang.Object ref = targetDir_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + targetDir_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + // optional string snapshot = 3; + public static final int SNAPSHOT_FIELD_NUMBER = 3; + private java.lang.Object snapshot_; + /** + * optional string snapshot = 3; + */ + public boolean hasSnapshot() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * optional string snapshot = 3; + */ + public java.lang.String getSnapshot() { + java.lang.Object ref = snapshot_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + snapshot_ = s; + } + return s; + } + } + /** + * optional string snapshot = 3; + */ + public com.google.protobuf.ByteString + getSnapshotBytes() { + java.lang.Object ref = snapshot_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + snapshot_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private void initFields() { + table_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); + targetDir_ = ""; + snapshot_ = ""; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasTable()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasTargetDir()) { + memoizedIsInitialized = 0; + return false; + } + if (!getTable().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeMessage(1, 
table_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeBytes(2, getTargetDirBytes()); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + output.writeBytes(3, getSnapshotBytes()); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(1, table_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(2, getTargetDirBytes()); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(3, getSnapshotBytes()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus other = (org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus) obj; + + boolean result = true; + result = result && (hasTable() == other.hasTable()); + if (hasTable()) { + result = result && getTable() + .equals(other.getTable()); + } + result = result && (hasTargetDir() == other.hasTargetDir()); + if (hasTargetDir()) { + result = result && getTargetDir() + .equals(other.getTargetDir()); + } + result = result && (hasSnapshot() == other.hasSnapshot()); + if (hasSnapshot()) { + result = result && getSnapshot() + .equals(other.getSnapshot()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasTable()) { + hash = (37 * hash) + TABLE_FIELD_NUMBER; + hash = (53 * hash) + getTable().hashCode(); + } + if (hasTargetDir()) { + hash = (37 * hash) + TARGET_DIR_FIELD_NUMBER; + hash = (53 * hash) + getTargetDir().hashCode(); + } + if (hasSnapshot()) { + hash = (37 * hash) + SNAPSHOT_FIELD_NUMBER; + hash = (53 * hash) + getSnapshot().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus parseFrom(byte[] data) + throws 
com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code hbase.pb.TableBackupStatus} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatusOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_TableBackupStatus_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_TableBackupStatus_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus.class, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + 
com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getTableFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + if (tableBuilder_ == null) { + table_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); + } else { + tableBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + targetDir_ = ""; + bitField0_ = (bitField0_ & ~0x00000002); + snapshot_ = ""; + bitField0_ = (bitField0_ & ~0x00000004); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_TableBackupStatus_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus build() { + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus result = new org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + if (tableBuilder_ == null) { + result.table_ = table_; + } else { + result.table_ = tableBuilder_.build(); + } + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.targetDir_ = targetDir_; + if (((from_bitField0_ & 0x00000004) == 0x00000004)) { + to_bitField0_ |= 0x00000004; + } + result.snapshot_ = snapshot_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus.getDefaultInstance()) return this; + if (other.hasTable()) { + mergeTable(other.getTable()); + } + if (other.hasTargetDir()) { + bitField0_ |= 0x00000002; + targetDir_ = other.targetDir_; + onChanged(); + } + if (other.hasSnapshot()) { + bitField0_ |= 0x00000004; + snapshot_ = other.snapshot_; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasTable()) { + + return false; + } + if (!hasTargetDir()) { + + return false; + } + if (!getTable().isInitialized()) { + + return false; + } + return true; + } + + public Builder 
mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required .hbase.pb.TableName table = 1; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName table_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> tableBuilder_; + /** + * required .hbase.pb.TableName table = 1; + */ + public boolean hasTable() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .hbase.pb.TableName table = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTable() { + if (tableBuilder_ == null) { + return table_; + } else { + return tableBuilder_.getMessage(); + } + } + /** + * required .hbase.pb.TableName table = 1; + */ + public Builder setTable(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { + if (tableBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + table_ = value; + onChanged(); + } else { + tableBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .hbase.pb.TableName table = 1; + */ + public Builder setTable( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) { + if (tableBuilder_ == null) { + table_ = builderForValue.build(); + onChanged(); + } else { + tableBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .hbase.pb.TableName table = 1; + */ + public Builder mergeTable(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { + if (tableBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001) && + table_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance()) { + table_ = + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.newBuilder(table_).mergeFrom(value).buildPartial(); + } else { + table_ = value; + } + onChanged(); + } else { + tableBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .hbase.pb.TableName table = 1; + */ + public Builder clearTable() { + if (tableBuilder_ == null) { + table_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); + onChanged(); + } else { + tableBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + /** + * required .hbase.pb.TableName table = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder getTableBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return getTableFieldBuilder().getBuilder(); + } + /** + * required .hbase.pb.TableName table = 1; + */ + public 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableOrBuilder() { + if (tableBuilder_ != null) { + return tableBuilder_.getMessageOrBuilder(); + } else { + return table_; + } + } + /** + * required .hbase.pb.TableName table = 1; + */ + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> + getTableFieldBuilder() { + if (tableBuilder_ == null) { + tableBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder>( + table_, + getParentForChildren(), + isClean()); + table_ = null; + } + return tableBuilder_; + } + + // required string target_dir = 2; + private java.lang.Object targetDir_ = ""; + /** + * required string target_dir = 2; + */ + public boolean hasTargetDir() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required string target_dir = 2; + */ + public java.lang.String getTargetDir() { + java.lang.Object ref = targetDir_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + targetDir_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * required string target_dir = 2; + */ + public com.google.protobuf.ByteString + getTargetDirBytes() { + java.lang.Object ref = targetDir_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + targetDir_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * required string target_dir = 2; + */ + public Builder setTargetDir( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000002; + targetDir_ = value; + onChanged(); + return this; + } + /** + * required string target_dir = 2; + */ + public Builder clearTargetDir() { + bitField0_ = (bitField0_ & ~0x00000002); + targetDir_ = getDefaultInstance().getTargetDir(); + onChanged(); + return this; + } + /** + * required string target_dir = 2; + */ + public Builder setTargetDirBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000002; + targetDir_ = value; + onChanged(); + return this; + } + + // optional string snapshot = 3; + private java.lang.Object snapshot_ = ""; + /** + * optional string snapshot = 3; + */ + public boolean hasSnapshot() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * optional string snapshot = 3; + */ + public java.lang.String getSnapshot() { + java.lang.Object ref = snapshot_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + snapshot_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * optional string snapshot = 3; + */ + public com.google.protobuf.ByteString + getSnapshotBytes() { + java.lang.Object ref = snapshot_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + snapshot_ = b; + return b; + } else { + return 
(com.google.protobuf.ByteString) ref; + } + } + /** + * optional string snapshot = 3; + */ + public Builder setSnapshot( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000004; + snapshot_ = value; + onChanged(); + return this; + } + /** + * optional string snapshot = 3; + */ + public Builder clearSnapshot() { + bitField0_ = (bitField0_ & ~0x00000004); + snapshot_ = getDefaultInstance().getSnapshot(); + onChanged(); + return this; + } + /** + * optional string snapshot = 3; + */ + public Builder setSnapshotBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000004; + snapshot_ = value; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:hbase.pb.TableBackupStatus) + } + + static { + defaultInstance = new TableBackupStatus(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:hbase.pb.TableBackupStatus) + } + + public interface BackupContextOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required string backup_id = 1; + /** + * required string backup_id = 1; + */ + boolean hasBackupId(); + /** + * required string backup_id = 1; + */ + java.lang.String getBackupId(); + /** + * required string backup_id = 1; + */ + com.google.protobuf.ByteString + getBackupIdBytes(); + + // required .hbase.pb.BackupType type = 2; + /** + * required .hbase.pb.BackupType type = 2; + */ + boolean hasType(); + /** + * required .hbase.pb.BackupType type = 2; + */ + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType getType(); + + // required string target_root_dir = 3; + /** + * required string target_root_dir = 3; + */ + boolean hasTargetRootDir(); + /** + * required string target_root_dir = 3; + */ + java.lang.String getTargetRootDir(); + /** + * required string target_root_dir = 3; + */ + com.google.protobuf.ByteString + getTargetRootDirBytes(); + + // optional .hbase.pb.BackupContext.BackupState state = 4; + /** + * optional .hbase.pb.BackupContext.BackupState state = 4; + */ + boolean hasState(); + /** + * optional .hbase.pb.BackupContext.BackupState state = 4; + */ + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext.BackupState getState(); + + // optional .hbase.pb.BackupContext.BackupPhase phase = 5; + /** + * optional .hbase.pb.BackupContext.BackupPhase phase = 5; + */ + boolean hasPhase(); + /** + * optional .hbase.pb.BackupContext.BackupPhase phase = 5; + */ + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext.BackupPhase getPhase(); + + // optional string failed_message = 6; + /** + * optional string failed_message = 6; + */ + boolean hasFailedMessage(); + /** + * optional string failed_message = 6; + */ + java.lang.String getFailedMessage(); + /** + * optional string failed_message = 6; + */ + com.google.protobuf.ByteString + getFailedMessageBytes(); + + // repeated .hbase.pb.TableBackupStatus table_backup_status = 7; + /** + * repeated .hbase.pb.TableBackupStatus table_backup_status = 7; + */ + java.util.List + getTableBackupStatusList(); + /** + * repeated .hbase.pb.TableBackupStatus table_backup_status = 7; + */ + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus getTableBackupStatus(int index); + /** + * repeated .hbase.pb.TableBackupStatus table_backup_status = 7; + */ + int getTableBackupStatusCount(); + /** + * repeated .hbase.pb.TableBackupStatus table_backup_status = 7; + */ + java.util.List + 
getTableBackupStatusOrBuilderList(); + /** + * repeated .hbase.pb.TableBackupStatus table_backup_status = 7; + */ + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatusOrBuilder getTableBackupStatusOrBuilder( + int index); + + // optional uint64 start_ts = 8; + /** + * optional uint64 start_ts = 8; + */ + boolean hasStartTs(); + /** + * optional uint64 start_ts = 8; + */ + long getStartTs(); + + // optional uint64 end_ts = 9; + /** + * optional uint64 end_ts = 9; + */ + boolean hasEndTs(); + /** + * optional uint64 end_ts = 9; + */ + long getEndTs(); + + // optional int64 total_bytes_copied = 10; + /** + * optional int64 total_bytes_copied = 10; + */ + boolean hasTotalBytesCopied(); + /** + * optional int64 total_bytes_copied = 10; + */ + long getTotalBytesCopied(); + + // optional string hlog_target_dir = 11; + /** + * optional string hlog_target_dir = 11; + */ + boolean hasHlogTargetDir(); + /** + * optional string hlog_target_dir = 11; + */ + java.lang.String getHlogTargetDir(); + /** + * optional string hlog_target_dir = 11; + */ + com.google.protobuf.ByteString + getHlogTargetDirBytes(); + + // optional uint32 progress = 12; + /** + * optional uint32 progress = 12; + */ + boolean hasProgress(); + /** + * optional uint32 progress = 12; + */ + int getProgress(); + } + /** + * Protobuf type {@code hbase.pb.BackupContext} + */ + public static final class BackupContext extends + com.google.protobuf.GeneratedMessage + implements BackupContextOrBuilder { + // Use BackupContext.newBuilder() to construct. + private BackupContext(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private BackupContext(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final BackupContext defaultInstance; + public static BackupContext getDefaultInstance() { + return defaultInstance; + } + + public BackupContext getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private BackupContext( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + bitField0_ |= 0x00000001; + backupId_ = input.readBytes(); + break; + } + case 16: { + int rawValue = input.readEnum(); + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType value = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType.valueOf(rawValue); + if (value == null) { + unknownFields.mergeVarintField(2, rawValue); + } else { + bitField0_ |= 0x00000002; + type_ = value; + } + break; + } + case 26: { + bitField0_ |= 0x00000004; + targetRootDir_ = input.readBytes(); + break; + } + case 32: { + int rawValue = input.readEnum(); + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext.BackupState value = 
org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext.BackupState.valueOf(rawValue); + if (value == null) { + unknownFields.mergeVarintField(4, rawValue); + } else { + bitField0_ |= 0x00000008; + state_ = value; + } + break; + } + case 40: { + int rawValue = input.readEnum(); + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext.BackupPhase value = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext.BackupPhase.valueOf(rawValue); + if (value == null) { + unknownFields.mergeVarintField(5, rawValue); + } else { + bitField0_ |= 0x00000010; + phase_ = value; + } + break; + } + case 50: { + bitField0_ |= 0x00000020; + failedMessage_ = input.readBytes(); + break; + } + case 58: { + if (!((mutable_bitField0_ & 0x00000040) == 0x00000040)) { + tableBackupStatus_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000040; + } + tableBackupStatus_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus.PARSER, extensionRegistry)); + break; + } + case 64: { + bitField0_ |= 0x00000040; + startTs_ = input.readUInt64(); + break; + } + case 72: { + bitField0_ |= 0x00000080; + endTs_ = input.readUInt64(); + break; + } + case 80: { + bitField0_ |= 0x00000100; + totalBytesCopied_ = input.readInt64(); + break; + } + case 90: { + bitField0_ |= 0x00000200; + hlogTargetDir_ = input.readBytes(); + break; + } + case 96: { + bitField0_ |= 0x00000400; + progress_ = input.readUInt32(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000040) == 0x00000040)) { + tableBackupStatus_ = java.util.Collections.unmodifiableList(tableBackupStatus_); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_BackupContext_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_BackupContext_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext.class, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public BackupContext parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new BackupContext(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + /** + * Protobuf enum {@code hbase.pb.BackupContext.BackupState} + */ + public enum BackupState + implements com.google.protobuf.ProtocolMessageEnum { + /** + * WAITING = 0; + */ + WAITING(0, 0), + /** + * RUNNING = 1; + */ + RUNNING(1, 1), + /** + * COMPLETE = 2; + */ + COMPLETE(2, 2), + /** + * FAILED = 3; + */ + FAILED(3, 3), + /** + * CANCELLED = 4; + */ + CANCELLED(4, 4), + ; + + /** + * WAITING = 0; + */ + public static final int 
WAITING_VALUE = 0; + /** + * RUNNING = 1; + */ + public static final int RUNNING_VALUE = 1; + /** + * COMPLETE = 2; + */ + public static final int COMPLETE_VALUE = 2; + /** + * FAILED = 3; + */ + public static final int FAILED_VALUE = 3; + /** + * CANCELLED = 4; + */ + public static final int CANCELLED_VALUE = 4; + + + public final int getNumber() { return value; } + + public static BackupState valueOf(int value) { + switch (value) { + case 0: return WAITING; + case 1: return RUNNING; + case 2: return COMPLETE; + case 3: return FAILED; + case 4: return CANCELLED; + default: return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap + internalGetValueMap() { + return internalValueMap; + } + private static com.google.protobuf.Internal.EnumLiteMap + internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public BackupState findValueByNumber(int number) { + return BackupState.valueOf(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor + getValueDescriptor() { + return getDescriptor().getValues().get(index); + } + public final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptorForType() { + return getDescriptor(); + } + public static final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext.getDescriptor().getEnumTypes().get(0); + } + + private static final BackupState[] VALUES = values(); + + public static BackupState valueOf( + com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "EnumValueDescriptor is not for this type."); + } + return VALUES[desc.getIndex()]; + } + + private final int index; + private final int value; + + private BackupState(int index, int value) { + this.index = index; + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:hbase.pb.BackupContext.BackupState) + } + + /** + * Protobuf enum {@code hbase.pb.BackupContext.BackupPhase} + */ + public enum BackupPhase + implements com.google.protobuf.ProtocolMessageEnum { + /** + * REQUEST = 0; + */ + REQUEST(0, 0), + /** + * SNAPSHOT = 1; + */ + SNAPSHOT(1, 1), + /** + * PREPARE_INCREMENTAL = 2; + */ + PREPARE_INCREMENTAL(2, 2), + /** + * SNAPSHOTCOPY = 3; + */ + SNAPSHOTCOPY(3, 3), + /** + * INCREMENTAL_COPY = 4; + */ + INCREMENTAL_COPY(4, 4), + /** + * STORE_MANIFEST = 5; + */ + STORE_MANIFEST(5, 5), + ; + + /** + * REQUEST = 0; + */ + public static final int REQUEST_VALUE = 0; + /** + * SNAPSHOT = 1; + */ + public static final int SNAPSHOT_VALUE = 1; + /** + * PREPARE_INCREMENTAL = 2; + */ + public static final int PREPARE_INCREMENTAL_VALUE = 2; + /** + * SNAPSHOTCOPY = 3; + */ + public static final int SNAPSHOTCOPY_VALUE = 3; + /** + * INCREMENTAL_COPY = 4; + */ + public static final int INCREMENTAL_COPY_VALUE = 4; + /** + * STORE_MANIFEST = 5; + */ + public static final int STORE_MANIFEST_VALUE = 5; + + + public final int getNumber() { return value; } + + public static BackupPhase valueOf(int value) { + switch (value) { + case 0: return REQUEST; + case 1: return SNAPSHOT; + case 2: return PREPARE_INCREMENTAL; + case 3: return SNAPSHOTCOPY; + case 4: return INCREMENTAL_COPY; + case 5: return STORE_MANIFEST; + default: return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap + internalGetValueMap() { + return internalValueMap; + } + private static com.google.protobuf.Internal.EnumLiteMap + internalValueMap = + new 
com.google.protobuf.Internal.EnumLiteMap() { + public BackupPhase findValueByNumber(int number) { + return BackupPhase.valueOf(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor + getValueDescriptor() { + return getDescriptor().getValues().get(index); + } + public final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptorForType() { + return getDescriptor(); + } + public static final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext.getDescriptor().getEnumTypes().get(1); + } + + private static final BackupPhase[] VALUES = values(); + + public static BackupPhase valueOf( + com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "EnumValueDescriptor is not for this type."); + } + return VALUES[desc.getIndex()]; + } + + private final int index; + private final int value; + + private BackupPhase(int index, int value) { + this.index = index; + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:hbase.pb.BackupContext.BackupPhase) + } + + private int bitField0_; + // required string backup_id = 1; + public static final int BACKUP_ID_FIELD_NUMBER = 1; + private java.lang.Object backupId_; + /** + * required string backup_id = 1; + */ + public boolean hasBackupId() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string backup_id = 1; + */ + public java.lang.String getBackupId() { + java.lang.Object ref = backupId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + backupId_ = s; + } + return s; + } + } + /** + * required string backup_id = 1; + */ + public com.google.protobuf.ByteString + getBackupIdBytes() { + java.lang.Object ref = backupId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + backupId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + // required .hbase.pb.BackupType type = 2; + public static final int TYPE_FIELD_NUMBER = 2; + private org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType type_; + /** + * required .hbase.pb.BackupType type = 2; + */ + public boolean hasType() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required .hbase.pb.BackupType type = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType getType() { + return type_; + } + + // required string target_root_dir = 3; + public static final int TARGET_ROOT_DIR_FIELD_NUMBER = 3; + private java.lang.Object targetRootDir_; + /** + * required string target_root_dir = 3; + */ + public boolean hasTargetRootDir() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * required string target_root_dir = 3; + */ + public java.lang.String getTargetRootDir() { + java.lang.Object ref = targetRootDir_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + targetRootDir_ = s; + } + return s; + } + } + /** + * required string target_root_dir = 3; + */ + public 
com.google.protobuf.ByteString + getTargetRootDirBytes() { + java.lang.Object ref = targetRootDir_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + targetRootDir_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + // optional .hbase.pb.BackupContext.BackupState state = 4; + public static final int STATE_FIELD_NUMBER = 4; + private org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext.BackupState state_; + /** + * optional .hbase.pb.BackupContext.BackupState state = 4; + */ + public boolean hasState() { + return ((bitField0_ & 0x00000008) == 0x00000008); + } + /** + * optional .hbase.pb.BackupContext.BackupState state = 4; + */ + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext.BackupState getState() { + return state_; + } + + // optional .hbase.pb.BackupContext.BackupPhase phase = 5; + public static final int PHASE_FIELD_NUMBER = 5; + private org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext.BackupPhase phase_; + /** + * optional .hbase.pb.BackupContext.BackupPhase phase = 5; + */ + public boolean hasPhase() { + return ((bitField0_ & 0x00000010) == 0x00000010); + } + /** + * optional .hbase.pb.BackupContext.BackupPhase phase = 5; + */ + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext.BackupPhase getPhase() { + return phase_; + } + + // optional string failed_message = 6; + public static final int FAILED_MESSAGE_FIELD_NUMBER = 6; + private java.lang.Object failedMessage_; + /** + * optional string failed_message = 6; + */ + public boolean hasFailedMessage() { + return ((bitField0_ & 0x00000020) == 0x00000020); + } + /** + * optional string failed_message = 6; + */ + public java.lang.String getFailedMessage() { + java.lang.Object ref = failedMessage_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + failedMessage_ = s; + } + return s; + } + } + /** + * optional string failed_message = 6; + */ + public com.google.protobuf.ByteString + getFailedMessageBytes() { + java.lang.Object ref = failedMessage_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + failedMessage_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + // repeated .hbase.pb.TableBackupStatus table_backup_status = 7; + public static final int TABLE_BACKUP_STATUS_FIELD_NUMBER = 7; + private java.util.List tableBackupStatus_; + /** + * repeated .hbase.pb.TableBackupStatus table_backup_status = 7; + */ + public java.util.List getTableBackupStatusList() { + return tableBackupStatus_; + } + /** + * repeated .hbase.pb.TableBackupStatus table_backup_status = 7; + */ + public java.util.List + getTableBackupStatusOrBuilderList() { + return tableBackupStatus_; + } + /** + * repeated .hbase.pb.TableBackupStatus table_backup_status = 7; + */ + public int getTableBackupStatusCount() { + return tableBackupStatus_.size(); + } + /** + * repeated .hbase.pb.TableBackupStatus table_backup_status = 7; + */ + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus getTableBackupStatus(int index) { + return tableBackupStatus_.get(index); + } + /** + * repeated 
.hbase.pb.TableBackupStatus table_backup_status = 7; + */ + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatusOrBuilder getTableBackupStatusOrBuilder( + int index) { + return tableBackupStatus_.get(index); + } + + // optional uint64 start_ts = 8; + public static final int START_TS_FIELD_NUMBER = 8; + private long startTs_; + /** + * optional uint64 start_ts = 8; + */ + public boolean hasStartTs() { + return ((bitField0_ & 0x00000040) == 0x00000040); + } + /** + * optional uint64 start_ts = 8; + */ + public long getStartTs() { + return startTs_; + } + + // optional uint64 end_ts = 9; + public static final int END_TS_FIELD_NUMBER = 9; + private long endTs_; + /** + * optional uint64 end_ts = 9; + */ + public boolean hasEndTs() { + return ((bitField0_ & 0x00000080) == 0x00000080); + } + /** + * optional uint64 end_ts = 9; + */ + public long getEndTs() { + return endTs_; + } + + // optional int64 total_bytes_copied = 10; + public static final int TOTAL_BYTES_COPIED_FIELD_NUMBER = 10; + private long totalBytesCopied_; + /** + * optional int64 total_bytes_copied = 10; + */ + public boolean hasTotalBytesCopied() { + return ((bitField0_ & 0x00000100) == 0x00000100); + } + /** + * optional int64 total_bytes_copied = 10; + */ + public long getTotalBytesCopied() { + return totalBytesCopied_; + } + + // optional string hlog_target_dir = 11; + public static final int HLOG_TARGET_DIR_FIELD_NUMBER = 11; + private java.lang.Object hlogTargetDir_; + /** + * optional string hlog_target_dir = 11; + */ + public boolean hasHlogTargetDir() { + return ((bitField0_ & 0x00000200) == 0x00000200); + } + /** + * optional string hlog_target_dir = 11; + */ + public java.lang.String getHlogTargetDir() { + java.lang.Object ref = hlogTargetDir_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + hlogTargetDir_ = s; + } + return s; + } + } + /** + * optional string hlog_target_dir = 11; + */ + public com.google.protobuf.ByteString + getHlogTargetDirBytes() { + java.lang.Object ref = hlogTargetDir_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + hlogTargetDir_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + // optional uint32 progress = 12; + public static final int PROGRESS_FIELD_NUMBER = 12; + private int progress_; + /** + * optional uint32 progress = 12; + */ + public boolean hasProgress() { + return ((bitField0_ & 0x00000400) == 0x00000400); + } + /** + * optional uint32 progress = 12; + */ + public int getProgress() { + return progress_; + } + + private void initFields() { + backupId_ = ""; + type_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType.FULL; + targetRootDir_ = ""; + state_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext.BackupState.WAITING; + phase_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext.BackupPhase.REQUEST; + failedMessage_ = ""; + tableBackupStatus_ = java.util.Collections.emptyList(); + startTs_ = 0L; + endTs_ = 0L; + totalBytesCopied_ = 0L; + hlogTargetDir_ = ""; + progress_ = 0; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if 
(!hasBackupId()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasType()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasTargetRootDir()) { + memoizedIsInitialized = 0; + return false; + } + for (int i = 0; i < getTableBackupStatusCount(); i++) { + if (!getTableBackupStatus(i).isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(1, getBackupIdBytes()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeEnum(2, type_.getNumber()); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + output.writeBytes(3, getTargetRootDirBytes()); + } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + output.writeEnum(4, state_.getNumber()); + } + if (((bitField0_ & 0x00000010) == 0x00000010)) { + output.writeEnum(5, phase_.getNumber()); + } + if (((bitField0_ & 0x00000020) == 0x00000020)) { + output.writeBytes(6, getFailedMessageBytes()); + } + for (int i = 0; i < tableBackupStatus_.size(); i++) { + output.writeMessage(7, tableBackupStatus_.get(i)); + } + if (((bitField0_ & 0x00000040) == 0x00000040)) { + output.writeUInt64(8, startTs_); + } + if (((bitField0_ & 0x00000080) == 0x00000080)) { + output.writeUInt64(9, endTs_); + } + if (((bitField0_ & 0x00000100) == 0x00000100)) { + output.writeInt64(10, totalBytesCopied_); + } + if (((bitField0_ & 0x00000200) == 0x00000200)) { + output.writeBytes(11, getHlogTargetDirBytes()); + } + if (((bitField0_ & 0x00000400) == 0x00000400)) { + output.writeUInt32(12, progress_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(1, getBackupIdBytes()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeEnumSize(2, type_.getNumber()); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(3, getTargetRootDirBytes()); + } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + size += com.google.protobuf.CodedOutputStream + .computeEnumSize(4, state_.getNumber()); + } + if (((bitField0_ & 0x00000010) == 0x00000010)) { + size += com.google.protobuf.CodedOutputStream + .computeEnumSize(5, phase_.getNumber()); + } + if (((bitField0_ & 0x00000020) == 0x00000020)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(6, getFailedMessageBytes()); + } + for (int i = 0; i < tableBackupStatus_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(7, tableBackupStatus_.get(i)); + } + if (((bitField0_ & 0x00000040) == 0x00000040)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt64Size(8, startTs_); + } + if (((bitField0_ & 0x00000080) == 0x00000080)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt64Size(9, endTs_); + } + if (((bitField0_ & 0x00000100) == 0x00000100)) { + size += com.google.protobuf.CodedOutputStream + .computeInt64Size(10, totalBytesCopied_); + } + if (((bitField0_ & 0x00000200) == 0x00000200)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(11, 
getHlogTargetDirBytes()); + } + if (((bitField0_ & 0x00000400) == 0x00000400)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt32Size(12, progress_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext other = (org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext) obj; + + boolean result = true; + result = result && (hasBackupId() == other.hasBackupId()); + if (hasBackupId()) { + result = result && getBackupId() + .equals(other.getBackupId()); + } + result = result && (hasType() == other.hasType()); + if (hasType()) { + result = result && + (getType() == other.getType()); + } + result = result && (hasTargetRootDir() == other.hasTargetRootDir()); + if (hasTargetRootDir()) { + result = result && getTargetRootDir() + .equals(other.getTargetRootDir()); + } + result = result && (hasState() == other.hasState()); + if (hasState()) { + result = result && + (getState() == other.getState()); + } + result = result && (hasPhase() == other.hasPhase()); + if (hasPhase()) { + result = result && + (getPhase() == other.getPhase()); + } + result = result && (hasFailedMessage() == other.hasFailedMessage()); + if (hasFailedMessage()) { + result = result && getFailedMessage() + .equals(other.getFailedMessage()); + } + result = result && getTableBackupStatusList() + .equals(other.getTableBackupStatusList()); + result = result && (hasStartTs() == other.hasStartTs()); + if (hasStartTs()) { + result = result && (getStartTs() + == other.getStartTs()); + } + result = result && (hasEndTs() == other.hasEndTs()); + if (hasEndTs()) { + result = result && (getEndTs() + == other.getEndTs()); + } + result = result && (hasTotalBytesCopied() == other.hasTotalBytesCopied()); + if (hasTotalBytesCopied()) { + result = result && (getTotalBytesCopied() + == other.getTotalBytesCopied()); + } + result = result && (hasHlogTargetDir() == other.hasHlogTargetDir()); + if (hasHlogTargetDir()) { + result = result && getHlogTargetDir() + .equals(other.getHlogTargetDir()); + } + result = result && (hasProgress() == other.hasProgress()); + if (hasProgress()) { + result = result && (getProgress() + == other.getProgress()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasBackupId()) { + hash = (37 * hash) + BACKUP_ID_FIELD_NUMBER; + hash = (53 * hash) + getBackupId().hashCode(); + } + if (hasType()) { + hash = (37 * hash) + TYPE_FIELD_NUMBER; + hash = (53 * hash) + hashEnum(getType()); + } + if (hasTargetRootDir()) { + hash = (37 * hash) + TARGET_ROOT_DIR_FIELD_NUMBER; + hash = (53 * hash) + getTargetRootDir().hashCode(); + } + if (hasState()) { + hash = (37 * hash) + STATE_FIELD_NUMBER; + hash = (53 * hash) + hashEnum(getState()); + } + if (hasPhase()) { + hash = (37 * 
hash) + PHASE_FIELD_NUMBER; + hash = (53 * hash) + hashEnum(getPhase()); + } + if (hasFailedMessage()) { + hash = (37 * hash) + FAILED_MESSAGE_FIELD_NUMBER; + hash = (53 * hash) + getFailedMessage().hashCode(); + } + if (getTableBackupStatusCount() > 0) { + hash = (37 * hash) + TABLE_BACKUP_STATUS_FIELD_NUMBER; + hash = (53 * hash) + getTableBackupStatusList().hashCode(); + } + if (hasStartTs()) { + hash = (37 * hash) + START_TS_FIELD_NUMBER; + hash = (53 * hash) + hashLong(getStartTs()); + } + if (hasEndTs()) { + hash = (37 * hash) + END_TS_FIELD_NUMBER; + hash = (53 * hash) + hashLong(getEndTs()); + } + if (hasTotalBytesCopied()) { + hash = (37 * hash) + TOTAL_BYTES_COPIED_FIELD_NUMBER; + hash = (53 * hash) + hashLong(getTotalBytesCopied()); + } + if (hasHlogTargetDir()) { + hash = (37 * hash) + HLOG_TARGET_DIR_FIELD_NUMBER; + hash = (53 * hash) + getHlogTargetDir().hashCode(); + } + if (hasProgress()) { + hash = (37 * hash) + PROGRESS_FIELD_NUMBER; + hash = (53 * hash) + getProgress(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) 
+ throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code hbase.pb.BackupContext} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContextOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_BackupContext_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_BackupContext_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext.class, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getTableBackupStatusFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + backupId_ = ""; + bitField0_ = (bitField0_ & ~0x00000001); + type_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType.FULL; + bitField0_ = (bitField0_ & ~0x00000002); + targetRootDir_ = ""; + bitField0_ = (bitField0_ & ~0x00000004); + state_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext.BackupState.WAITING; + bitField0_ = (bitField0_ & ~0x00000008); + phase_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext.BackupPhase.REQUEST; + bitField0_ = (bitField0_ & ~0x00000010); + failedMessage_ = ""; + bitField0_ = (bitField0_ & ~0x00000020); + if (tableBackupStatusBuilder_ == null) { + tableBackupStatus_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000040); + } else { + tableBackupStatusBuilder_.clear(); + } + startTs_ = 0L; + bitField0_ = (bitField0_ & ~0x00000080); + endTs_ = 0L; + bitField0_ = (bitField0_ & ~0x00000100); + totalBytesCopied_ = 0L; + bitField0_ = (bitField0_ & ~0x00000200); + hlogTargetDir_ = ""; + bitField0_ = (bitField0_ & ~0x00000400); + progress_ = 0; + bitField0_ = (bitField0_ & ~0x00000800); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_BackupContext_descriptor; + } + + public 
org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext build() { + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext result = new org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.backupId_ = backupId_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.type_ = type_; + if (((from_bitField0_ & 0x00000004) == 0x00000004)) { + to_bitField0_ |= 0x00000004; + } + result.targetRootDir_ = targetRootDir_; + if (((from_bitField0_ & 0x00000008) == 0x00000008)) { + to_bitField0_ |= 0x00000008; + } + result.state_ = state_; + if (((from_bitField0_ & 0x00000010) == 0x00000010)) { + to_bitField0_ |= 0x00000010; + } + result.phase_ = phase_; + if (((from_bitField0_ & 0x00000020) == 0x00000020)) { + to_bitField0_ |= 0x00000020; + } + result.failedMessage_ = failedMessage_; + if (tableBackupStatusBuilder_ == null) { + if (((bitField0_ & 0x00000040) == 0x00000040)) { + tableBackupStatus_ = java.util.Collections.unmodifiableList(tableBackupStatus_); + bitField0_ = (bitField0_ & ~0x00000040); + } + result.tableBackupStatus_ = tableBackupStatus_; + } else { + result.tableBackupStatus_ = tableBackupStatusBuilder_.build(); + } + if (((from_bitField0_ & 0x00000080) == 0x00000080)) { + to_bitField0_ |= 0x00000040; + } + result.startTs_ = startTs_; + if (((from_bitField0_ & 0x00000100) == 0x00000100)) { + to_bitField0_ |= 0x00000080; + } + result.endTs_ = endTs_; + if (((from_bitField0_ & 0x00000200) == 0x00000200)) { + to_bitField0_ |= 0x00000100; + } + result.totalBytesCopied_ = totalBytesCopied_; + if (((from_bitField0_ & 0x00000400) == 0x00000400)) { + to_bitField0_ |= 0x00000200; + } + result.hlogTargetDir_ = hlogTargetDir_; + if (((from_bitField0_ & 0x00000800) == 0x00000800)) { + to_bitField0_ |= 0x00000400; + } + result.progress_ = progress_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext.getDefaultInstance()) return this; + if (other.hasBackupId()) { + bitField0_ |= 0x00000001; + backupId_ = other.backupId_; + onChanged(); + } + if (other.hasType()) { + setType(other.getType()); + } + if (other.hasTargetRootDir()) { + bitField0_ |= 0x00000004; + targetRootDir_ = other.targetRootDir_; + onChanged(); + } + if (other.hasState()) { + setState(other.getState()); + } + if (other.hasPhase()) { + 
setPhase(other.getPhase()); + } + if (other.hasFailedMessage()) { + bitField0_ |= 0x00000020; + failedMessage_ = other.failedMessage_; + onChanged(); + } + if (tableBackupStatusBuilder_ == null) { + if (!other.tableBackupStatus_.isEmpty()) { + if (tableBackupStatus_.isEmpty()) { + tableBackupStatus_ = other.tableBackupStatus_; + bitField0_ = (bitField0_ & ~0x00000040); + } else { + ensureTableBackupStatusIsMutable(); + tableBackupStatus_.addAll(other.tableBackupStatus_); + } + onChanged(); + } + } else { + if (!other.tableBackupStatus_.isEmpty()) { + if (tableBackupStatusBuilder_.isEmpty()) { + tableBackupStatusBuilder_.dispose(); + tableBackupStatusBuilder_ = null; + tableBackupStatus_ = other.tableBackupStatus_; + bitField0_ = (bitField0_ & ~0x00000040); + tableBackupStatusBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? + getTableBackupStatusFieldBuilder() : null; + } else { + tableBackupStatusBuilder_.addAllMessages(other.tableBackupStatus_); + } + } + } + if (other.hasStartTs()) { + setStartTs(other.getStartTs()); + } + if (other.hasEndTs()) { + setEndTs(other.getEndTs()); + } + if (other.hasTotalBytesCopied()) { + setTotalBytesCopied(other.getTotalBytesCopied()); + } + if (other.hasHlogTargetDir()) { + bitField0_ |= 0x00000400; + hlogTargetDir_ = other.hlogTargetDir_; + onChanged(); + } + if (other.hasProgress()) { + setProgress(other.getProgress()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasBackupId()) { + + return false; + } + if (!hasType()) { + + return false; + } + if (!hasTargetRootDir()) { + + return false; + } + for (int i = 0; i < getTableBackupStatusCount(); i++) { + if (!getTableBackupStatus(i).isInitialized()) { + + return false; + } + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required string backup_id = 1; + private java.lang.Object backupId_ = ""; + /** + * required string backup_id = 1; + */ + public boolean hasBackupId() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string backup_id = 1; + */ + public java.lang.String getBackupId() { + java.lang.Object ref = backupId_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + backupId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * required string backup_id = 1; + */ + public com.google.protobuf.ByteString + getBackupIdBytes() { + java.lang.Object ref = backupId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + backupId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * required string backup_id = 1; + */ + public Builder setBackupId( + java.lang.String value) { + if (value == null) { + throw new 
NullPointerException(); + } + bitField0_ |= 0x00000001; + backupId_ = value; + onChanged(); + return this; + } + /** + * required string backup_id = 1; + */ + public Builder clearBackupId() { + bitField0_ = (bitField0_ & ~0x00000001); + backupId_ = getDefaultInstance().getBackupId(); + onChanged(); + return this; + } + /** + * required string backup_id = 1; + */ + public Builder setBackupIdBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + backupId_ = value; + onChanged(); + return this; + } + + // required .hbase.pb.BackupType type = 2; + private org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType type_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType.FULL; + /** + * required .hbase.pb.BackupType type = 2; + */ + public boolean hasType() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required .hbase.pb.BackupType type = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType getType() { + return type_; + } + /** + * required .hbase.pb.BackupType type = 2; + */ + public Builder setType(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000002; + type_ = value; + onChanged(); + return this; + } + /** + * required .hbase.pb.BackupType type = 2; + */ + public Builder clearType() { + bitField0_ = (bitField0_ & ~0x00000002); + type_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType.FULL; + onChanged(); + return this; + } + + // required string target_root_dir = 3; + private java.lang.Object targetRootDir_ = ""; + /** + * required string target_root_dir = 3; + */ + public boolean hasTargetRootDir() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * required string target_root_dir = 3; + */ + public java.lang.String getTargetRootDir() { + java.lang.Object ref = targetRootDir_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + targetRootDir_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * required string target_root_dir = 3; + */ + public com.google.protobuf.ByteString + getTargetRootDirBytes() { + java.lang.Object ref = targetRootDir_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + targetRootDir_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * required string target_root_dir = 3; + */ + public Builder setTargetRootDir( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000004; + targetRootDir_ = value; + onChanged(); + return this; + } + /** + * required string target_root_dir = 3; + */ + public Builder clearTargetRootDir() { + bitField0_ = (bitField0_ & ~0x00000004); + targetRootDir_ = getDefaultInstance().getTargetRootDir(); + onChanged(); + return this; + } + /** + * required string target_root_dir = 3; + */ + public Builder setTargetRootDirBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000004; + targetRootDir_ = value; + onChanged(); + return this; + } + + // optional .hbase.pb.BackupContext.BackupState state = 4; + private 
org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext.BackupState state_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext.BackupState.WAITING; + /** + * optional .hbase.pb.BackupContext.BackupState state = 4; + */ + public boolean hasState() { + return ((bitField0_ & 0x00000008) == 0x00000008); + } + /** + * optional .hbase.pb.BackupContext.BackupState state = 4; + */ + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext.BackupState getState() { + return state_; + } + /** + * optional .hbase.pb.BackupContext.BackupState state = 4; + */ + public Builder setState(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext.BackupState value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000008; + state_ = value; + onChanged(); + return this; + } + /** + * optional .hbase.pb.BackupContext.BackupState state = 4; + */ + public Builder clearState() { + bitField0_ = (bitField0_ & ~0x00000008); + state_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext.BackupState.WAITING; + onChanged(); + return this; + } + + // optional .hbase.pb.BackupContext.BackupPhase phase = 5; + private org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext.BackupPhase phase_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext.BackupPhase.REQUEST; + /** + * optional .hbase.pb.BackupContext.BackupPhase phase = 5; + */ + public boolean hasPhase() { + return ((bitField0_ & 0x00000010) == 0x00000010); + } + /** + * optional .hbase.pb.BackupContext.BackupPhase phase = 5; + */ + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext.BackupPhase getPhase() { + return phase_; + } + /** + * optional .hbase.pb.BackupContext.BackupPhase phase = 5; + */ + public Builder setPhase(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext.BackupPhase value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000010; + phase_ = value; + onChanged(); + return this; + } + /** + * optional .hbase.pb.BackupContext.BackupPhase phase = 5; + */ + public Builder clearPhase() { + bitField0_ = (bitField0_ & ~0x00000010); + phase_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext.BackupPhase.REQUEST; + onChanged(); + return this; + } + + // optional string failed_message = 6; + private java.lang.Object failedMessage_ = ""; + /** + * optional string failed_message = 6; + */ + public boolean hasFailedMessage() { + return ((bitField0_ & 0x00000020) == 0x00000020); + } + /** + * optional string failed_message = 6; + */ + public java.lang.String getFailedMessage() { + java.lang.Object ref = failedMessage_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + failedMessage_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * optional string failed_message = 6; + */ + public com.google.protobuf.ByteString + getFailedMessageBytes() { + java.lang.Object ref = failedMessage_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + failedMessage_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * optional string failed_message = 6; + */ + public Builder setFailedMessage( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 
0x00000020; + failedMessage_ = value; + onChanged(); + return this; + } + /** + * optional string failed_message = 6; + */ + public Builder clearFailedMessage() { + bitField0_ = (bitField0_ & ~0x00000020); + failedMessage_ = getDefaultInstance().getFailedMessage(); + onChanged(); + return this; + } + /** + * optional string failed_message = 6; + */ + public Builder setFailedMessageBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000020; + failedMessage_ = value; + onChanged(); + return this; + } + + // repeated .hbase.pb.TableBackupStatus table_backup_status = 7; + private java.util.List tableBackupStatus_ = + java.util.Collections.emptyList(); + private void ensureTableBackupStatusIsMutable() { + if (!((bitField0_ & 0x00000040) == 0x00000040)) { + tableBackupStatus_ = new java.util.ArrayList(tableBackupStatus_); + bitField0_ |= 0x00000040; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus.Builder, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatusOrBuilder> tableBackupStatusBuilder_; + + /** + * repeated .hbase.pb.TableBackupStatus table_backup_status = 7; + */ + public java.util.List getTableBackupStatusList() { + if (tableBackupStatusBuilder_ == null) { + return java.util.Collections.unmodifiableList(tableBackupStatus_); + } else { + return tableBackupStatusBuilder_.getMessageList(); + } + } + /** + * repeated .hbase.pb.TableBackupStatus table_backup_status = 7; + */ + public int getTableBackupStatusCount() { + if (tableBackupStatusBuilder_ == null) { + return tableBackupStatus_.size(); + } else { + return tableBackupStatusBuilder_.getCount(); + } + } + /** + * repeated .hbase.pb.TableBackupStatus table_backup_status = 7; + */ + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus getTableBackupStatus(int index) { + if (tableBackupStatusBuilder_ == null) { + return tableBackupStatus_.get(index); + } else { + return tableBackupStatusBuilder_.getMessage(index); + } + } + /** + * repeated .hbase.pb.TableBackupStatus table_backup_status = 7; + */ + public Builder setTableBackupStatus( + int index, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus value) { + if (tableBackupStatusBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureTableBackupStatusIsMutable(); + tableBackupStatus_.set(index, value); + onChanged(); + } else { + tableBackupStatusBuilder_.setMessage(index, value); + } + return this; + } + /** + * repeated .hbase.pb.TableBackupStatus table_backup_status = 7; + */ + public Builder setTableBackupStatus( + int index, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus.Builder builderForValue) { + if (tableBackupStatusBuilder_ == null) { + ensureTableBackupStatusIsMutable(); + tableBackupStatus_.set(index, builderForValue.build()); + onChanged(); + } else { + tableBackupStatusBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.TableBackupStatus table_backup_status = 7; + */ + public Builder addTableBackupStatus(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus value) { + if (tableBackupStatusBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureTableBackupStatusIsMutable(); + 
tableBackupStatus_.add(value); + onChanged(); + } else { + tableBackupStatusBuilder_.addMessage(value); + } + return this; + } + /** + * repeated .hbase.pb.TableBackupStatus table_backup_status = 7; + */ + public Builder addTableBackupStatus( + int index, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus value) { + if (tableBackupStatusBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureTableBackupStatusIsMutable(); + tableBackupStatus_.add(index, value); + onChanged(); + } else { + tableBackupStatusBuilder_.addMessage(index, value); + } + return this; + } + /** + * repeated .hbase.pb.TableBackupStatus table_backup_status = 7; + */ + public Builder addTableBackupStatus( + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus.Builder builderForValue) { + if (tableBackupStatusBuilder_ == null) { + ensureTableBackupStatusIsMutable(); + tableBackupStatus_.add(builderForValue.build()); + onChanged(); + } else { + tableBackupStatusBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.TableBackupStatus table_backup_status = 7; + */ + public Builder addTableBackupStatus( + int index, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus.Builder builderForValue) { + if (tableBackupStatusBuilder_ == null) { + ensureTableBackupStatusIsMutable(); + tableBackupStatus_.add(index, builderForValue.build()); + onChanged(); + } else { + tableBackupStatusBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.TableBackupStatus table_backup_status = 7; + */ + public Builder addAllTableBackupStatus( + java.lang.Iterable values) { + if (tableBackupStatusBuilder_ == null) { + ensureTableBackupStatusIsMutable(); + super.addAll(values, tableBackupStatus_); + onChanged(); + } else { + tableBackupStatusBuilder_.addAllMessages(values); + } + return this; + } + /** + * repeated .hbase.pb.TableBackupStatus table_backup_status = 7; + */ + public Builder clearTableBackupStatus() { + if (tableBackupStatusBuilder_ == null) { + tableBackupStatus_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000040); + onChanged(); + } else { + tableBackupStatusBuilder_.clear(); + } + return this; + } + /** + * repeated .hbase.pb.TableBackupStatus table_backup_status = 7; + */ + public Builder removeTableBackupStatus(int index) { + if (tableBackupStatusBuilder_ == null) { + ensureTableBackupStatusIsMutable(); + tableBackupStatus_.remove(index); + onChanged(); + } else { + tableBackupStatusBuilder_.remove(index); + } + return this; + } + /** + * repeated .hbase.pb.TableBackupStatus table_backup_status = 7; + */ + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus.Builder getTableBackupStatusBuilder( + int index) { + return getTableBackupStatusFieldBuilder().getBuilder(index); + } + /** + * repeated .hbase.pb.TableBackupStatus table_backup_status = 7; + */ + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatusOrBuilder getTableBackupStatusOrBuilder( + int index) { + if (tableBackupStatusBuilder_ == null) { + return tableBackupStatus_.get(index); } else { + return tableBackupStatusBuilder_.getMessageOrBuilder(index); + } + } + /** + * repeated .hbase.pb.TableBackupStatus table_backup_status = 7; + */ + public java.util.List + getTableBackupStatusOrBuilderList() { + if (tableBackupStatusBuilder_ != null) { + return tableBackupStatusBuilder_.getMessageOrBuilderList(); + } 
else { + return java.util.Collections.unmodifiableList(tableBackupStatus_); + } + } + /** + * repeated .hbase.pb.TableBackupStatus table_backup_status = 7; + */ + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus.Builder addTableBackupStatusBuilder() { + return getTableBackupStatusFieldBuilder().addBuilder( + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus.getDefaultInstance()); + } + /** + * repeated .hbase.pb.TableBackupStatus table_backup_status = 7; + */ + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus.Builder addTableBackupStatusBuilder( + int index) { + return getTableBackupStatusFieldBuilder().addBuilder( + index, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus.getDefaultInstance()); + } + /** + * repeated .hbase.pb.TableBackupStatus table_backup_status = 7; + */ + public java.util.List + getTableBackupStatusBuilderList() { + return getTableBackupStatusFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus.Builder, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatusOrBuilder> + getTableBackupStatusFieldBuilder() { + if (tableBackupStatusBuilder_ == null) { + tableBackupStatusBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus.Builder, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatusOrBuilder>( + tableBackupStatus_, + ((bitField0_ & 0x00000040) == 0x00000040), + getParentForChildren(), + isClean()); + tableBackupStatus_ = null; + } + return tableBackupStatusBuilder_; + } + + // optional uint64 start_ts = 8; + private long startTs_ ; + /** + * optional uint64 start_ts = 8; + */ + public boolean hasStartTs() { + return ((bitField0_ & 0x00000080) == 0x00000080); + } + /** + * optional uint64 start_ts = 8; + */ + public long getStartTs() { + return startTs_; + } + /** + * optional uint64 start_ts = 8; + */ + public Builder setStartTs(long value) { + bitField0_ |= 0x00000080; + startTs_ = value; + onChanged(); + return this; + } + /** + * optional uint64 start_ts = 8; + */ + public Builder clearStartTs() { + bitField0_ = (bitField0_ & ~0x00000080); + startTs_ = 0L; + onChanged(); + return this; + } + + // optional uint64 end_ts = 9; + private long endTs_ ; + /** + * optional uint64 end_ts = 9; + */ + public boolean hasEndTs() { + return ((bitField0_ & 0x00000100) == 0x00000100); + } + /** + * optional uint64 end_ts = 9; + */ + public long getEndTs() { + return endTs_; + } + /** + * optional uint64 end_ts = 9; + */ + public Builder setEndTs(long value) { + bitField0_ |= 0x00000100; + endTs_ = value; + onChanged(); + return this; + } + /** + * optional uint64 end_ts = 9; + */ + public Builder clearEndTs() { + bitField0_ = (bitField0_ & ~0x00000100); + endTs_ = 0L; + onChanged(); + return this; + } + + // optional int64 total_bytes_copied = 10; + private long totalBytesCopied_ ; + /** + * optional int64 total_bytes_copied = 10; + */ + public boolean hasTotalBytesCopied() { + return ((bitField0_ & 0x00000200) == 0x00000200); + } + /** + * optional int64 total_bytes_copied = 10; + */ + public long getTotalBytesCopied() { + return totalBytesCopied_; + } + /** + * optional int64 total_bytes_copied = 
10; + */ + public Builder setTotalBytesCopied(long value) { + bitField0_ |= 0x00000200; + totalBytesCopied_ = value; + onChanged(); + return this; + } + /** + * optional int64 total_bytes_copied = 10; + */ + public Builder clearTotalBytesCopied() { + bitField0_ = (bitField0_ & ~0x00000200); + totalBytesCopied_ = 0L; + onChanged(); + return this; + } + + // optional string hlog_target_dir = 11; + private java.lang.Object hlogTargetDir_ = ""; + /** + * optional string hlog_target_dir = 11; + */ + public boolean hasHlogTargetDir() { + return ((bitField0_ & 0x00000400) == 0x00000400); + } + /** + * optional string hlog_target_dir = 11; + */ + public java.lang.String getHlogTargetDir() { + java.lang.Object ref = hlogTargetDir_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + hlogTargetDir_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * optional string hlog_target_dir = 11; + */ + public com.google.protobuf.ByteString + getHlogTargetDirBytes() { + java.lang.Object ref = hlogTargetDir_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + hlogTargetDir_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * optional string hlog_target_dir = 11; + */ + public Builder setHlogTargetDir( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000400; + hlogTargetDir_ = value; + onChanged(); + return this; + } + /** + * optional string hlog_target_dir = 11; + */ + public Builder clearHlogTargetDir() { + bitField0_ = (bitField0_ & ~0x00000400); + hlogTargetDir_ = getDefaultInstance().getHlogTargetDir(); + onChanged(); + return this; + } + /** + * optional string hlog_target_dir = 11; + */ + public Builder setHlogTargetDirBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000400; + hlogTargetDir_ = value; + onChanged(); + return this; + } + + // optional uint32 progress = 12; + private int progress_ ; + /** + * optional uint32 progress = 12; + */ + public boolean hasProgress() { + return ((bitField0_ & 0x00000800) == 0x00000800); + } + /** + * optional uint32 progress = 12; + */ + public int getProgress() { + return progress_; + } + /** + * optional uint32 progress = 12; + */ + public Builder setProgress(int value) { + bitField0_ |= 0x00000800; + progress_ = value; + onChanged(); + return this; + } + /** + * optional uint32 progress = 12; + */ + public Builder clearProgress() { + bitField0_ = (bitField0_ & ~0x00000800); + progress_ = 0; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:hbase.pb.BackupContext) + } + + static { + defaultInstance = new BackupContext(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:hbase.pb.BackupContext) + } + + private static com.google.protobuf.Descriptors.Descriptor + internal_static_hbase_pb_BackupImage_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_hbase_pb_BackupImage_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_hbase_pb_ServerTimestamp_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_hbase_pb_ServerTimestamp_fieldAccessorTable; + private static 
com.google.protobuf.Descriptors.Descriptor + internal_static_hbase_pb_TableServerTimestamp_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_hbase_pb_TableServerTimestamp_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_hbase_pb_BackupManifest_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_hbase_pb_BackupManifest_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_hbase_pb_TableBackupStatus_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_hbase_pb_TableBackupStatus_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_hbase_pb_BackupContext_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_hbase_pb_BackupContext_fieldAccessorTable; + + public static com.google.protobuf.Descriptors.FileDescriptor + getDescriptor() { + return descriptor; + } + private static com.google.protobuf.Descriptors.FileDescriptor + descriptor; + static { + java.lang.String[] descriptorData = { + "\n\014Backup.proto\022\010hbase.pb\032\013HBase.proto\"\327\001" + + "\n\013BackupImage\022\021\n\tbackup_id\030\001 \002(\t\022)\n\013back" + + "up_type\030\002 \002(\0162\024.hbase.pb.BackupType\022\020\n\010r" + + "oot_dir\030\003 \002(\t\022\'\n\ntable_list\030\004 \003(\0132\023.hbas" + + "e.pb.TableName\022\020\n\010start_ts\030\005 \002(\004\022\023\n\013comp" + + "lete_ts\030\006 \002(\004\022(\n\tancestors\030\007 \003(\0132\025.hbase" + + ".pb.BackupImage\"4\n\017ServerTimestamp\022\016\n\006se" + + "rver\030\001 \002(\t\022\021\n\ttimestamp\030\002 \002(\004\"o\n\024TableSe" + + "rverTimestamp\022\"\n\005table\030\001 \002(\0132\023.hbase.pb." 
+ + "TableName\0223\n\020server_timestamp\030\002 \003(\0132\031.hb", + "ase.pb.ServerTimestamp\"\313\002\n\016BackupManifes" + + "t\022\017\n\007version\030\001 \002(\t\022\021\n\tbackup_id\030\002 \002(\t\022\"\n" + + "\004type\030\003 \002(\0162\024.hbase.pb.BackupType\022\'\n\ntab" + + "le_list\030\004 \003(\0132\023.hbase.pb.TableName\022\020\n\010st" + + "art_ts\030\005 \002(\004\022\023\n\013complete_ts\030\006 \002(\004\022\023\n\013tot" + + "al_bytes\030\007 \002(\003\022\021\n\tlog_bytes\030\010 \001(\003\022/\n\007tst" + + "_map\030\t \003(\0132\036.hbase.pb.TableServerTimesta" + + "mp\0225\n\026dependent_backup_image\030\n \003(\0132\025.hba" + + "se.pb.BackupImage\022\021\n\tcompacted\030\013 \002(\010\"]\n\021" + + "TableBackupStatus\022\"\n\005table\030\001 \002(\0132\023.hbase", + ".pb.TableName\022\022\n\ntarget_dir\030\002 \002(\t\022\020\n\010sna" + + "pshot\030\003 \001(\t\"\323\004\n\rBackupContext\022\021\n\tbackup_" + + "id\030\001 \002(\t\022\"\n\004type\030\002 \002(\0162\024.hbase.pb.Backup" + + "Type\022\027\n\017target_root_dir\030\003 \002(\t\0222\n\005state\030\004" + + " \001(\0162#.hbase.pb.BackupContext.BackupStat" + + "e\0222\n\005phase\030\005 \001(\0162#.hbase.pb.BackupContex" + + "t.BackupPhase\022\026\n\016failed_message\030\006 \001(\t\0228\n" + + "\023table_backup_status\030\007 \003(\0132\033.hbase.pb.Ta" + + "bleBackupStatus\022\020\n\010start_ts\030\010 \001(\004\022\016\n\006end" + + "_ts\030\t \001(\004\022\032\n\022total_bytes_copied\030\n \001(\003\022\027\n", + "\017hlog_target_dir\030\013 \001(\t\022\020\n\010progress\030\014 \001(\r" + + "\"P\n\013BackupState\022\013\n\007WAITING\020\000\022\013\n\007RUNNING\020" + + "\001\022\014\n\010COMPLETE\020\002\022\n\n\006FAILED\020\003\022\r\n\tCANCELLED" + + "\020\004\"}\n\013BackupPhase\022\013\n\007REQUEST\020\000\022\014\n\010SNAPSH" + + "OT\020\001\022\027\n\023PREPARE_INCREMENTAL\020\002\022\020\n\014SNAPSHO" + + "TCOPY\020\003\022\024\n\020INCREMENTAL_COPY\020\004\022\022\n\016STORE_M" + + "ANIFEST\020\005*\'\n\nBackupType\022\010\n\004FULL\020\000\022\017\n\013INC" + + "REMENTAL\020\001BB\n*org.apache.hadoop.hbase.pr" + + "otobuf.generatedB\014BackupProtosH\001\210\001\001\240\001\001" + }; + com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = + new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { + public com.google.protobuf.ExtensionRegistry assignDescriptors( + com.google.protobuf.Descriptors.FileDescriptor root) { + descriptor = root; + internal_static_hbase_pb_BackupImage_descriptor = + getDescriptor().getMessageTypes().get(0); + internal_static_hbase_pb_BackupImage_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_hbase_pb_BackupImage_descriptor, + new java.lang.String[] { "BackupId", "BackupType", "RootDir", "TableList", "StartTs", "CompleteTs", "Ancestors", }); + internal_static_hbase_pb_ServerTimestamp_descriptor = + getDescriptor().getMessageTypes().get(1); + internal_static_hbase_pb_ServerTimestamp_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_hbase_pb_ServerTimestamp_descriptor, + new java.lang.String[] { "Server", "Timestamp", }); + internal_static_hbase_pb_TableServerTimestamp_descriptor = + getDescriptor().getMessageTypes().get(2); + internal_static_hbase_pb_TableServerTimestamp_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + 
internal_static_hbase_pb_TableServerTimestamp_descriptor, + new java.lang.String[] { "Table", "ServerTimestamp", }); + internal_static_hbase_pb_BackupManifest_descriptor = + getDescriptor().getMessageTypes().get(3); + internal_static_hbase_pb_BackupManifest_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_hbase_pb_BackupManifest_descriptor, + new java.lang.String[] { "Version", "BackupId", "Type", "TableList", "StartTs", "CompleteTs", "TotalBytes", "LogBytes", "TstMap", "DependentBackupImage", "Compacted", }); + internal_static_hbase_pb_TableBackupStatus_descriptor = + getDescriptor().getMessageTypes().get(4); + internal_static_hbase_pb_TableBackupStatus_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_hbase_pb_TableBackupStatus_descriptor, + new java.lang.String[] { "Table", "TargetDir", "Snapshot", }); + internal_static_hbase_pb_BackupContext_descriptor = + getDescriptor().getMessageTypes().get(5); + internal_static_hbase_pb_BackupContext_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_hbase_pb_BackupContext_descriptor, + new java.lang.String[] { "BackupId", "Type", "TargetRootDir", "State", "Phase", "FailedMessage", "TableBackupStatus", "StartTs", "EndTs", "TotalBytesCopied", "HlogTargetDir", "Progress", }); + return null; + } + }; + com.google.protobuf.Descriptors.FileDescriptor + .internalBuildGeneratedFileFrom(descriptorData, + new com.google.protobuf.Descriptors.FileDescriptor[] { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.getDescriptor(), + }, assigner); + } + + // @@protoc_insertion_point(outer_class_scope) +} diff --git a/hbase-protocol/src/main/protobuf/Backup.proto b/hbase-protocol/src/main/protobuf/Backup.proto new file mode 100644 index 0000000..383b990 --- /dev/null +++ b/hbase-protocol/src/main/protobuf/Backup.proto @@ -0,0 +1,105 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +// This file contains Backup manifest +package hbase.pb; + +option java_package = "org.apache.hadoop.hbase.protobuf.generated"; +option java_outer_classname = "BackupProtos"; +option java_generic_services = true; +option java_generate_equals_and_hash = true; +option optimize_for = SPEED; + +import "HBase.proto"; + +enum BackupType { + FULL = 0; + INCREMENTAL = 1; +} + +message BackupImage { + required string backup_id = 1; + required BackupType backup_type = 2; + required string root_dir = 3; + repeated TableName table_list = 4; + required uint64 start_ts = 5; + required uint64 complete_ts = 6; + repeated BackupImage ancestors = 7; +} + +message ServerTimestamp { + required string server = 1; + required uint64 timestamp = 2; +} + +message TableServerTimestamp { + required TableName table = 1; + repeated ServerTimestamp server_timestamp = 2; +} + +message BackupManifest { + required string version = 1; + required string backup_id = 2; + required BackupType type = 3; + repeated TableName table_list = 4; + required uint64 start_ts = 5; + required uint64 complete_ts = 6; + required int64 total_bytes = 7; + optional int64 log_bytes = 8; + repeated TableServerTimestamp tst_map = 9; + repeated BackupImage dependent_backup_image = 10; + required bool compacted = 11; +} + +message TableBackupStatus { + required TableName table = 1; + required string target_dir = 2; + optional string snapshot = 3; +} + +message BackupContext { + required string backup_id = 1; + required BackupType type = 2; + required string target_root_dir = 3; + optional BackupState state = 4; + optional BackupPhase phase = 5; + optional string failed_message = 6; + repeated TableBackupStatus table_backup_status = 7; + optional uint64 start_ts = 8; + optional uint64 end_ts = 9; + optional int64 total_bytes_copied = 10; + optional string hlog_target_dir = 11; + optional uint32 progress = 12; + + enum BackupState { + WAITING = 0; + RUNNING = 1; + COMPLETE = 2; + FAILED = 3; + CANCELLED = 4; + } + + enum BackupPhase { + REQUEST = 0; + SNAPSHOT = 1; + PREPARE_INCREMENTAL = 2; + SNAPSHOTCOPY = 3; + INCREMENTAL_COPY = 4; + STORE_MANIFEST = 5; + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupClient.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupClient.java new file mode 100644 index 0000000..7c8ea39 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupClient.java @@ -0,0 +1,40 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
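The messages defined above are consumed through the generated BackupProtos builders shown earlier, which follow the stock protobuf 2.5 builder pattern. A BackupContext can therefore be assembled and round-tripped roughly as in this minimal sketch (illustrative only, not part of the patch; the id, path and timestamp values are invented):

  // Illustrative sketch, not part of the patch: build and round-trip a BackupContext
  // using the generated BackupProtos builders. Only fields declared in Backup.proto are set.
  import org.apache.hadoop.hbase.protobuf.generated.BackupProtos;

  public class BackupContextProtoExample {
    public static void main(String[] args) throws Exception {
      BackupProtos.BackupContext ctx = BackupProtos.BackupContext.newBuilder()
          .setBackupId("backup_1396650096738")                       // required string backup_id = 1
          .setType(BackupProtos.BackupType.FULL)                     // required BackupType type = 2
          .setTargetRootDir("hdfs://backup.hbase.org:9000/user/biadmin/backup1") // required field 3
          .setState(BackupProtos.BackupContext.BackupState.RUNNING)  // optional BackupState state = 4
          .setStartTs(System.currentTimeMillis())                    // optional uint64 start_ts = 8
          .setProgress(0)                                            // optional uint32 progress = 12
          .build();
      byte[] wire = ctx.toByteArray();
      BackupProtos.BackupContext parsed = BackupProtos.BackupContext.parseFrom(wire);
      System.out.println(parsed.getBackupId() + " " + parsed.getState());
    }
  }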
+ */ +package org.apache.hadoop.hbase.backup; + +import java.io.IOException; +import java.util.List; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.TableName; + +public interface BackupClient { + + public void setConf(Configuration conf); + + /** + * Send backup request to server, and monitor the progress if necessary + * @param backupType : full or incremental + * @param targetRootDir : the root path specified by user + * @param tableList : the table list specified by user + * @return backupId backup id + * @throws IOException exception + */ + public String create(BackupType backupType, List tableList, + String targetRootDir) throws IOException; + } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupDriver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupDriver.java new file mode 100644 index 0000000..015c80b --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupDriver.java @@ -0,0 +1,119 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
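BackupClient is not meant to be instantiated directly; BackupRestoreFactory (added later in this patch) resolves the implementation from the configuration and injects it. A programmatic full backup would look roughly like the sketch below, mirroring what BackupCommands.CreateCommand does further down (the table names and target path are examples, not part of the patch):

  // Illustrative sketch, not part of the patch: requesting a full backup via the factory.
  import java.util.Arrays;
  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.backup.BackupClient;
  import org.apache.hadoop.hbase.backup.BackupRestoreFactory;
  import org.apache.hadoop.hbase.backup.BackupType;

  public class FullBackupExample {
    public static void main(String[] args) throws Exception {
      Configuration conf = HBaseConfiguration.create();
      // getBackupClient() also calls setConf(conf) on the returned instance
      BackupClient client = BackupRestoreFactory.getBackupClient(conf);
      String backupId = client.create(BackupType.FULL,
          Arrays.asList(TableName.valueOf("t1"), TableName.valueOf("t2")),
          "hdfs://backup.hbase.org:9000/user/biadmin/backup1");
      System.out.println("Created backup " + backupId);
    }
  }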
+ */ +package org.apache.hadoop.hbase.backup; + +import java.io.IOException; + +import org.apache.commons.cli.CommandLine; +import org.apache.commons.cli.Options; +import org.apache.commons.cli.ParseException; +import org.apache.commons.cli.PosixParser; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.HBaseConfiguration; +import org.apache.hadoop.hbase.backup.impl.BackupCommands; +import org.apache.hadoop.hbase.backup.impl.BackupRestoreConstants; +import org.apache.hadoop.hbase.backup.impl.BackupRestoreConstants.BackupCommand; +import org.apache.hadoop.hbase.util.AbstractHBaseTool; +import org.apache.hadoop.hbase.util.LogUtils; +import org.apache.hadoop.util.ToolRunner; +import org.apache.log4j.Level; +import org.apache.log4j.Logger; + +public class BackupDriver extends AbstractHBaseTool { + + private static final Log LOG = LogFactory.getLog(BackupDriver.class); + private Options opt; + private CommandLine cmd; + + protected void init() throws IOException { + // define supported options + opt = new Options(); + opt.addOption("debug", false, "Enable debug loggings"); + + // disable irrelevant loggers to avoid it mess up command output + LogUtils.disableUselessLoggers(LOG); + } + + private int parseAndRun(String[] args) throws IOException { + String cmd = null; + String[] remainArgs = null; + if (args == null || args.length == 0) { + BackupCommands.createCommand(getConf(), + BackupRestoreConstants.BackupCommand.HELP, null).execute(); + } else { + cmd = args[0]; + remainArgs = new String[args.length - 1]; + if (args.length > 1) { + System.arraycopy(args, 1, remainArgs, 0, args.length - 1); + } + } + CommandLine cmdline = null; + try { + cmdline = new PosixParser().parse(opt, remainArgs); + } catch (ParseException e) { + LOG.error("Could not parse command", e); + return -1; + } + + BackupCommand type = BackupCommand.HELP; + if (BackupCommand.CREATE.name().equalsIgnoreCase(cmd)) { + type = BackupCommand.CREATE; + } else if (BackupCommand.HELP.name().equalsIgnoreCase(cmd)) { + type = BackupCommand.HELP; + } else { + System.out.println("Unsupported command for backup: " + cmd); + return -1; + } + + // enable debug logging + Logger backupClientLogger = Logger.getLogger("org.apache.hadoop.hbase.backup"); + if (cmdline.hasOption("debug")) { + backupClientLogger.setLevel(Level.DEBUG); + } else { + backupClientLogger.setLevel(Level.INFO); + } + + // TODO: get rid of Command altogether? + BackupCommands.createCommand(getConf(), type, cmdline).execute(); + return 0; + } + + @Override + protected void addOptions() { + } + + @Override + protected void processOptions(CommandLine cmd) { + this.cmd = cmd; + } + + @Override + protected int doWork() throws Exception { + init(); + return parseAndRun(cmd.getArgs()); + } + + public static void main(String[] args) throws Exception { + Configuration conf = HBaseConfiguration.create(); + int ret = ToolRunner.run(conf, new BackupDriver(), args); + System.exit(ret); + } + +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupRestoreFactory.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupRestoreFactory.java new file mode 100644 index 0000000..6fbfe18 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupRestoreFactory.java @@ -0,0 +1,95 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.backup; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.backup.impl.BackupClientImpl; +import org.apache.hadoop.hbase.backup.impl.BackupCopyService; +import org.apache.hadoop.hbase.backup.impl.IncrementalRestoreService; +import org.apache.hadoop.hbase.backup.impl.RestoreClientImpl; +import org.apache.hadoop.hbase.backup.mapreduce.MapReduceBackupCopyService; +import org.apache.hadoop.hbase.backup.mapreduce.MapReduceRestoreService; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; +import org.apache.hadoop.util.ReflectionUtils; + +@InterfaceAudience.Private +@InterfaceStability.Evolving +public final class BackupRestoreFactory { + + public final static String HBASE_INCR_RESTORE_IMPL_CLASS = "hbase.incremental.restore.class"; + public final static String HBASE_BACKUP_COPY_IMPL_CLASS = "hbase.backup.copy.class"; + public final static String HBASE_BACKUP_CLIENT_IMPL_CLASS = "hbase.backup.client.class"; + public final static String HBASE_RESTORE_CLIENT_IMPL_CLASS = "hbase.restore.client.class"; + + private BackupRestoreFactory(){ + throw new AssertionError("Instantiating utility class..."); + } + + /** + * Gets incremental restore service + * @param conf - configuration + * @return incremental backup service instance + */ + public static IncrementalRestoreService getIncrementalRestoreService(Configuration conf) { + Class cls = + conf.getClass(HBASE_INCR_RESTORE_IMPL_CLASS, MapReduceRestoreService.class, + IncrementalRestoreService.class); + return ReflectionUtils.newInstance(cls, conf); + } + + /** + * Gets backup copy service + * @param conf - configuration + * @return backup copy service + */ + public static BackupCopyService getBackupCopyService(Configuration conf) { + Class cls = + conf.getClass(HBASE_BACKUP_COPY_IMPL_CLASS, MapReduceBackupCopyService.class, + BackupCopyService.class); + return ReflectionUtils.newInstance(cls, conf); + } + + /** + * Gets backup client implementation + * @param conf - configuration + * @return backup client + */ + public static BackupClient getBackupClient(Configuration conf) { + Class cls = + conf.getClass(HBASE_BACKUP_CLIENT_IMPL_CLASS, BackupClientImpl.class, + BackupClient.class); + BackupClient client = ReflectionUtils.newInstance(cls, conf); + client.setConf(conf); + return client; + } + + /** + * Gets restore client implementation + * @param conf - configuration + * @return backup client + */ + public static RestoreClient getRestoreClient(Configuration conf) { + Class cls = + conf.getClass(HBASE_RESTORE_CLIENT_IMPL_CLASS, RestoreClientImpl.class, + RestoreClient.class); + RestoreClient client = ReflectionUtils.newInstance(cls, conf); + client.setConf(conf); + return client; + } +} diff --git 
a/hbase-common/src/main/java/org/apache/hadoop/hbase/backup/BackupType.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/backup/BackupType.java new file mode 100644 index 0000000..e2e3446 --- /dev/null +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/backup/BackupType.java @@ -0,0 +1,23 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.backup; + +public enum BackupType { + FULL, INCREMENTAL +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HBackupFileSystem.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HBackupFileSystem.java new file mode 100644 index 0000000..4b14612 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HBackupFileSystem.java @@ -0,0 +1,451 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
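BackupRestoreFactory above resolves each pluggable service from the configuration, defaulting to the MapReduce-based implementations. A deployment could swap in its own copy or restore service by setting the corresponding key; the sketch below only reads back the default (illustrative, not part of the patch):

  // Illustrative sketch, not part of the patch: resolving the backup copy service.
  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.backup.BackupRestoreFactory;
  import org.apache.hadoop.hbase.backup.impl.BackupCopyService;

  public class CopyServiceLookupExample {
    public static void main(String[] args) {
      Configuration conf = HBaseConfiguration.create();
      // MapReduceBackupCopyService is the default; a custom implementation of BackupCopyService
      // could be plugged in with conf.set(BackupRestoreFactory.HBASE_BACKUP_COPY_IMPL_CLASS, "...").
      BackupCopyService copyService = BackupRestoreFactory.getBackupCopyService(conf);
      System.out.println("Backup copy service: " + copyService.getClass().getName());
    }
  }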
+ */ + +package org.apache.hadoop.hbase.backup; + +import java.io.FileNotFoundException; +import java.io.IOException; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.TreeMap; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.FileUtil; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.backup.impl.BackupManifest; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; +import org.apache.hadoop.hbase.io.HFileLink; +import org.apache.hadoop.hbase.io.hfile.CacheConfig; +import org.apache.hadoop.hbase.io.hfile.HFile; +import org.apache.hadoop.hbase.mapreduce.LoadIncrementalHFiles; +import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription; +import org.apache.hadoop.hbase.regionserver.HRegionFileSystem; +import org.apache.hadoop.hbase.regionserver.HStore; +import org.apache.hadoop.hbase.regionserver.StoreFileInfo; +import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils; +import org.apache.hadoop.hbase.snapshot.SnapshotManifest; +import org.apache.hadoop.hbase.util.Bytes; + +/** + * View to an on-disk Backup Image FileSytem + * Provides the set of methods necessary to interact with the on-disk Backup Image data. + */ +@InterfaceAudience.Private +@InterfaceStability.Evolving +public class HBackupFileSystem { + public static final Log LOG = LogFactory.getLog(HBackupFileSystem.class); + + private final String RESTORE_TMP_PATH = "/tmp"; + private final String[] ignoreDirs = { "recovered.edits" }; + + private final Configuration conf; + private final FileSystem fs; + private final Path backupRootPath; + private final Path restoreTmpPath; + private final String backupId; + + /** + * Create a view to the on-disk Backup Image. + * @param conf to use + * @param backupPath to where the backup Image stored + * @param backupId represent backup Image + */ + public HBackupFileSystem(final Configuration conf, final Path backupRootPath, final String backupId) + throws IOException { + this.conf = conf; + this.fs = backupRootPath.getFileSystem(conf); + this.backupRootPath = backupRootPath; + this.backupId = backupId; // the backup ID for the lead backup Image + this.restoreTmpPath = new Path(conf.get("hbase.fs.tmp.dir") != null? 
+ conf.get("hbase.fs.tmp.dir"): RESTORE_TMP_PATH, + "restore"); + } + + /** + * @param tableName is the table backuped + * @return {@link HTableDescriptor} saved in backup image of the table + */ + public HTableDescriptor getTableDesc(TableName tableName) + throws FileNotFoundException, IOException { + Path tableInfoPath = this.getTableInfoPath(tableName); + SnapshotDescription desc = SnapshotDescriptionUtils.readSnapshotInfo(fs, tableInfoPath); + SnapshotManifest manifest = SnapshotManifest.open(conf, fs, tableInfoPath, desc); + HTableDescriptor tableDescriptor = manifest.getTableDescriptor(); + if (!tableDescriptor.getNameAsString().equals(tableName)) { + LOG.error("couldn't find Table Desc for table: " + tableName + " under tableInfoPath: " + + tableInfoPath.toString()); + LOG.error("tableDescriptor.getNameAsString() = " + tableDescriptor.getNameAsString()); + } + return tableDescriptor; + } + + /** + * Given the backup root dir, backup id and the table name, return the backup image location, + * which is also where the backup manifest file is. return value look like: + * "hdfs://backup.hbase.org:9000/user/biadmin/backup1/default/t1_dn/backup_1396650096738" + * @param backupRootDir backup root directory + * @param backupId backup id + * @param table table name + * @return backupPath String for the particular table + */ + public static String getTableBackupDir(String backupRootDir, String backupId, + TableName tableName) { + return backupRootDir + Path.SEPARATOR + tableName.getNamespaceAsString() + Path.SEPARATOR + + tableName.getQualifierAsString() + Path.SEPARATOR + backupId; + } + + /** + * Given the backup root dir, backup id and the table name, return the backup image location, + * which is also where the backup manifest file is. return value look like: + * "hdfs://backup.hbase.org:9000/user/biadmin/backup1/default/t1_dn/backup_1396650096738" + * @param tableName table name + * @return backupPath for the particular table + */ + public Path getTableBackupPath(TableName tableName) { + return new Path(this.backupRootPath, tableName.getNamespaceAsString() + Path.SEPARATOR + + tableName.getQualifierAsString() + Path.SEPARATOR + backupId); + } + + /** + * return value represent path for: + * ".../user/biadmin/backup1/default/t1_dn/backup_1396650096738/.hbase-snapshot" + * @param tableName table name + * @return path for snapshot + */ + public Path getTableSnapshotPath(TableName tableName) { + return new Path(this.getTableBackupPath(tableName), HConstants.SNAPSHOT_DIR_NAME); + } + + /** + * return value represent path for: + * "..../default/t1_dn/backup_1396650096738/.hbase-snapshot/snapshot_1396650097621_default_t1_dn" + * this path contains .snapshotinfo, .tabledesc (0.96 and 0.98) this path contains .snapshotinfo, + * .data.manifest (trunk) + * @param tableName table name + * @return path to table info + * @throws FileNotFoundException exception + * @throws IOException exception + */ + public Path getTableInfoPath(TableName tableName) throws FileNotFoundException, IOException { + + Path tableSnapShotPath = this.getTableSnapshotPath(tableName); + Path tableInfoPath = null; + + // can't build the path directly as the timestamp values are different + FileStatus[] snapshots = fs.listStatus(tableSnapShotPath); + for (FileStatus snapshot : snapshots) { + tableInfoPath = snapshot.getPath(); + // SnapshotManifest.DATA_MANIFEST_NAME = "data.manifest"; + if (tableInfoPath.getName().endsWith("data.manifest")) { + break; + } + } + return tableInfoPath; + } + + /** + * return value represent path 
for: + * ".../user/biadmin/backup1/default/t1_dn/backup_1396650096738/archive/data/default/t1_dn" + * @param tabelName table name + * @return path to table archive + * @throws IOException exception + */ + public Path getTableArchivePath(TableName tableName) throws IOException { + Path baseDir = new Path(getTableBackupPath(tableName), HConstants.HFILE_ARCHIVE_DIRECTORY); + Path dataDir = new Path(baseDir, HConstants.BASE_NAMESPACE_DIR); + Path archivePath = new Path(dataDir, tableName.getNamespaceAsString()); + Path tableArchivePath = + new Path(archivePath, tableName.getQualifierAsString()); + if (!fs.exists(tableArchivePath) || !fs.getFileStatus(tableArchivePath).isDirectory()) { + LOG.debug("Folder tableArchivePath: " + tableArchivePath.toString() + " does not exists"); + tableArchivePath = null; // empty table has no archive + } + return tableArchivePath; + } + + /** + * Given the backup root dir and the backup id, return the log file location for an incremental + * backup. + * @param backupRootDir backup root directory + * @param backupId backup id + * @return logBackupDir: ".../user/biadmin/backup1/WALs/backup_1396650096738" + */ + public static String getLogBackupDir(String backupRootDir, String backupId) { + return backupRootDir + Path.SEPARATOR + HConstants.HREGION_LOGDIR_NAME + Path.SEPARATOR + + backupId; + } + + public static Path getLogBackupPath(String backupRootDir, String backupId) { + return new Path(getLogBackupDir(backupRootDir, backupId)); + } + + private Path getManifestPath(TableName tableName) throws IOException { + Path manifestPath = new Path(getTableBackupPath(tableName), BackupManifest.MANIFEST_FILE_NAME); + + if (!fs.exists(manifestPath)) { + // check log dir for incremental backup case + manifestPath = + new Path(getLogBackupDir(this.backupRootPath.toString(), this.backupId) + Path.SEPARATOR + + BackupManifest.MANIFEST_FILE_NAME); + if (!fs.exists(manifestPath)) { + String errorMsg = + "Could not find backup manifest for " + backupId + " in " + backupRootPath.toString(); + throw new IOException(errorMsg); + } + } + return manifestPath; + } + + public BackupManifest getManifest(TableName tableName) throws IOException { + BackupManifest manifest = new BackupManifest(conf, this.getManifestPath(tableName)); + return manifest; + } + + /** + * Gets region list + * @param tableName table name + * @return RegionList region list + * @throws FileNotFoundException exception + * @throws IOException exception + */ + + public ArrayList getRegionList(TableName tableName) throws FileNotFoundException, + IOException { + Path tableArchivePath = this.getTableArchivePath(tableName); + ArrayList regionDirList = new ArrayList(); + FileStatus[] children = fs.listStatus(tableArchivePath); + for (FileStatus childStatus : children) { + // here child refer to each region(Name) + Path child = childStatus.getPath(); + regionDirList.add(child); + } + return regionDirList; + } + + /** + * Gets region list + * @param tableArchivePath table archive path + * @return RegionList region list + * @throws FileNotFoundException exception + * @throws IOException exception + */ + public ArrayList getRegionList(Path tableArchivePath) throws FileNotFoundException, + IOException { + ArrayList regionDirList = new ArrayList(); + FileStatus[] children = fs.listStatus(tableArchivePath); + for (FileStatus childStatus : children) { + // here child refer to each region(Name) + Path child = childStatus.getPath(); + regionDirList.add(child); + } + return regionDirList; + } + + /** + * Counts the number of files in 
all subdirectories of an HBase tables, i.e. HFiles. And finds the + * maximum number of files in one HBase table. + * @param tableArchivePath archive path + * @return the maximum number of files found in 1 HBase table + * @throws IOException exception + */ + public int getMaxNumberOfFilesInSubDir(Path tableArchivePath) throws IOException { + int result = 1; + ArrayList regionPathList = this.getRegionList(tableArchivePath); + // tableArchivePath = this.getTableArchivePath(tableName); + + if (regionPathList == null || regionPathList.size() == 0) { + throw new IllegalStateException("Cannot restore hbase table because directory '" + + tableArchivePath + "' is not a directory."); + } + + for (Path regionPath : regionPathList) { + result = Math.max(result, getNumberOfFilesInDir(regionPath)); + } + return result; + } + + /** + * Counts the number of files in all subdirectories of an HBase table, i.e. HFiles. + * @param regionPath Path to an HBase table directory + * @return the number of files all directories + * @throws IOException exception + */ + public int getNumberOfFilesInDir(Path regionPath) throws IOException { + int result = 0; + + if (!fs.exists(regionPath) || !fs.getFileStatus(regionPath).isDirectory()) { + throw new IllegalStateException("Cannot restore hbase table because directory '" + + regionPath.toString() + "' is not a directory."); + } + + FileStatus[] tableDirContent = fs.listStatus(regionPath); + for (FileStatus subDirStatus : tableDirContent) { + FileStatus[] colFamilies = fs.listStatus(subDirStatus.getPath()); + for (FileStatus colFamilyStatus : colFamilies) { + FileStatus[] colFamilyContent = fs.listStatus(colFamilyStatus.getPath()); + result += colFamilyContent.length; + } + } + return result; + } + + /** + * Duplicate the backup image if it's on local cluster + * @see HStore#bulkLoadHFile(String, long) + * @see HRegionFileSystem#bulkLoadStoreFile(String familyName, Path srcPath, long seqNum) + * @param tableArchivePath archive path + * @return the new tableArchivePath + * @throws IOException exception + */ + public Path checkLocalAndBackup(Path tableArchivePath) throws IOException { + // Move the file if it's on local cluster + boolean isCopyNeeded = false; + + FileSystem srcFs = tableArchivePath.getFileSystem(conf); + FileSystem desFs = FileSystem.get(conf); + if (tableArchivePath.getName().startsWith("/")) { + isCopyNeeded = true; + } else { + // This should match what is done in @see HRegionFileSystem#bulkLoadStoreFile(String, Path, + // long) + if (srcFs.getUri().equals(desFs.getUri())) { + LOG.debug("cluster hold the backup image: " + srcFs.getUri() + "; local cluster node: " + + desFs.getUri()); + isCopyNeeded = true; + } + } + if (isCopyNeeded) { + LOG.debug("File " + tableArchivePath + " on local cluster, back it up before restore"); + if (desFs.exists(restoreTmpPath)) { + try { + desFs.delete(restoreTmpPath, true); + } catch (IOException e) { + LOG.debug("Failed to delete path: " + restoreTmpPath + + ", need to check whether restore target DFS cluster is healthy"); + } + } + FileUtil.copy(srcFs, tableArchivePath, desFs, restoreTmpPath, false, conf); + LOG.debug("Copied to temporary path on local cluster: " + restoreTmpPath); + tableArchivePath = restoreTmpPath; + } + return tableArchivePath; + } + + /** + * Calculate region boundaries and add all the column families to the table descriptor + * @param regionDirList region dir list + * @return a set of keys to store the boundaries + */ + public byte[][] generateBoundaryKeys(ArrayList regionDirList) + throws 
FileNotFoundException, IOException { + TreeMap map = new TreeMap(Bytes.BYTES_COMPARATOR); + // Build a set of keys to store the boundaries + byte[][] keys = null; + // calculate region boundaries and add all the column families to the table descriptor + for (Path regionDir : regionDirList) { + LOG.debug("Parsing region dir: " + regionDir); + Path hfofDir = regionDir; + + if (!fs.exists(hfofDir)) { + LOG.warn("HFileOutputFormat dir " + hfofDir + " not found"); + } + + FileStatus[] familyDirStatuses = fs.listStatus(hfofDir); + if (familyDirStatuses == null) { + throw new IOException("No families found in " + hfofDir); + } + + for (FileStatus stat : familyDirStatuses) { + if (!stat.isDirectory()) { + LOG.warn("Skipping non-directory " + stat.getPath()); + continue; + } + boolean isIgnore = false; + String pathName = stat.getPath().getName(); + for (String ignore : ignoreDirs) { + if (pathName.contains(ignore)) { + LOG.warn("Skipping non-family directory" + pathName); + isIgnore = true; + break; + } + } + if (isIgnore) { + continue; + } + Path familyDir = stat.getPath(); + LOG.debug("Parsing family dir [" + familyDir.toString() + " in region [" + regionDir + "]"); + // Skip _logs, etc + if (familyDir.getName().startsWith("_") || familyDir.getName().startsWith(".")) { + continue; + } + + // start to parse hfile inside one family dir + Path[] hfiles = FileUtil.stat2Paths(fs.listStatus(familyDir)); + for (Path hfile : hfiles) { + if (hfile.getName().startsWith("_") || hfile.getName().startsWith(".") + || StoreFileInfo.isReference(hfile.getName()) + || HFileLink.isHFileLink(hfile.getName())) { + continue; + } + HFile.Reader reader = HFile.createReader(fs, hfile, new CacheConfig(conf), conf); + final byte[] first, last; + try { + reader.loadFileInfo(); + first = reader.getFirstRowKey(); + last = reader.getLastRowKey(); + LOG.debug("Trying to figure out region boundaries hfile=" + hfile + " first=" + + Bytes.toStringBinary(first) + " last=" + Bytes.toStringBinary(last)); + + // To eventually infer start key-end key boundaries + Integer value = map.containsKey(first) ? (Integer) map.get(first) : 0; + map.put(first, value + 1); + value = map.containsKey(last) ? (Integer) map.get(last) : 0; + map.put(last, value - 1); + } finally { + reader.close(); + } + } + } + } + keys = LoadIncrementalHFiles.inferBoundaries(map); + return keys; + } + + /** + * Check whether the backup image path and there is manifest file in the path. + * @param backupManifestMap If all the manifests are found, then they are put into this map + * @param tableArray the tables involved + * @throws IOException exception + */ + public void checkImageManifestExist(HashMap backupManifestMap, + TableName[] tableArray) throws IOException { + for (TableName tableName : tableArray) { + BackupManifest manifest = this.getManifest(tableName); + backupManifestMap.put(tableName, manifest); + } + } +} \ No newline at end of file diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/RestoreClient.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/RestoreClient.java new file mode 100644 index 0000000..5c6c253 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/RestoreClient.java @@ -0,0 +1,46 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
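The path helpers above imply a simple on-disk layout: table data under <root>/<namespace>/<table>/<backupId> and incremental-backup WALs under <root>/WALs/<backupId>. A short sketch of computing those locations (values are examples, not part of the patch):

  // Illustrative sketch, not part of the patch: computing backup image locations
  // with the static helpers of HBackupFileSystem.
  import org.apache.hadoop.fs.Path;
  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.backup.HBackupFileSystem;

  public class BackupLayoutExample {
    public static void main(String[] args) {
      String root = "hdfs://backup.hbase.org:9000/user/biadmin/backup1";
      String backupId = "backup_1396650096738";
      TableName table = TableName.valueOf("default", "t1_dn");
      String tableDir = HBackupFileSystem.getTableBackupDir(root, backupId, table);
      Path logDir = HBackupFileSystem.getLogBackupPath(root, backupId);
      System.out.println(tableDir); // .../backup1/default/t1_dn/backup_1396650096738
      System.out.println(logDir);   // .../backup1/WALs/backup_1396650096738
    }
  }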
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.backup; + +import java.io.IOException; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.TableName; + +public interface RestoreClient { + + public void setConf(Configuration conf); + + /** + * Restore operation. + * @param hBackupFS to access the backup image + * @param backupRootDir The root dir for backup image + * @param backupId The backup id for image to be restored + * @param check True if only do dependency check + * @param autoRestore True if automatically restore following the dependency + * @param sTableArray The array of tables to be restored + * @param tTableArray The array of mapping tables to restore to + * @param isOverwrite True then do restore overwrite if target table exists, otherwise fail the + * request if target table exists + * @return True if only do dependency check + * @throws IOException if any failure during restore + */ + public boolean restore(HBackupFileSystem hBackupFS, String backupRootDir, + String backupId, boolean check, boolean autoRestore, TableName[] sTableArray, + TableName[] tTableArray, boolean isOverwrite) throws IOException; +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/RestoreDriver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/RestoreDriver.java new file mode 100644 index 0000000..5d7fb33 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/RestoreDriver.java @@ -0,0 +1,175 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
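RestoreClient is obtained from BackupRestoreFactory in the same way as BackupClient. Per the javadoc above, the check flag only verifies the restore sequence and dependencies, while autoRestore pulls in dependent images automatically. A sketch of a dependency check followed by an actual restore into a mapped target table (paths, id and table names are examples, not part of the patch):

  // Illustrative sketch, not part of the patch: driving RestoreClient directly.
  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.Path;
  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.backup.BackupRestoreFactory;
  import org.apache.hadoop.hbase.backup.HBackupFileSystem;
  import org.apache.hadoop.hbase.backup.RestoreClient;

  public class RestoreExample {
    public static void main(String[] args) throws Exception {
      Configuration conf = HBaseConfiguration.create();
      String root = "hdfs://backup.hbase.org:9000/user/biadmin/backup1";
      String backupId = "backup_1396650096738";
      TableName[] source = { TableName.valueOf("t1") };
      TableName[] target = { TableName.valueOf("t1_restored") };
      HBackupFileSystem backupFs = new HBackupFileSystem(conf, new Path(root), backupId);
      RestoreClient client = BackupRestoreFactory.getRestoreClient(conf);
      // check = true: only verify restore sequence and dependencies, nothing is restored
      client.restore(backupFs, root, backupId, true, true, source, target, false);
      // check = false: perform the restore, following dependencies automatically
      client.restore(backupFs, root, backupId, false, true, source, target, false);
    }
  }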
+ */ +package org.apache.hadoop.hbase.backup; + +import java.io.IOException; + +import org.apache.commons.cli.CommandLine; +import org.apache.commons.cli.Options; +import org.apache.commons.cli.ParseException; +import org.apache.commons.cli.PosixParser; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.HBaseConfiguration; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.backup.impl.BackupUtil; +import org.apache.hadoop.hbase.util.AbstractHBaseTool; +import org.apache.hadoop.hbase.util.LogUtils; +import org.apache.hadoop.util.ToolRunner; +import org.apache.log4j.Level; +import org.apache.log4j.Logger; + +public class RestoreDriver extends AbstractHBaseTool { + + private static final Log LOG = LogFactory.getLog(RestoreDriver.class); + private Options opt; + private CommandLine cmd; + + private static final String OPTION_OVERWRITE = "overwrite"; + private static final String OPTION_CHECK = "check"; + private static final String OPTION_AUTOMATIC = "automatic"; + + private static final String USAGE = + "Usage: hbase restore <backup_root_path> <backup_id> <table(s)> [tableMapping] \n" + + " [-overwrite] [-check] [-automatic]\n" + + " backup_root_path The parent location where the backup images are stored\n" + + " backup_id The id identifying the backup image\n" + + " table(s) Table(s) from the backup image to be restored.\n" + + " Tables are separated by comma.\n" + + " Options:\n" + + " tableMapping A comma separated list of target tables.\n" + + " If specified, each table in <table(s)> must have a mapping.\n" + + " -overwrite With this option, restore overwrites the existing table " + + "if there's any in\n" + + " the restore target. The existing table must be online before restore.\n" + + " -check With this option, restore sequence and dependencies are checked\n" + + " and verified without executing the restore\n" + + " -automatic With this option, all the dependencies are automatically restored\n" + + " together with this backup image following the correct order.\n" + + " The restore dependencies can be checked by using \"-check\" " + + "option,\n" + + " or using \"hbase backup describe\" command. 
Without this option, " + + "only\n" + " this backup image is restored\n"; + + protected void init() throws IOException { + // define supported options + opt = new Options(); + opt.addOption(OPTION_OVERWRITE, false, + "Overwrite the data if any of the restore target tables exists"); + opt.addOption(OPTION_CHECK, false, "Check restore sequence and dependencies"); + opt.addOption(OPTION_AUTOMATIC, false, "Restore all dependencies"); + opt.addOption("debug", false, "Enable debug logging"); + + // disable irrelevant loggers to avoid it mess up command output + LogUtils.disableUselessLoggers(LOG); + } + + private int parseAndRun(String[] args) { + CommandLine cmd = null; + try { + cmd = new PosixParser().parse(opt, args); + } catch (ParseException e) { + LOG.error("Could not parse command", e); + return -1; + } + + // enable debug logging + Logger backupClientLogger = Logger.getLogger("org.apache.hadoop.hbase.backup"); + if (cmd.hasOption("debug")) { + backupClientLogger.setLevel(Level.DEBUG); + } + + // whether to overwrite to existing table if any, false by default + boolean isOverwrite = cmd.hasOption(OPTION_OVERWRITE); + if (isOverwrite) { + LOG.debug("Found -overwrite option in restore command, " + + "will overwrite to existing table if any in the restore target"); + } + + // whether to only check the dependencies, false by default + boolean check = cmd.hasOption(OPTION_CHECK); + if (check) { + LOG.debug("Found -check option in restore command, " + + "will check and verify the dependencies"); + } + + // whether to restore all dependencies, false by default + boolean autoRestore = cmd.hasOption(OPTION_AUTOMATIC); + if (autoRestore) { + LOG.debug("Found -automatic option in restore command, " + + "will automatically retore all the dependencies"); + } + + // parse main restore command options + String[] remainArgs = cmd.getArgs(); + if (remainArgs.length < 3) { + System.out.println("ERROR: missing arguments"); + System.out.println(USAGE); + return -1; + } + + String backupRootDir = remainArgs[0]; + String backupId = remainArgs[1]; + String tables = remainArgs[2]; + + String tableMapping = (remainArgs.length > 3) ? 
remainArgs[3] : null; + + TableName[] sTableArray = BackupUtil.parseTableNames(tables); + TableName[] tTableArray = BackupUtil.parseTableNames(tableMapping); + + if (sTableArray != null && tTableArray != null && (sTableArray.length != tTableArray.length)) { + System.err.println("ERROR: table mapping mismatch: " + tables + " : " + tableMapping); + System.out.println(USAGE); + return -1; + } + + try { + HBackupFileSystem hBackupFS = new HBackupFileSystem(conf, new Path(backupRootDir), backupId); + RestoreClient client = BackupRestoreFactory.getRestoreClient(conf); + client.restore(hBackupFS, backupRootDir, backupId, check, autoRestore, sTableArray, + tTableArray, isOverwrite); + } catch (IOException e) { + System.err.println("ERROR: " + e.getMessage()); + return -1; + } + return 0; + } + + @Override + protected void addOptions() { + } + + @Override + protected void processOptions(CommandLine cmd) { + this.cmd = cmd; + } + + @Override + protected int doWork() throws Exception { + init(); + return parseAndRun(cmd.getArgs()); + } + + public static void main(String[] args) throws Exception { + Configuration conf = HBaseConfiguration.create(); + int ret = ToolRunner.run(conf, new RestoreDriver(), args); + System.exit(ret); + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupClientImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupClientImpl.java new file mode 100644 index 0000000..5b8a151 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupClientImpl.java @@ -0,0 +1,183 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package org.apache.hadoop.hbase.backup.impl; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.Set; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.DoNotRetryIOException; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.backup.BackupClient; +import org.apache.hadoop.hbase.backup.BackupType; +import org.apache.hadoop.hbase.backup.BackupUtility; +import org.apache.hadoop.hbase.backup.HBackupFileSystem; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; +import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.client.ConnectionFactory; +import org.apache.hadoop.hbase.client.HBaseAdmin; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; + +import com.google.common.collect.Lists; + +/** + * Backup HBase tables locally or on a remote cluster Serve as client entry point for the following + * features: - Full Backup provide local and remote back/restore for a list of tables - Incremental + * backup to build on top of full backup as daily/weekly backup - Convert incremental backup WAL + * files into hfiles - Merge several backup images into one(like merge weekly into monthly) - Add + * and remove table to and from Backup image - Cancel a backup process - Describe information of + * a backup image + */ +@InterfaceAudience.Public +@InterfaceStability.Evolving +public final class BackupClientImpl implements BackupClient { + private static final Log LOG = LogFactory.getLog(BackupClientImpl.class); + private Configuration conf; + private BackupManager backupManager; + + public BackupClientImpl() { + } + + @Override + public void setConf(Configuration conf) { + this.conf = conf; + } + + /** + * Prepare and submit Backup request + * @param backupId : backup_timestame (something like backup_1398729212626) + * @param backupType : full or incremental + * @param tableList : tables to be backuped + * @param targetRootDir : specified by user + * @throws IOException exception + */ + protected void requestBackup(String backupId, BackupType backupType, List tableList, + String targetRootDir) throws IOException { + + BackupContext backupContext = null; + + HBaseAdmin hbadmin = null; + Connection conn = null; + try { + backupManager = new BackupManager(conf); + if (backupType == BackupType.INCREMENTAL) { + Set incrTableSet = backupManager.getIncrementalBackupTableSet(); + if (incrTableSet.isEmpty()) { + LOG.warn("Incremental backup table set contains no table.\n" + + "Use 'backup create full' or 'backup stop' to \n " + + "change the tables covered by incremental backup."); + throw new DoNotRetryIOException("No table covered by incremental backup."); + } + + LOG.info("Incremental backup for the following table set: " + incrTableSet); + tableList = Lists.newArrayList(incrTableSet); + } + + // check whether table exists first before starting real request + if (tableList != null) { + ArrayList nonExistingTableList = null; + conn = ConnectionFactory.createConnection(conf); + hbadmin = (HBaseAdmin) conn.getAdmin(); + for (TableName tableName : tableList) { + if (!hbadmin.tableExists(tableName)) { + if (nonExistingTableList == null) { + nonExistingTableList = new ArrayList<>(); + } + nonExistingTableList.add(tableName); + } + } + if 
(nonExistingTableList != null) { + if (backupType == BackupType.INCREMENTAL ) { + LOG.warn("Incremental backup table set contains non-exising table: " + + nonExistingTableList); + } else { + // Throw exception only in full mode - we try to backup non-existing table + throw new DoNotRetryIOException("Non-existing tables found in the table list: " + + nonExistingTableList); + } + } + } + + // if any target table backup dir already exist, then no backup action taken + if (tableList != null) { + for (TableName table : tableList) { + String targetTableBackupDir = + HBackupFileSystem.getTableBackupDir(targetRootDir, backupId, table); + Path targetTableBackupDirPath = new Path(targetTableBackupDir); + FileSystem outputFs = FileSystem.get(targetTableBackupDirPath.toUri(), conf); + if (outputFs.exists(targetTableBackupDirPath)) { + throw new DoNotRetryIOException("Target backup directory " + targetTableBackupDir + + " exists already."); + } + } + } + backupContext = + backupManager.createBackupContext(backupId, backupType, tableList, targetRootDir); + backupManager.initialize(); + backupManager.dispatchRequest(backupContext); + } catch (BackupException e) { + // suppress the backup exception wrapped within #initialize or #dispatchRequest, backup + // exception has already been handled normally + LOG.error("Backup Exception ", e); + } finally { + if (hbadmin != null) { + hbadmin.close(); + } + if (conn != null) { + conn.close(); + } + } + } + + @Override + public String create(BackupType backupType, List tableList, String backupRootPath) + throws IOException { + + String backupId = BackupRestoreConstants.BACKUPID_PREFIX + EnvironmentEdgeManager.currentTime(); + BackupUtility.checkTargetDir(backupRootPath, conf); + + // table list specified for backup, trigger backup on specified tables + try { + requestBackup(backupId, backupType, tableList, backupRootPath); + } catch (RuntimeException e) { + String errMsg = e.getMessage(); + if (errMsg != null + && (errMsg.startsWith("Non-existing tables found") || errMsg + .startsWith("Snapshot is not found"))) { + LOG.error(errMsg + ", please check your command"); + throw e; + } else { + throw e; + } + } finally{ + if(backupManager != null) { + backupManager.close(); + } + } + return backupId; + } + +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupCommands.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupCommands.java new file mode 100644 index 0000000..56e26fa --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupCommands.java @@ -0,0 +1,158 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
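The backup id produced by create() above is BackupRestoreConstants.BACKUPID_PREFIX followed by the current time from EnvironmentEdgeManager. The prefix constant is not shown in this hunk; judging from the javadoc example backup_1398729212626 it is assumed to be "backup_" in the sketch below:

  // Illustrative sketch, not part of the patch: how a backup id is composed.
  import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;

  public class BackupIdExample {
    public static void main(String[] args) {
      String backupIdPrefix = "backup_"; // assumed value of BackupRestoreConstants.BACKUPID_PREFIX
      String backupId = backupIdPrefix + EnvironmentEdgeManager.currentTime();
      System.out.println(backupId); // e.g. backup_1398729212626
    }
  }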
+ */ + +package org.apache.hadoop.hbase.backup.impl; + +import java.io.IOException; +import org.apache.commons.cli.CommandLine; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.conf.Configured; +import org.apache.hadoop.hbase.backup.BackupClient; +import org.apache.hadoop.hbase.backup.BackupRestoreFactory; +import org.apache.hadoop.hbase.backup.BackupType; +import org.apache.hadoop.hbase.backup.impl.BackupRestoreConstants.BackupCommand; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; + +import com.google.common.collect.Lists; + +/** + * General backup commands, options and usage messages + */ +@InterfaceAudience.Private +@InterfaceStability.Evolving +public final class BackupCommands { + + private static final String USAGE = "Usage: hbase backup COMMAND\n" + + "where COMMAND is one of:\n" + " create create a new backup image\n" + + "Enter \'help COMMAND\' to see help message for each command\n"; + + private static final String CREATE_CMD_USAGE = + "Usage: hbase backup create [tables] [-convert] " + + "\n" + " type \"full\" to create a full backup image;\n" + + " \"incremental\" to create an incremental backup image\n" + + " backup_root_path The full root path to store the backup image,\n" + + " the prefix can be hdfs, webhdfs, gpfs, etc\n" + " Options:\n" + + " tables If no tables (\"\") are specified, all tables are backed up. " + + "Otherwise it is a\n" + " comma separated list of tables.\n" + + " -convert For an incremental backup, convert WAL files to HFiles\n"; + + public static abstract class Command extends Configured { + Command(Configuration conf) { + super(conf); + } + public abstract void execute() throws IOException; + } + + private BackupCommands() { + throw new AssertionError("Instantiating utility class..."); + } + + public static Command createCommand(Configuration conf, BackupCommand type, CommandLine cmdline) { + Command cmd = null; + switch (type) { + case CREATE: + cmd = new CreateCommand(conf, cmdline); + break; + case HELP: + default: + cmd = new HelpCommand(conf, cmdline); + break; + } + return cmd; + } + + private static class CreateCommand extends Command { + CommandLine cmdline; + + CreateCommand(Configuration conf, CommandLine cmdline) { + super(conf); + this.cmdline = cmdline; + } + + @Override + public void execute() throws IOException { + if (cmdline == null || cmdline.getArgs() == null) { + System.out.println("ERROR: missing arguments"); + System.out.println(CREATE_CMD_USAGE); + System.exit(-1); + } + String[] args = cmdline.getArgs(); + if (args.length < 2 || args.length > 3) { + System.out.println("ERROR: wrong number of arguments"); + System.out.println(CREATE_CMD_USAGE); + System.exit(-1); + } + + if (!BackupType.FULL.toString().equalsIgnoreCase(args[0]) + && !BackupType.INCREMENTAL.toString().equalsIgnoreCase(args[0])) { + System.out.println("ERROR: invalid backup type"); + System.out.println(CREATE_CMD_USAGE); + System.exit(-1); + } + + String tables = (args.length == 3) ? 
args[2] : null; + + try { + BackupClient client = BackupRestoreFactory.getBackupClient(getConf()); + client.create(BackupType.valueOf(args[0].toUpperCase()), + Lists.newArrayList(BackupUtil.parseTableNames(tables)), args[1]); + } catch (RuntimeException e) { + System.out.println("ERROR: " + e.getMessage()); + System.exit(-1); + } + } + } + + private static class HelpCommand extends Command { + CommandLine cmdline; + + HelpCommand(Configuration conf, CommandLine cmdline) { + super(conf); + this.cmdline = cmdline; + } + + @Override + public void execute() throws IOException { + if (cmdline == null) { + System.out.println(USAGE); + System.exit(0); + } + + String[] args = cmdline.getArgs(); + if (args == null || args.length == 0) { + System.out.println(USAGE); + System.exit(0); + } + + if (args.length != 1) { + System.out.println("Only support check help message of a single command type"); + System.out.println(USAGE); + System.exit(0); + } + + String type = args[0]; + + if (BackupCommand.CREATE.name().equalsIgnoreCase(type)) { + System.out.println(CREATE_CMD_USAGE); + } // other commands will be supported in future jira + System.exit(0); + } + } + +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupContext.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupContext.java new file mode 100644 index 0000000..1be0c3b --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupContext.java @@ -0,0 +1,382 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
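The CREATE_CMD_USAGE string in BackupCommands.java above corresponds to shell invocations such as "hbase backup create full hdfs://namenode:8020/backup t1,t2" (host, path and table names are examples). The BackupDriver that parses the command line and hands it to createCommand() is not part of this hunk, so the wiring below is an assumed sketch rather than the actual driver code:

import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.Options;
import org.apache.commons.cli.PosixParser;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.backup.impl.BackupCommands;
import org.apache.hadoop.hbase.backup.impl.BackupRestoreConstants.BackupCommand;

public class BackupCliSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Everything after "create" on the command line ends up in cmdline.getArgs():
    // args[0] = type, args[1] = backup_root_path, args[2] = optional comma-separated table list.
    CommandLine cmdline = new PosixParser().parse(new Options(),
        new String[] { "full", "hdfs://namenode:8020/backup", "t1,t2" });
    BackupCommands.createCommand(conf, BackupCommand.CREATE, cmdline).execute();
  }
}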
+ */ + +package org.apache.hadoop.hbase.backup.impl; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Map.Entry; +import java.util.Set; + +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.backup.BackupType; +import org.apache.hadoop.hbase.backup.HBackupFileSystem; +import org.apache.hadoop.hbase.backup.impl.BackupHandler.BackupState; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; +import org.apache.hadoop.hbase.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.protobuf.generated.BackupProtos; +import org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext.Builder; +import org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus; + +/** + * An object to encapsulate the information for each backup request + */ +@InterfaceAudience.Private +@InterfaceStability.Evolving +public class BackupContext { + + public Map getBackupStatusMap() { + return backupStatusMap; + } + + public void setBackupStatusMap(Map backupStatusMap) { + this.backupStatusMap = backupStatusMap; + } + + public HashMap> getTableSetTimestampMap() { + return tableSetTimestampMap; + } + + public void setTableSetTimestampMap( + HashMap> tableSetTimestampMap) { + this.tableSetTimestampMap = tableSetTimestampMap; + } + + public String getHlogTargetDir() { + return hlogTargetDir; + } + + public void setType(BackupType type) { + this.type = type; + } + + public void setTargetRootDir(String targetRootDir) { + this.targetRootDir = targetRootDir; + } + + public void setTotalBytesCopied(long totalBytesCopied) { + this.totalBytesCopied = totalBytesCopied; + } + + public void setCancelled(boolean cancelled) { + this.state = BackupState.CANCELLED;; + } + + // backup id: a timestamp when we request the backup + private String backupId; + + // backup type, full or incremental + private BackupType type; + + // target root directory for storing the backup files + private String targetRootDir; + + // overall backup state + private BackupHandler.BackupState state; + + // overall backup phase + private BackupHandler.BackupPhase phase; + + // overall backup failure message + private String failedMsg; + + // backup status map for all tables + private Map backupStatusMap; + + // actual start timestamp of the backup process + private long startTs; + + // actual end timestamp of the backup process, could be fail or complete + private long endTs; + + // the total bytes of incremental logs copied + private long totalBytesCopied; + + // for incremental backup, the location of the backed-up hlogs + private String hlogTargetDir = null; + + // incremental backup file list + transient private List incrBackupFileList; + + // new region server log timestamps for table set after distributed log roll + // key - table name, value - map of RegionServer hostname -> last log rolled timestamp + transient private HashMap> tableSetTimestampMap; + + // backup progress in %% (0-100) + + private int progress; + + public BackupContext() { + } + + public BackupContext(String backupId, BackupType type, TableName[] tables, String targetRootDir) { + backupStatusMap = new HashMap(); + + this.backupId = backupId; + this.type = type; + this.targetRootDir = targetRootDir; + + this.addTables(tables); + + if (type == BackupType.INCREMENTAL) { + setHlogTargetDir(HBackupFileSystem.getLogBackupDir(targetRootDir, backupId)); + } + + 
this.startTs = 0; + this.endTs = 0; + } + + /** + * Set progress string + * @param msg progress message + */ + + public void setProgress(int p) { + this.progress = p; + } + + /** + * Get current progress + */ + public int getProgress() { + return progress; + } + + + /** + * Has been marked as cancelled or not. + * @return True if marked as cancelled + */ + public boolean isCancelled() { + return this.state == BackupState.CANCELLED; + } + + public String getBackupId() { + return backupId; + } + + public void setBackupId(String backupId) { + this.backupId = backupId; + } + + public BackupStatus getBackupStatus(TableName table) { + return this.backupStatusMap.get(table); + } + + public String getFailedMsg() { + return failedMsg; + } + + public void setFailedMsg(String failedMsg) { + this.failedMsg = failedMsg; + } + + public long getStartTs() { + return startTs; + } + + public void setStartTs(long startTs) { + this.startTs = startTs; + } + + public long getEndTs() { + return endTs; + } + + public void setEndTs(long endTs) { + this.endTs = endTs; + } + + public long getTotalBytesCopied() { + return totalBytesCopied; + } + + public BackupHandler.BackupState getState() { + return state; + } + + public void setState(BackupHandler.BackupState flag) { + this.state = flag; + } + + public BackupHandler.BackupPhase getPhase() { + return phase; + } + + public void setPhase(BackupHandler.BackupPhase phase) { + this.phase = phase; + } + + public BackupType getType() { + return type; + } + + public void setSnapshotName(TableName table, String snapshotName) { + this.backupStatusMap.get(table).setSnapshotName(snapshotName); + } + + public String getSnapshotName(TableName table) { + return this.backupStatusMap.get(table).getSnapshotName(); + } + + public List getSnapshotNames() { + List snapshotNames = new ArrayList(); + for (BackupStatus backupStatus : this.backupStatusMap.values()) { + snapshotNames.add(backupStatus.getSnapshotName()); + } + return snapshotNames; + } + + public Set getTables() { + return this.backupStatusMap.keySet(); + } + + public List getTableNames() { + return new ArrayList(backupStatusMap.keySet()); + } + + public void addTables(TableName[] tables) { + for (TableName table : tables) { + BackupStatus backupStatus = new BackupStatus(table, this.targetRootDir, this.backupId); + this.backupStatusMap.put(table, backupStatus); + } + } + + public String getTargetRootDir() { + return targetRootDir; + } + + public void setHlogTargetDir(String hlogTagetDir) { + this.hlogTargetDir = hlogTagetDir; + } + + public String getHLogTargetDir() { + return hlogTargetDir; + } + + public List getIncrBackupFileList() { + return incrBackupFileList; + } + + public List setIncrBackupFileList(List incrBackupFileList) { + this.incrBackupFileList = incrBackupFileList; + return this.incrBackupFileList; + } + + /** + * Set the new region server log timestamps after distributed log roll + * @param newTableSetTimestampMap table timestamp map + */ + public void setIncrTimestampMap(HashMap> newTableSetTimestampMap) { + this.tableSetTimestampMap = newTableSetTimestampMap; + } + + /** + * Get new region server log timestamps after distributed log roll + * @return new region server log timestamps + */ + public HashMap> getIncrTimestampMap() { + return this.tableSetTimestampMap; + } + + public TableName getTableBySnapshot(String snapshotName) { + for (Entry entry : this.backupStatusMap.entrySet()) { + if (snapshotName.equals(entry.getValue().getSnapshotName())) { + return entry.getKey(); + } + } + return null; + } + + 
public byte[] toByteArray() throws IOException { + BackupProtos.BackupContext.Builder builder = + BackupProtos.BackupContext.newBuilder(); + builder.setBackupId(getBackupId()); + setBackupStatusMap(builder); + builder.setEndTs(getEndTs()); + if(getFailedMsg() != null){ + builder.setFailedMessage(getFailedMsg()); + } + if(getState() != null){ + builder.setState(BackupProtos.BackupContext.BackupState.valueOf(getState().name())); + } + if(getPhase() != null){ + builder.setPhase(BackupProtos.BackupContext.BackupPhase.valueOf(getPhase().name())); + } + if(getHLogTargetDir() != null){ + builder.setHlogTargetDir(getHLogTargetDir()); + } + + builder.setProgress(getProgress()); + builder.setStartTs(getStartTs()); + builder.setTargetRootDir(getTargetRootDir()); + builder.setTotalBytesCopied(getTotalBytesCopied()); + builder.setType(BackupProtos.BackupType.valueOf(getType().name())); + byte[] data = builder.build().toByteArray(); + return data; + } + + private void setBackupStatusMap(Builder builder) { + for (Entry entry: backupStatusMap.entrySet()) { + builder.addTableBackupStatus(entry.getValue().toProto()); + } + } + + public static BackupContext fromByteArray(byte[] data) throws IOException { + + BackupContext context = new BackupContext(); + BackupProtos.BackupContext proto = BackupProtos.BackupContext.parseFrom(data); + context.setBackupId(proto.getBackupId()); + context.setBackupStatusMap(toMap(proto.getTableBackupStatusList())); + context.setEndTs(proto.getEndTs()); + if(proto.hasFailedMessage()) { + context.setFailedMsg(proto.getFailedMessage()); + } + if(proto.hasState()) { + context.setState(BackupHandler.BackupState.valueOf(proto.getState().name())); + } + if(proto.hasHlogTargetDir()) { + context.setHlogTargetDir(proto.getHlogTargetDir()); + } + if(proto.hasPhase()) { + context.setPhase(BackupHandler.BackupPhase.valueOf(proto.getPhase().name())); + } + if(proto.hasProgress()) { + context.setProgress(proto.getProgress()); + } + context.setStartTs(proto.getStartTs()); + context.setTargetRootDir(proto.getTargetRootDir()); + context.setTotalBytesCopied(proto.getTotalBytesCopied()); + context.setType(BackupType.valueOf(proto.getType().name())); + return context; + } + + private static Map toMap(List list) { + HashMap map = new HashMap<>(); + for (TableBackupStatus tbs : list){ + map.put(ProtobufUtil.toTableName(tbs.getTable()), BackupStatus.convert(tbs)); + } + return map; + } + +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupCopyService.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupCopyService.java new file mode 100644 index 0000000..1e8da63 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupCopyService.java @@ -0,0 +1,37 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
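The toByteArray() and fromByteArray() methods at the end of BackupContext.java above are what persist a BackupContext (for example into the hbase:backup table) and rehydrate it later. A small round-trip sketch using only the constructor and accessors shown in that class; the backup id string and the HDFS path are made up, real ids come from BackupRestoreConstants.BACKUPID_PREFIX plus a timestamp:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.backup.BackupType;
import org.apache.hadoop.hbase.backup.impl.BackupContext;

public class BackupContextRoundTripSketch {
  public static void main(String[] args) throws Exception {
    BackupContext ctx = new BackupContext("backup_example_1", BackupType.FULL,
        new TableName[] { TableName.valueOf("t1") }, "hdfs://namenode:8020/backup");
    ctx.setStartTs(System.currentTimeMillis());
    byte[] bytes = ctx.toByteArray();                        // serialize to BackupProtos.BackupContext
    BackupContext copy = BackupContext.fromByteArray(bytes); // parse it back
    System.out.println(copy.getBackupId() + " " + copy.getType() + " " + copy.getTableNames());
  }
}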
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.backup.impl; + +import java.io.IOException; + +import org.apache.hadoop.conf.Configurable; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; + +@InterfaceAudience.Private +@InterfaceStability.Evolving +public interface BackupCopyService extends Configurable { + static enum Type { + FULL, INCREMENTAL + } + + public int copy(BackupContext backupContext, BackupManager backupManager, Configuration conf, + BackupCopyService.Type copyType, String[] options) throws IOException; +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupException.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupException.java new file mode 100644 index 0000000..af70cc8 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupException.java @@ -0,0 +1,85 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
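The BackupCopyService interface just above is the seam between BackupHandler and the actual data movement; judging by its usage later in this patch, the factory supplies an ExportSnapshot-based copier for FULL and a DistCp-based one for INCREMENTAL. A do-nothing implementation, shown only to illustrate the contract that the caller treats a non-zero return code as failure; the class name is invented:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.backup.impl.BackupContext;
import org.apache.hadoop.hbase.backup.impl.BackupCopyService;
import org.apache.hadoop.hbase.backup.impl.BackupManager;

public class NoOpBackupCopyService implements BackupCopyService {
  private Configuration conf;

  @Override
  public int copy(BackupContext backupContext, BackupManager backupManager, Configuration conf,
      BackupCopyService.Type copyType, String[] options) throws IOException {
    // A real implementation would run ExportSnapshot (FULL) or DistCp (INCREMENTAL) here and
    // could report progress through BackupHandler.updateProgress(...).
    return 0; // 0 signals success to the snapshotCopy()/incrementalCopy() callers in BackupHandler
  }

  @Override
  public void setConf(Configuration conf) { this.conf = conf; }

  @Override
  public Configuration getConf() { return conf; }
}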
+ */ + +package org.apache.hadoop.hbase.backup.impl; + +import org.apache.hadoop.hbase.HBaseIOException; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; + +/** + * Backup exception + */ +@SuppressWarnings("serial") +@InterfaceAudience.Private +@InterfaceStability.Evolving +public class BackupException extends HBaseIOException { + private BackupContext description; + + /** + * Some exception happened for a backup and don't even know the backup that it was about + * @param msg Full description of the failure + */ + public BackupException(String msg) { + super(msg); + } + + /** + * Some exception happened for a backup with a cause + * @param cause the cause + */ + public BackupException(Throwable cause) { + super(cause); + } + + /** + * Exception for the given backup that has no previous root cause + * @param msg reason why the backup failed + * @param desc description of the backup that is being failed + */ + public BackupException(String msg, BackupContext desc) { + super(msg); + this.description = desc; + } + + /** + * Exception for the given backup due to another exception + * @param msg reason why the backup failed + * @param cause root cause of the failure + * @param desc description of the backup that is being failed + */ + public BackupException(String msg, Throwable cause, BackupContext desc) { + super(msg, cause); + this.description = desc; + } + + /** + * Exception when the description of the backup cannot be determined, due to some other root + * cause + * @param message description of what caused the failure + * @param e root cause + */ + public BackupException(String message, Exception e) { + super(message, e); + } + + public BackupContext getBackupContext() { + return this.description; + } + +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupHandler.java new file mode 100644 index 0000000..7bd6e99 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupHandler.java @@ -0,0 +1,702 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hbase.backup.impl; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.concurrent.Callable; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.backup.BackupRestoreFactory; +import org.apache.hadoop.hbase.backup.BackupType; +import org.apache.hadoop.hbase.backup.BackupUtility; +import org.apache.hadoop.hbase.backup.HBackupFileSystem; +import org.apache.hadoop.hbase.backup.impl.BackupManifest.BackupImage; +import org.apache.hadoop.hbase.backup.master.LogRollMasterProcedureManager; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; +import org.apache.hadoop.hbase.client.Admin; +import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.client.ConnectionFactory; +import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos; +import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; +import org.apache.hadoop.hbase.util.FSUtils; +import org.apache.zookeeper.KeeperException.NoNodeException; + +/** + * A Handler to carry the operations of backup progress + */ +@InterfaceAudience.Private +@InterfaceStability.Evolving +public class BackupHandler implements Callable { + private static final Log LOG = LogFactory.getLog(BackupHandler.class); + + // backup phase + // for overall backup (for table list, some table may go online, while some may go offline) + protected static enum BackupPhase { + REQUEST, SNAPSHOT, PREPARE_INCREMENTAL, SNAPSHOTCOPY, INCREMENTAL_COPY, STORE_MANIFEST; + } + + // backup status flag + public static enum BackupState { + WAITING, RUNNING, COMPLETE, FAILED, CANCELLED; + } + + protected final BackupContext backupContext; + private final BackupManager backupManager; + private final Configuration conf; + private final Connection conn; + + public BackupHandler(BackupContext backupContext, + BackupManager backupManager, Configuration conf, Connection connection) { + this.backupContext = backupContext; + this.backupManager = backupManager; + this.conf = conf; + this.conn = connection; + } + + public BackupContext getBackupContext() { + return backupContext; + } + + @Override + public Void call() throws Exception { + try(Admin admin = conn.getAdmin()) { + // overall backup begin + this.beginBackup(backupContext); + HashMap newTimestamps = null; + // handle full or incremental backup for table or table list + if (backupContext.getType() == BackupType.FULL) { + String savedStartCode = null; + boolean firstBackup = false; + // do snapshot for full table backup + + try { + savedStartCode = backupManager.readBackupStartCode(); + firstBackup = savedStartCode == null; + if (firstBackup) { + // This is our first backup. Let's put some marker on ZK so that we can hold the logs + // while we do the backup. + backupManager.writeBackupStartCode(0L); + } + // We roll log here before we do the snapshot. It is possible there is duplicate data + // in the log that is already in the snapshot. But if we do it after the snapshot, we + // could have data loss. 
+ // A better approach is to do the roll log on each RS in the same global procedure as + // the snapshot. + LOG.info("Execute roll log procedure for full backup ..."); + admin.execProcedure(LogRollMasterProcedureManager.ROLLLOG_PROCEDURE_SIGNATURE, + LogRollMasterProcedureManager.ROLLLOG_PROCEDURE_NAME, new HashMap()); + newTimestamps = backupManager.readRegionServerLastLogRollResult(); + if (firstBackup) { + // Updates registered log files + // We record ALL old WAL files as registered, because + // this is a first full backup in the system and these + // files are not needed for next incremental backup + List logFiles = BackupUtil.getWALFilesOlderThan(conf, newTimestamps); + backupManager.recordWALFiles(logFiles); + } + this.snapshotForFullBackup(backupContext); + } catch (BackupException e) { + // fail the overall backup and return + this.failBackup(backupContext, e, "Unexpected BackupException : "); + return null; + } + + // update the faked progress currently for snapshot done + updateProgress(backupContext, backupManager, 10, 0); + // do snapshot copy + try { + this.snapshotCopy(backupContext); + } catch (Exception e) { + // fail the overall backup and return + this.failBackup(backupContext, e, "Unexpected BackupException : "); + return null; + } + // Updates incremental backup table set + backupManager.addIncrementalBackupTableSet(backupContext.getTables()); + + } else if (backupContext.getType() == BackupType.INCREMENTAL) { + LOG.debug("For incremental backup, current table set is " + + backupManager.getIncrementalBackupTableSet()); + // do incremental table backup preparation + backupContext.setPhase(BackupPhase.PREPARE_INCREMENTAL); + // avoid action if has been cancelled + if (backupContext.isCancelled()) { + return null; + } + try { + IncrementalBackupManager incrBackupManager = new IncrementalBackupManager(backupManager); + + newTimestamps = incrBackupManager.getIncrBackupLogFileList(backupContext); + } catch (Exception e) { + // fail the overall backup and return + this.failBackup(backupContext, e, "Unexpected Exception : "); + return null; + } + // update the faked progress currently for incremental preparation done + updateProgress(backupContext, backupManager, 10, 0); + + // do incremental copy + try { + // copy out the table and region info files for each table + BackupUtil.copyTableRegionInfo(backupContext, conf); + this.incrementalCopy(backupContext); + // Save list of WAL files copied + backupManager.recordWALFiles(backupContext.getIncrBackupFileList()); + } catch (Exception e) { + // fail the overall backup and return + this.failBackup(backupContext, e, "Unexpected exception doing incremental copy : "); + return null; + } + } + + // set overall backup status: complete. Here we make sure to complete the backup. After this + // checkpoint, even if entering cancel process, will let the backup finished + backupContext.setState(BackupState.COMPLETE); + + if (backupContext.getType() == BackupType.INCREMENTAL) { + // Set the previousTimestampMap which is before this current log roll to the manifest. + HashMap> previousTimestampMap = + backupManager.readLogTimestampMap(); + backupContext.setIncrTimestampMap(previousTimestampMap); + } + + // The table list in backupContext is good for both full backup and incremental backup. + // For incremental backup, it contains the incremental backup table set. 
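The newTimestamps map written out in the calls that follow has the shape documented on BackupContext: table name, then region server host, then the timestamp of that server's last log roll, and the new backup start code is essentially the smallest of those timestamps, so WAL data older than it has already been captured. The real code goes through BackupUtil.getRSLogTimestampMins and BackupUtility.getMinValue, whose bodies are not in this hunk; the toy reduction below only illustrates the data shape and is not the project's code:

import java.util.HashMap;
import org.apache.hadoop.hbase.TableName;

public class StartCodeSketch {
  public static void main(String[] args) {
    HashMap<TableName, HashMap<String, Long>> tsMap = new HashMap<>();
    HashMap<String, Long> perServer = new HashMap<>();
    perServer.put("rs1.example.com", 1000L); // last WAL roll timestamp reported by rs1
    perServer.put("rs2.example.com", 900L);  // rs2 rolled a bit earlier
    tsMap.put(TableName.valueOf("t1"), perServer);

    long newStartCode = Long.MAX_VALUE;
    for (HashMap<String, Long> serverTs : tsMap.values()) {
      for (long ts : serverTs.values()) {
        newStartCode = Math.min(newStartCode, ts); // oldest roll point across all tables and servers
      }
    }
    System.out.println("new start code = " + newStartCode); // 900 in this toy example
  }
}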
+ backupManager.writeRegionServerLogTimestamp(backupContext.getTables(), newTimestamps); + + HashMap> newTableSetTimestampMap = + backupManager.readLogTimestampMap(); + + Long newStartCode = + BackupUtility.getMinValue(BackupUtil.getRSLogTimestampMins(newTableSetTimestampMap)); + backupManager.writeBackupStartCode(newStartCode); + + // backup complete + this.completeBackup(backupContext); + } catch (Exception e) { + // even during completing backup (#completeBackup(backupContext)), exception may occur, or + // exception occur during other process, fail the backup finally + this.failBackup(backupContext, e, "Error caught during backup progress: "); + } + return null; + } + + /** + * Begin the overall backup. + * @param backupContext backup context + * @throws IOException exception + */ + private void beginBackup(BackupContext backupContext) throws IOException { + // set the start timestamp of the overall backup + long startTs = EnvironmentEdgeManager.currentTime(); + backupContext.setStartTs(startTs); + // set overall backup status: ongoing + backupContext.setState(BackupState.RUNNING); + LOG.info("Backup " + backupContext.getBackupId() + " started at " + startTs + "."); + + backupManager.updateBackupStatus(backupContext); + if (LOG.isDebugEnabled()) { + LOG.debug("Backup session " + backupContext.getBackupId() + " has been started."); + } + } + + /** + * Snapshot for full table backup. + * @param backupContext backup context + * @throws IOException exception + */ + private void snapshotForFullBackup(BackupContext backupContext) throws IOException { + LOG.info("HBase snapshot full backup for " + backupContext.getBackupId()); + + // avoid action if has been cancelled + if (backupContext.isCancelled()) { + return; + } + + try (Admin admin = conn.getAdmin()) { + // we do HBase snapshot for tables in the table list one by one currently + for (TableName table : backupContext.getTables()) { + // avoid action if it has been cancelled + if (backupContext.isCancelled()) { + return; + } + + HBaseProtos.SnapshotDescription backupSnapshot; + + // wrap a SnapshotDescription for offline/online snapshot + backupSnapshot = this.wrapSnapshotDescription(table); + + try { + // Kick off snapshot for backup + admin.snapshot(backupSnapshot); + } catch (Exception e) { + LOG.error("Snapshot failed to create " + getMessage(e)); + + // currently, we fail the overall backup if any table in the list failed, so throw the + // exception out for overall backup failing + throw new BackupException("Backup snapshot failed on table " + table, e); + } + + // set the snapshot name in BackupStatus of this table, only after snapshot success. + backupContext.setSnapshotName(table, backupSnapshot.getName()); + } + } + } + + /** + * Fail the overall backup. + * @param backupContext backup context + * @param e exception + * @throws Exception exception + */ + private void failBackup(BackupContext backupContext, Exception e, String msg) throws Exception { + LOG.error(msg + getMessage(e)); + // If this is a cancel exception, then we've already cleaned. 
+ + if (this.backupContext.getState().equals(BackupState.CANCELLED)) { + return; + } + + // set the failure timestamp of the overall backup + backupContext.setEndTs(EnvironmentEdgeManager.currentTime()); + + // set failure message + backupContext.setFailedMsg(e.getMessage()); + + // set overall backup status: failed + backupContext.setState(BackupState.FAILED); + + // compose the backup failed data + String backupFailedData = + "BackupId=" + backupContext.getBackupId() + ",startts=" + backupContext.getStartTs() + + ",failedts=" + backupContext.getEndTs() + ",failedphase=" + backupContext.getPhase() + + ",failedmessage=" + backupContext.getFailedMsg(); + LOG.error(backupFailedData); + + backupManager.updateBackupStatus(backupContext); + + // if full backup, then delete HBase snapshots if there already have snapshots taken + // and also clean up export snapshot log files if exist + if (backupContext.getType() == BackupType.FULL) { + this.deleteSnapshot(backupContext); + this.cleanupExportSnapshotLog(); + } /* + * else { // support incremental backup code in future jira // TODO. See HBASE-14124 } + */ + + // clean up the uncompleted data at target directory if the ongoing backup has already entered + // the copy phase + // For incremental backup, DistCp logs will be cleaned with the targetDir. + this.cleanupTargetDir(); + + LOG.info("Backup " + backupContext.getBackupId() + " failed."); + } + + /** + * Update the ongoing back token znode with new progress. + * @param backupContext backup context + * + * @param newProgress progress + * @param bytesCopied bytes copied + * @throws NoNodeException exception + */ + public static void updateProgress(BackupContext backupContext, BackupManager backupManager, + int newProgress, long bytesCopied) throws IOException { + // compose the new backup progress data, using fake number for now + String backupProgressData = newProgress + "%"; + + backupContext.setProgress(newProgress); + backupManager.updateBackupStatus(backupContext); + LOG.debug("Backup progress data \"" + backupProgressData + + "\" has been updated to hbase:backup for " + backupContext.getBackupId()); + } + + /** + * Complete the overall backup. 
+ * @param backupContext backup context + * @throws Exception exception + */ + private void completeBackup(BackupContext backupContext) throws Exception { + + // set the complete timestamp of the overall backup + backupContext.setEndTs(EnvironmentEdgeManager.currentTime()); + // set overall backup status: complete + backupContext.setState(BackupState.COMPLETE); + // add and store the manifest for the backup + this.addManifest(backupContext); + + // after major steps done and manifest persisted, do convert if needed for incremental backup + /* in-fly convert code here, provided by future jira */ + LOG.debug("in-fly convert code here, provided by future jira"); + + // compose the backup complete data + String backupCompleteData = + this.obtainBackupMetaDataStr(backupContext) + ",startts=" + backupContext.getStartTs() + + ",completets=" + backupContext.getEndTs() + ",bytescopied=" + + backupContext.getTotalBytesCopied(); + if (LOG.isDebugEnabled()) { + LOG.debug("Backup " + backupContext.getBackupId() + " finished: " + backupCompleteData); + } + backupManager.updateBackupStatus(backupContext); + + // when full backup is done: + // - delete HBase snapshot + // - clean up directories with prefix "exportSnapshot-", which are generated when exporting + // snapshots + if (backupContext.getType() == BackupType.FULL) { + this.deleteSnapshot(backupContext); + this.cleanupExportSnapshotLog(); + } else if (backupContext.getType() == BackupType.INCREMENTAL) { + this.cleanupDistCpLog(); + } + + LOG.info("Backup " + backupContext.getBackupId() + " completed."); + } + + /** + * Get backup request meta data dir as string. + * @param backupContext backup context + * @return meta data dir + */ + private String obtainBackupMetaDataStr(BackupContext backupContext) { + StringBuffer sb = new StringBuffer(); + sb.append("type=" + backupContext.getType() + ",tablelist="); + for (TableName table : backupContext.getTables()) { + sb.append(table + ";"); + } + if (sb.lastIndexOf(";") > 0) { + sb.delete(sb.lastIndexOf(";"), sb.lastIndexOf(";") + 1); + } + sb.append(",targetRootDir=" + backupContext.getTargetRootDir()); + + return sb.toString(); + } + + /** + * Do snapshot copy. + * @param backupContext backup context + * @throws Exception exception + */ + private void snapshotCopy(BackupContext backupContext) throws Exception { + LOG.info("Snapshot copy is starting."); + + // set overall backup phase: snapshot_copy + backupContext.setPhase(BackupPhase.SNAPSHOTCOPY); + + // avoid action if has been cancelled + if (backupContext.isCancelled()) { + return; + } + + // call ExportSnapshot to copy files based on hbase snapshot for backup + // ExportSnapshot only support single snapshot export, need loop for multiple tables case + BackupCopyService copyService = BackupRestoreFactory.getBackupCopyService(conf); + + // number of snapshots matches number of tables + float numOfSnapshots = backupContext.getSnapshotNames().size(); + + LOG.debug("There are " + (int) numOfSnapshots + " snapshots to be copied."); + + for (TableName table : backupContext.getTables()) { + // Currently we simply set the sub copy tasks by counting the table snapshot number, we can + // calculate the real files' size for the percentage in the future. 
+ // TODO this below + // backupCopier.setSubTaskPercntgInWholeTask(1f / numOfSnapshots); + int res = 0; + String[] args = new String[4]; + args[0] = "-snapshot"; + args[1] = backupContext.getSnapshotName(table); + args[2] = "-copy-to"; + args[3] = backupContext.getBackupStatus(table).getTargetDir(); + + LOG.debug("Copy snapshot " + args[1] + " to " + args[3]); + res = copyService.copy(backupContext, backupManager, conf, BackupCopyService.Type.FULL, args); + // if one snapshot export failed, do not continue for remained snapshots + if (res != 0) { + LOG.error("Exporting Snapshot " + args[1] + " failed with return code: " + res + "."); + + throw new IOException("Failed of exporting snapshot " + args[1] + " to " + args[3] + + " with reason code " + res); + } + + LOG.info("Snapshot copy " + args[1] + " finished."); + } + } + + /** + * Wrap a SnapshotDescription for a target table. + * @param table table + * @return a SnapshotDescription especially for backup. + */ + private SnapshotDescription wrapSnapshotDescription(TableName tableName) { + // Mock a SnapshotDescription from backupContext to call SnapshotManager function, + // Name it in the format "snapshot__" + HBaseProtos.SnapshotDescription.Builder builder = HBaseProtos.SnapshotDescription.newBuilder(); + builder.setTable(tableName.getNameAsString()); + builder.setName("snapshot_" + Long.toString(EnvironmentEdgeManager.currentTime()) + "_" + + tableName.getNamespaceAsString() + "_" + tableName.getQualifierAsString()); + HBaseProtos.SnapshotDescription backupSnapshot = builder.build(); + + LOG.debug("Wrapped a SnapshotDescription " + backupSnapshot.getName() + + " from backupContext to request snapshot for backup."); + + return backupSnapshot; + } + + /** + * Delete HBase snapshot for backup. + * @param backupCtx backup context + * @throws Exception exception + */ + private void deleteSnapshot(BackupContext backupCtx) throws IOException { + + LOG.debug("Trying to delete snapshot for full backup."); + Connection conn = null; + Admin admin = null; + try { + conn = ConnectionFactory.createConnection(conf); + admin = conn.getAdmin(); + for (String snapshotName : backupCtx.getSnapshotNames()) { + if (snapshotName == null) { + continue; + } + LOG.debug("Trying to delete snapshot: " + snapshotName); + admin.deleteSnapshot(snapshotName); + LOG.debug("Deleting the snapshot " + snapshotName + " for backup " + + backupCtx.getBackupId() + " succeeded."); + } + } finally { + if (admin != null) { + admin.close(); + } + if (conn != null) { + conn.close(); + } + } + } + + /** + * Clean up directories with prefix "exportSnapshot-", which are generated when exporting + * snapshots. + * @throws IOException exception + */ + private void cleanupExportSnapshotLog() throws IOException { + FileSystem fs = FSUtils.getCurrentFileSystem(conf); + Path stagingDir = + new Path(conf.get(BackupRestoreConstants.CONF_STAGING_ROOT, fs.getWorkingDirectory() + .toString())); + FileStatus[] files = FSUtils.listStatus(fs, stagingDir); + if (files == null) { + return; + } + for (FileStatus file : files) { + if (file.getPath().getName().startsWith("exportSnapshot-")) { + LOG.debug("Delete log files of exporting snapshot: " + file.getPath().getName()); + if (FSUtils.delete(fs, file.getPath(), true) == false) { + LOG.warn("Can not delete " + file.getPath()); + } + } + } + } + + /** + * Clean up directories with prefix "_distcp_logs-", which are generated when DistCp copying + * hlogs. 
+ * @throws IOException exception + */ + private void cleanupDistCpLog() throws IOException { + Path rootPath = new Path(backupContext.getHLogTargetDir()).getParent(); + FileSystem fs = FileSystem.get(rootPath.toUri(), conf); + FileStatus[] files = FSUtils.listStatus(fs, rootPath); + if (files == null) { + return; + } + for (FileStatus file : files) { + if (file.getPath().getName().startsWith("_distcp_logs")) { + LOG.debug("Delete log files of DistCp: " + file.getPath().getName()); + FSUtils.delete(fs, file.getPath(), true); + } + } + } + + /** + * Clean up the uncompleted data at target directory if the ongoing backup has already entered the + * copy phase. + */ + private void cleanupTargetDir() { + try { + // clean up the uncompleted data at target directory if the ongoing backup has already entered + // the copy phase + LOG.debug("Trying to cleanup up target dir. Current backup phase: " + + backupContext.getPhase()); + if (backupContext.getPhase().equals(BackupPhase.SNAPSHOTCOPY) + || backupContext.getPhase().equals(BackupPhase.INCREMENTAL_COPY) + || backupContext.getPhase().equals(BackupPhase.STORE_MANIFEST)) { + FileSystem outputFs = + FileSystem.get(new Path(backupContext.getTargetRootDir()).toUri(), conf); + + // now treat one backup as a transaction, clean up data that has been partially copied at + // table level + for (TableName table : backupContext.getTables()) { + Path targetDirPath = + new Path(HBackupFileSystem.getTableBackupDir(backupContext.getTargetRootDir(), + backupContext.getBackupId(), table)); + if (outputFs.delete(targetDirPath, true)) { + LOG.info("Cleaning up uncompleted backup data at " + targetDirPath.toString() + + " done."); + } else { + LOG.info("No data has been copied to " + targetDirPath.toString() + "."); + } + + Path tableDir = targetDirPath.getParent(); + FileStatus[] backups = FSUtils.listStatus(outputFs, tableDir); + if (backups == null || backups.length == 0) { + outputFs.delete(tableDir, true); + LOG.debug(tableDir.toString() + " is empty, remove it."); + } + } + } + + } catch (IOException e1) { + LOG.error("Cleaning up uncompleted backup data of " + backupContext.getBackupId() + " at " + + backupContext.getTargetRootDir() + " failed due to " + e1.getMessage() + "."); + } + } + + /** + * Add manifest for the current backup. The manifest is stored + * within the table backup directory. + * @param backupContext The current backup context + * @throws IOException exception + * @throws BackupException exception + */ + private void addManifest(BackupContext backupContext) throws IOException, BackupException { + // set the overall backup phase : store manifest + backupContext.setPhase(BackupPhase.STORE_MANIFEST); + + // avoid action if has been cancelled + if (backupContext.isCancelled()) { + return; + } + + BackupManifest manifest; + + // Since we have each table's backup in its own directory structure, + // we'll store its manifest with the table directory. + for (TableName table : backupContext.getTables()) { + manifest = new BackupManifest(backupContext, table); + ArrayList ancestors = this.backupManager.getAncestors(backupContext, table); + for (BackupImage image : ancestors) { + manifest.addDependentImage(image); + } + + if (backupContext.getType() == BackupType.INCREMENTAL) { + // We'll store the log timestamps for this table only in its manifest. 
+ HashMap> tableTimestampMap = + new HashMap>(); + tableTimestampMap.put(table, backupContext.getIncrTimestampMap().get(table)); + manifest.setIncrTimestampMap(tableTimestampMap); + } + manifest.store(conf); + } + + // For incremental backup, we store a overall manifest in + // /WALs/ + // This is used when created the next incremental backup + if (backupContext.getType() == BackupType.INCREMENTAL) { + manifest = new BackupManifest(backupContext); + // set the table region server start and end timestamps for incremental backup + manifest.setIncrTimestampMap(backupContext.getIncrTimestampMap()); + ArrayList ancestors = this.backupManager.getAncestors(backupContext); + for (BackupImage image : ancestors) { + manifest.addDependentImage(image); + } + manifest.store(conf); + } + } + + /** + * Do incremental copy. + * @param backupContext backup context + */ + private void incrementalCopy(BackupContext backupContext) throws Exception { + + LOG.info("Incremental copy is starting."); + + // set overall backup phase: incremental_copy + backupContext.setPhase(BackupPhase.INCREMENTAL_COPY); + + // avoid action if has been cancelled + if (backupContext.isCancelled()) { + return; + } + + // get incremental backup file list and prepare parms for DistCp + List incrBackupFileList = backupContext.getIncrBackupFileList(); + // filter missing files out (they have been copied by previous backups) + incrBackupFileList = filterMissingFiles(incrBackupFileList); + String[] strArr = incrBackupFileList.toArray(new String[incrBackupFileList.size() + 1]); + strArr[strArr.length - 1] = backupContext.getHLogTargetDir(); + + BackupCopyService copyService = BackupRestoreFactory.getBackupCopyService(conf); + int res = copyService.copy(backupContext, backupManager, conf, + BackupCopyService.Type.INCREMENTAL, strArr); + + if (res != 0) { + LOG.error("Copy incremental log files failed with return code: " + res + "."); + throw new IOException("Failed of Hadoop Distributed Copy from " + incrBackupFileList + " to " + + backupContext.getHLogTargetDir()); + } + LOG.info("Incremental copy from " + incrBackupFileList + " to " + + backupContext.getHLogTargetDir() + " finished."); + + } + + private List filterMissingFiles(List incrBackupFileList) throws IOException { + FileSystem fs = FileSystem.get(conf); + List list = new ArrayList(); + for(String file : incrBackupFileList){ + if(fs.exists(new Path(file))){ + list.add(file); + } else{ + LOG.warn("Can't find file: "+file); + } + } + return list; + } + + private String getMessage(Exception e) { + String msg = e.getMessage(); + if (msg == null || msg.equals("")) { + msg = e.getClass().getName(); + } + return msg; + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManager.java new file mode 100644 index 0000000..a4b0a0a --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManager.java @@ -0,0 +1,512 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.backup.impl; + +import com.google.common.util.concurrent.ThreadFactoryBuilder; + +import java.io.Closeable; +import java.io.IOException; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.Iterator; +import java.util.List; +import java.util.Set; +import java.util.concurrent.CancellationException; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Future; +import java.util.concurrent.LinkedBlockingQueue; +import java.util.concurrent.ThreadPoolExecutor; +import java.util.concurrent.TimeUnit; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.backup.BackupType; +import org.apache.hadoop.hbase.backup.HBackupFileSystem; +import org.apache.hadoop.hbase.backup.impl.BackupHandler.BackupState; +import org.apache.hadoop.hbase.backup.impl.BackupManifest.BackupImage; +import org.apache.hadoop.hbase.backup.impl.BackupUtil.BackupCompleteData; +import org.apache.hadoop.hbase.backup.master.BackupLogCleaner; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; +import org.apache.hadoop.hbase.client.Admin; +import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.client.ConnectionFactory; + +/** + * Handles backup requests on server-side, creates backup context records in hbase:backup + * to keep track backup. The timestamps kept in hbase:backup table will be used for future + * incremental backup. Creates BackupContext and DispatchRequest. + */ +@InterfaceAudience.Private +@InterfaceStability.Evolving +public class BackupManager implements Closeable { + private static final Log LOG = LogFactory.getLog(BackupManager.class); + + private Configuration conf = null; + private BackupContext backupContext = null; + + private ExecutorService pool = null; + + private boolean backupComplete = false; + + private BackupSystemTable systemTable; + + private final Connection conn; + + /** + * Backup manager constructor. + * @param conf configuration + * @throws IOException exception + */ + public BackupManager(Configuration conf) throws IOException { + if (!conf.getBoolean(HConstants.BACKUP_ENABLE_KEY, HConstants.BACKUP_ENABLE_DEFAULT)) { + throw new BackupException("HBase backup is not enabled. Check your " + + HConstants.BACKUP_ENABLE_KEY + " setting."); + } + this.conf = conf; + this.conn = ConnectionFactory.createConnection(conf); // TODO: get Connection from elsewhere? 
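BackupManager refuses to construct when the backup feature flag resolves to false (the check a few lines above). A minimal sketch of standing one up from client code, with an invented class name; setting the flag explicitly here just makes that requirement visible, and initialize()/close() are the methods defined later in this class:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.backup.impl.BackupManager;

public class BackupManagerSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    conf.setBoolean(HConstants.BACKUP_ENABLE_KEY, true); // otherwise the constructor throws BackupException
    try (BackupManager manager = new BackupManager(conf)) {
      manager.initialize(); // fails if hbase:backup already has a RUNNING session
    }
  }
}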
+ this.systemTable = new BackupSystemTable(conn); + Runtime.getRuntime().addShutdownHook(new ExitHandler()); + } + + /** + * This method modifies the master's configuration in order to inject backup-related features + * @param conf configuration + */ + public static void decorateMasterConfiguration(Configuration conf) { + if (!isBackupEnabled(conf)) { + return; + } + String plugins = conf.get(HConstants.HBASE_MASTER_LOGCLEANER_PLUGINS); + String cleanerClass = BackupLogCleaner.class.getCanonicalName(); + if (!plugins.contains(cleanerClass)) { + conf.set(HConstants.HBASE_MASTER_LOGCLEANER_PLUGINS, plugins + "," + cleanerClass); + } + if (LOG.isTraceEnabled()) { + LOG.trace("Added log cleaner: " + cleanerClass); + } + } + + private static boolean isBackupEnabled(Configuration conf) { + return conf.getBoolean(HConstants.BACKUP_ENABLE_KEY, HConstants.BACKUP_ENABLE_DEFAULT); + } + + // TODO: remove this on the server side + private class ExitHandler extends Thread { + public ExitHandler() { + super("Backup Manager Exit Handler"); + } + + @Override + public void run() { + if (backupContext != null && !backupComplete) { + + // the program is exiting while the backup is not complete; mark the context as cancelled so + // the submitted backup handler takes no further action + backupContext.setCancelled(true); + + LOG.debug("Backup is cancelled due to forced program exit."); + try { + cancelBackup(backupContext.getBackupId()); + } catch (Exception e) { + String msg = e.getMessage(); + if (msg == null || msg.equals("")) { + msg = e.getClass().getName(); + } + LOG.error("Failed to cancel backup " + backupContext.getBackupId() + " due to " + msg); + } + } + close(); + } + } + + /** + * Get configuration + * @return configuration + */ + Configuration getConf() { + return conf; + } + + /** + * Cancel the ongoing backup via backup id. + * @param backupId The id of the ongoing backup to be cancelled + * @throws Exception exception + */ + private void cancelBackup(String backupId) throws Exception { + // TODO: will be implemented in Phase 2: HBASE-14125 + LOG.debug("Try to cancel the backup " + backupId + ". The feature is NOT implemented yet."); + + } + + /** + * Stop all the work of backup. + */ + @Override + public void close() { + // currently, we shut down all ongoing backup handlers immediately; we may need to do something + // like recording the failed list somewhere later + if (this.pool != null) { + this.pool.shutdownNow(); + } + if (systemTable != null) { + try { + systemTable.close(); + } catch (Exception e) { + LOG.error(e); + } + } + if (conn != null) { + try { + conn.close(); + } catch (IOException e) { + LOG.error(e); + } + } + } + + /** + * Create a BackupContext based on input backup request. + * @param backupId backup id + * @param type type + * @param tableList table list + * @param targetRootDir root dir + * @return BackupContext context + * @throws BackupException exception + */ + protected BackupContext createBackupContext(String backupId, BackupType type, + List tableList, String targetRootDir) throws BackupException { + + if (targetRootDir == null) { + throw new BackupException("Wrong backup request parameter: target backup root directory"); + } + + if (type == BackupType.FULL && tableList == null) { + // If the table list is null for a full backup, it means back up all tables. Then fill the + // table list with all user tables from meta. If no table is available, throw the request + // exception.
+ + HTableDescriptor[] htds = null; + try (Admin hbadmin = conn.getAdmin()) { + htds = hbadmin.listTables(); + } catch (Exception e) { + throw new BackupException(e); + } + + if (htds == null) { + throw new BackupException("No table exists for full backup of all tables."); + } else { + tableList = new ArrayList<>(); + for (HTableDescriptor hTableDescriptor : htds) { + tableList.add(hTableDescriptor.getTableName()); + } + + LOG.info("Full backup of all tables available in the cluster: " + tableList); + } + } + + // there are one or more tables in the table list + return new BackupContext(backupId, type, tableList.toArray(new TableName[tableList.size()]), + targetRootDir); + } + + /** + * Check if there is any ongoing backup. Currently, we rely only on the status kept in + * hbase:backup. We need to consider handling the case of orphan records in the future. + * Otherwise, all subsequent requests will fail. + * @return the ongoing backup id if an ongoing backup exists, otherwise null + * @throws IOException exception + */ + private String getOngoingBackupId() throws IOException { + + ArrayList sessions = systemTable.getBackupContexts(BackupState.RUNNING); + if (sessions.size() == 0) { + return null; + } + return sessions.get(0).getBackupId(); + } + + /** + * Start the backup manager service. + * @throws IOException exception + */ + public void initialize() throws IOException { + String ongoingBackupId = this.getOngoingBackupId(); + if (ongoingBackupId != null) { + LOG.info("There is an ongoing backup " + ongoingBackupId + + ". Cannot launch a new backup until the ongoing one completes."); + throw new BackupException("There is an ongoing backup."); + } + + // Initialize thread pools + int nrThreads = this.conf.getInt("hbase.backup.threads.max", 1); + ThreadFactoryBuilder builder = new ThreadFactoryBuilder(); + builder.setNameFormat("BackupHandler-%1$d"); + this.pool = + new ThreadPoolExecutor(nrThreads, nrThreads, 60, TimeUnit.SECONDS, + new LinkedBlockingQueue(), builder.build()); + ((ThreadPoolExecutor) pool).allowCoreThreadTimeOut(true); + } + + /** + * Dispatch and handle a backup request. + * @param backupContext backup context + * @throws BackupException exception + */ + public void dispatchRequest(BackupContext backupContext) throws BackupException { + + this.backupContext = backupContext; + + LOG.info("Got a backup request: " + "Type: " + backupContext.getType() + "; Tables: " + + backupContext.getTableNames() + "; TargetRootDir: " + backupContext.getTargetRootDir()); + + // dispatch the request to a backup handler and put it in the handler map + + BackupHandler handler = new BackupHandler(this.backupContext, this, conf, this.conn); + Future future = this.pool.submit(handler); + // wait for the execution to complete + try { + future.get(); + } catch (InterruptedException e) { + throw new BackupException(e); + } catch (CancellationException e) { + throw new BackupException(e); + } catch (ExecutionException e) { + throw new BackupException(e); + } + + // mark the backup complete for exit handler's processing + backupComplete = true; + + LOG.info("Backup request " + backupContext.getBackupId() + " has been executed."); + } + + /** + * Get direct ancestors of the current backup.
+ * @param backupCtx The backup context for the current backup + * @return The ancestors for the current backup + * @throws IOException exception + * @throws BackupException exception + */ + protected ArrayList getAncestors(BackupContext backupCtx) throws IOException, + BackupException { + LOG.debug("Getting the direct ancestors of the current backup ..."); + + ArrayList ancestors = new ArrayList(); + + // full backup does not have ancestor + if (backupCtx.getType() == BackupType.FULL) { + LOG.debug("Current backup is a full backup, no direct ancestor for it."); + return ancestors; + } + + // get all backup history list in descending order + + ArrayList allHistoryList = getBackupHistory(); + for (BackupCompleteData backup : allHistoryList) { + BackupImage image = + new BackupImage(backup.getBackupToken(), BackupType.valueOf(backup.getType()), + backup.getBackupRootPath(), + backup.getTableList(), Long.parseLong(backup.getStartTime()), Long.parseLong(backup + .getEndTime())); + // add the full backup image as an ancestor until the last incremental backup + if (backup.getType().equals(BackupType.FULL.toString())) { + // check the backup image coverage, if previous image could be covered by the newer ones, + // then no need to add + if (!BackupManifest.canCoverImage(ancestors, image)) { + ancestors.add(image); + } + } else { + // found last incremental backup, if previously added full backup ancestor images can cover + // it, then this incremental ancestor is not the dependent of the current incremental + // backup, that is to say, this is the backup scope boundary of current table set. + // Otherwise, this incremental backup ancestor is the dependent ancestor of the ongoing + // incremental backup + if (BackupManifest.canCoverImage(ancestors, image)) { + LOG.debug("Met the backup boundary of the current table set. " + + "The root full backup images for the current backup scope:"); + for (BackupImage image1 : ancestors) { + LOG.debug(" BackupId: " + image1.getBackupId() + ", Backup directory: " + + image1.getRootDir()); + } + } else { + Path logBackupPath = + HBackupFileSystem.getLogBackupPath(backup.getBackupRootPath(), + backup.getBackupToken()); + LOG.debug("Current backup has an incremental backup ancestor, " + + "touching its image manifest in " + logBackupPath.toString() + + " to construct the dependency."); + + BackupManifest lastIncrImgManifest = new BackupManifest(conf, logBackupPath); + BackupImage lastIncrImage = lastIncrImgManifest.getBackupImage(); + ancestors.add(lastIncrImage); + + LOG.debug("Last dependent incremental backup image information:"); + LOG.debug(" Token: " + lastIncrImage.getBackupId()); + LOG.debug(" Backup directory: " + lastIncrImage.getRootDir()); + } + } + } + LOG.debug("Got " + ancestors.size() + " ancestors for the current backup."); + return ancestors; + } + + /** + * Get the direct ancestors of this backup for one table involved. 
+ * @param backupContext backup context + * @param table table + * @return backupImages on the dependency list + * @throws BackupException exception + * @throws IOException exception + */ + protected ArrayList getAncestors(BackupContext backupContext, TableName table) + throws BackupException, IOException { + ArrayList ancestors = getAncestors(backupContext); + ArrayList tableAncestors = new ArrayList(); + for (BackupImage image : ancestors) { + if (image.hasTable(table)) { + tableAncestors.add(image); + if (image.getType() == BackupType.FULL) { + break; + } + } + } + return tableAncestors; + } + + /* + * hbase:backup operations + */ + + /** + * Updates status (state) of a backup session in a persistent store + * @param context context + * @throws IOException exception + */ + public void updateBackupStatus(BackupContext context) throws IOException { + systemTable.updateBackupStatus(context); + } + + /** + * Read the last backup start code (timestamp) of last successful backup. Will return null + * if there is no startcode stored in hbase:backup or the value is of length 0. These two + * cases indicate there is no successful backup completed so far. + * @return the timestamp of a last successful backup + * @throws IOException exception + */ + public String readBackupStartCode() throws IOException { + return systemTable.readBackupStartCode(); + } + + /** + * Write the start code (timestamp) to hbase:backup. If passed in null, then write 0 byte. + * @param startCode start code + * @throws IOException exception + */ + public void writeBackupStartCode(Long startCode) throws IOException { + systemTable.writeBackupStartCode(startCode); + } + + /** + * Get the RS log information after the last log roll from hbase:backup. + * @return RS log info + * @throws IOException exception + */ + public HashMap readRegionServerLastLogRollResult() throws IOException { + return systemTable.readRegionServerLastLogRollResult(); + } + + /** + * Get all completed backup information (in desc order by time) + * @return history info of BackupCompleteData + * @throws IOException exception + */ + public ArrayList getBackupHistory() throws IOException { + return systemTable.getBackupHistory(); + } + + /** + * Write the current timestamps for each regionserver to hbase:backup after a successful full or + * incremental backup. Each table may have a different set of log timestamps. The saved timestamp + * is of the last log file that was backed up already. + * @param tables tables + * @throws IOException exception + */ + public void writeRegionServerLogTimestamp(Set tables, + HashMap newTimestamps) throws IOException { + systemTable.writeRegionServerLogTimestamp(tables, newTimestamps); + } + + /** + * Read the timestamp for each region server log after the last successful backup. Each table has + * its own set of the timestamps. + * @return the timestamp for each region server. key: tableName value: + * RegionServer,PreviousTimeStamp + * @throws IOException exception + */ + public HashMap> readLogTimestampMap() throws IOException { + return systemTable.readLogTimestampMap(); + } + + /** + * Return the current tables covered by incremental backup. 
+ * @return set of tableNames + * @throws IOException exception + */ + public Set getIncrementalBackupTableSet() throws IOException { + return systemTable.getIncrementalBackupTableSet(); + } + + /** + * Adds set of tables to overall incremental backup table set + * @param tables tables + * @throws IOException exception + */ + public void addIncrementalBackupTableSet(Set tables) throws IOException { + systemTable.addIncrementalBackupTableSet(tables); + } + + /** + * Saves list of WAL files after incremental backup operation. These files will be stored until + * TTL expiration and are used by Backup Log Cleaner plugin to determine which WAL files can be + * safely purged. + */ + public void recordWALFiles(List files) throws IOException { + systemTable.addWALFiles(files, backupContext.getBackupId()); + } + + /** + * Get WAL files iterator + * @return WAL files iterator from hbase:backup + * @throws IOException + */ + public Iterator getWALFilesFromBackupSystem() throws IOException { + return systemTable.getWALFilesIterator(); + } + + public Connection getConnection() { + return conn; + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManifest.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManifest.java new file mode 100644 index 0000000..6264fc5 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManifest.java @@ -0,0 +1,762 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hbase.backup.impl; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Map.Entry; +import java.util.TreeMap; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FSDataInputStream; +import org.apache.hadoop.fs.FSDataOutputStream; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.backup.BackupType; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; +import org.apache.hadoop.hbase.exceptions.DeserializationException; +import org.apache.hadoop.hbase.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.protobuf.generated.BackupProtos; +import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos; +import org.apache.hadoop.hbase.util.FSUtils; + +import com.google.protobuf.InvalidProtocolBufferException; + + +/** + * Backup manifest Contains all the meta data of a backup image. The manifest info will be bundled + * as manifest file together with data. So that each backup image will contain all the info needed + * for restore. + */ +@InterfaceAudience.Private +@InterfaceStability.Evolving +public class BackupManifest { + + private static final Log LOG = LogFactory.getLog(BackupManifest.class); + + // manifest file name + public static final String MANIFEST_FILE_NAME = ".backup.manifest"; + + // manifest file version, current is 1.0 + public static final String MANIFEST_VERSION = "1.0"; + + // backup image, the dependency graph is made up by series of backup images + + public static class BackupImage implements Comparable { + + private String backupId; + private BackupType type; + private String rootDir; + private List tableList; + private long startTs; + private long completeTs; + private ArrayList ancestors; + + public BackupImage() { + super(); + } + + public BackupImage(String backupId, BackupType type, String rootDir, + List tableList, long startTs, long completeTs) { + this.backupId = backupId; + this.type = type; + this.rootDir = rootDir; + this.tableList = tableList; + this.startTs = startTs; + this.completeTs = completeTs; + } + + static BackupImage fromProto(BackupProtos.BackupImage im) { + String backupId = im.getBackupId(); + String rootDir = im.getRootDir(); + long startTs = im.getStartTs(); + long completeTs = im.getCompleteTs(); + List tableListList = im.getTableListList(); + List tableList = new ArrayList(); + for(HBaseProtos.TableName tn : tableListList) { + tableList.add(ProtobufUtil.toTableName(tn)); + } + BackupType type = + im.getBackupType() == BackupProtos.BackupType.FULL ? 
BackupType.FULL:
+          BackupType.INCREMENTAL;
+
+      return new BackupImage(backupId, type, rootDir, tableList, startTs, completeTs);
+    }
+
+    BackupProtos.BackupImage toProto() {
+      BackupProtos.BackupImage.Builder builder = BackupProtos.BackupImage.newBuilder();
+      builder.setBackupId(backupId);
+      builder.setCompleteTs(completeTs);
+      builder.setStartTs(startTs);
+      builder.setRootDir(rootDir);
+      if (type == BackupType.FULL) {
+        builder.setBackupType(BackupProtos.BackupType.FULL);
+      } else {
+        builder.setBackupType(BackupProtos.BackupType.INCREMENTAL);
+      }
+
+      for (TableName name : tableList) {
+        builder.addTableList(ProtobufUtil.toProtoTableName(name));
+      }
+
+      if (ancestors != null) {
+        for (BackupImage im : ancestors) {
+          builder.addAncestors(im.toProto());
+        }
+      }
+
+      return builder.build();
+    }
+
+    public String getBackupId() {
+      return backupId;
+    }
+
+    public void setBackupId(String backupId) {
+      this.backupId = backupId;
+    }
+
+    public BackupType getType() {
+      return type;
+    }
+
+    public void setType(BackupType type) {
+      this.type = type;
+    }
+
+    public String getRootDir() {
+      return rootDir;
+    }
+
+    public void setRootDir(String rootDir) {
+      this.rootDir = rootDir;
+    }
+
+    public List<TableName> getTableNames() {
+      return tableList;
+    }
+
+    public void setTableList(List<TableName> tableList) {
+      this.tableList = tableList;
+    }
+
+    public long getStartTs() {
+      return startTs;
+    }
+
+    public void setStartTs(long startTs) {
+      this.startTs = startTs;
+    }
+
+    public long getCompleteTs() {
+      return completeTs;
+    }
+
+    public void setCompleteTs(long completeTs) {
+      this.completeTs = completeTs;
+    }
+
+    public ArrayList<BackupImage> getAncestors() {
+      if (this.ancestors == null) {
+        this.ancestors = new ArrayList<BackupImage>();
+      }
+      return this.ancestors;
+    }
+
+    public void addAncestor(BackupImage backupImage) {
+      this.getAncestors().add(backupImage);
+    }
+
+    public boolean hasAncestor(String token) {
+      for (BackupImage image : this.getAncestors()) {
+        if (image.getBackupId().equals(token)) {
+          return true;
+        }
+      }
+      return false;
+    }
+
+    public boolean hasTable(TableName table) {
+      for (TableName t : tableList) {
+        if (t.equals(table)) {
+          return true;
+        }
+      }
+      return false;
+    }
+
+    @Override
+    public int compareTo(BackupImage other) {
+      String thisBackupId = this.getBackupId();
+      String otherBackupId = other.getBackupId();
+      Long thisTS = Long.valueOf(thisBackupId.substring(thisBackupId.lastIndexOf("_") + 1));
+      Long otherTS = Long.valueOf(otherBackupId.substring(otherBackupId.lastIndexOf("_") + 1));
+      return thisTS.compareTo(otherTS);
+    }
+  }
+
+  // manifest version
+  private String version = MANIFEST_VERSION;
+
+  // hadoop hbase configuration
+  protected Configuration config = null;
+
+  // backup root directory
+  private String rootDir = null;
+
+  // backup image directory
+  private String tableBackupDir = null;
+
+  // backup log directory if this is an incremental backup
+  private String logBackupDir = null;
+
+  // backup token
+  private String backupId;
+
+  // backup type, full or incremental
+  private BackupType type;
+
+  // the table list for the backup
+  private ArrayList<TableName> tableList;
+
+  // actual start timestamp of the backup process
+  private long startTs;
+
+  // actual complete timestamp of the backup process
+  private long completeTs;
+
+  // total bytes for table backup image
+  private long totalBytes;
+
+  // total bytes for the backed-up logs for incremental backup
+  private long logBytes;
+
+  // the region server timestamps for tables:
+  // <table, <region server, last WAL timestamp>>
+  private Map<TableName, HashMap<String, Long>> incrTimeRanges;
+
+  // dependency of this backup, including all
the dependent images to do PIT recovery + private Map dependency; + + // the indicator of the image compaction + private boolean isCompacted = false; + /** + * Construct manifest for a ongoing backup. + * @param backupCtx The ongoing backup context + */ + public BackupManifest(BackupContext backupCtx) { + this.backupId = backupCtx.getBackupId(); + this.type = backupCtx.getType(); + this.rootDir = backupCtx.getTargetRootDir(); + if (this.type == BackupType.INCREMENTAL) { + this.logBackupDir = backupCtx.getHLogTargetDir(); + this.logBytes = backupCtx.getTotalBytesCopied(); + } + this.startTs = backupCtx.getStartTs(); + this.completeTs = backupCtx.getEndTs(); + this.loadTableList(backupCtx.getTableNames()); + } + + /** + * Construct a table level manifest for a backup of the named table. + * @param backupCtx The ongoing backup context + */ + public BackupManifest(BackupContext backupCtx, TableName table) { + this.backupId = backupCtx.getBackupId(); + this.type = backupCtx.getType(); + this.rootDir = backupCtx.getTargetRootDir(); + this.tableBackupDir = backupCtx.getBackupStatus(table).getTargetDir(); + if (this.type == BackupType.INCREMENTAL) { + this.logBackupDir = backupCtx.getHLogTargetDir(); + this.logBytes = backupCtx.getTotalBytesCopied(); + } + this.startTs = backupCtx.getStartTs(); + this.completeTs = backupCtx.getEndTs(); + List tables = new ArrayList(); + tables.add(table); + this.loadTableList(tables); + } + + /** + * Construct manifest from a backup directory. + * @param conf configuration + * @param backupPath backup path + * @throws BackupException exception + */ + + public BackupManifest(Configuration conf, Path backupPath) throws BackupException { + if (LOG.isDebugEnabled()) { + LOG.debug("Loading manifest from: " + backupPath.toString()); + } + // The input backupDir may not exactly be the backup table dir. + // It could be the backup log dir where there is also a manifest file stored. + // This variable's purpose is to keep the correct and original location so + // that we can store/persist it. + this.tableBackupDir = backupPath.toString(); + this.config = conf; + try { + + FileSystem fs = backupPath.getFileSystem(conf); + FileStatus[] subFiles = FSUtils.listStatus(fs, backupPath); + if (subFiles == null) { + String errorMsg = backupPath.toString() + " does not exist"; + LOG.error(errorMsg); + throw new IOException(errorMsg); + } + for (FileStatus subFile : subFiles) { + if (subFile.getPath().getName().equals(MANIFEST_FILE_NAME)) { + + // load and set manifest field from file content + FSDataInputStream in = fs.open(subFile.getPath()); + long len = subFile.getLen(); + byte[] pbBytes = new byte[(int) len]; + in.readFully(pbBytes); + BackupProtos.BackupManifest proto = null; + try{ + proto = parseFrom(pbBytes); + } catch(Exception e){ + throw new BackupException(e); + } + this.version = proto.getVersion(); + this.backupId = proto.getBackupId(); + this.type = BackupType.valueOf(proto.getType().name()); + // Here the parameter backupDir is where the manifest file is. 
+ // There should always be a manifest file under: + // backupRootDir/namespace/table/backupId/.backup.manifest + this.rootDir = backupPath.getParent().getParent().getParent().toString(); + + Path p = backupPath.getParent(); + if (p.getName().equals(HConstants.HREGION_LOGDIR_NAME)) { + this.rootDir = p.getParent().toString(); + } else { + this.rootDir = p.getParent().getParent().toString(); + } + + loadTableList(proto); + this.startTs = proto.getStartTs(); + this.completeTs = proto.getCompleteTs(); + this.totalBytes = proto.getTotalBytes(); + if (this.type == BackupType.INCREMENTAL) { + this.logBytes = proto.getLogBytes(); + //TODO: convert will be implemented by future jira + } + + loadIncrementalTimestampMap(proto); + loadDependency(proto); + this.isCompacted = proto.getCompacted(); + //TODO: merge will be implemented by future jira + LOG.debug("Loaded manifest instance from manifest file: " + + FSUtils.getPath(subFile.getPath())); + return; + } + } + String errorMsg = "No manifest file found in: " + backupPath.toString(); + LOG.error(errorMsg); + throw new IOException(errorMsg); + + } catch (IOException e) { + LOG.error(e); + throw new BackupException(e.getMessage()); + } + } + + private void loadIncrementalTimestampMap(BackupProtos.BackupManifest proto) { + List list = proto.getTstMapList(); + if(list == null || list.size() == 0) return; + this.incrTimeRanges = new HashMap>(); + for(BackupProtos.TableServerTimestamp tst: list){ + TableName tn = ProtobufUtil.toTableName(tst.getTable()); + HashMap map = this.incrTimeRanges.get(tn); + if(map == null){ + map = new HashMap(); + this.incrTimeRanges.put(tn, map); + } + List listSt = tst.getServerTimestampList(); + for(BackupProtos.ServerTimestamp stm: listSt) { + map.put(stm.getServer(), stm.getTimestamp()); + } + } + } + + private void loadDependency(BackupProtos.BackupManifest proto) { + dependency = new HashMap(); + List list = proto.getDependentBackupImageList(); + for (BackupProtos.BackupImage im : list) { + dependency.put(im.getBackupId(), BackupImage.fromProto(im)); + } + } + + private void loadTableList(BackupProtos.BackupManifest proto) { + this.tableList = new ArrayList(); + List list = proto.getTableListList(); + for (HBaseProtos.TableName name: list) { + this.tableList.add(ProtobufUtil.toTableName(name)); + } + } + + public BackupType getType() { + return type; + } + + public void setType(BackupType type) { + this.type = type; + } + + /** + * Loads table list. + * @param tableList Table list + */ + private void loadTableList(List tableList) { + + this.tableList = this.getTableList(); + if (this.tableList.size() > 0) { + this.tableList.clear(); + } + for (int i = 0; i < tableList.size(); i++) { + this.tableList.add(tableList.get(i)); + } + + LOG.debug(tableList.size() + " tables exist in table set."); + } + + /** + * Get the table set of this image. + * @return The table set list + */ + public ArrayList getTableList() { + if (this.tableList == null) { + this.tableList = new ArrayList(); + } + return this.tableList; + } + + /** + * Persist the manifest file. + * @throws IOException IOException when storing the manifest file. + */ + + public void store(Configuration conf) throws BackupException { + byte[] data = toByteArray(); + // write the file, overwrite if already exist + Path manifestFilePath = + new Path(new Path((this.tableBackupDir != null ? 
this.tableBackupDir : this.logBackupDir))
+        , MANIFEST_FILE_NAME);
+    try {
+      FSDataOutputStream out =
+          manifestFilePath.getFileSystem(conf).create(manifestFilePath, true);
+      out.write(data);
+      out.close();
+    } catch (IOException e) {
+      LOG.error(e);
+      throw new BackupException(e.getMessage());
+    }
+
+    LOG.debug("Manifest file stored to "
+        + (this.tableBackupDir != null ? this.tableBackupDir : this.logBackupDir)
+        + Path.SEPARATOR + MANIFEST_FILE_NAME);
+  }
+
+  /**
+   * Protobuf serialization
+   * @return The manifest serialized using pb
+   */
+  public byte[] toByteArray() {
+    BackupProtos.BackupManifest.Builder builder = BackupProtos.BackupManifest.newBuilder();
+    builder.setVersion(this.version);
+    builder.setBackupId(this.backupId);
+    builder.setType(BackupProtos.BackupType.valueOf(this.type.name()));
+    setTableList(builder);
+    builder.setStartTs(this.startTs);
+    builder.setCompleteTs(this.completeTs);
+    builder.setTotalBytes(this.totalBytes);
+    if (this.type == BackupType.INCREMENTAL) {
+      builder.setLogBytes(this.logBytes);
+    }
+    setIncrementalTimestampMap(builder);
+    setDependencyMap(builder);
+    builder.setCompacted(this.isCompacted);
+    return builder.build().toByteArray();
+  }
+
+  private void setIncrementalTimestampMap(BackupProtos.BackupManifest.Builder builder) {
+    if (this.incrTimeRanges == null) return;
+    for (Entry<TableName, HashMap<String, Long>> entry : this.incrTimeRanges.entrySet()) {
+      TableName key = entry.getKey();
+      HashMap<String, Long> value = entry.getValue();
+      BackupProtos.TableServerTimestamp.Builder tstBuilder =
+          BackupProtos.TableServerTimestamp.newBuilder();
+      tstBuilder.setTable(ProtobufUtil.toProtoTableName(key));
+
+      for (String s : value.keySet()) {
+        BackupProtos.ServerTimestamp.Builder stBuilder = BackupProtos.ServerTimestamp.newBuilder();
+        stBuilder.setServer(s);
+        stBuilder.setTimestamp(value.get(s));
+        tstBuilder.addServerTimestamp(stBuilder.build());
+      }
+      builder.addTstMap(tstBuilder.build());
+    }
+  }
+
+  private void setDependencyMap(BackupProtos.BackupManifest.Builder builder) {
+    for (BackupImage image : getDependency().values()) {
+      builder.addDependentBackupImage(image.toProto());
+    }
+  }
+
+  private void setTableList(BackupProtos.BackupManifest.Builder builder) {
+    for (TableName name : tableList) {
+      builder.addTableList(ProtobufUtil.toProtoTableName(name));
+    }
+  }
+
+  /**
+   * Parse protobuf from byte array
+   * @param pbBytes A pb serialized BackupManifest instance
+   * @return An instance of BackupProtos.BackupManifest made from the bytes
+   * @throws DeserializationException if the protobuf message cannot be parsed
+   */
+  private static BackupProtos.BackupManifest parseFrom(final byte[] pbBytes)
+      throws DeserializationException {
+    BackupProtos.BackupManifest proto;
+    try {
+      proto = BackupProtos.BackupManifest.parseFrom(pbBytes);
+    } catch (InvalidProtocolBufferException e) {
+      throw new DeserializationException(e);
+    }
+    return proto;
+  }
+
+  /**
+   * Get manifest file version
+   * @return version
+   */
+  public String getVersion() {
+    return version;
+  }
+
+  /**
+   * Get this backup image.
+   * @return the backup image.
+   */
+  public BackupImage getBackupImage() {
+    return this.getDependency().get(this.backupId);
+  }
+
+  /**
+   * Add dependent backup image for this backup.
+   * @param image The direct dependent backup image
+   */
+  public void addDependentImage(BackupImage image) {
+    this.getDependency().get(this.backupId).addAncestor(image);
+    this.setDependencyMap(this.getDependency(), image);
+  }
+
+  /**
+   * Get all dependent backup images. The image of this backup is also contained.
+ * @return The dependent backup images map + */ + public Map getDependency() { + if (this.dependency == null) { + this.dependency = new HashMap(); + LOG.debug(this.rootDir + " " + this.backupId + " " + this.type); + this.dependency.put(this.backupId, + new BackupImage(this.backupId, this.type, this.rootDir, tableList, this.startTs, + this.completeTs)); + } + return this.dependency; + } + + /** + * Set the incremental timestamp map directly. + * @param incrTimestampMap timestamp map + */ + public void setIncrTimestampMap(HashMap> incrTimestampMap) { + this.incrTimeRanges = incrTimestampMap; + } + + + public Map> getIncrTimestampMap() { + if (this.incrTimeRanges == null) { + this.incrTimeRanges = new HashMap>(); + } + return this.incrTimeRanges; + } + + + /** + * Get the image list of this backup for restore in time order. + * @param reverse If true, then output in reverse order, otherwise in time order from old to new + * @return the backup image list for restore in time order + */ + public ArrayList getRestoreDependentList(boolean reverse) { + TreeMap restoreImages = new TreeMap(); + for (BackupImage image : this.getDependency().values()) { + restoreImages.put(Long.valueOf(image.startTs), image); + } + return new ArrayList(reverse ? (restoreImages.descendingMap().values()) + : (restoreImages.values())); + } + + /** + * Get the dependent image list for a specific table of this backup in time order from old to new + * if want to restore to this backup image level. + * @param table table + * @return the backup image list for a table in time order + */ + public ArrayList getDependentListByTable(TableName table) { + ArrayList tableImageList = new ArrayList(); + ArrayList imageList = getRestoreDependentList(true); + for (BackupImage image : imageList) { + if (image.hasTable(table)) { + tableImageList.add(image); + if (image.getType() == BackupType.FULL) { + break; + } + } + } + Collections.reverse(tableImageList); + return tableImageList; + } + + /** + * Get the full dependent image list in the whole dependency scope for a specific table of this + * backup in time order from old to new. + * @param table table + * @return the full backup image list for a table in time order in the whole scope of the + * dependency of this image + */ + public ArrayList getAllDependentListByTable(TableName table) { + ArrayList tableImageList = new ArrayList(); + ArrayList imageList = getRestoreDependentList(false); + for (BackupImage image : imageList) { + if (image.hasTable(table)) { + tableImageList.add(image); + } + } + return tableImageList; + } + + + /** + * Recursively set the dependency map of the backup images. + * @param map The dependency map + * @param image The backup image + */ + private void setDependencyMap(Map map, BackupImage image) { + if (image == null) { + return; + } else { + map.put(image.getBackupId(), image); + for (BackupImage img : image.getAncestors()) { + setDependencyMap(map, img); + } + } + } + + /** + * Check whether backup image1 could cover backup image2 or not. + * @param image1 backup image 1 + * @param image2 backup image 2 + * @return true if image1 can cover image2, otherwise false + */ + public static boolean canCoverImage(BackupImage image1, BackupImage image2) { + // image1 can cover image2 only when the following conditions are satisfied: + // - image1 must not be an incremental image; + // - image1 must be taken after image2 has been taken; + // - table set of image1 must cover the table set of image2. 
+ if (image1.getType() == BackupType.INCREMENTAL) { + return false; + } + if (image1.getStartTs() < image2.getStartTs()) { + return false; + } + List image1TableList = image1.getTableNames(); + List image2TableList = image2.getTableNames(); + boolean found = false; + for (int i = 0; i < image2TableList.size(); i++) { + found = false; + for (int j = 0; j < image1TableList.size(); j++) { + if (image2TableList.get(i).equals(image1TableList.get(j))) { + found = true; + break; + } + } + if (!found) { + return false; + } + } + + LOG.debug("Backup image " + image1.getBackupId() + " can cover " + image2.getBackupId()); + return true; + } + + /** + * Check whether backup image set could cover a backup image or not. + * @param fullImages The backup image set + * @param image The target backup image + * @return true if fullImages can cover image, otherwise false + */ + public static boolean canCoverImage(ArrayList fullImages, BackupImage image) { + // fullImages can cover image only when the following conditions are satisfied: + // - each image of fullImages must not be an incremental image; + // - each image of fullImages must be taken after image has been taken; + // - sum table set of fullImages must cover the table set of image. + for (BackupImage image1 : fullImages) { + if (image1.getType() == BackupType.INCREMENTAL) { + return false; + } + if (image1.getStartTs() < image.getStartTs()) { + return false; + } + } + + ArrayList image1TableList = new ArrayList(); + for (BackupImage image1 : fullImages) { + List tableList = image1.getTableNames(); + for (TableName table : tableList) { + image1TableList.add(table.getNameAsString()); + } + } + ArrayList image2TableList = new ArrayList(); + List tableList = image.getTableNames(); + for (TableName table : tableList) { + image2TableList.add(table.getNameAsString()); + } + + for (int i = 0; i < image2TableList.size(); i++) { + if (image1TableList.contains(image2TableList.get(i)) == false) { + return false; + } + } + + LOG.debug("Full image set can cover image " + image.getBackupId()); + return true; + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupRestoreConstants.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupRestoreConstants.java new file mode 100644 index 0000000..d0ce059 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupRestoreConstants.java @@ -0,0 +1,46 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hbase.backup.impl; + +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; + +/** + * HConstants holds a bunch of HBase Backup and Restore constants + */ +@InterfaceAudience.Private +@InterfaceStability.Stable +public final class BackupRestoreConstants { + + + // delimiter in tablename list in restore command + public static final String TABLENAME_DELIMITER_IN_COMMAND = ","; + + public static final String CONF_STAGING_ROOT = "snapshot.export.staging.root"; + + public static final String BACKUPID_PREFIX = "backup_"; + + public static enum BackupCommand { + CREATE, CANCEL, DELETE, DESCRIBE, HISTORY, STATUS, CONVERT, MERGE, STOP, SHOW, HELP, + } + + private BackupRestoreConstants() { + // Can't be instantiated with this ctor. + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSnapshotCopy.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSnapshotCopy.java new file mode 100644 index 0000000..8b8a83f --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSnapshotCopy.java @@ -0,0 +1,42 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.backup.impl; + +import org.apache.hadoop.hbase.snapshot.ExportSnapshot; + +/* this class will be extended in future jira to support progress report */ +public class BackupSnapshotCopy extends ExportSnapshot { + private BackupHandler backupHandler; + private String table; + + public BackupSnapshotCopy(BackupHandler backupHandler, String table) { + super(); + this.backupHandler = backupHandler; + this.table = table; + } + + public BackupHandler getBackupHandler() { + return this.backupHandler; + } + + public String getTable() { + return this.table; + } + +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupStatus.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupStatus.java new file mode 100644 index 0000000..6e54994 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupStatus.java @@ -0,0 +1,105 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.backup.impl; + +import java.io.Serializable; + +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.backup.HBackupFileSystem; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; +import org.apache.hadoop.hbase.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.protobuf.generated.BackupProtos; + +/** + * Backup status and related information encapsulated for a table. + * At this moment only TargetDir and SnapshotName is encapsulated here. + * future Jira will be implemented for progress, bytesCopies, phase, etc. + */ + +@InterfaceAudience.Private +@InterfaceStability.Evolving +public class BackupStatus implements Serializable { + + private static final long serialVersionUID = -5968397963548535982L; + + // table name for backup + private TableName table; + + // target directory of the backup image for this table + private String targetDir; + + // snapshot name for offline/online snapshot + private String snapshotName = null; + + public BackupStatus() { + + } + + public BackupStatus(TableName table, String targetRootDir, String backupId) { + this.table = table; + this.targetDir = HBackupFileSystem.getTableBackupDir(targetRootDir, backupId, table); + } + + public String getSnapshotName() { + return snapshotName; + } + + public void setSnapshotName(String snapshotName) { + this.snapshotName = snapshotName; + } + + public String getTargetDir() { + return targetDir; + } + + public TableName getTable() { + return table; + } + + public void setTable(TableName table) { + this.table = table; + } + + public void setTargetDir(String targetDir) { + this.targetDir = targetDir; + } + + public static BackupStatus convert(BackupProtos.TableBackupStatus proto) + { + BackupStatus bs = new BackupStatus(); + bs.setTable(ProtobufUtil.toTableName(proto.getTable())); + bs.setTargetDir(proto.getTargetDir()); + if(proto.hasSnapshot()){ + bs.setSnapshotName(proto.getSnapshot()); + } + return bs; + } + + public BackupProtos.TableBackupStatus toProto() { + BackupProtos.TableBackupStatus.Builder builder = + BackupProtos.TableBackupStatus.newBuilder(); + if(snapshotName != null) { + builder.setSnapshot(snapshotName); + } + builder.setTable(ProtobufUtil.toProtoTableName(table)); + builder.setTargetDir(targetDir); + return builder.build(); + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java new file mode 100644 index 0000000..18a0f06 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java @@ -0,0 +1,571 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.backup.impl; + +import java.io.Closeable; +import java.io.IOException; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Map.Entry; +import java.util.Set; +import java.util.TreeSet; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.HColumnDescriptor; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.backup.impl.BackupHandler.BackupState; +import org.apache.hadoop.hbase.backup.impl.BackupUtil.BackupCompleteData; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; +import org.apache.hadoop.hbase.client.Admin; +import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.client.Delete; +import org.apache.hadoop.hbase.client.Get; +import org.apache.hadoop.hbase.client.Put; +import org.apache.hadoop.hbase.client.Result; +import org.apache.hadoop.hbase.client.ResultScanner; +import org.apache.hadoop.hbase.client.Scan; +import org.apache.hadoop.hbase.client.Table; +import org.apache.hadoop.hbase.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.protobuf.generated.BackupProtos; + +/** + * This class provides 'hbase:backup' table API + */ +@InterfaceAudience.Private +@InterfaceStability.Evolving +public final class BackupSystemTable implements Closeable { + + private static final Log LOG = LogFactory.getLog(BackupSystemTable.class); + private final static String TABLE_NAMESPACE = "hbase"; + private final static String TABLE_NAME = "backup"; + private final static TableName tableName = TableName.valueOf(TABLE_NAMESPACE, TABLE_NAME); + final static byte[] familyName = "f".getBytes(); + + // Connection to HBase cluster, shared + // among all instances + private final Connection connection; + // Cluster configuration + private final Configuration conf; + + /** + * Create a BackupSystemTable object for the given Connection. Connection is NOT owned by this + * instance and has to be closed explicitly. 
+ * @param connection + * @throws IOException + */ + public BackupSystemTable(Connection connection) throws IOException { + this.connection = connection; + this.conf = connection.getConfiguration(); + + createSystemTableIfNotExists(); + } + + @Override + public void close() { + } + + /** + * Gets table name + * @return table name + */ + public static TableName getTableName() { + return tableName; + } + + private void createSystemTableIfNotExists() throws IOException { + try(Admin admin = connection.getAdmin()) { + if (admin.tableExists(tableName) == false) { + HTableDescriptor tableDesc = new HTableDescriptor(tableName); + HColumnDescriptor colDesc = new HColumnDescriptor(familyName); + colDesc.setMaxVersions(1); + int ttl = + conf.getInt(HConstants.BACKUP_SYSTEM_TTL_KEY, HConstants.BACKUP_SYSTEM_TTL_DEFAULT); + colDesc.setTimeToLive(ttl); + tableDesc.addFamily(colDesc); + admin.createTable(tableDesc); + } + } catch (IOException e) { + LOG.error(e); + throw e; + } + } + + /** + * Updates status (state) of a backup session in hbase:backup table + * @param context context + * @throws IOException exception + */ + public void updateBackupStatus(BackupContext context) throws IOException { + + if (LOG.isDebugEnabled()) { + LOG.debug("update backup status in hbase:backup for: " + context.getBackupId() + + " set status=" + context.getState()); + } + try (Table table = connection.getTable(tableName)) { + Put put = BackupSystemTableHelper.createPutForBackupContext(context); + table.put(put); + } + } + + /** + * Deletes backup status from hbase:backup table + * @param backupId backup id + * @throws IOException exception + */ + + public void deleteBackupStatus(String backupId) throws IOException { + + if (LOG.isDebugEnabled()) { + LOG.debug("delete backup status in hbase:backup for " + backupId); + } + try (Table table = connection.getTable(tableName)) { + Delete del = BackupSystemTableHelper.createDeletForBackupContext(backupId); + table.delete(del); + } + } + + /** + * Reads backup status object (instance of BackupContext) from hbase:backup table + * @param backupId - backupId + * @return Current status of backup session or null + */ + + public BackupContext readBackupStatus(String backupId) throws IOException { + if (LOG.isDebugEnabled()) { + LOG.debug("read backup status from hbase:backup for: " + backupId); + } + + try (Table table = connection.getTable(tableName)) { + Get get = BackupSystemTableHelper.createGetForBackupContext(backupId); + Result res = table.get(get); + if(res.isEmpty()){ + return null; + } + return BackupSystemTableHelper.resultToBackupContext(res); + } + } + + /** + * Read the last backup start code (timestamp) of last successful backup. Will return null if + * there is no start code stored on hbase or the value is of length 0. These two cases indicate + * there is no successful backup completed so far. + * @return the timestamp of last successful backup + * @throws IOException exception + */ + public String readBackupStartCode() throws IOException { + if (LOG.isDebugEnabled()) { + LOG.debug("read backup start code from hbase:backup"); + } + try (Table table = connection.getTable(tableName)) { + Get get = BackupSystemTableHelper.createGetForStartCode(); + Result res = table.get(get); + if (res.isEmpty()) { + return null; + } + Cell cell = res.listCells().get(0); + byte[] val = CellUtil.cloneValue(cell); + if (val.length == 0){ + return null; + } + return new String(val); + } + } + + /** + * Write the start code (timestamp) to hbase:backup. If passed in null, then write 0 byte. 
+ * @param startCode start code + * @throws IOException exception + */ + public void writeBackupStartCode(Long startCode) throws IOException { + if (LOG.isDebugEnabled()) { + LOG.debug("write backup start code to hbase:backup " + startCode); + } + try (Table table = connection.getTable(tableName)) { + Put put = BackupSystemTableHelper.createPutForStartCode(startCode.toString()); + table.put(put); + } + } + + /** + * Get the Region Servers log information after the last log roll from hbase:backup. + * @return RS log info + * @throws IOException exception + */ + public HashMap readRegionServerLastLogRollResult() + throws IOException { + if (LOG.isDebugEnabled()) { + LOG.debug("read region server last roll log result to hbase:backup"); + } + + Scan scan = BackupSystemTableHelper.createScanForReadRegionServerLastLogRollResult(); + scan.setMaxVersions(1); + + try (Table table = connection.getTable(tableName); + ResultScanner scanner = table.getScanner(scan)) { + Result res = null; + HashMap rsTimestampMap = new HashMap(); + while ((res = scanner.next()) != null) { + res.advance(); + Cell cell = res.current(); + byte[] row = CellUtil.cloneRow(cell); + String server = + BackupSystemTableHelper.getServerNameForReadRegionServerLastLogRollResult(row); + + byte[] data = CellUtil.cloneValue(cell); + rsTimestampMap.put(server, Long.parseLong(new String(data))); + } + return rsTimestampMap; + } + } + + /** + * Writes Region Server last roll log result (timestamp) to hbase:backup table + * @param server - Region Server name + * @param timestamp - last log timestamp + * @throws IOException exception + */ + public void writeRegionServerLastLogRollResult(String server, Long timestamp) + throws IOException { + if (LOG.isDebugEnabled()) { + LOG.debug("write region server last roll log result to hbase:backup"); + } + try (Table table = connection.getTable(tableName)) { + Put put = + BackupSystemTableHelper.createPutForRegionServerLastLogRollResult(server, timestamp); + table.put(put); + } + } + + /** + * Get all completed backup information (in desc order by time) + * @return history info of BackupCompleteData + * @throws IOException exception + */ + public ArrayList getBackupHistory() throws IOException { + if (LOG.isDebugEnabled()) { + LOG.debug("get backup history from hbase:backup"); + } + Scan scan = BackupSystemTableHelper.createScanForBackupHistory(); + scan.setMaxVersions(1); + + ArrayList list = new ArrayList(); + try (Table table = connection.getTable(tableName); + ResultScanner scanner = table.getScanner(scan)) { + + Result res = null; + while ((res = scanner.next()) != null) { + res.advance(); + BackupContext context = BackupSystemTableHelper.cellToBackupContext(res.current()); + if (context.getState() != BackupState.COMPLETE) { + continue; + } + + BackupCompleteData history = new BackupCompleteData(); + history.setBackupToken(context.getBackupId()); + history.setStartTime(Long.toString(context.getStartTs())); + history.setEndTime(Long.toString(context.getEndTs())); + history.setBackupRootPath(context.getTargetRootDir()); + history.setTableList(context.getTableNames()); + history.setType(context.getType().toString()); + history.setBytesCopied(Long.toString(context.getTotalBytesCopied())); + + list.add(history); + } + return BackupUtil.sortHistoryListDesc(list); + } + } + + /** + * Get all backup session with a given status (in desc order by time) + * @param status status + * @return history info of backup contexts + * @throws IOException exception + */ + public ArrayList 
getBackupContexts(BackupState status) throws IOException { + if (LOG.isDebugEnabled()) { + LOG.debug("get backup contexts from hbase:backup"); + } + + Scan scan = BackupSystemTableHelper.createScanForBackupHistory(); + scan.setMaxVersions(1); + ArrayList list = new ArrayList(); + + try (Table table = connection.getTable(tableName); + ResultScanner scanner = table.getScanner(scan)) { + Result res = null; + while ((res = scanner.next()) != null) { + res.advance(); + BackupContext context = BackupSystemTableHelper.cellToBackupContext(res.current()); + if (context.getState() != status){ + continue; + } + list.add(context); + } + return list; + } + } + + /** + * Write the current timestamps for each regionserver to hbase:backup after a successful full or + * incremental backup. The saved timestamp is of the last log file that was backed up already. + * @param tables tables + * @param newTimestamps timestamps + * @throws IOException exception + */ + public void writeRegionServerLogTimestamp(Set tables, + HashMap newTimestamps) throws IOException { + if (LOG.isDebugEnabled()) { + LOG.debug("write RS log ts to HBASE_BACKUP"); + } + List puts = new ArrayList(); + for (TableName table : tables) { + byte[] smapData = toTableServerTimestampProto(table, newTimestamps).toByteArray(); + Put put = BackupSystemTableHelper.createPutForWriteRegionServerLogTimestamp(table, smapData); + puts.add(put); + } + try (Table table = connection.getTable(tableName)) { + table.put(puts); + } + } + + /** + * Read the timestamp for each region server log after the last successful backup. Each table has + * its own set of the timestamps. The info is stored for each table as a concatenated string of + * rs->timestapmp + * @return the timestamp for each region server. key: tableName value: + * RegionServer,PreviousTimeStamp + * @throws IOException exception + */ + public HashMap> readLogTimestampMap() throws IOException { + if (LOG.isDebugEnabled()) { + LOG.debug("read RS log ts from HBASE_BACKUP"); + } + + HashMap> tableTimestampMap = + new HashMap>(); + + Scan scan = BackupSystemTableHelper.createScanForReadLogTimestampMap(); + try (Table table = connection.getTable(tableName); + ResultScanner scanner = table.getScanner(scan)) { + Result res = null; + while ((res = scanner.next()) != null) { + res.advance(); + Cell cell = res.current(); + byte[] row = CellUtil.cloneRow(cell); + String tabName = BackupSystemTableHelper.getTableNameForReadLogTimestampMap(row); + TableName tn = TableName.valueOf(tabName); + HashMap lastBackup = new HashMap(); + byte[] data = CellUtil.cloneValue(cell); + + if (data == null) { + throw new IOException("Data of last backup data from HBASE_BACKUP " + + "is empty. 
Create a backup first."); + } + if (data != null && data.length > 0) { + lastBackup = + fromTableServerTimestampProto(BackupProtos.TableServerTimestamp.parseFrom(data)); + tableTimestampMap.put(tn, lastBackup); + } + } + return tableTimestampMap; + } + } + + private BackupProtos.TableServerTimestamp toTableServerTimestampProto(TableName table, + Map map) { + BackupProtos.TableServerTimestamp.Builder tstBuilder = + BackupProtos.TableServerTimestamp.newBuilder(); + tstBuilder.setTable(ProtobufUtil.toProtoTableName(table)); + + for(Entry entry: map.entrySet()) { + BackupProtos.ServerTimestamp.Builder builder = BackupProtos.ServerTimestamp.newBuilder(); + builder.setServer(entry.getKey()); + builder.setTimestamp(entry.getValue()); + tstBuilder.addServerTimestamp(builder.build()); + } + + return tstBuilder.build(); + } + + private HashMap fromTableServerTimestampProto( + BackupProtos.TableServerTimestamp proto) { + HashMap map = new HashMap (); + List list = proto.getServerTimestampList(); + for(BackupProtos.ServerTimestamp st: list) { + map.put(st.getServer(), st.getTimestamp()); + } + return map; + } + + /** + * Return the current tables covered by incremental backup. + * @return set of tableNames + * @throws IOException exception + */ + public Set getIncrementalBackupTableSet() throws IOException { + if (LOG.isDebugEnabled()) { + LOG.debug("get incr backup table set from hbase:backup"); + } + TreeSet set = new TreeSet<>(); + + try (Table table = connection.getTable(tableName)) { + Get get = BackupSystemTableHelper.createGetForIncrBackupTableSet(); + Result res = table.get(get); + if (res.isEmpty()) { + return set; + } + List cells = res.listCells(); + for (Cell cell : cells) { + // qualifier = table name - we use table names as qualifiers + set.add(TableName.valueOf(CellUtil.cloneQualifier(cell))); + } + return set; + } + } + + /** + * Add tables to global incremental backup set + * @param tables - set of tables + * @throws IOException exception + */ + public void addIncrementalBackupTableSet(Set tables) throws IOException { + if (LOG.isDebugEnabled()) { + LOG.debug("add incr backup table set to hbase:backup"); + } + try (Table table = connection.getTable(tableName)) { + Put put = BackupSystemTableHelper.createPutForIncrBackupTableSet(tables); + table.put(put); + } + } + + /** + * Register WAL files as eligible for deletion + * @param files files + * @throws IOException exception + */ + public void addWALFiles(List files, String backupId) throws IOException { + if (LOG.isDebugEnabled()) { + LOG.debug("add WAL files to hbase:backup"); + } + try (Table table = connection.getTable(tableName)) { + List puts = BackupSystemTableHelper.createPutsForAddWALFiles(files, backupId); + table.put(puts); + } + } + + /** + * Register WAL files as eligible for deletion + * @param files files + * @throws IOException exception + */ + public Iterator getWALFilesIterator() throws IOException { + if (LOG.isDebugEnabled()) { + LOG.debug("get WAL files from hbase:backup"); + } + final Table table = connection.getTable(tableName); + Scan scan = BackupSystemTableHelper.createScanForGetWALs(); + final ResultScanner scanner = table.getScanner(scan); + final Iterator it = scanner.iterator(); + return new Iterator() { + + @Override + public boolean hasNext() { + boolean next = it.hasNext(); + if (!next) { + // close all + try { + scanner.close(); + table.close(); + } catch (Exception e) { + LOG.error(e); + } + } + return next; + } + + @Override + public String next() { + Result next = it.next(); + List cells = 
next.listCells(); + byte[] buf = cells.get(0).getValueArray(); + int len = cells.get(0).getValueLength(); + int offset = cells.get(0).getValueOffset(); + return new String(buf, offset, len); + } + + @Override + public void remove() { + // not implemented + throw new RuntimeException("remove is not supported"); + } + }; + + } + + /** + * Check if WAL file is eligible for deletion + * @param file file + * @return true, if - yes. + * @throws IOException exception + */ + public boolean checkWALFile(String file) throws IOException { + if (LOG.isDebugEnabled()) { + LOG.debug("Check if WAL file has been already backuped in hbase:backup"); + } + try (Table table = connection.getTable(tableName)) { + Get get = BackupSystemTableHelper.createGetForCheckWALFile(file); + Result res = table.get(get); + if (res.isEmpty()){ + return false; + } + return true; + } + } + + /** + * Checks if we have at least one backup session in hbase:backup This API is used by + * BackupLogCleaner + * @return true, if - at least one session exists in hbase:backup table + * @throws IOException exception + */ + public boolean hasBackupSessions() throws IOException { + if (LOG.isDebugEnabled()) { + LOG.debug("has backup sessions from hbase:backup"); + } + boolean result = false; + Scan scan = BackupSystemTableHelper.createScanForBackupHistory(); + scan.setMaxVersions(1); + scan.setCaching(1); + try (Table table = connection.getTable(tableName); + ResultScanner scanner = table.getScanner(scan)) { + if (scanner.next() != null) { + result = true; + } + return result; + } + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTableHelper.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTableHelper.java new file mode 100644 index 0000000..ac096b7 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTableHelper.java @@ -0,0 +1,325 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.backup.impl; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.Set; + +import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; +import org.apache.hadoop.hbase.client.Delete; +import org.apache.hadoop.hbase.client.Get; +import org.apache.hadoop.hbase.client.Put; +import org.apache.hadoop.hbase.client.Result; +import org.apache.hadoop.hbase.client.Scan; +import org.apache.hadoop.hbase.util.Bytes; + + +/** + * A collection for methods used by BackupSystemTable. 
+ */ + +@InterfaceAudience.Private +@InterfaceStability.Evolving +public final class BackupSystemTableHelper { + + /** + * hbase:backup schema: + * 1. Backup sessions rowkey= "session." + backupId; value = serialized + * BackupContext + * 2. Backup start code rowkey = "startcode"; value = startcode + * 3. Incremental backup set rowkey="incrbackupset"; value=[list of tables] + * 4. Table-RS-timestamp map rowkey="trslm"+ table_name; value = map[RS-> last WAL timestamp] + * 5. RS - WAL ts map rowkey="rslogts."+server; value = last WAL timestamp + * 6. WALs recorded rowkey="wals."+WAL unique file name; value = backuppId and full WAL file name + */ + + private final static String BACKUP_CONTEXT_PREFIX = "session."; + private final static String START_CODE_ROW = "startcode"; + private final static String INCR_BACKUP_SET = "incrbackupset"; + private final static String TABLE_RS_LOG_MAP_PREFIX = "trslm."; + private final static String RS_LOG_TS_PREFIX = "rslogts."; + private final static String WALS_PREFIX = "wals."; + + private final static byte[] col1 = "col1".getBytes(); + private final static byte[] col2 = "col2".getBytes(); + + private final static byte[] EMPTY_VALUE = new byte[] {}; + + private BackupSystemTableHelper() { + throw new AssertionError("Instantiating utility class..."); + } + + /** + * Creates Put operation for a given backup context object + * @param context backup context + * @return put operation + * @throws IOException exception + */ + static Put createPutForBackupContext(BackupContext context) throws IOException { + + Put put = new Put((BACKUP_CONTEXT_PREFIX + context.getBackupId()).getBytes()); + put.addColumn(BackupSystemTable.familyName, col1, context.toByteArray()); + return put; + } + + /** + * Creates Get operation for a given backup id + * @param backupId - backup's ID + * @return get operation + * @throws IOException exception + */ + static Get createGetForBackupContext(String backupId) throws IOException { + Get get = new Get((BACKUP_CONTEXT_PREFIX + backupId).getBytes()); + get.addFamily(BackupSystemTable.familyName); + get.setMaxVersions(1); + return get; + } + + /** + * Creates Delete operation for a given backup id + * @param backupId - backup's ID + * @return delete operation + * @throws IOException exception + */ + public static Delete createDeletForBackupContext(String backupId) { + Delete del = new Delete((BACKUP_CONTEXT_PREFIX + backupId).getBytes()); + del.addFamily(BackupSystemTable.familyName); + return del; + } + + /** + * Converts Result to BackupContext + * @param res - HBase result + * @return backup context instance + * @throws IOException exception + */ + static BackupContext resultToBackupContext(Result res) throws IOException { + res.advance(); + Cell cell = res.current(); + return cellToBackupContext(cell); + } + + /** + * Creates Get operation to retrieve start code from hbase:backup + * @return get operation + * @throws IOException exception + */ + static Get createGetForStartCode() throws IOException { + Get get = new Get(START_CODE_ROW.getBytes()); + get.addFamily(BackupSystemTable.familyName); + get.setMaxVersions(1); + return get; + } + + /** + * Creates Put operation to store start code to hbase:backup + * @return put operation + * @throws IOException exception + */ + static Put createPutForStartCode(String startCode) { + Put put = new Put(START_CODE_ROW.getBytes()); + put.addColumn(BackupSystemTable.familyName, col1, startCode.getBytes()); + return put; + } + + /** + * Creates Get to retrieve incremental backup table set from 
hbase:backup + * @return get operation + * @throws IOException exception + */ + static Get createGetForIncrBackupTableSet() throws IOException { + Get get = new Get(INCR_BACKUP_SET.getBytes()); + get.addFamily(BackupSystemTable.familyName); + get.setMaxVersions(1); + return get; + } + + /** + * Creates Put to store incremental backup table set + * @param tables tables + * @return put operation + */ + static Put createPutForIncrBackupTableSet(Set tables) { + Put put = new Put(INCR_BACKUP_SET.getBytes()); + for (TableName table : tables) { + put.addColumn(BackupSystemTable.familyName, Bytes.toBytes(table.getNameAsString()), + EMPTY_VALUE); + } + return put; + } + + /** + * Creates Scan operation to load backup history + * @return scan operation + */ + static Scan createScanForBackupHistory() { + Scan scan = new Scan(); + byte[] startRow = BACKUP_CONTEXT_PREFIX.getBytes(); + byte[] stopRow = Arrays.copyOf(startRow, startRow.length); + stopRow[stopRow.length - 1] = (byte) (stopRow[stopRow.length - 1] + 1); + scan.setStartRow(startRow); + scan.setStopRow(stopRow); + scan.addFamily(BackupSystemTable.familyName); + + return scan; + } + + /** + * Converts cell to backup context instance. + * @param current - cell + * @return backup context instance + * @throws IOException exception + */ + static BackupContext cellToBackupContext(Cell current) throws IOException { + byte[] data = CellUtil.cloneValue(current); + return BackupContext.fromByteArray(data); + } + + /** + * Creates Put to write RS last roll log timestamp map + * @param table - table + * @param smap - map, containing RS:ts + * @return put operation + */ + static Put createPutForWriteRegionServerLogTimestamp(TableName table, byte[] smap) { + Put put = new Put((TABLE_RS_LOG_MAP_PREFIX + table).getBytes()); + put.addColumn(BackupSystemTable.familyName, col1, smap); + return put; + } + + /** + * Creates Scan to load table-> { RS -> ts} map of maps + * @return scan operation + */ + static Scan createScanForReadLogTimestampMap() { + Scan scan = new Scan(); + byte[] startRow = TABLE_RS_LOG_MAP_PREFIX.getBytes(); + byte[] stopRow = Arrays.copyOf(startRow, startRow.length); + stopRow[stopRow.length - 1] = (byte) (stopRow[stopRow.length - 1] + 1); + scan.setStartRow(startRow); + scan.setStopRow(stopRow); + scan.addFamily(BackupSystemTable.familyName); + + return scan; + } + + /** + * Get table name from rowkey + * @param cloneRow rowkey + * @return table name + */ + static String getTableNameForReadLogTimestampMap(byte[] cloneRow) { + int prefixSize = TABLE_RS_LOG_MAP_PREFIX.length(); + return new String(cloneRow, prefixSize, cloneRow.length - prefixSize); + } + + /** + * Creates Put to store RS last log result + * @param server - server name + * @param timestamp - log roll result (timestamp) + * @return put operation + */ + static Put createPutForRegionServerLastLogRollResult(String server, Long timestamp) { + Put put = new Put((RS_LOG_TS_PREFIX + server).getBytes()); + put.addColumn(BackupSystemTable.familyName, col1, timestamp.toString().getBytes()); + return put; + } + + /** + * Creates Scan operation to load last RS log roll results + * @return scan operation + */ + static Scan createScanForReadRegionServerLastLogRollResult() { + Scan scan = new Scan(); + byte[] startRow = RS_LOG_TS_PREFIX.getBytes(); + byte[] stopRow = Arrays.copyOf(startRow, startRow.length); + stopRow[stopRow.length - 1] = (byte) (stopRow[stopRow.length - 1] + 1); + scan.setStartRow(startRow); + scan.setStopRow(stopRow); + scan.addFamily(BackupSystemTable.familyName); + + 
return scan; + } + + /** + * Get server's name from rowkey + * @param row - rowkey + * @return server's name + */ + static String getServerNameForReadRegionServerLastLogRollResult(byte[] row) { + int prefixSize = RS_LOG_TS_PREFIX.length(); + return new String(row, prefixSize, row.length - prefixSize); + } + + /** + * Creates put list for list of WAL files + * @param files list of WAL file paths + * @param backupId backup id + * @return put list + * @throws IOException exception + */ + public static List createPutsForAddWALFiles(List files, String backupId) + throws IOException { + + List puts = new ArrayList(); + for (String file : files) { + byte[] row = (WALS_PREFIX + BackupUtil.getUniqueWALFileNamePart(file)).getBytes(); + Put put = new Put(row); + put.addColumn(BackupSystemTable.familyName, col1, backupId.getBytes()); + put.addColumn(BackupSystemTable.familyName, col2, file.getBytes()); + puts.add(put); + } + return puts; + } + + /** + * Creates Scan operation to load WALs + * @return scan operation + */ + public static Scan createScanForGetWALs() { + Scan scan = new Scan(); + byte[] startRow = WALS_PREFIX.getBytes(); + byte[] stopRow = Arrays.copyOf(startRow, startRow.length); + stopRow[stopRow.length - 1] = (byte) (stopRow[stopRow.length - 1] + 1); + scan.setStartRow(startRow); + scan.setStopRow(stopRow); + scan.addColumn(BackupSystemTable.familyName, col2); + return scan; + } + /** + * Creates Get operation for a given wal file name + * @param file file + * @return get operation + * @throws IOException exception + */ + public static Get createGetForCheckWALFile(String file) throws IOException { + byte[] row = (WALS_PREFIX + BackupUtil.getUniqueWALFileNamePart(file)).getBytes(); + Get get = new Get(row); + get.addFamily(BackupSystemTable.familyName); + get.setMaxVersions(1); + return get; + } + +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupUtil.java new file mode 100644 index 0000000..9981f78 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupUtil.java @@ -0,0 +1,453 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hbase.backup.impl; + +import java.io.FileNotFoundException; +import java.io.IOException; +import java.net.URLDecoder; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.Iterator; +import java.util.List; +import java.util.Map.Entry; +import java.util.TreeMap; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FSDataOutputStream; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.LocatedFileStatus; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.PathFilter; +import org.apache.hadoop.fs.RemoteIterator; +import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.TableDescriptor; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.backup.HBackupFileSystem; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; +import org.apache.hadoop.hbase.backup.BackupUtility; +import org.apache.hadoop.hbase.client.Admin; +import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.client.ConnectionFactory; +import org.apache.hadoop.hbase.client.HBaseAdmin; +import org.apache.hadoop.hbase.regionserver.HRegion; +import org.apache.hadoop.hbase.util.FSTableDescriptors; +import org.apache.hadoop.hbase.util.FSUtils; +import org.apache.hadoop.hbase.wal.DefaultWALProvider; + +/** + * A collection for methods used by multiple classes to backup HBase tables. + */ +@InterfaceAudience.Private +@InterfaceStability.Evolving +public final class BackupUtil { + protected static final Log LOG = LogFactory.getLog(BackupUtil.class); + public static final String LOGNAME_SEPARATOR = "."; + + private BackupUtil(){ + throw new AssertionError("Instantiating utility class..."); + } + + /** + * Loop through the RS log timestamp map for the tables, for each RS, find the min timestamp + * value for the RS among the tables. 
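// Worked example (timestamps invented for illustration): given
//   table1 -> { "rs1:16020" -> 100, "rs2:16020" -> 300 }
//   table2 -> { "rs1:16020" -> 200, "rs2:16020" -> 250 }
// the result is { "rs1:16020" -> 100, "rs2:16020" -> 250 }: for each region server the minimum
// timestamp across all tables, i.e. the oldest WAL position that any table still needs.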
+ * @param rsLogTimestampMap timestamp map + * @return the min timestamp of each RS + */ + protected static HashMap getRSLogTimestampMins( + HashMap> rsLogTimestampMap) { + + if (rsLogTimestampMap == null || rsLogTimestampMap.isEmpty()) { + return null; + } + + HashMap rsLogTimestamptMins = new HashMap(); + HashMap> rsLogTimestampMapByRS = + new HashMap>(); + + for (Entry> tableEntry : rsLogTimestampMap.entrySet()) { + TableName table = tableEntry.getKey(); + HashMap rsLogTimestamp = tableEntry.getValue(); + for (Entry rsEntry : rsLogTimestamp.entrySet()) { + String rs = rsEntry.getKey(); + Long ts = rsEntry.getValue(); + if (!rsLogTimestampMapByRS.containsKey(rs)) { + rsLogTimestampMapByRS.put(rs, new HashMap()); + rsLogTimestampMapByRS.get(rs).put(table, ts); + } else { + rsLogTimestampMapByRS.get(rs).put(table, ts); + } + } + } + + for (String rs : rsLogTimestampMapByRS.keySet()) { + rsLogTimestamptMins.put(rs, BackupUtility.getMinValue(rsLogTimestampMapByRS.get(rs))); + } + + return rsLogTimestamptMins; + } + + /** + * copy out Table RegionInfo into incremental backup image need to consider move this logic into + * HBackupFileSystem + * @param backupContext backup context + * @param conf configuration + * @throws IOException exception + * @throws InterruptedException exception + */ + protected static void copyTableRegionInfo(BackupContext backupContext, Configuration conf) + throws IOException, InterruptedException { + + Path rootDir = FSUtils.getRootDir(conf); + FileSystem fs = rootDir.getFileSystem(conf); + + // for each table in the table set, copy out the table info and region info files in the correct + // directory structure + for (TableName table : backupContext.getTables()) { + + LOG.debug("Attempting to copy table info for:" + table); + TableDescriptor orig = FSTableDescriptors.getTableDescriptorFromFs(fs, rootDir, table); + + // write a copy of descriptor to the target directory + Path target = new Path(backupContext.getBackupStatus(table).getTargetDir()); + FileSystem targetFs = target.getFileSystem(conf); + FSTableDescriptors descriptors = + new FSTableDescriptors(conf, targetFs, FSUtils.getRootDir(conf)); + descriptors.createTableDescriptorForTableDirectory(target, orig, false); + LOG.debug("Finished copying tableinfo."); + + HBaseAdmin hbadmin = null; + // TODO: optimize + List regions = null; + try(Connection conn = ConnectionFactory.createConnection(conf); + Admin admin = conn.getAdmin()) { + regions = admin.getTableRegions(table); + } catch (Exception e) { + throw new BackupException(e); + } + + // For each region, write the region info to disk + LOG.debug("Starting to write region info for table " + table); + for (HRegionInfo regionInfo : regions) { + Path regionDir = + HRegion.getRegionDir(new Path(backupContext.getBackupStatus(table).getTargetDir()), + regionInfo); + regionDir = + new Path(backupContext.getBackupStatus(table).getTargetDir(), regionDir.getName()); + writeRegioninfoOnFilesystem(conf, targetFs, regionDir, regionInfo); + } + LOG.debug("Finished writing region info for table " + table); + } + } + + /** + * Write the .regioninfo file on-disk. 
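// Illustrative note on the resulting layout: after copyTableRegionInfo() runs, the per-table
// target directory holds a copy of the table descriptor plus one <region-dir>/.regioninfo file
// per region (written by the method below), so a later restore can recreate the table with its
// original schema and region boundaries.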
+ */ + public static void writeRegioninfoOnFilesystem(final Configuration conf, final FileSystem fs, + final Path regionInfoDir, HRegionInfo regionInfo) throws IOException { + final byte[] content = regionInfo.toDelimitedByteArray(); + Path regionInfoFile = new Path(regionInfoDir, ".regioninfo"); + // First check to get the permissions + FsPermission perms = FSUtils.getFilePermissions(fs, conf, HConstants.DATA_FILE_UMASK_KEY); + // Write the RegionInfo file content + FSDataOutputStream out = FSUtils.create(conf, fs, regionInfoFile, perms, null); + try { + out.write(content); + } finally { + out.close(); + } + } + + /** + * TODO: return hostname:port + * @param p + * @return host name: port + * @throws IOException + */ + public static String parseHostNameFromLogFile(Path p) throws IOException { + if (isArchivedLogFile(p)) { + return BackupUtility.parseHostFromOldLog(p); + } else { + ServerName sname = DefaultWALProvider.getServerNameFromWALDirectoryName(p); + return sname.getHostname() + ":" + sname.getPort(); + } + } + + private static boolean isArchivedLogFile(Path p) { + String oldLog = Path.SEPARATOR + HConstants.HREGION_OLDLOGDIR_NAME + Path.SEPARATOR; + return p.toString().contains(oldLog); + } + + /** + * Returns WAL file name + * @param walFileName WAL file name + * @return WAL file name + * @throws IOException exception + * @throws IllegalArgumentException exception + */ + public static String getUniqueWALFileNamePart(String walFileName) throws IOException { + return getUniqueWALFileNamePart(new Path(walFileName)); + } + + /** + * Returns WAL file name + * @param p - WAL file path + * @return WAL file name + * @throws IOException exception + */ + public static String getUniqueWALFileNamePart(Path p) throws IOException { + return p.getName(); + } + + /** + * Get the total length of files under the given directory recursively. 
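// Usage sketch for the WAL-name helpers above (the path is hypothetical, for illustration only):
//   Path wal = new Path("hdfs://nn/hbase/WALs/rs1.example.com,16020,1449000000000/"
//       + "rs1.example.com%2C16020%2C1449000000000.1449000123456");
//   BackupUtil.parseHostNameFromLogFile(wal);   // expected "rs1.example.com:16020"
//   BackupUtil.getUniqueWALFileNamePart(wal);   // the file name, used in the "wals." row key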
+ * @param fs The hadoop file system + * @param dir The target directory + * @return the total length of files + * @throws IOException exception + */ + public static long getFilesLength(FileSystem fs, Path dir) throws IOException { + long totalLength = 0; + FileStatus[] files = FSUtils.listStatus(fs, dir); + if (files != null) { + for (FileStatus fileStatus : files) { + if (fileStatus.isDirectory()) { + totalLength += getFilesLength(fs, fileStatus.getPath()); + } else { + totalLength += fileStatus.getLen(); + } + } + } + return totalLength; + } + + /** + * Keep the record for dependency for incremental backup and history info p.s, we may be able to + * merge this class into backupImage class later + */ + public static class BackupCompleteData implements Comparable { + private String startTime; + private String endTime; + private String type; + private String backupRootPath; + private List tableList; + private String backupToken; + private String bytesCopied; + private List ancestors; + + public List getAncestors() { + if (this.ancestors == null) { + this.ancestors = new ArrayList(); + } + return this.ancestors; + } + + public void addAncestor(String backupToken) { + this.getAncestors().add(backupToken); + } + + public String getBytesCopied() { + return bytesCopied; + } + + public void setBytesCopied(String bytesCopied) { + this.bytesCopied = bytesCopied; + } + + public String getBackupToken() { + return backupToken; + } + + public void setBackupToken(String backupToken) { + this.backupToken = backupToken; + } + + public String getStartTime() { + return startTime; + } + + public void setStartTime(String startTime) { + this.startTime = startTime; + } + + public String getEndTime() { + return endTime; + } + + public void setEndTime(String endTime) { + this.endTime = endTime; + } + + public String getType() { + return type; + } + + public void setType(String type) { + this.type = type; + } + + public String getBackupRootPath() { + return backupRootPath; + } + + public void setBackupRootPath(String backupRootPath) { + this.backupRootPath = backupRootPath; + } + + public List getTableList() { + return tableList; + } + + public void setTableList(List tableList) { + this.tableList = tableList; + } + + @Override + public int compareTo(BackupCompleteData o) { + Long thisTS = + new Long(this.getBackupToken().substring(this.getBackupToken().lastIndexOf("_") + 1)); + Long otherTS = + new Long(o.getBackupToken().substring(o.getBackupToken().lastIndexOf("_") + 1)); + return thisTS.compareTo(otherTS); + } + + } + + /** + * Sort history list by start time in descending order. 
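// Illustrative note: compareTo() above orders sessions by the numeric suffix of the backup
// token (e.g. a hypothetical "backup_1449000000000" sorts before "backup_1449000999999"),
// while sortHistoryListDesc() below keys a TreeMap on the start-time string and walks
// descendingKeySet(), so the most recent session is returned first.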
+ * @param historyList history list + * @return sorted list of BackupCompleteData + */ + public static ArrayList sortHistoryListDesc( + ArrayList historyList) { + ArrayList list = new ArrayList(); + TreeMap map = new TreeMap(); + for (BackupCompleteData h : historyList) { + map.put(h.getStartTime(), h); + } + Iterator i = map.descendingKeySet().iterator(); + while (i.hasNext()) { + list.add(map.get(i.next())); + } + return list; + } + + /** + * Get list of all WAL files (WALs and archive) + * @param c - configuration + * @return list of WAL files + * @throws IOException exception + */ + public static List getListOfWALFiles(Configuration c) throws IOException { + Path rootDir = FSUtils.getRootDir(c); + Path logDir = new Path(rootDir, HConstants.HREGION_LOGDIR_NAME); + Path oldLogDir = new Path(rootDir, HConstants.HREGION_OLDLOGDIR_NAME); + List logFiles = new ArrayList(); + + FileSystem fs = FileSystem.get(c); + logFiles = BackupUtility.getFiles(fs, logDir, logFiles, null); + logFiles = BackupUtility.getFiles(fs, oldLogDir, logFiles, null); + return logFiles; + } + + /** + * Get list of all WAL files (WALs and archive) + * @param c - configuration + * @return list of WAL files + * @throws IOException exception + */ + public static List getListOfWALFiles(Configuration c, PathFilter filter) + throws IOException { + Path rootDir = FSUtils.getRootDir(c); + Path logDir = new Path(rootDir, HConstants.HREGION_LOGDIR_NAME); + Path oldLogDir = new Path(rootDir, HConstants.HREGION_OLDLOGDIR_NAME); + List logFiles = new ArrayList(); + + FileSystem fs = FileSystem.get(c); + logFiles = BackupUtility.getFiles(fs, logDir, logFiles, filter); + logFiles = BackupUtility.getFiles(fs, oldLogDir, logFiles, filter); + return logFiles; + } + + /** + * Get list of all old WAL files (WALs and archive) + * @param c - configuration + * @return list of WAL files + * @throws IOException exception + */ + public static List getWALFilesOlderThan(final Configuration c, + final HashMap hostTimestampMap) throws IOException { + Path rootDir = FSUtils.getRootDir(c); + Path logDir = new Path(rootDir, HConstants.HREGION_LOGDIR_NAME); + Path oldLogDir = new Path(rootDir, HConstants.HREGION_OLDLOGDIR_NAME); + List logFiles = new ArrayList(); + + PathFilter filter = new PathFilter() { + + @Override + public boolean accept(Path p) { + try { + if (DefaultWALProvider.isMetaFile(p)) { + return false; + } + String host = parseHostNameFromLogFile(p); + Long oldTimestamp = hostTimestampMap.get(host); + Long currentLogTS = BackupUtility.getCreationTime(p); + return currentLogTS <= oldTimestamp; + } catch (IOException e) { + LOG.error(e); + return false; + } + } + }; + FileSystem fs = FileSystem.get(c); + logFiles = BackupUtility.getFiles(fs, logDir, logFiles, filter); + logFiles = BackupUtility.getFiles(fs, oldLogDir, logFiles, filter); + return logFiles; + } + + public static String join(TableName[] names) { + StringBuilder sb = new StringBuilder(); + String sep = BackupRestoreConstants.TABLENAME_DELIMITER_IN_COMMAND; + for (TableName s : names) { + sb.append(sep).append(s.getNameAsString()); + } + return sb.toString(); + } + + public static TableName[] parseTableNames(String tables) { + if (tables == null) { + return null; + } + String[] tableArray = tables.split(BackupRestoreConstants.TABLENAME_DELIMITER_IN_COMMAND); + + TableName[] ret = new TableName[tableArray.length]; + for (int i = 0; i < tableArray.length; i++) { + ret[i] = TableName.valueOf(tableArray[i]); + } + return ret; + } +} diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalBackupManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalBackupManager.java new file mode 100644 index 0000000..a3d124f --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalBackupManager.java @@ -0,0 +1,304 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.backup.impl; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.Iterator; +import java.util.List; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.PathFilter; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.backup.BackupUtility; +import org.apache.hadoop.hbase.backup.master.LogRollMasterProcedureManager; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; +import org.apache.hadoop.hbase.client.Admin; +import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.util.FSUtils; +import org.apache.hadoop.hbase.wal.DefaultWALProvider; + +/** + * After a full backup was created, the incremental backup will only store the changes made + * after the last full or incremental backup. + * + * Creating the backup copies the logfiles in .logs and .oldlogs since the last backup timestamp. + */ +@InterfaceAudience.Public +@InterfaceStability.Evolving +public class IncrementalBackupManager { + public static final Log LOG = LogFactory.getLog(IncrementalBackupManager.class); + + // parent manager + private final BackupManager backupManager; + private final Configuration conf; + private final Connection conn; + + public IncrementalBackupManager(BackupManager bm) { + this.backupManager = bm; + this.conf = bm.getConf(); + this.conn = bm.getConnection(); + } + + /** + * Obtain the list of logs that need to be copied out for this incremental backup. The list is set + * in BackupContext. + * @param backupContext backup context + * @return The new HashMap of RS log timestamps after the log roll for this incremental backup. 
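// Summary of the sequence implemented below:
//   1. read the saved start code and the per-table {RS -> timestamp} map from hbase:backup;
//   2. fail if nothing is recorded yet - an incremental backup requires a prior full backup;
//   3. run the ROLLLOG master procedure so every region server rolls to a fresh WAL;
//   4. read back the per-RS roll timestamps and collect the WAL files that fall between the
//      old and new timestamps, from both the filesystem and the hbase:backup WAL records.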
+ * @throws IOException exception + */ + public HashMap getIncrBackupLogFileList(BackupContext backupContext) + throws IOException { + List logList; + HashMap newTimestamps; + HashMap previousTimestampMins; + + String savedStartCode = backupManager.readBackupStartCode(); + + // key: tableName + // value: + HashMap> previousTimestampMap = + backupManager.readLogTimestampMap(); + + previousTimestampMins = BackupUtil.getRSLogTimestampMins(previousTimestampMap); + + if (LOG.isDebugEnabled()) { + LOG.debug("StartCode " + savedStartCode + "for backupID " + backupContext.getBackupId()); + } + // get all new log files from .logs and .oldlogs after last TS and before new timestamp + if (savedStartCode == null || + previousTimestampMins == null || + previousTimestampMins.isEmpty()) { + throw new IOException("Cannot read any previous back up timestamps from hbase:backup. " + + "In order to create an incremental backup, at least one full backup is needed."); + } + + try (Admin admin = conn.getAdmin()) { + LOG.info("Execute roll log procedure for incremental backup ..."); + admin.execProcedure(LogRollMasterProcedureManager.ROLLLOG_PROCEDURE_SIGNATURE, + LogRollMasterProcedureManager.ROLLLOG_PROCEDURE_NAME, new HashMap()); + } + + newTimestamps = backupManager.readRegionServerLastLogRollResult(); + + logList = getLogFilesForNewBackup(previousTimestampMins, newTimestamps, conf, savedStartCode); + logList.addAll(getLogFilesFromBackupSystem(previousTimestampMins, newTimestamps)); + backupContext.setIncrBackupFileList(logList); + + return newTimestamps; + } + + /** + * For each region server: get all log files newer than the last timestamps but not newer than the + * newest timestamps. FROM hbase:backup table + * @param olderTimestamps - the timestamp for each region server of the last backup. + * @param newestTimestamps - the timestamp for each region server that the backup should lead to. + * @return list of log files which needs to be added to this backup + * @throws IOException + */ + private List getLogFilesFromBackupSystem(HashMap olderTimestamps, + HashMap newestTimestamps) throws IOException { + List logFiles = new ArrayList(); + Iterator it = backupManager.getWALFilesFromBackupSystem(); + + while (it.hasNext()) { + String walFileName = it.next(); + String server = BackupUtil.parseHostNameFromLogFile(new Path(walFileName)); + //String server = getServer(walFileName); + Long tss = getTimestamp(walFileName); + Long oldTss = olderTimestamps.get(server); + if (oldTss == null){ + logFiles.add(walFileName); + continue; + } + Long newTss = newestTimestamps.get(server); + if (newTss == null) { + newTss = Long.MAX_VALUE; + } + + if (tss > oldTss && tss < newTss) { + logFiles.add(walFileName); + } + } + return logFiles; + } + + private Long getTimestamp(String walFileName) { + int index = walFileName.lastIndexOf(BackupUtil.LOGNAME_SEPARATOR); + return Long.parseLong(walFileName.substring(index+1)); + } + + /** + * For each region server: get all log files newer than the last timestamps but not newer than the + * newest timestamps. + * @param olderTimestamps the timestamp for each region server of the last backup. + * @param newestTimestamps the timestamp for each region server that the backup should lead to. + * @param conf the Hadoop and Hbase configuration + * @param savedStartCode the startcode (timestamp) of last successful backup. 
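// Worked example for getLogFilesFromBackupSystem() above (file name hypothetical): for
// "rs1.example.com%2C16020%2C1449000000000.1449000555555" the text after the last '.' is
// parsed as the WAL creation timestamp 1449000555555; the file is included when
// oldTss < 1449000555555 < newTss for its region server, or unconditionally when the server
// has no previous timestamp at all (it joined the cluster after the last backup).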
+ * @return a list of log files to be backed up + * @throws IOException exception + */ + private List getLogFilesForNewBackup(HashMap olderTimestamps, + HashMap newestTimestamps, Configuration conf, String savedStartCode) + throws IOException { + LOG.debug("In getLogFilesForNewBackup()\n" + "olderTimestamps: " + olderTimestamps + + "\n newestTimestamps: " + newestTimestamps); + Path rootdir = FSUtils.getRootDir(conf); + Path logDir = new Path(rootdir, HConstants.HREGION_LOGDIR_NAME); + Path oldLogDir = new Path(rootdir, HConstants.HREGION_OLDLOGDIR_NAME); + FileSystem fs = rootdir.getFileSystem(conf); + NewestLogFilter pathFilter = new NewestLogFilter(); + + List resultLogFiles = new ArrayList(); + List newestLogs = new ArrayList(); + + /* + * The old region servers and timestamps info we kept in hbase:backup may be out of sync if new + * region server is added or existing one lost. We'll deal with it here when processing the + * logs. If data in hbase:backup has more hosts, just ignore it. If the .logs directory includes + * more hosts, the additional hosts will not have old timestamps to compare with. We'll just use + * all the logs in that directory. We always write up-to-date region server and timestamp info + * to hbase:backup at the end of successful backup. + */ + + FileStatus[] rss; + Path p; + String host; + Long oldTimeStamp; + String currentLogFile; + Long currentLogTS; + + // Get the files in .logs. + rss = fs.listStatus(logDir); + for (FileStatus rs : rss) { + p = rs.getPath(); + host = BackupUtil.parseHostNameFromLogFile(p); + FileStatus[] logs; + oldTimeStamp = olderTimestamps.get(host); + // It is possible that there is no old timestamp in hbase:backup for this host if + // this region server is newly added after our last backup. + if (oldTimeStamp == null) { + logs = fs.listStatus(p); + } else { + pathFilter.setLastBackupTS(oldTimeStamp); + logs = fs.listStatus(p, pathFilter); + } + for (FileStatus log : logs) { + LOG.debug("currentLogFile: " + log.getPath().toString()); + if (DefaultWALProvider.isMetaFile(log.getPath())) { + if(LOG.isDebugEnabled()) { + LOG.debug("Skip hbase:meta log file: " + log.getPath().getName()); + } + continue; + } + currentLogFile = log.getPath().toString(); + resultLogFiles.add(currentLogFile); + currentLogTS = BackupUtility.getCreationTime(log.getPath()); + // newestTimestamps is up-to-date with the current list of hosts + // so newestTimestamps.get(host) will not be null. + if (Long.valueOf(currentLogTS) > Long.valueOf(newestTimestamps.get(host))) { + newestLogs.add(currentLogFile); + } + } + } + + // Include the .oldlogs files too. + FileStatus[] oldlogs = fs.listStatus(oldLogDir); + for (FileStatus oldlog : oldlogs) { + p = oldlog.getPath(); + currentLogFile = p.toString(); + if (DefaultWALProvider.isMetaFile(p)) { + if(LOG.isDebugEnabled()) { + LOG.debug("Skip .meta log file: " + currentLogFile); + } + continue; + } + host = BackupUtility.parseHostFromOldLog(p); + currentLogTS = BackupUtility.getCreationTime(p); + oldTimeStamp = olderTimestamps.get(host); + /* + * It is possible that there is no old timestamp in hbase:backup for this host. At the time of + * our last backup operation, this rs did not exist. The reason can be one of the two: 1. The + * rs already left/crashed. Its logs were moved to .oldlogs. 2. The rs was added after our + * last backup. + */ + if (oldTimeStamp == null) { + if (Long.valueOf(currentLogTS) < Long.valueOf(savedStartCode)) { + // This log file is really old, its region server was before our last backup. 
+ continue; + } else { + resultLogFiles.add(currentLogFile); + } + } else if (Long.valueOf(currentLogTS) > Long.valueOf(oldTimeStamp)) { + resultLogFiles.add(currentLogFile); + } + + // It is possible that a host in .oldlogs is an obsolete region server + // so newestTimestamps.get(host) here can be null. + // Even if these logs belong to a obsolete region server, we still need + // to include they to avoid loss of edits for backup. + Long newTimestamp = newestTimestamps.get(host); + if (newTimestamp != null && Long.valueOf(currentLogTS) > Long.valueOf(newTimestamp)) { + newestLogs.add(currentLogFile); + } + } + // remove newest log per host because they are still in use + resultLogFiles.removeAll(newestLogs); + return resultLogFiles; + } + + class NewestLogFilter implements PathFilter { + private Long lastBackupTS = 0L; + + public NewestLogFilter() { + } + + protected void setLastBackupTS(Long ts) { + this.lastBackupTS = ts; + } + + @Override + public boolean accept(Path path) { + // skip meta table log -- ts.meta file + if (DefaultWALProvider.isMetaFile(path)) { + if(LOG.isDebugEnabled()) { + LOG.debug("Skip .meta log file: " + path.getName()); + } + return false; + } + Long timestamp = null; + try { + timestamp = BackupUtility.getCreationTime(path); + return timestamp > Long.valueOf(lastBackupTS); + } catch (IOException e) { + LOG.warn("Cannot read timestamp of log file " + path); + return false; + } + } + } + +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalRestoreService.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalRestoreService.java new file mode 100644 index 0000000..12ecbe9 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalRestoreService.java @@ -0,0 +1,34 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hbase.backup.impl; + +import java.io.IOException; + +import org.apache.hadoop.conf.Configurable; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; + +@InterfaceAudience.Private +@InterfaceStability.Evolving +public interface IncrementalRestoreService extends Configurable{ + + public void run(String logDirectory, TableName[] fromTables, TableName[] toTables) + throws IOException; +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/RestoreClientImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/RestoreClientImpl.java new file mode 100644 index 0000000..ef1a6d2 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/RestoreClientImpl.java @@ -0,0 +1,312 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.backup.impl; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashMap; +import java.util.List; +import java.util.Map.Entry; +import java.util.Set; +import java.util.TreeSet; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.backup.BackupType; +import org.apache.hadoop.hbase.backup.BackupUtility; +import org.apache.hadoop.hbase.backup.HBackupFileSystem; +import org.apache.hadoop.hbase.backup.RestoreClient; +import org.apache.hadoop.hbase.backup.impl.BackupManifest.BackupImage; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; +import org.apache.hadoop.hbase.client.Admin; +import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.client.ConnectionFactory; + +/** + * The main class which interprets the given arguments and trigger restore operation. + */ +@InterfaceAudience.Public +@InterfaceStability.Evolving +public final class RestoreClientImpl implements RestoreClient { + + private static final Log LOG = LogFactory.getLog(RestoreClientImpl.class); + private Configuration conf; + private Set lastRestoreImagesSet; + + public RestoreClientImpl() { + } + + @Override + public void setConf(Configuration conf) { + this.conf = conf; + } + + /** + * Restore operation. 
Stage 1: validate backupManifest, and check target tables + * @param hBackupFS to access the backup image + * @param backupRootDir The root dir for backup image + * @param backupId The backup id for image to be restored + * @param check True if only do dependency check + * @param autoRestore True if automatically restore following the dependency + * @param sTableArray The array of tables to be restored + * @param tTableArray The array of mapping tables to restore to + * @param isOverwrite True then do restore overwrite if target table exists, otherwise fail the + * request if target table exists + * @return True if only do dependency check + * @throws IOException if any failure during restore + */ + @Override + public boolean restore(HBackupFileSystem hBackupFS, String backupRootDir, + String backupId, boolean check, boolean autoRestore, TableName[] sTableArray, + TableName[] tTableArray, boolean isOverwrite) throws IOException { + + HashMap backupManifestMap = new HashMap<>(); + // check and load backup image manifest for the tables + hBackupFS.checkImageManifestExist(backupManifestMap, sTableArray); + + try { + // Check and validate the backup image and its dependencies + if (check || autoRestore) { + if (validate(backupManifestMap)) { + LOG.info("Checking backup images: ok"); + } else { + String errMsg = "Some dependencies are missing for restore"; + LOG.error(errMsg); + throw new IOException(errMsg); + } + } + + // return true if only for check + if (check) { + return true; + } + + if (tTableArray == null) { + tTableArray = sTableArray; + } + + // check the target tables + checkTargetTables(tTableArray, isOverwrite); + + // start restore process + Set restoreImageSet = + restoreStage(hBackupFS, backupManifestMap, sTableArray, tTableArray, autoRestore); + + LOG.info("Restore for " + Arrays.asList(sTableArray) + " are successful!"); + lastRestoreImagesSet = restoreImageSet; + + } catch (IOException e) { + LOG.error("ERROR: restore failed with error: " + e.getMessage()); + throw e; + } + + // not only for check, return false + return false; + } + + /** + * Get last restore image set. The value is globally set for the latest finished restore. + * @return the last restore image set + */ + public Set getLastRestoreImagesSet() { + return lastRestoreImagesSet; + } + + private boolean validate(HashMap backupManifestMap) + throws IOException { + boolean isValid = true; + + for (Entry manifestEntry : backupManifestMap.entrySet()) { + TableName table = manifestEntry.getKey(); + TreeSet imageSet = new TreeSet(); + + ArrayList depList = manifestEntry.getValue().getDependentListByTable(table); + if (depList != null && !depList.isEmpty()) { + imageSet.addAll(depList); + } + + // todo merge + LOG.debug("merge will be implemented in future jira"); + // BackupUtil.clearMergedImages(table, imageSet, conf); + + LOG.info("Dependent image(s) from old to new:"); + for (BackupImage image : imageSet) { + String imageDir = + HBackupFileSystem.getTableBackupDir(image.getRootDir(), image.getBackupId(), table); + if (!BackupUtility.checkPathExist(imageDir, conf)) { + LOG.error("ERROR: backup image does not exist: " + imageDir); + isValid = false; + break; + } + // TODO More validation? 
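// Illustrative note: for an incremental image the dependency list walked here is, conceptually,
// fullImage <- incr1 <- incr2 <- ...; every ancestor's backup directory must still exist under
// its root, otherwise validate() fails and the restore is rejected before any target table is
// touched.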
+ LOG.info("Backup image: " + image.getBackupId() + " for '" + table + "' is available"); + } + } + return isValid; + } + + /** + * Validate target Tables + * @param tTableArray: target tables + * @param isOverwrite overwrite existing table + * @throws IOException exception + */ + private void checkTargetTables(TableName[] tTableArray, boolean isOverwrite) + throws IOException { + ArrayList existTableList = new ArrayList<>(); + ArrayList disabledTableList = new ArrayList<>(); + + // check if the tables already exist + try(Connection conn = ConnectionFactory.createConnection(conf); + Admin admin = conn.getAdmin()) { + for (TableName tableName : tTableArray) { + if (admin.tableExists(tableName)) { + existTableList.add(tableName); + if (admin.isTableDisabled(tableName)) { + disabledTableList.add(tableName); + } + } else { + LOG.info("HBase table " + tableName + + " does not exist. It will be create during backup process"); + } + } + } + + if (existTableList.size() > 0) { + if (!isOverwrite) { + LOG.error("Existing table found in the restore target, please add \"-overwrite\" " + + "option in the command if you mean to restore to these existing tables"); + LOG.info("Existing table list in restore target: " + existTableList); + throw new IOException("Existing table found in target while no \"-overwrite\" " + + "option found"); + } else { + if (disabledTableList.size() > 0) { + LOG.error("Found offline table in the restore target, " + + "please enable them before restore with \"-overwrite\" option"); + LOG.info("Offline table list in restore target: " + disabledTableList); + throw new IOException( + "Found offline table in the target when restore with \"-overwrite\" option"); + } + } + } + + } + + /** + * Restore operation. Stage 2: resolved Backup Image dependency + * @param hBackupFS to access the backup image + * @param backupManifestMap : tableName, Manifest + * @param sTableArray The array of tables to be restored + * @param tTableArray The array of mapping tables to restore to + * @param autoRestore : yes, restore all the backup images on the dependency list + * @return set of BackupImages restored + * @throws IOException exception + */ + private Set restoreStage(HBackupFileSystem hBackupFS, + HashMap backupManifestMap, TableName[] sTableArray, + TableName[] tTableArray, boolean autoRestore) throws IOException { + TreeSet restoreImageSet = new TreeSet(); + + for (int i = 0; i < sTableArray.length; i++) { + restoreImageSet.clear(); + TableName table = sTableArray[i]; + BackupManifest manifest = backupManifestMap.get(table); + if (autoRestore) { + // Get the image list of this backup for restore in time order from old + // to new. + TreeSet restoreList = + new TreeSet(manifest.getDependentListByTable(table)); + LOG.debug("need to clear merged Image. to be implemented in future jira"); + + for (BackupImage image : restoreList) { + restoreImage(image, table, tTableArray[i]); + } + restoreImageSet.addAll(restoreList); + } else { + BackupImage image = manifest.getBackupImage(); + List depList = manifest.getDependentListByTable(table); + // The dependency list always contains self. 
+ if (depList != null && depList.size() > 1) { + LOG.warn("Backup image " + image.getBackupId() + " depends on other images.\n" + + "this operation will only restore the delta contained within backupImage " + + image.getBackupId()); + } + restoreImage(image, table, tTableArray[i]); + restoreImageSet.add(image); + } + + if (autoRestore) { + if (restoreImageSet != null && !restoreImageSet.isEmpty()) { + LOG.info("Restore includes the following image(s):"); + for (BackupImage image : restoreImageSet) { + LOG.info(" Backup: " + + image.getBackupId() + + " " + + HBackupFileSystem.getTableBackupDir(image.getRootDir(), image.getBackupId(), + table)); + } + } + } + + } + return restoreImageSet; + } + + /** + * Restore operation handle each backupImage + * @param image: backupImage + * @param sTable: table to be restored + * @param tTable: table to be restored to + * @throws IOException exception + */ + private void restoreImage(BackupImage image, TableName sTable, TableName tTable) + throws IOException { + String rootDir = image.getRootDir(); + String backupId = image.getBackupId(); + + HBackupFileSystem hFS = new HBackupFileSystem(conf, new Path(rootDir), backupId); + RestoreUtil restoreTool = new RestoreUtil(conf, hFS); + BackupManifest manifest = hFS.getManifest(sTable); + + Path tableBackupPath = hFS.getTableBackupPath(sTable); + + // todo: convert feature will be provided in a future jira + boolean converted = false; + + if (manifest.getType() == BackupType.FULL || converted) { + LOG.info("Restoring '" + sTable + "' to '" + tTable + "' from " + + (converted ? "converted" : "full") + " backup image " + tableBackupPath.toString()); + restoreTool.fullRestoreTable(tableBackupPath, sTable, tTable, converted); + } else { // incremental Backup + String logBackupDir = + HBackupFileSystem.getLogBackupDir(image.getRootDir(), image.getBackupId()); + LOG.info("Restoring '" + sTable + "' to '" + tTable + "' from incremental backup image " + + logBackupDir); + restoreTool.incrementalRestoreTable(logBackupDir, new TableName[] { sTable }, + new TableName[] { tTable }); + } + + LOG.info(sTable + " has been successfully restored to " + tTable); + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/RestoreUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/RestoreUtil.java new file mode 100644 index 0000000..2dd38c1 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/RestoreUtil.java @@ -0,0 +1,291 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hbase.backup.impl; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.HashMap; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.backup.BackupRestoreFactory; +import org.apache.hadoop.hbase.backup.HBackupFileSystem; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; +import org.apache.hadoop.hbase.client.Admin; +import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.client.ConnectionFactory; +import org.apache.hadoop.hbase.client.HBaseAdmin; +import org.apache.hadoop.hbase.mapreduce.LoadIncrementalHFiles; +import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription; +import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils; +import org.apache.hadoop.hbase.snapshot.SnapshotManifest; + +/** + * A collection for methods used by multiple classes to restore HBase tables. + */ +@InterfaceAudience.Private +@InterfaceStability.Evolving +public class RestoreUtil { + + public static final Log LOG = LogFactory.getLog(RestoreUtil.class); + + protected Configuration conf = null; + + protected HBackupFileSystem hBackupFS = null; + + // store table name and snapshot dir mapping + private final HashMap snapshotMap = new HashMap<>(); + + public RestoreUtil(Configuration conf, HBackupFileSystem hBackupFS) throws IOException { + this.conf = conf; + this.hBackupFS = hBackupFS; + } + + /** + * During incremental backup operation. Call WalPlayer to replay WAL in backup image Currently + * tableNames and newTablesNames only contain single table, will be expanded to multiple tables in + * the future + * @param logDir : incremental backup folders, which contains WAL + * @param tableNames : source tableNames(table names were backuped) + * @param newTableNames : target tableNames(table names to be restored to) + * @throws IOException exception + */ + public void incrementalRestoreTable(String logDir, + TableName[] tableNames, TableName[] newTableNames) throws IOException { + + if (tableNames.length != newTableNames.length) { + throw new IOException("Number of source tables adn taget Tables does not match!"); + } + + // for incremental backup image, expect the table already created either by user or previous + // full backup. Here, check that all new tables exists + try (Connection conn = ConnectionFactory.createConnection(conf); + Admin admin = conn.getAdmin()) { + for (TableName tableName : newTableNames) { + if (!admin.tableExists(tableName)) { + admin.close(); + throw new IOException("HBase table " + tableName + + " does not exist. Create the table first, e.g. 
by restoring a full backup."); + } + } + IncrementalRestoreService restoreService = + BackupRestoreFactory.getIncrementalRestoreService(conf); + + restoreService.run(logDir, tableNames, newTableNames); + } + } + + public void fullRestoreTable(Path tableBackupPath, TableName tableName, TableName newTableName, + boolean converted) throws IOException { + restoreTableAndCreate(tableName, newTableName, tableBackupPath, converted); + } + + private void restoreTableAndCreate(TableName tableName, TableName newTableName, + Path tableBackupPath, boolean converted) throws IOException { + if (newTableName == null || newTableName.equals("")) { + newTableName = tableName; + } + + FileSystem fileSys = tableBackupPath.getFileSystem(this.conf); + + // get table descriptor first + HTableDescriptor tableDescriptor = null; + + Path tableSnapshotPath = hBackupFS.getTableSnapshotPath(tableName); + + if (fileSys.exists(tableSnapshotPath)) { + // snapshot path exist means the backup path is in HDFS + // check whether snapshot dir already recorded for target table + if (snapshotMap.get(tableName) != null) { + SnapshotDescription desc = + SnapshotDescriptionUtils.readSnapshotInfo(fileSys, tableSnapshotPath); + SnapshotManifest manifest = SnapshotManifest.open(conf, fileSys, tableSnapshotPath, desc); + tableDescriptor = manifest.getTableDescriptor(); + + + } else { + tableDescriptor = hBackupFS.getTableDesc(tableName); + snapshotMap.put(tableName, hBackupFS.getTableInfoPath(tableName)); + } + if (tableDescriptor == null) { + LOG.debug("Found no table descriptor in the snapshot dir, previous schema would be lost"); + } + } else if (converted) { + // first check if this is a converted backup image + LOG.error("convert will be supported in a future jira"); + } + + Path tableArchivePath = hBackupFS.getTableArchivePath(tableName); + if (tableArchivePath == null) { + if (tableDescriptor != null) { + // find table descriptor but no archive dir means the table is empty, create table and exit + if(LOG.isDebugEnabled()) { + LOG.debug("find table descriptor but no archive dir for table " + tableName + + ", will only create table"); + } + tableDescriptor.setName(newTableName); + checkAndCreateTable(tableBackupPath, tableName, newTableName, null, tableDescriptor); + return; + } else { + throw new IllegalStateException("Cannot restore hbase table because directory '" + + " tableArchivePath is null."); + } + } + + if (tableDescriptor == null) { + tableDescriptor = new HTableDescriptor(newTableName); + } else { + tableDescriptor.setName(newTableName); + } + + if (!converted) { + // record all region dirs: + // load all files in dir + try { + ArrayList regionPathList = hBackupFS.getRegionList(tableName); + + // should only try to create the table with all region informations, so we could pre-split + // the regions in fine grain + checkAndCreateTable(tableBackupPath, tableName, newTableName, regionPathList, + tableDescriptor); + if (tableArchivePath != null) { + // start real restore through bulkload + // if the backup target is on local cluster, special action needed + Path tempTableArchivePath = hBackupFS.checkLocalAndBackup(tableArchivePath); + if (tempTableArchivePath.equals(tableArchivePath)) { + if(LOG.isDebugEnabled()) { + LOG.debug("TableArchivePath for bulkload using existPath: " + tableArchivePath); + } + } else { + regionPathList = hBackupFS.getRegionList(tempTableArchivePath); // point to the tempDir + if(LOG.isDebugEnabled()) { + LOG.debug("TableArchivePath for bulkload using tempPath: " + tempTableArchivePath); + } + } 
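// What follows is the data restore itself: one LoadIncrementalHFiles run per archived region
// directory, bulk-loading the HFiles of the full image into the (pre-split) target table.
// No WAL replay happens on this path; incremental images are applied separately through the
// IncrementalRestoreService (WAL replay).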
+ + LoadIncrementalHFiles loader = createLoader(tempTableArchivePath, false); + for (Path regionPath : regionPathList) { + String regionName = regionPath.toString(); + if(LOG.isDebugEnabled()) { + LOG.debug("Restoring HFiles from directory " + regionName); + } + String[] args = { regionName, newTableName.getNameAsString()}; + loader.run(args); + } + } + // we do not recovered edits + } catch (Exception e) { + throw new IllegalStateException("Cannot restore hbase table", e); + } + } else { + LOG.debug("convert will be supported in a future jira"); + } + } + + + + /** + * Create a {@link LoadIncrementalHFiles} instance to be used to restore the HFiles of a full + * backup. + * @return the {@link LoadIncrementalHFiles} instance + * @throws IOException exception + */ + private LoadIncrementalHFiles createLoader(Path tableArchivePath, boolean multipleTables) + throws IOException { + // set configuration for restore: + // LoadIncrementalHFile needs more time + // hbase.rpc.timeout 600000 + // calculates + Integer milliSecInMin = 60000; + Integer previousMillis = this.conf.getInt("hbase.rpc.timeout", 0); + Integer numberOfFilesInDir = + multipleTables ? hBackupFS.getMaxNumberOfFilesInSubDir(tableArchivePath) : hBackupFS + .getNumberOfFilesInDir(tableArchivePath); + Integer calculatedMillis = numberOfFilesInDir * milliSecInMin; // 1 minute per file + Integer resultMillis = Math.max(calculatedMillis, previousMillis); + if (resultMillis > previousMillis) { + LOG.info("Setting configuration for restore with LoadIncrementalHFile: " + + "hbase.rpc.timeout to " + calculatedMillis / milliSecInMin + + " minutes, to handle the number of files in backup " + tableArchivePath); + this.conf.setInt("hbase.rpc.timeout", resultMillis); + } + + LoadIncrementalHFiles loader = null; + try { + loader = new LoadIncrementalHFiles(this.conf); + } catch (Exception e1) { + throw new IOException(e1); + } + return loader; + } + + /** + * Prepare the table for bulkload, most codes copied from + * {@link LoadIncrementalHFiles#createTable(String, String)} + * @param tableBackupPath path + * @param tableName table name + * @param targetTableName target table name + * @param regionDirList region directory list + * @param htd table descriptor + * @throws IOException exception + */ + private void checkAndCreateTable(Path tableBackupPath, TableName tableName, + TableName targetTableName, ArrayList regionDirList, HTableDescriptor htd) + throws IOException { + HBaseAdmin hbadmin = null; + Connection conn = null; + try { + conn = ConnectionFactory.createConnection(conf); + hbadmin = (HBaseAdmin) conn.getAdmin(); + if (hbadmin.tableExists(targetTableName)) { + LOG.info("Using exising target table '" + targetTableName + "'"); + } else { + LOG.info("Creating target table '" + targetTableName + "'"); + + // if no region dir given, create the table and return + if (regionDirList == null || regionDirList.size() == 0) { + + hbadmin.createTable(htd); + return; + } + + byte[][] keys = hBackupFS.generateBoundaryKeys(regionDirList); + + // create table using table decriptor and region boundaries + hbadmin.createTable(htd, keys); + } + } catch (Exception e) { + throw new IOException(e); + } finally { + if (hbadmin != null) { + hbadmin.close(); + } + if(conn != null){ + conn.close(); + } + } + } + +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceBackupCopyService.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceBackupCopyService.java new file mode 100644 index 
0000000..14235ce --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceBackupCopyService.java @@ -0,0 +1,297 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.backup.mapreduce; + +import java.io.IOException; +import java.lang.reflect.Field; +import java.lang.reflect.Method; +import java.math.BigDecimal; +import java.util.Arrays; +import java.util.List; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.backup.impl.BackupContext; +import org.apache.hadoop.hbase.backup.impl.BackupCopyService; +import org.apache.hadoop.hbase.backup.impl.BackupHandler; +import org.apache.hadoop.hbase.backup.impl.BackupManager; +import org.apache.hadoop.hbase.backup.impl.BackupUtil; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; +import org.apache.hadoop.hbase.snapshot.ExportSnapshot; +import org.apache.hadoop.mapreduce.Job; +import org.apache.hadoop.tools.DistCp; +import org.apache.hadoop.tools.DistCpConstants; +import org.apache.hadoop.tools.DistCpOptions; +/** + * Copier for backup operation. Basically, there are 2 types of copy. One is copying from snapshot, + * which bases on extending ExportSnapshot's function with copy progress reporting to ZooKeeper + * implementation. The other is copying for incremental log files, which bases on extending + * DistCp's function with copy progress reporting to ZooKeeper implementation. + * + * For now this is only a wrapper. The other features such as progress and increment backup will be + * implemented in future jira + */ + +@InterfaceAudience.Private +@InterfaceStability.Evolving +public class MapReduceBackupCopyService implements BackupCopyService { + private static final Log LOG = LogFactory.getLog(MapReduceBackupCopyService.class); + + private Configuration conf; + // private static final long BYTES_PER_MAP = 2 * 256 * 1024 * 1024; + + // Accumulated progress within the whole backup process for the copy operation + private float progressDone = 0.1f; + private long bytesCopied = 0; + private static float INIT_PROGRESS = 0.1f; + + // The percentage of the current copy task within the whole task if multiple time copies are + // needed. The default value is 100%, which means only 1 copy task for the whole. 
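// Illustrative arithmetic for the progress reporting in BackupDistCp.execute() below (numbers
// invented): with progressDone = 0.1, INIT_PROGRESS = 0.1 and subTaskPercntgInWholeTask = 1.0,
// a map progress of 0.5 is reported as 0.1 + 0.5 * 1.0 * (1 - 0.1) = 0.55, i.e. 55.0% written
// to hbase:backup; when the copy job completes, the accumulated value becomes the new
// progressDone for any subsequent copy task.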
+ private float subTaskPercntgInWholeTask = 1f; + + public MapReduceBackupCopyService() { + } + + @Override + public Configuration getConf() { + return conf; + } + + @Override + public void setConf(Configuration conf) { + this.conf = conf; + } + + /** + * Get the current copy task percentage within the whole task if multiple copies are needed. + * @return the current copy task percentage + */ + public float getSubTaskPercntgInWholeTask() { + return subTaskPercntgInWholeTask; + } + + /** + * Set the current copy task percentage within the whole task if multiple copies are needed. Must + * be called before calling + * {@link #copy(BackupHandler, Configuration, Type, String[])} + * @param subTaskPercntgInWholeTask The percentage of the copy subtask + */ + public void setSubTaskPercntgInWholeTask(float subTaskPercntgInWholeTask) { + this.subTaskPercntgInWholeTask = subTaskPercntgInWholeTask; + } + + class SnapshotCopy extends ExportSnapshot { + private BackupContext backupContext; + private TableName table; + + public SnapshotCopy(BackupContext backupContext, TableName table) { + super(); + this.backupContext = backupContext; + this.table = table; + } + + public TableName getTable() { + return this.table; + } + } + + // Extends DistCp for progress updating to hbase:backup + // during backup. Using DistCpV2 (MAPREDUCE-2765). + // Simply extend it and override execute() method to get the + // Job reference for progress updating. + // Only the argument "src1, [src2, [...]] dst" is supported, + // no more DistCp options. + class BackupDistCp extends DistCp { + + private BackupContext backupContext; + private BackupManager backupManager; + + public BackupDistCp(Configuration conf, DistCpOptions options, BackupContext backupContext, + BackupManager backupManager) + throws Exception { + super(conf, options); + this.backupContext = backupContext; + this.backupManager = backupManager; + } + + @Override + public Job execute() throws Exception { + + // reflection preparation for private methods and fields + Class classDistCp = org.apache.hadoop.tools.DistCp.class; + Method methodCreateMetaFolderPath = classDistCp.getDeclaredMethod("createMetaFolderPath"); + Method methodCreateJob = classDistCp.getDeclaredMethod("createJob"); + Method methodCreateInputFileListing = + classDistCp.getDeclaredMethod("createInputFileListing", Job.class); + Method methodCleanup = classDistCp.getDeclaredMethod("cleanup"); + + Field fieldInputOptions = classDistCp.getDeclaredField("inputOptions"); + Field fieldMetaFolder = classDistCp.getDeclaredField("metaFolder"); + Field fieldJobFS = classDistCp.getDeclaredField("jobFS"); + Field fieldSubmitted = classDistCp.getDeclaredField("submitted"); + + methodCreateMetaFolderPath.setAccessible(true); + methodCreateJob.setAccessible(true); + methodCreateInputFileListing.setAccessible(true); + methodCleanup.setAccessible(true); + + fieldInputOptions.setAccessible(true); + fieldMetaFolder.setAccessible(true); + fieldJobFS.setAccessible(true); + fieldSubmitted.setAccessible(true); + + // execute() logic starts here + assert fieldInputOptions.get(this) != null; + assert getConf() != null; + + Job job = null; + try { + synchronized (this) { + // Don't cleanup while we are setting up. 
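// Design note on the reflection above: DistCp keeps createMetaFolderPath(), createJob(),
// createInputFileListing() and its bookkeeping fields private, so the overridden execute()
// replays those steps reflectively in order to submit the job itself and poll mapProgress()
// between submission and completion, pushing progress updates to hbase:backup.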
+ fieldMetaFolder.set(this, methodCreateMetaFolderPath.invoke(this)); + fieldJobFS.set(this, ((Path) fieldMetaFolder.get(this)).getFileSystem(getConf())); + + job = (Job) methodCreateJob.invoke(this); + } + methodCreateInputFileListing.invoke(this, job); + + // Get the total length of the source files + List srcs = ((DistCpOptions) fieldInputOptions.get(this)).getSourcePaths(); + long totalSrcLgth = 0; + for (Path aSrc : srcs) { + totalSrcLgth += BackupUtil.getFilesLength(aSrc.getFileSystem(getConf()), aSrc); + } + + // submit the copy job + job.submit(); + fieldSubmitted.set(this, true); + + // after submit the MR job, set its handler in backup handler for cancel process + // this.backupHandler.copyJob = job; + + // Update the copy progress to ZK every 0.5s if progress value changed + int progressReportFreq = + this.getConf().getInt("hbase.backup.progressreport.frequency", 500); + float lastProgress = progressDone; + while (!job.isComplete()) { + float newProgress = + progressDone + job.mapProgress() * subTaskPercntgInWholeTask * (1 - INIT_PROGRESS); + + if (newProgress > lastProgress) { + + BigDecimal progressData = + new BigDecimal(newProgress * 100).setScale(1, BigDecimal.ROUND_HALF_UP); + String newProgressStr = progressData + "%"; + LOG.info("Progress: " + newProgressStr); + BackupHandler.updateProgress(backupContext, backupManager, progressData.intValue(), + bytesCopied); + LOG.debug("Backup progress data updated to hbase:backup: \"Progress: " + newProgressStr + + ".\""); + lastProgress = newProgress; + } + Thread.sleep(progressReportFreq); + } + + // update the progress data after copy job complete + float newProgress = + progressDone + job.mapProgress() * subTaskPercntgInWholeTask * (1 - INIT_PROGRESS); + BigDecimal progressData = + new BigDecimal(newProgress * 100).setScale(1, BigDecimal.ROUND_HALF_UP); + + String newProgressStr = progressData + "%"; + LOG.info("Progress: " + newProgressStr); + + // accumulate the overall backup progress + progressDone = newProgress; + bytesCopied += totalSrcLgth; + + BackupHandler.updateProgress(backupContext, backupManager, progressData.intValue(), + bytesCopied); + LOG.debug("Backup progress data updated to hbase:backup: \"Progress: " + newProgressStr + + " - " + bytesCopied + " bytes copied.\""); + + } finally { + if (!fieldSubmitted.getBoolean(this)) { + methodCleanup.invoke(this); + } + } + + String jobID = job.getJobID().toString(); + job.getConfiguration().set(DistCpConstants.CONF_LABEL_DISTCP_JOB_ID, jobID); + + LOG.debug("DistCp job-id: " + jobID); + return job; + } + + } + + /** + * Do backup copy based on different types. + * @param context The backup context + * @param conf The hadoop configuration + * @param copyType The backup copy type + * @param options Options for customized ExportSnapshot or DistCp + * @throws Exception exception + */ + @Override + public int copy(BackupContext context, BackupManager backupManager, Configuration conf, + BackupCopyService.Type copyType, String[] options) throws IOException { + int res = 0; + + try { + if (copyType == Type.FULL) { + SnapshotCopy snapshotCp = + new SnapshotCopy(context, context.getTableBySnapshot(options[1])); + LOG.debug("Doing SNAPSHOT_COPY"); + // Make a new instance of conf to be used by the snapshot copy class. 
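+        // Using a copy keeps any settings ExportSnapshot applies from leaking back into the +        // caller's shared Configuration instance.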
+ snapshotCp.setConf(new Configuration(conf)); + res = snapshotCp.run(options); + } else if (copyType == Type.INCREMENTAL) { + LOG.debug("Doing COPY_TYPE_DISTCP"); + setSubTaskPercntgInWholeTask(1f); + + BackupDistCp distcp = new BackupDistCp(new Configuration(conf), null, context, + backupManager); + // Handle a special case where the source file is a single file. + // In this case, distcp will not create the target dir. It just take the + // target as a file name and copy source file to the target (as a file name). + // We need to create the target dir before run distcp. + LOG.debug("DistCp options: " + Arrays.toString(options)); + if (options.length == 2) { + Path dest = new Path(options[1]); + FileSystem destfs = dest.getFileSystem(conf); + if (!destfs.exists(dest)) { + destfs.mkdirs(dest); + } + } + + res = distcp.run(options); + } + return res; + + } catch (Exception e) { + throw new IOException(e); + } + } + +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceRestoreService.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceRestoreService.java new file mode 100644 index 0000000..203c9a3 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceRestoreService.java @@ -0,0 +1,74 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.backup.mapreduce; + +import java.io.IOException; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.backup.HBackupFileSystem; +import org.apache.hadoop.hbase.backup.impl.BackupUtil; +import org.apache.hadoop.hbase.backup.impl.IncrementalRestoreService; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; +import org.apache.hadoop.hbase.mapreduce.WALPlayer; + +@InterfaceAudience.Private +@InterfaceStability.Evolving +public class MapReduceRestoreService implements IncrementalRestoreService { + public static final Log LOG = LogFactory.getLog(MapReduceRestoreService.class); + + private WALPlayer player; + + public MapReduceRestoreService() { + this.player = new WALPlayer(); + } + + @Override + public void run(String logDir, TableName[] tableNames, TableName[] newTableNames) throws IOException { + String tableStr = BackupUtil.join(tableNames); + String newTableStr = BackupUtil.join(newTableNames); + + // WALPlayer reads all files in arbitrary directory structure and creates a Map task for each + // log file + + String[] playerArgs = { logDir, tableStr, newTableStr }; + LOG.info("Restore incremental backup from directory " + logDir + " from hbase tables " + + BackupUtil.join(tableNames) + " to tables " + + BackupUtil.join(newTableNames)); + try { + player.run(playerArgs); + } catch (Exception e) { + throw new IOException("cannot restore from backup directory " + logDir + + " (check Hadoop and HBase logs) " + e); + } + } + + @Override + public Configuration getConf() { + return player.getConf(); + } + + @Override + public void setConf(Configuration conf) { + this.player.setConf(conf); + } + +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/master/BackupLogCleaner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/master/BackupLogCleaner.java new file mode 100644 index 0000000..dae24a6 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/master/BackupLogCleaner.java @@ -0,0 +1,119 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.backup.master; + +import com.google.common.base.Predicate; +import com.google.common.collect.Iterables; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.hbase.HBaseInterfaceAudience; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.backup.impl.BackupSystemTable; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; +import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.client.ConnectionFactory; +import org.apache.hadoop.hbase.master.cleaner.BaseLogCleanerDelegate; + +/** + * Implementation of a log cleaner that checks if a log is still scheduled for + * incremental backup before deleting it when its TTL is over. + */ +@InterfaceStability.Evolving +@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG) +public class BackupLogCleaner extends BaseLogCleanerDelegate { + private static final Log LOG = LogFactory.getLog(BackupLogCleaner.class); + + private boolean stopped = false; + + public BackupLogCleaner() { + } + + @Override + public Iterable getDeletableFiles(Iterable files) { + // all members of this class are null if backup is disabled, + // so we cannot filter the files + if (this.getConf() == null) { + return files; + } + + List list = new ArrayList(); + // TODO: LogCleaners do not have a way to get the Connection from Master. We should find a + // way to pass it down here, so that this connection is not re-created every time. + // It is expensive + try(Connection connection = ConnectionFactory.createConnection(this.getConf()); + final BackupSystemTable table = new BackupSystemTable(connection)) { + + // If we do not have recorded backup sessions + if (!table.hasBackupSessions()) { + return files; + } + + for(FileStatus file: files){ + String wal = file.getPath().toString(); + boolean logInSystemTable = table.checkWALFile(wal); + if(LOG.isDebugEnabled()) { + if(logInSystemTable) { + LOG.debug("Found log file in hbase:backup, deleting: " + wal); + list.add(file); + } else { + LOG.debug("Didn't find this log in hbase:backup, keeping: " + wal); + } + } + } + return list; + } catch (IOException e) { + LOG.error("Failed to get hbase:backup table, therefore will keep all files", e); + // nothing to delete + return new ArrayList(); + } + } + + @Override + public void setConf(Configuration config) { + // If backup is disabled, keep all members null + if (!config.getBoolean(HConstants.BACKUP_ENABLE_KEY, HConstants.BACKUP_ENABLE_DEFAULT)) { + LOG.warn("Backup is disabled - allowing all wals to be deleted"); + return; + } + super.setConf(config); + } + + @Override + public void stop(String why) { + if (this.stopped) { + return; + } + this.stopped = true; + LOG.info("Stopping BackupLogCleaner"); + } + + @Override + public boolean isStopped() { + return this.stopped; + } + +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/master/LogRollMasterProcedureManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/master/LogRollMasterProcedureManager.java new file mode 100644 index 0000000..f96682f --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/master/LogRollMasterProcedureManager.java @@ -0,0 +1,129 @@ +/** + * Licensed to 
the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.backup.master; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.ThreadPoolExecutor; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.CoordinatedStateManagerFactory; +import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.coordination.BaseCoordinatedStateManager; +import org.apache.hadoop.hbase.errorhandling.ForeignException; +import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher; +import org.apache.hadoop.hbase.master.MasterServices; +import org.apache.hadoop.hbase.master.MetricsMaster; +import org.apache.hadoop.hbase.procedure.MasterProcedureManager; +import org.apache.hadoop.hbase.procedure.Procedure; +import org.apache.hadoop.hbase.procedure.ProcedureCoordinator; +import org.apache.hadoop.hbase.procedure.ProcedureCoordinatorRpcs; +import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription; +import org.apache.zookeeper.KeeperException; + +public class LogRollMasterProcedureManager extends MasterProcedureManager { + + public static final String ROLLLOG_PROCEDURE_SIGNATURE = "rolllog-proc"; + public static final String ROLLLOG_PROCEDURE_NAME = "rolllog"; + private static final Log LOG = LogFactory.getLog(LogRollMasterProcedureManager.class); + + private MasterServices master; + private ProcedureCoordinator coordinator; + private boolean done; + + @Override + public void stop(String why) { + LOG.info("stop: " + why); + } + + @Override + public boolean isStopped() { + return false; + } + + @Override + public void initialize(MasterServices master, MetricsMaster metricsMaster) + throws KeeperException, IOException, UnsupportedOperationException { + this.master = master; + this.done = false; + + // setup the default procedure coordinator + String name = master.getServerName().toString(); + ThreadPoolExecutor tpool = ProcedureCoordinator.defaultPool(name, 1); + BaseCoordinatedStateManager coordManager = + (BaseCoordinatedStateManager) CoordinatedStateManagerFactory + .getCoordinatedStateManager(master.getConfiguration()); + coordManager.initialize(master); + + ProcedureCoordinatorRpcs comms = + coordManager.getProcedureCoordinatorRpcs(getProcedureSignature(), name); + + this.coordinator = new ProcedureCoordinator(comms, tpool); + } + + @Override + public String getProcedureSignature() { + return ROLLLOG_PROCEDURE_SIGNATURE; + } + + @Override + public void execProcedure(ProcedureDescription desc) throws IOException { + this.done = false; + // start the process on the RS + ForeignExceptionDispatcher monitor = new ForeignExceptionDispatcher(desc.getInstance()); + List serverNames = 
master.getServerManager().getOnlineServersList(); + List servers = new ArrayList(); + for (ServerName sn : serverNames) { + servers.add(sn.toString()); + } + Procedure proc = coordinator.startProcedure(monitor, desc.getInstance(), new byte[0], servers); + if (proc == null) { + String msg = "Failed to submit distributed procedure for '" + desc.getInstance() + "'"; + LOG.error(msg); + throw new IOException(msg); + } + + try { + // wait for the procedure to complete. A timer thread is kicked off that should cancel this + // if it takes too long. + proc.waitForCompleted(); + LOG.info("Done waiting - exec procedure for " + desc.getInstance()); + LOG.info("Distributed roll log procedure is successful!"); + this.done = true; + } catch (InterruptedException e) { + ForeignException ee = + new ForeignException("Interrupted while waiting for roll log procdure to finish", e); + monitor.receive(ee); + Thread.currentThread().interrupt(); + } catch (ForeignException e) { + ForeignException ee = + new ForeignException("Exception while waiting for roll log procdure to finish", e); + monitor.receive(ee); + } + monitor.rethrowException(); + } + + @Override + public boolean isProcedureDone(ProcedureDescription desc) throws IOException { + return done; + } + +} \ No newline at end of file diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/regionserver/LogRollBackupSubprocedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/regionserver/LogRollBackupSubprocedure.java new file mode 100644 index 0000000..d524140 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/regionserver/LogRollBackupSubprocedure.java @@ -0,0 +1,143 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.backup.regionserver; + +import java.util.HashMap; +import java.util.concurrent.Callable; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.backup.impl.BackupSystemTable; +import org.apache.hadoop.hbase.backup.master.LogRollMasterProcedureManager; +import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.errorhandling.ForeignException; +import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher; +import org.apache.hadoop.hbase.procedure.ProcedureMember; +import org.apache.hadoop.hbase.procedure.Subprocedure; +import org.apache.hadoop.hbase.regionserver.RegionServerServices; +import org.apache.hadoop.hbase.regionserver.wal.FSHLog; + +/** + * This backup subprocedure implementation forces a log roll on the RS. 
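+ * The roll is executed on the region server by an inner RSRollLogTask, which also records the + * resulting log number in the hbase:backup system table.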
+ */ +public class LogRollBackupSubprocedure extends Subprocedure { + private static final Log LOG = LogFactory.getLog(LogRollBackupSubprocedure.class); + + private final RegionServerServices rss; + private final LogRollBackupSubprocedurePool taskManager; + private FSHLog hlog; + + public LogRollBackupSubprocedure(RegionServerServices rss, ProcedureMember member, + ForeignExceptionDispatcher errorListener, long wakeFrequency, long timeout, + LogRollBackupSubprocedurePool taskManager) { + + super(member, LogRollMasterProcedureManager.ROLLLOG_PROCEDURE_NAME, errorListener, + wakeFrequency, timeout); + LOG.info("Constructing a LogRollBackupSubprocedure."); + this.rss = rss; + this.taskManager = taskManager; + } + + /** + * Callable task. TODO. We don't need a thread pool to execute roll log. This can be simplified + * with no use of subprocedurepool. + */ + class RSRollLogTask implements Callable { + RSRollLogTask() { + } + + @Override + public Void call() throws Exception { + if (LOG.isDebugEnabled()) { + LOG.debug("++ DRPC started: " + rss.getServerName()); + } + hlog = (FSHLog) rss.getWAL(null); + long filenum = hlog.getFilenum(); + + LOG.info("Trying to roll log in backup subprocedure, current log number: " + filenum); + hlog.rollWriter(true); + LOG.info("After roll log in backup subprocedure, current log number: " + hlog.getFilenum()); + + Connection connection = rss.getConnection(); + try(final BackupSystemTable table = new BackupSystemTable(connection)) { + // sanity check, good for testing + HashMap serverTimestampMap = table.readRegionServerLastLogRollResult(); + String host = rss.getServerName().getHostname(); + int port = rss.getServerName().getPort(); + String server = host + ":" + port; + Long sts = serverTimestampMap.get(host); + if (sts != null && sts > filenum) { + LOG.warn("Won't update server's last roll log result: current=" + + sts + " new=" + filenum); + return null; + } + // write the log number to hbase:backup. + table.writeRegionServerLastLogRollResult(server, filenum); + return null; + } catch (Exception e) { + LOG.error(e); + throw e; // TODO: is this correct? + } + } + } + + private void rolllog() throws ForeignException { + monitor.rethrowException(); + + taskManager.submitTask(new RSRollLogTask()); + monitor.rethrowException(); + + // wait for everything to complete. + taskManager.waitForOutstandingTasks(); + monitor.rethrowException(); + + } + + @Override + public void acquireBarrier() throws ForeignException { + // do nothing, executing in inside barrier step. + } + + /** + * do a log roll. + * @return some bytes + */ + @Override + public byte[] insideBarrier() throws ForeignException { + rolllog(); + // FIXME + return null; + } + + /** + * Cancel threads if they haven't finished. + */ + @Override + public void cleanup(Exception e) { + taskManager.abort("Aborting log roll subprocedure tasks for backup due to error", e); + } + + /** + * Hooray! + */ + public void releaseBarrier() { + // NO OP + } + +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/regionserver/LogRollBackupSubprocedurePool.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/regionserver/LogRollBackupSubprocedurePool.java new file mode 100644 index 0000000..1ca638c --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/regionserver/LogRollBackupSubprocedurePool.java @@ -0,0 +1,137 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.backup.regionserver; + +import java.io.Closeable; +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.Callable; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.ExecutorCompletionService; +import java.util.concurrent.Future; +import java.util.concurrent.LinkedBlockingQueue; +import java.util.concurrent.ThreadPoolExecutor; +import java.util.concurrent.TimeUnit; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.Abortable; +import org.apache.hadoop.hbase.DaemonThreadFactory; +import org.apache.hadoop.hbase.errorhandling.ForeignException; + +/** + * Handle running each of the individual tasks for completing a backup procedure + * on a regionserver. + */ +public class LogRollBackupSubprocedurePool implements Closeable, Abortable { + private static final Log LOG = LogFactory.getLog(LogRollBackupSubprocedurePool.class); + + /** Maximum number of concurrent snapshot region tasks that can run concurrently */ + private static final String CONCURENT_BACKUP_TASKS_KEY = "hbase.backup.region.concurrentTasks"; + private static final int DEFAULT_CONCURRENT_BACKUP_TASKS = 3; + + private final ExecutorCompletionService taskPool; + private final ThreadPoolExecutor executor; + private volatile boolean aborted; + private final List> futures = new ArrayList>(); + private final String name; + + public LogRollBackupSubprocedurePool(String name, Configuration conf) { + // configure the executor service + long keepAlive = + conf.getLong(LogRollRegionServerProcedureManager.BACKUP_TIMEOUT_MILLIS_KEY, + LogRollRegionServerProcedureManager.BACKUP_TIMEOUT_MILLIS_DEFAULT); + int threads = conf.getInt(CONCURENT_BACKUP_TASKS_KEY, DEFAULT_CONCURRENT_BACKUP_TASKS); + this.name = name; + executor = + new ThreadPoolExecutor(1, threads, keepAlive, TimeUnit.SECONDS, + new LinkedBlockingQueue(), new DaemonThreadFactory("rs(" + name + + ")-backup-pool")); + taskPool = new ExecutorCompletionService(executor); + } + + /** + * Submit a task to the pool. 
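+   * @param task the roll-log task to run asynchronously on the pool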
+ */ + public void submitTask(final Callable task) { + Future f = this.taskPool.submit(task); + futures.add(f); + } + + /** + * Wait for all of the currently outstanding tasks submitted via {@link #submitTask(Callable)} + * @return true on success, false otherwise + * @throws ForeignException exception + */ + public boolean waitForOutstandingTasks() throws ForeignException { + LOG.debug("Waiting for backup procedure to finish."); + + try { + for (Future f : futures) { + f.get(); + } + return true; + } catch (InterruptedException e) { + if (aborted) { + throw new ForeignException("Interrupted and found to be aborted while waiting for tasks!", + e); + } + Thread.currentThread().interrupt(); + } catch (ExecutionException e) { + if (e.getCause() instanceof ForeignException) { + throw (ForeignException) e.getCause(); + } + throw new ForeignException(name, e.getCause()); + } finally { + // close off remaining tasks + for (Future f : futures) { + if (!f.isDone()) { + f.cancel(true); + } + } + } + return false; + } + + /** + * Attempt to cleanly shutdown any running tasks - allows currently running tasks to cleanly + * finish + */ + @Override + public void close() { + executor.shutdown(); + } + + @Override + public void abort(String why, Throwable e) { + if (this.aborted) { + return; + } + + this.aborted = true; + LOG.warn("Aborting because: " + why, e); + this.executor.shutdownNow(); + } + + @Override + public boolean isAborted() { + return this.aborted; + } +} \ No newline at end of file diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/regionserver/LogRollRegionServerProcedureManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/regionserver/LogRollRegionServerProcedureManager.java new file mode 100644 index 0000000..aca190c --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/regionserver/LogRollRegionServerProcedureManager.java @@ -0,0 +1,168 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hbase.backup.regionserver; + + +import java.io.IOException; +import java.util.concurrent.ThreadPoolExecutor; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.CoordinatedStateManagerFactory; +import org.apache.hadoop.hbase.backup.master.LogRollMasterProcedureManager; +import org.apache.hadoop.hbase.coordination.BaseCoordinatedStateManager; +import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher; +import org.apache.hadoop.hbase.procedure.ProcedureMember; +import org.apache.hadoop.hbase.procedure.ProcedureMemberRpcs; +import org.apache.hadoop.hbase.procedure.RegionServerProcedureManager; +import org.apache.hadoop.hbase.procedure.Subprocedure; +import org.apache.hadoop.hbase.procedure.SubprocedureFactory; +import org.apache.hadoop.hbase.regionserver.HRegionServer; +import org.apache.hadoop.hbase.regionserver.RegionServerServices; + +/** + * This manager class handles the work dealing with backup for a {@link HRegionServer}. + *
<p>
+ * This provides the mechanism necessary to kick off a backup-specific {@link Subprocedure} that + * this region server is responsible for. If any failures occur with the subprocedure, the manager's + * procedure member notifies the procedure coordinator to abort all others. + *
<p>
+ * On startup, requires {@link #start()} to be called. + *
<p>
+ * On shutdown, requires org.apache.hadoop.hbase.procedure.ProcedureMember.close() to be + * called + */ +public class LogRollRegionServerProcedureManager extends RegionServerProcedureManager { + + private static final Log LOG = LogFactory.getLog(LogRollRegionServerProcedureManager.class); + + /** Conf key for number of request threads to start backup on regionservers */ + public static final String BACKUP_REQUEST_THREADS_KEY = "hbase.backup.region.pool.threads"; + /** # of threads for backup work on the rs. */ + public static final int BACKUP_REQUEST_THREADS_DEFAULT = 10; + + public static final String BACKUP_TIMEOUT_MILLIS_KEY = "hbase.backup.timeout"; + public static final long BACKUP_TIMEOUT_MILLIS_DEFAULT = 60000; + + /** Conf key for millis between checks to see if backup work completed or if there are errors */ + public static final String BACKUP_REQUEST_WAKE_MILLIS_KEY = "hbase.backup.region.wakefrequency"; + /** Default amount of time to check for errors while regions finish backup work */ + private static final long BACKUP_REQUEST_WAKE_MILLIS_DEFAULT = 500; + + private RegionServerServices rss; + private ProcedureMemberRpcs memberRpcs; + private ProcedureMember member; + + /** + * Create a default backup procedure manager + */ + public LogRollRegionServerProcedureManager() { + } + + /** + * Start accepting backup procedure requests. + */ + @Override + public void start() { + this.memberRpcs.start(rss.getServerName().toString(), member); + LOG.info("Started region server backup manager."); + } + + /** + * Close this and all running backup procedure tasks + * @param force forcefully stop all running tasks + * @throws IOException exception + */ + @Override + public void stop(boolean force) throws IOException { + String mode = force ? "abruptly" : "gracefully"; + LOG.info("Stopping RegionServerBackupManager " + mode + "."); + + try { + this.member.close(); + } finally { + this.memberRpcs.close(); + } + } + + /** + * If in a running state, creates the specified subprocedure for handling a backup procedure. + * @return Subprocedure to submit to the ProcedureMemeber. 
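+   * @throws IllegalStateException if the region server is stopping or stopped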
+ */ + public Subprocedure buildSubprocedure() { + + // don't run a backup if the parent is stop(ping) + if (rss.isStopping() || rss.isStopped()) { + throw new IllegalStateException("Can't start backup procedure on RS: " + rss.getServerName() + + ", because stopping/stopped!"); + } + + LOG.info("Attempting to run a roll log procedure for backup."); + ForeignExceptionDispatcher errorDispatcher = new ForeignExceptionDispatcher(); + Configuration conf = rss.getConfiguration(); + long timeoutMillis = conf.getLong(BACKUP_TIMEOUT_MILLIS_KEY, BACKUP_TIMEOUT_MILLIS_DEFAULT); + long wakeMillis = + conf.getLong(BACKUP_REQUEST_WAKE_MILLIS_KEY, BACKUP_REQUEST_WAKE_MILLIS_DEFAULT); + + LogRollBackupSubprocedurePool taskManager = + new LogRollBackupSubprocedurePool(rss.getServerName().toString(), conf); + return new LogRollBackupSubprocedure(rss, member, errorDispatcher, wakeMillis, timeoutMillis, + taskManager); + + } + + /** + * Build the actual backup procedure runner that will do all the 'hard' work + */ + public class BackupSubprocedureBuilder implements SubprocedureFactory { + + @Override + public Subprocedure buildSubprocedure(String name, byte[] data) { + return LogRollRegionServerProcedureManager.this.buildSubprocedure(); + } + } + + @Override + public void initialize(RegionServerServices rss) throws IOException { + this.rss = rss; + BaseCoordinatedStateManager coordManager = + (BaseCoordinatedStateManager) CoordinatedStateManagerFactory.getCoordinatedStateManager(rss + .getConfiguration()); + coordManager.initialize(rss); + this.memberRpcs = + coordManager + .getProcedureMemberRpcs(LogRollMasterProcedureManager.ROLLLOG_PROCEDURE_SIGNATURE); + + // read in the backup handler configuration properties + Configuration conf = rss.getConfiguration(); + long keepAlive = conf.getLong(BACKUP_TIMEOUT_MILLIS_KEY, BACKUP_TIMEOUT_MILLIS_DEFAULT); + int opThreads = conf.getInt(BACKUP_REQUEST_THREADS_KEY, BACKUP_REQUEST_THREADS_DEFAULT); + // create the actual cohort member + ThreadPoolExecutor pool = + ProcedureMember.defaultPool(rss.getServerName().toString(), opThreads, keepAlive); + this.member = new ProcedureMember(memberRpcs, pool, new BackupSubprocedureBuilder()); + } + + @Override + public String getProcedureSignature() { + return "backup-proc"; + } + +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/LogUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/LogUtils.java new file mode 100644 index 0000000..26f261c --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/LogUtils.java @@ -0,0 +1,43 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.util; + +import org.apache.commons.logging.Log; +import org.apache.log4j.Level; +import org.apache.log4j.Logger; + +public final class LogUtils { + + private LogUtils() { + } + /** + * Disables Zk- and HBase client logging + * @param log + */ + public static void disableUselessLoggers(Log log) { + // disable zookeeper log to avoid it mess up command output + Logger zkLogger = Logger.getLogger("org.apache.zookeeper"); + zkLogger.setLevel(Level.OFF); + // disable hbase zookeeper tool log to avoid it mess up command output + Logger hbaseZkLogger = Logger.getLogger("org.apache.hadoop.hbase.zookeeper"); + hbaseZkLogger.setLevel(Level.OFF); + // disable hbase client log to avoid it mess up command output + Logger hbaseClientLogger = Logger.getLogger("org.apache.hadoop.hbase.client"); + hbaseClientLogger.setLevel(Level.OFF); + } +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java new file mode 100644 index 0000000..84b7c78 --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java @@ -0,0 +1,209 @@ +/* + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.backup; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.HBaseConfiguration; +import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.HColumnDescriptor; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.backup.impl.BackupHandler.BackupState; +import org.apache.hadoop.hbase.backup.impl.BackupContext; +import org.apache.hadoop.hbase.backup.impl.BackupSystemTable; +import org.apache.hadoop.hbase.backup.master.LogRollMasterProcedureManager; +import org.apache.hadoop.hbase.backup.regionserver.LogRollRegionServerProcedureManager; +import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.client.ConnectionFactory; +import org.apache.hadoop.hbase.client.HBaseAdmin; +import org.apache.hadoop.hbase.client.HTable; +import org.apache.hadoop.hbase.client.Put; +import org.apache.hadoop.hbase.snapshot.SnapshotTestingUtils; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster; +import org.junit.AfterClass; +import org.junit.BeforeClass; + +/** + * This class is only a base for other integration-level backup tests. Do not add tests here. 
+ * TestBackupSmallTests is where tests that don't require bring machines up/down should go All other + * tests should have their own classes and extend this one + */ +public class TestBackupBase { + + private static final Log LOG = LogFactory.getLog(TestBackupBase.class); + + protected static Configuration conf1; + protected static Configuration conf2; + + protected static HBaseTestingUtility TEST_UTIL; + protected static HBaseTestingUtility TEST_UTIL2; + protected static TableName table1; + protected static TableName table2; + protected static TableName table3; + protected static TableName table4; + + protected static TableName table1_restore = TableName.valueOf("table1_restore"); + protected static TableName table2_restore = TableName.valueOf("table2_restore"); + protected static TableName table3_restore = TableName.valueOf("table3_restore"); + protected static TableName table4_restore = TableName.valueOf("table4_restore"); + + protected static final int NB_ROWS_IN_BATCH = 100; + protected static final byte[] qualName = Bytes.toBytes("q1"); + protected static final byte[] famName = Bytes.toBytes("f"); + + protected static String BACKUP_ROOT_DIR = "/backupUT"; + protected static String BACKUP_REMOTE_ROOT_DIR = "/backupUT"; + + protected static final String BACKUP_ZNODE = "/backup/hbase"; + protected static final String BACKUP_SUCCEED_NODE = "complete"; + protected static final String BACKUP_FAILED_NODE = "failed"; + + /** + * @throws java.lang.Exception + */ + @BeforeClass + public static void setUpBeforeClass() throws Exception { + TEST_UTIL = new HBaseTestingUtility(); + TEST_UTIL.getConfiguration().set("hbase.procedure.regionserver.classes", + LogRollRegionServerProcedureManager.class.getName()); + TEST_UTIL.getConfiguration().set("hbase.procedure.master.classes", + LogRollMasterProcedureManager.class.getName()); + TEST_UTIL.getConfiguration().set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/1"); + TEST_UTIL.startMiniZKCluster(); + MiniZooKeeperCluster miniZK = TEST_UTIL.getZkCluster(); + + conf1 = TEST_UTIL.getConfiguration(); + conf2 = HBaseConfiguration.create(conf1); + conf2.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/2"); + TEST_UTIL2 = new HBaseTestingUtility(conf2); + TEST_UTIL2.setZkCluster(miniZK); + TEST_UTIL.startMiniCluster(); + TEST_UTIL2.startMiniCluster(); + conf1 = TEST_UTIL.getConfiguration(); + + TEST_UTIL.startMiniMapReduceCluster(); + BACKUP_ROOT_DIR = TEST_UTIL.getConfiguration().get("fs.defaultFS") + "/backupUT"; + LOG.info("ROOTDIR " + BACKUP_ROOT_DIR); + BACKUP_REMOTE_ROOT_DIR = TEST_UTIL2.getConfiguration().get("fs.defaultFS") + "/backupUT"; + LOG.info("REMOTE ROOTDIR " + BACKUP_REMOTE_ROOT_DIR); + + createTables(); + } + + /** + * @throws java.lang.Exception + */ + @AfterClass + public static void tearDownAfterClass() throws Exception { + SnapshotTestingUtils.deleteAllSnapshots(TEST_UTIL.getHBaseAdmin()); + SnapshotTestingUtils.deleteArchiveDirectory(TEST_UTIL); + TEST_UTIL2.shutdownMiniCluster(); + TEST_UTIL.shutdownMiniCluster(); + TEST_UTIL.shutdownMiniMapReduceCluster(); + } + + protected static void loadTable(HTable table) throws Exception { + + Put p; // 100 + 1 row to t1_syncup + for (int i = 0; i < NB_ROWS_IN_BATCH; i++) { + p = new Put(Bytes.toBytes("row" + i)); + p.addColumn(famName, qualName, Bytes.toBytes("val" + i)); + table.put(p); + } + } + + protected static void createTables() throws Exception { + + long tid = System.currentTimeMillis(); + table1 = TableName.valueOf("test-" + tid); + HBaseAdmin ha = TEST_UTIL.getHBaseAdmin(); + HTableDescriptor desc = 
new HTableDescriptor(table1); + HColumnDescriptor fam = new HColumnDescriptor(famName); + desc.addFamily(fam); + ha.createTable(desc); + Connection conn = ConnectionFactory.createConnection(conf1); + HTable table = (HTable) conn.getTable(table1); + loadTable(table); + table.close(); + table2 = TableName.valueOf("test-" + tid + 1); + desc = new HTableDescriptor(table2); + desc.addFamily(fam); + ha.createTable(desc); + table = (HTable) conn.getTable(table2); + loadTable(table); + table.close(); + table3 = TableName.valueOf("test-" + tid + 2); + table = TEST_UTIL.createTable(table3, famName); + table.close(); + table4 = TableName.valueOf("test-" + tid + 3); + table = TEST_UTIL.createTable(table4, famName); + table.close(); + ha.close(); + conn.close(); + } + + protected boolean checkSucceeded(String backupId) throws IOException { + BackupContext status = getBackupContext(backupId); + if (status == null) return false; + return status.getState() == BackupState.COMPLETE; + } + + protected boolean checkFailed(String backupId) throws IOException { + BackupContext status = getBackupContext(backupId); + if (status == null) return false; + return status.getState() == BackupState.FAILED; + } + + private BackupContext getBackupContext(String backupId) throws IOException { + Configuration conf = conf1;//BackupClientImpl.getConf(); + try (Connection connection = ConnectionFactory.createConnection(conf); + BackupSystemTable table = new BackupSystemTable(connection)) { + BackupContext status = table.readBackupStatus(backupId); + return status; + } + } + + protected BackupClient getBackupClient(){ + return BackupRestoreFactory.getBackupClient(conf1); + } + + protected RestoreClient getRestoreClient() + { + return BackupRestoreFactory.getRestoreClient(conf1); + } + + /** + * Helper method + */ + protected List toList(String... args){ + List ret = new ArrayList<>(); + for(int i=0; i < args.length; i++){ + ret.add(TableName.valueOf(args[i])); + } + return ret; + } +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBoundaryTests.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBoundaryTests.java new file mode 100644 index 0000000..21bf63c --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBoundaryTests.java @@ -0,0 +1,105 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hbase.backup; + +import static org.junit.Assert.assertTrue; + +import java.util.List; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.DoNotRetryIOException; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.testclassification.LargeTests; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +import com.google.common.collect.Lists; + +@Category(LargeTests.class) +public class TestBackupBoundaryTests extends TestBackupBase { + + private static final Log LOG = LogFactory.getLog(TestBackupBoundaryTests.class); + + /** + * Verify that full backup is created on a single empty table correctly. + * @throws Exception + */ + @Test + public void testFullBackupSingleEmpty() throws Exception { + + LOG.info("create full backup image on single table"); + List tables = Lists.newArrayList(table3); + String backupId = getBackupClient().create(BackupType.FULL, tables, BACKUP_ROOT_DIR); + LOG.info("Finished Backup"); + assertTrue(checkSucceeded(backupId)); + } + + /** + * Verify that full backup is created on multiple empty tables correctly. + * @throws Exception + */ + @Test + public void testFullBackupMultipleEmpty() throws Exception { + LOG.info("create full backup image on mulitple empty tables"); + + List tables = Lists.newArrayList(table3, table4); + String backupId = getBackupClient().create(BackupType.FULL, tables, BACKUP_ROOT_DIR); + assertTrue(checkSucceeded(backupId)); + } + + /** + * Verify that full backup fails on a single table that does not exist. + * @throws Exception + */ + @Test(expected = DoNotRetryIOException.class) + public void testFullBackupSingleDNE() throws Exception { + + LOG.info("test full backup fails on a single table that does not exist"); + List tables = toList("tabledne"); + String backupId = getBackupClient().create(BackupType.FULL, tables, BACKUP_ROOT_DIR); + assertTrue(checkSucceeded(backupId)); + } + + /** + * Verify that full backup fails on multiple tables that do not exist. + * @throws Exception + */ + @Test(expected = DoNotRetryIOException.class) + public void testFullBackupMultipleDNE() throws Exception { + + LOG.info("test full backup fails on multiple tables that do not exist"); + List tables = toList("table1dne", "table2dne"); + String backupId = getBackupClient().create(BackupType.FULL, tables, BACKUP_ROOT_DIR); + assertTrue(checkSucceeded(backupId)); + } + + /** + * Verify that full backup fails on tableset containing real and fake tables. + * @throws Exception + */ + @Test(expected = DoNotRetryIOException.class) + public void testFullBackupMixExistAndDNE() throws Exception { + LOG.info("create full backup fails on tableset containing real and fake table"); + + List tables = toList(table1.getNameAsString(), "tabledne"); + String backupId = getBackupClient().create(BackupType.FULL, tables, BACKUP_ROOT_DIR); + //assertTrue(checkSucceeded(backupId)); // TODO + } +} \ No newline at end of file diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupLogCleaner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupLogCleaner.java new file mode 100644 index 0000000..899f53b --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupLogCleaner.java @@ -0,0 +1,160 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.backup; + +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.LocatedFileStatus; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.RemoteIterator; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.backup.impl.BackupSystemTable; +import org.apache.hadoop.hbase.backup.master.BackupLogCleaner; +import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.client.ConnectionFactory; +import org.apache.hadoop.hbase.client.HTable; +import org.apache.hadoop.hbase.client.Put; +import org.apache.hadoop.hbase.testclassification.LargeTests; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.FSUtils; +import org.apache.hadoop.hbase.wal.DefaultWALProvider; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +import com.google.common.collect.Iterables; +import com.google.common.collect.Lists; + +@Category(LargeTests.class) +public class TestBackupLogCleaner extends TestBackupBase { + private static final Log LOG = LogFactory.getLog(TestBackupLogCleaner.class); + + // implements all test cases in 1 test since incremental full backup/ + // incremental backup has dependencies + @Test + public void testBackupLogCleaner() throws Exception { + + // #1 - create full backup for all tables + LOG.info("create full backup image for all tables"); + + List tableSetFullList = Lists.newArrayList(table1, table2, table3, table4); + + try (Connection connection = ConnectionFactory.createConnection(TEST_UTIL.getConfiguration()); + BackupSystemTable systemTable = new BackupSystemTable(connection)) { + // Verify that we have no backup sessions yet + assertFalse(systemTable.hasBackupSessions()); + + List walFiles = getListOfWALFiles(TEST_UTIL.getConfiguration()); + List swalFiles = convert(walFiles); + BackupLogCleaner cleaner = new BackupLogCleaner(); + cleaner.setConf(TEST_UTIL.getConfiguration()); + + Iterable deletable = cleaner.getDeletableFiles(walFiles); + // We can delete all files because we do not have yet recorded backup sessions + assertTrue(Iterables.size(deletable) == walFiles.size()); + + systemTable.addWALFiles(swalFiles, "backup"); + String backupIdFull = getBackupClient().create(BackupType.FULL, tableSetFullList, + BACKUP_ROOT_DIR); + assertTrue(checkSucceeded(backupIdFull)); + // Check one more time + deletable = cleaner.getDeletableFiles(walFiles); + // We can 
delete wal files because they were saved into hbase:backup table + int size = Iterables.size(deletable); + assertTrue(size == walFiles.size()); + + List newWalFiles = getListOfWALFiles(TEST_UTIL.getConfiguration()); + LOG.debug("WAL list after full backup"); + convert(newWalFiles); + + // New list of wal files is greater than the previous one, + // because new wal per RS have been opened after full backup + assertTrue(walFiles.size() < newWalFiles.size()); + // TODO : verify that result files are not walFiles collection + Connection conn = ConnectionFactory.createConnection(conf1); + // #2 - insert some data to table + HTable t1 = (HTable) conn.getTable(table1); + Put p1; + for (int i = 0; i < NB_ROWS_IN_BATCH; i++) { + p1 = new Put(Bytes.toBytes("row-t1" + i)); + p1.addColumn(famName, qualName, Bytes.toBytes("val" + i)); + t1.put(p1); + } + + t1.close(); + + HTable t2 = (HTable) conn.getTable(table2); + Put p2; + for (int i = 0; i < 5; i++) { + p2 = new Put(Bytes.toBytes("row-t2" + i)); + p2.addColumn(famName, qualName, Bytes.toBytes("val" + i)); + t2.put(p2); + } + + t2.close(); + + // #3 - incremental backup for multiple tables + + List tableSetIncList = Lists.newArrayList(table1, table2, table3); + String backupIdIncMultiple = + getBackupClient().create(BackupType.INCREMENTAL, tableSetIncList, BACKUP_ROOT_DIR); + assertTrue(checkSucceeded(backupIdIncMultiple)); + deletable = cleaner.getDeletableFiles(newWalFiles); + + assertTrue(Iterables.size(deletable) == newWalFiles.size()); + + conn.close(); + } + } + + private List convert(List walFiles) { + List result = new ArrayList(); + for (FileStatus fs : walFiles) { + LOG.debug("+++WAL: " + fs.getPath().toString()); + result.add(fs.getPath().toString()); + } + return result; + } + + private List getListOfWALFiles(Configuration c) throws IOException { + Path logRoot = new Path(FSUtils.getRootDir(c), HConstants.HREGION_LOGDIR_NAME); + FileSystem fs = FileSystem.get(c); + RemoteIterator it = fs.listFiles(logRoot, true); + List logFiles = new ArrayList(); + while (it.hasNext()) { + LocatedFileStatus lfs = it.next(); + if (lfs.isFile() && !DefaultWALProvider.isMetaFile(lfs.getPath())) { + logFiles.add(lfs); + LOG.info(lfs); + } + } + return logFiles; + } + +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupSystemTable.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupSystemTable.java new file mode 100644 index 0000000..2dc31df --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupSystemTable.java @@ -0,0 +1,350 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.backup; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.Iterator; +import java.util.List; +import java.util.Set; +import java.util.TreeSet; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.MiniHBaseCluster; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.backup.impl.BackupContext; +import org.apache.hadoop.hbase.backup.impl.BackupHandler.BackupState; +import org.apache.hadoop.hbase.backup.impl.BackupSystemTable; +import org.apache.hadoop.hbase.backup.impl.BackupUtil.BackupCompleteData; +import org.apache.hadoop.hbase.client.Admin; +import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.client.ConnectionFactory; +import org.apache.hadoop.hbase.testclassification.MediumTests; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +/** + * Test cases for hbase:backup API + * + */ +@Category(MediumTests.class) +public class TestBackupSystemTable { + + private static final HBaseTestingUtility UTIL = new HBaseTestingUtility(); + protected static Configuration conf = UTIL.getConfiguration(); + protected static MiniHBaseCluster cluster; + protected static Connection conn; + protected BackupSystemTable table; + + @BeforeClass + public static void setUp() throws Exception { + cluster = UTIL.startMiniCluster(); + conn = ConnectionFactory.createConnection(UTIL.getConfiguration()); + } + + @Before + public void before() throws IOException { + table = new BackupSystemTable(conn); + } + + @After + public void after() { + if (table != null) { + table.close(); + } + } + + @Test + public void testUpdateReadDeleteBackupStatus() throws IOException { + BackupContext ctx = createBackupContext(); + table.updateBackupStatus(ctx); + BackupContext readCtx = table.readBackupStatus(ctx.getBackupId()); + assertTrue(compare(ctx, readCtx)); + + // try fake backup id + readCtx = table.readBackupStatus("fake"); + + assertNull(readCtx); + // delete backup context + table.deleteBackupStatus(ctx.getBackupId()); + readCtx = table.readBackupStatus(ctx.getBackupId()); + assertNull(readCtx); + cleanBackupTable(); + } + + @Test + public void testWriteReadBackupStartCode() throws IOException { + Long code = 100L; + table.writeBackupStartCode(code); + String readCode = table.readBackupStartCode(); + assertEquals(code, new Long(Long.parseLong(readCode))); + cleanBackupTable(); + } + + private void cleanBackupTable() throws IOException { + Admin admin = UTIL.getHBaseAdmin(); + admin.disableTable(BackupSystemTable.getTableName()); + admin.truncateTable(BackupSystemTable.getTableName(), true); + if (admin.isTableDisabled(BackupSystemTable.getTableName())) { + admin.enableTable(BackupSystemTable.getTableName()); + } + } + + @Test + public void testBackupHistory() throws IOException { + int n = 10; + List list = createBackupContextList(n); + + // Load data + for (BackupContext bc : list) { + // Make sure we set right status + bc.setState(BackupState.COMPLETE); + table.updateBackupStatus(bc); + } + 
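+    // getBackupHistory() is expected to return sessions newest-first.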
+    // Reverse list for comparison
+    Collections.reverse(list);
+    ArrayList<BackupCompleteData> history = table.getBackupHistory();
+    assertTrue(history.size() == n);
+
+    for (int i = 0; i < n; i++) {
+      BackupContext ctx = list.get(i);
+      BackupCompleteData data = history.get(i);
+      assertTrue(compare(ctx, data));
+    }
+
+    cleanBackupTable();
+  }
+
+  @Test
+  public void testRegionServerLastLogRollResults() throws IOException {
+    String[] servers = new String[] { "server1", "server2", "server3" };
+    Long[] timestamps = new Long[] { 100L, 102L, 107L };
+
+    for (int i = 0; i < servers.length; i++) {
+      table.writeRegionServerLastLogRollResult(servers[i], timestamps[i]);
+    }
+
+    HashMap<String, Long> result = table.readRegionServerLastLogRollResult();
+    assertTrue(servers.length == result.size());
+    Set<String> keys = result.keySet();
+    String[] keysAsArray = new String[keys.size()];
+    keys.toArray(keysAsArray);
+    Arrays.sort(keysAsArray);
+
+    for (int i = 0; i < keysAsArray.length; i++) {
+      assertEquals(keysAsArray[i], servers[i]);
+      Long ts1 = timestamps[i];
+      Long ts2 = result.get(keysAsArray[i]);
+      assertEquals(ts1, ts2);
+    }
+
+    cleanBackupTable();
+  }
+
+  @Test
+  public void testIncrementalBackupTableSet() throws IOException {
+    TreeSet<TableName> tables1 = new TreeSet<>();
+
+    tables1.add(TableName.valueOf("t1"));
+    tables1.add(TableName.valueOf("t2"));
+    tables1.add(TableName.valueOf("t3"));
+
+    TreeSet<TableName> tables2 = new TreeSet<>();
+
+    tables2.add(TableName.valueOf("t3"));
+    tables2.add(TableName.valueOf("t4"));
+    tables2.add(TableName.valueOf("t5"));
+
+    table.addIncrementalBackupTableSet(tables1);
+    TreeSet<TableName> res1 = (TreeSet<TableName>) table.getIncrementalBackupTableSet();
+    assertTrue(tables1.size() == res1.size());
+    Iterator<TableName> desc1 = tables1.descendingIterator();
+    Iterator<TableName> desc2 = res1.descendingIterator();
+    while (desc1.hasNext()) {
+      assertEquals(desc1.next(), desc2.next());
+    }
+
+    table.addIncrementalBackupTableSet(tables2);
+    TreeSet<TableName> res2 = (TreeSet<TableName>) table.getIncrementalBackupTableSet();
+    assertTrue((tables2.size() + tables1.size() - 1) == res2.size());
+
+    tables1.addAll(tables2);
+
+    desc1 = tables1.descendingIterator();
+    desc2 = res2.descendingIterator();
+
+    while (desc1.hasNext()) {
+      assertEquals(desc1.next(), desc2.next());
+    }
+    cleanBackupTable();
+  }
+
+  @Test
+  public void testRegionServerLogTimestampMap() throws IOException {
+    TreeSet<TableName> tables = new TreeSet<>();
+
+    tables.add(TableName.valueOf("t1"));
+    tables.add(TableName.valueOf("t2"));
+    tables.add(TableName.valueOf("t3"));
+
+    HashMap<String, Long> rsTimestampMap = new HashMap<>();
+
+    rsTimestampMap.put("rs1", 100L);
+    rsTimestampMap.put("rs2", 101L);
+    rsTimestampMap.put("rs3", 103L);
+
+    table.writeRegionServerLogTimestamp(tables, rsTimestampMap);
+
+    HashMap<TableName, HashMap<String, Long>> result = table.readLogTimestampMap();
+
+    assertTrue(tables.size() == result.size());
+
+    for (TableName t : tables) {
+      HashMap<String, Long> rstm = result.get(t);
+      assertNotNull(rstm);
+      assertEquals(rstm.get("rs1"), new Long(100L));
+      assertEquals(rstm.get("rs2"), new Long(101L));
+      assertEquals(rstm.get("rs3"), new Long(103L));
+    }
+
+    Set<TableName> tables1 = new TreeSet<>();
+
+    tables1.add(TableName.valueOf("t3"));
+    tables1.add(TableName.valueOf("t4"));
+    tables1.add(TableName.valueOf("t5"));
+
+    HashMap<String, Long> rsTimestampMap1 = new HashMap<>();
+
+    rsTimestampMap1.put("rs1", 200L);
+    rsTimestampMap1.put("rs2", 201L);
+    rsTimestampMap1.put("rs3", 203L);
+
+    table.writeRegionServerLogTimestamp(tables1, rsTimestampMap1);
+
+    result = table.readLogTimestampMap();
+
+    assertTrue(5 == result.size());
+
+    for (TableName t : tables) {
+      HashMap<String, Long> rstm = result.get(t);
+      assertNotNull(rstm);
+      if (!t.equals(TableName.valueOf("t3"))) {
+        assertEquals(rstm.get("rs1"), new Long(100L));
+        assertEquals(rstm.get("rs2"), new Long(101L));
+        assertEquals(rstm.get("rs3"), new Long(103L));
+      } else {
+        assertEquals(rstm.get("rs1"), new Long(200L));
+        assertEquals(rstm.get("rs2"), new Long(201L));
+        assertEquals(rstm.get("rs3"), new Long(203L));
+      }
+    }
+
+    for (TableName t : tables1) {
+      HashMap<String, Long> rstm = result.get(t);
+      assertNotNull(rstm);
+      assertEquals(rstm.get("rs1"), new Long(200L));
+      assertEquals(rstm.get("rs2"), new Long(201L));
+      assertEquals(rstm.get("rs3"), new Long(203L));
+    }
+
+    cleanBackupTable();
+  }
+
+  @Test
+  public void testAddWALFiles() throws IOException {
+    List<String> files =
+        Arrays.asList("hdfs://server/WALs/srv1,101,15555/srv1,101,15555.default.1",
+          "hdfs://server/WALs/srv2,102,16666/srv2,102,16666.default.2",
+          "hdfs://server/WALs/srv3,103,17777/srv3,103,17777.default.3");
+    String newFile = "hdfs://server/WALs/srv1,101,15555/srv1,101,15555.default.5";
+
+    table.addWALFiles(files, "backup");
+
+    assertTrue(table.checkWALFile(files.get(0)));
+    assertTrue(table.checkWALFile(files.get(1)));
+    assertTrue(table.checkWALFile(files.get(2)));
+    assertFalse(table.checkWALFile(newFile));
+
+    cleanBackupTable();
+  }
+
+  private boolean compare(BackupContext ctx, BackupCompleteData data) {
+    return ctx.getBackupId().equals(data.getBackupToken())
+        && ctx.getTargetRootDir().equals(data.getBackupRootPath())
+        && ctx.getType().toString().equals(data.getType())
+        && ctx.getStartTs() == Long.parseLong(data.getStartTime())
+        && ctx.getEndTs() == Long.parseLong(data.getEndTime());
+  }
+
+  private boolean compare(BackupContext one, BackupContext two) {
+    return one.getBackupId().equals(two.getBackupId()) && one.getType().equals(two.getType())
+        && one.getTargetRootDir().equals(two.getTargetRootDir())
+        && one.getStartTs() == two.getStartTs() && one.getEndTs() == two.getEndTs();
+  }
+
+  private BackupContext createBackupContext() {
+    BackupContext ctxt =
+        new BackupContext("backup_" + System.nanoTime(), BackupType.FULL,
+          new TableName[] {
+            TableName.valueOf("t1"), TableName.valueOf("t2"), TableName.valueOf("t3") },
+          "/hbase/backup");
+    ctxt.setStartTs(System.currentTimeMillis());
+    ctxt.setEndTs(System.currentTimeMillis() + 1);
+    return ctxt;
+  }
+
+  private List<BackupContext> createBackupContextList(int size) {
+    List<BackupContext> list = new ArrayList<>();
+    for (int i = 0; i < size; i++) {
+      list.add(createBackupContext());
+      try {
+        Thread.sleep(10);
+      } catch (InterruptedException e) {
+        e.printStackTrace();
+      }
+    }
+    return list;
+  }
+
+  @AfterClass
+  public static void tearDown() throws IOException {
+    if (cluster != null) cluster.shutdown();
+  }
+}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestFullBackup.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestFullBackup.java
new file mode 100644
index 0000000..d9bade1
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestFullBackup.java
@@ -0,0 +1,67 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
+ * agreements. See the NOTICE file distributed with this work for additional information regarding
+ * copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License. You may obtain a
+ * copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable
+ * law or agreed to in writing, software distributed under the License is distributed on an "AS IS"
+ * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License
+ * for the specific language governing permissions and limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.backup;
+
+import static org.junit.Assert.assertTrue;
+
+import java.util.List;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.testclassification.LargeTests;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import com.google.common.collect.Lists;
+
+@Category(LargeTests.class)
+public class TestFullBackup extends TestBackupBase {
+
+  private static final Log LOG = LogFactory.getLog(TestFullBackup.class);
+
+  /**
+   * Verify that a full backup is created on a single table with data correctly.
+   * @throws Exception
+   */
+  @Test
+  public void testFullBackupSingle() throws Exception {
+    LOG.info("test full backup on a single table with data");
+    List tables = Lists.newArrayList(table1);
+    String backupId = getBackupClient().create(BackupType.FULL, tables, BACKUP_ROOT_DIR);
+    assertTrue(checkSucceeded(backupId));
+    LOG.info("backup complete");
+  }
+
+  /**
+   * Verify that a full backup is created on multiple tables correctly.
+   * @throws Exception
+   */
+  @Test
+  public void testFullBackupMultiple() throws Exception {
+    LOG.info("create full backup image on multiple tables with data");
+    List tables = Lists.newArrayList(table1, table2);
+    String backupId = getBackupClient().create(BackupType.FULL, tables, BACKUP_ROOT_DIR);
+    assertTrue(checkSucceeded(backupId));
+  }
+
+  /**
+   * Verify that a full backup is created on all tables correctly.
+   * @throws Exception
+   */
+  @Test
+  public void testFullBackupAll() throws Exception {
+    LOG.info("create full backup image on all tables");
+    String backupId = getBackupClient().create(BackupType.FULL, null, BACKUP_ROOT_DIR);
+    assertTrue(checkSucceeded(backupId));
+  }
+}
\ No newline at end of file
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestFullRestore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestFullRestore.java
new file mode 100644
index 0000000..b376de2
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestFullRestore.java
@@ -0,0 +1,172 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
+ * agreements. See the NOTICE file distributed with this work for additional information regarding
+ * copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License. You may obtain a
+ * copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable
+ * law or agreed to in writing, software distributed under the License is distributed on an "AS IS"
+ * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License
+ * for the specific language governing permissions and limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.backup;
+
+import static org.junit.Assert.assertTrue;
+
+import java.io.IOException;
+import java.util.List;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.testclassification.LargeTests;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import com.google.common.collect.Lists;
+
+@Category(LargeTests.class)
+public class TestFullRestore extends TestBackupBase {
+
+  private static final Log LOG = LogFactory.getLog(TestFullRestore.class);
+
+  /**
+   * Verify that a single table is restored to a new table.
+   * @throws Exception
+   */
+  @Test
+  public void testFullRestoreSingle() throws Exception {
+    LOG.info("test full restore on a single empty table");
+
+    List tables = Lists.newArrayList(table1);
+    String backupId = getBackupClient().create(BackupType.FULL, tables, BACKUP_ROOT_DIR);
+    assertTrue(checkSucceeded(backupId));
+    LOG.info("backup complete");
+
+    TableName[] tableset = new TableName[] { table1 };
+    TableName[] tablemap = new TableName[] { table1_restore };
+    Path path = new Path(BACKUP_ROOT_DIR);
+    HBackupFileSystem hbfs = new HBackupFileSystem(conf1, path, backupId);
+    RestoreClient client = getRestoreClient();
+    client.restore(hbfs, BACKUP_ROOT_DIR, backupId, false, false, tableset, tablemap, false);
+    HBaseAdmin hba = TEST_UTIL.getHBaseAdmin();
+    assertTrue(hba.tableExists(table1_restore));
+    TEST_UTIL.deleteTable(table1_restore);
+    hba.close();
+  }
+
+  /**
+   * Verify that multiple tables are restored to new tables.
+   * @throws Exception
+   */
+  @Test
+  public void testFullRestoreMultiple() throws Exception {
+    LOG.info("create full backup image on multiple tables");
+    List tables = Lists.newArrayList(table2, table3);
+    String backupId = getBackupClient().create(BackupType.FULL, tables, BACKUP_ROOT_DIR);
+    assertTrue(checkSucceeded(backupId));
+
+    TableName[] restore_tableset = new TableName[] { table2, table3 };
+    TableName[] tablemap = new TableName[] { table2_restore, table3_restore };
+    Path path = new Path(BACKUP_ROOT_DIR);
+    HBackupFileSystem hbfs = new HBackupFileSystem(conf1, path, backupId);
+    RestoreClient client = getRestoreClient();
+    client.restore(hbfs, BACKUP_ROOT_DIR, backupId, false, false,
+      restore_tableset, tablemap, false);
+    HBaseAdmin hba = TEST_UTIL.getHBaseAdmin();
+    assertTrue(hba.tableExists(table2_restore));
+    assertTrue(hba.tableExists(table3_restore));
+    TEST_UTIL.deleteTable(table2_restore);
+    TEST_UTIL.deleteTable(table3_restore);
+    hba.close();
+  }
+
+  /**
+   * Verify that a single table is restored using overwrite.
+   * @throws Exception
+   */
+  @Test
+  public void testFullRestoreSingleOverwrite() throws Exception {
+    LOG.info("test full restore on a single empty table");
+    List tables = Lists.newArrayList(table1);
+    String backupId = getBackupClient().create(BackupType.FULL, tables, BACKUP_ROOT_DIR);
+    assertTrue(checkSucceeded(backupId));
+    LOG.info("backup complete");
+
+    TableName[] tableset = new TableName[] { table1 };
+    Path path = new Path(BACKUP_ROOT_DIR);
+    HBackupFileSystem hbfs = new HBackupFileSystem(conf1, path, backupId);
+    RestoreClient client = getRestoreClient();
+    client.restore(hbfs, BACKUP_ROOT_DIR, backupId, false, false, tableset, null, true);
+  }
+
+  /**
+   * Verify that multiple tables are restored to new tables using overwrite.
+   * @throws Exception
+   */
+  @Test
+  public void testFullRestoreMultipleOverwrite() throws Exception {
+    LOG.info("create full backup image on multiple tables");
+
+    List tables = Lists.newArrayList(table2, table3);
+    String backupId = getBackupClient().create(BackupType.FULL, tables, BACKUP_ROOT_DIR);
+    assertTrue(checkSucceeded(backupId));
+
+    TableName[] restore_tableset = new TableName[] { table2, table3 };
+    Path path = new Path(BACKUP_ROOT_DIR);
+    HBackupFileSystem hbfs = new HBackupFileSystem(conf1, path, backupId);
+    RestoreClient client = getRestoreClient();
+    client.restore(hbfs, BACKUP_ROOT_DIR, backupId, false,
+      false, restore_tableset, null, true);
+  }
+
+  /**
+   * Verify that restore fails on a single table that does not exist.
+   * @throws Exception
+   */
+  @Test(expected = IOException.class)
+  public void testFullRestoreSingleDNE() throws Exception {
+    LOG.info("test restore fails on a single table that does not exist");
+    List tables = Lists.newArrayList(table1);
+    String backupId = getBackupClient().create(BackupType.FULL, tables, BACKUP_ROOT_DIR);
+    assertTrue(checkSucceeded(backupId));
+    LOG.info("backup complete");
+
+    TableName[] tableset = new TableName[] { TableName.valueOf("faketable") };
+    TableName[] tablemap = new TableName[] { table1_restore };
+    Path path = new Path(BACKUP_ROOT_DIR);
+    HBackupFileSystem hbfs = new HBackupFileSystem(conf1, path, backupId);
+    RestoreClient client = getRestoreClient();
+    client.restore(hbfs, BACKUP_ROOT_DIR, backupId, false, false, tableset, tablemap, false);
+  }
+
+  /**
+   * Verify that restore fails on multiple tables that do not exist.
+   * @throws Exception
+   */
+  @Test(expected = IOException.class)
+  public void testFullRestoreMultipleDNE() throws Exception {
+    LOG.info("test restore fails on multiple tables that do not exist");
+
+    List tables = Lists.newArrayList(table2, table3);
+    String backupId = getBackupClient().create(BackupType.FULL, tables, BACKUP_ROOT_DIR);
+    assertTrue(checkSucceeded(backupId));
+
+    TableName[] restore_tableset =
+        new TableName[] { TableName.valueOf("faketable1"), TableName.valueOf("faketable2") };
+    TableName[] tablemap = new TableName[] { table2_restore, table3_restore };
+    Path path = new Path(BACKUP_ROOT_DIR);
+    HBackupFileSystem hbfs = new HBackupFileSystem(conf1, path, backupId);
+    RestoreClient client = getRestoreClient();
+    client.restore(hbfs, BACKUP_ROOT_DIR, backupId, false,
+      false, restore_tableset, tablemap, false);
+  }
+}
\ No newline at end of file
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackup.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackup.java
new file mode 100644
index 0000000..e1c64c3
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackup.java
@@ -0,0 +1,177 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.backup;
+
+import static org.junit.Assert.assertTrue;
+
+import java.util.List;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
+import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.testclassification.LargeTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.hamcrest.CoreMatchers;
+import org.junit.Assert;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import com.google.common.collect.Lists;
+
+@Category(LargeTests.class)
+public class TestIncrementalBackup extends TestBackupBase {
+  private static final Log LOG = LogFactory.getLog(TestIncrementalBackup.class);
+
+  // implement all test cases in one test since incremental backup/restore has dependencies
+  @Test
+  public void TestIncBackupRestore() throws Exception {
+    HBackupFileSystem hbfs;
+
+    // #1 - create full backup for all tables
+    LOG.info("create full backup image for all tables");
+
+    List tables = Lists.newArrayList(table1, table2, table3, table4);
+    String backupIdFull = getBackupClient().create(BackupType.FULL, tables, BACKUP_ROOT_DIR);
+    assertTrue(checkSucceeded(backupIdFull));
+
+    Connection conn = ConnectionFactory.createConnection(conf1);
+    // #2 - insert some data to table
+    HTable t1 = (HTable) conn.getTable(table1);
+    Put p1;
+    for (int i = 0; i < NB_ROWS_IN_BATCH; i++) {
+      p1 = new Put(Bytes.toBytes("row-t1" + i));
+      p1.addColumn(famName, qualName, Bytes.toBytes("val" + i));
+      t1.put(p1);
+    }
+
+    Assert.assertThat(TEST_UTIL.countRows(t1), CoreMatchers.equalTo(NB_ROWS_IN_BATCH * 2));
+    t1.close();
+
+    HTable t2 = (HTable) conn.getTable(table2);
+    Put p2;
+    for (int i = 0; i < 5; i++) {
+      p2 = new Put(Bytes.toBytes("row-t2" + i));
+      p2.addColumn(famName, qualName, Bytes.toBytes("val" + i));
+      t2.put(p2);
+    }
+
+    Assert.assertThat(TEST_UTIL.countRows(t2), CoreMatchers.equalTo(NB_ROWS_IN_BATCH + 5));
+    t2.close();
+
+    // #3 - incremental backup for multiple tables
+    tables = Lists.newArrayList(table1, table2, table3);
+    String backupIdIncMultiple = getBackupClient().create(BackupType.INCREMENTAL,
+      tables, BACKUP_ROOT_DIR);
+    assertTrue(checkSucceeded(backupIdIncMultiple));
+
+    // #4 - restore full backup for all tables, without overwrite
+    TableName[] tablesRestoreFull =
+        new TableName[] { table1, table2, table3, table4 };
+
+    TableName[] tablesMapFull =
+        new TableName[] { table1_restore, table2_restore, table3_restore, table4_restore };
+
+    hbfs = new HBackupFileSystem(conf1, new Path(BACKUP_ROOT_DIR), backupIdFull);
+    RestoreClient client = getRestoreClient();
+    client.restore(hbfs, BACKUP_ROOT_DIR, backupIdFull, false, false,
+      tablesRestoreFull, tablesMapFull, false);
+
+    // #5.1 - check tables for full restore
+    HBaseAdmin hAdmin = TEST_UTIL.getHBaseAdmin();
+    assertTrue(hAdmin.tableExists(table1_restore));
+    assertTrue(hAdmin.tableExists(table2_restore));
+    assertTrue(hAdmin.tableExists(table3_restore));
+    assertTrue(hAdmin.tableExists(table4_restore));
+
+    hAdmin.close();
+
+    // #5.2 - checking row count of tables for full restore
+    HTable hTable = (HTable) conn.getTable(table1_restore);
+    Assert.assertThat(TEST_UTIL.countRows(hTable), CoreMatchers.equalTo(NB_ROWS_IN_BATCH));
+    hTable.close();
+
+    hTable = (HTable) conn.getTable(table2_restore);
+    Assert.assertThat(TEST_UTIL.countRows(hTable), CoreMatchers.equalTo(NB_ROWS_IN_BATCH));
+    hTable.close();
+
+    hTable = (HTable) conn.getTable(table3_restore);
+    Assert.assertThat(TEST_UTIL.countRows(hTable), CoreMatchers.equalTo(0));
+    hTable.close();
+
+    hTable = (HTable) conn.getTable(table4_restore);
+    Assert.assertThat(TEST_UTIL.countRows(hTable), CoreMatchers.equalTo(0));
+    hTable.close();
+
+    // #6 - restore incremental backup for multiple tables, with overwrite
+    TableName[] tablesRestoreIncMultiple =
+        new TableName[] { table1, table2, table3 };
+    TableName[] tablesMapIncMultiple =
+        new TableName[] { table1_restore, table2_restore, table3_restore };
+    hbfs = new HBackupFileSystem(conf1, new Path(BACKUP_ROOT_DIR), backupIdIncMultiple);
+    client = getRestoreClient();
+    client.restore(hbfs, BACKUP_ROOT_DIR, backupIdIncMultiple, false, false,
+      tablesRestoreIncMultiple, tablesMapIncMultiple, true);
+
+    hTable = (HTable) conn.getTable(table1_restore);
+    Assert.assertThat(TEST_UTIL.countRows(hTable), CoreMatchers.equalTo(NB_ROWS_IN_BATCH * 2));
+    hTable.close();
+
+    hTable = (HTable) conn.getTable(table2_restore);
+    Assert.assertThat(TEST_UTIL.countRows(hTable), CoreMatchers.equalTo(NB_ROWS_IN_BATCH + 5));
+    hTable.close();
+
+    hTable = (HTable) conn.getTable(table3_restore);
+    Assert.assertThat(TEST_UTIL.countRows(hTable), CoreMatchers.equalTo(0));
+    hTable.close();
+
+    // #7 - incremental backup for single, empty table
+    tables = toList(table4.getNameAsString());
+    String backupIdIncEmpty =
+        getBackupClient().create(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR);
+    assertTrue(checkSucceeded(backupIdIncEmpty));
+
+    // #8 - restore incremental backup for single empty table, with overwrite
+    TableName[] tablesRestoreIncEmpty = new TableName[] { table4 };
+    TableName[] tablesMapIncEmpty = new TableName[] { table4_restore };
+    hbfs = new HBackupFileSystem(conf1, new Path(BACKUP_ROOT_DIR), backupIdIncEmpty);
+
+    getRestoreClient().restore(hbfs, BACKUP_ROOT_DIR, backupIdIncEmpty, false, false,
+      tablesRestoreIncEmpty, tablesMapIncEmpty, true);
+
+    hTable = (HTable) conn.getTable(table4_restore);
+    Assert.assertThat(TEST_UTIL.countRows(hTable), CoreMatchers.equalTo(0));
+    hTable.close();
+    conn.close();
+  }
+}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestRemoteBackup.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestRemoteBackup.java
new file mode 100644
index 0000000..035188c
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestRemoteBackup.java
@@ -0,0 +1,45 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
+ * agreements. See the NOTICE file distributed with this work for additional information regarding
+ * copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License. You may obtain a
+ * copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable
+ * law or agreed to in writing, software distributed under the License is distributed on an "AS IS"
+ * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License
+ * for the specific language governing permissions and limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.backup;
+
+import static org.junit.Assert.assertTrue;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.testclassification.LargeTests;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import com.google.common.collect.Lists;
+
+@Category(LargeTests.class)
+public class TestRemoteBackup extends TestBackupBase {
+
+  private static final Log LOG = LogFactory.getLog(TestRemoteBackup.class);
+
+  /**
+   * Verify that a remote full backup is created on a single table with data correctly.
+   * @throws Exception
+   */
+  @Test
+  public void testFullBackupRemote() throws Exception {
+    LOG.info("test remote full backup on a single table");
+
+    String backupId =
+        getBackupClient().create(BackupType.FULL,
+          Lists.newArrayList(table1), BACKUP_REMOTE_ROOT_DIR);
+    LOG.info("backup complete");
+    assertTrue(checkSucceeded(backupId));
+  }
+
+}
\ No newline at end of file
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestRemoteRestore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestRemoteRestore.java
new file mode 100644
index 0000000..304d858
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestRemoteRestore.java
@@ -0,0 +1,55 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
+ * agreements. See the NOTICE file distributed with this work for additional information regarding
+ * copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License. You may obtain a
+ * copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable
+ * law or agreed to in writing, software distributed under the License is distributed on an "AS IS"
+ * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License
+ * for the specific language governing permissions and limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.backup;
+
+import static org.junit.Assert.assertTrue;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.testclassification.LargeTests;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+@Category(LargeTests.class)
+public class TestRemoteRestore extends TestBackupBase {
+
+  private static final Log LOG = LogFactory.getLog(TestRemoteRestore.class);
+
+  /**
+   * Verify that a remote restore on a single table is successful.
+   * @throws Exception
+   */
+  @Test
+  public void testFullRestoreRemote() throws Exception {
+    LOG.info("test remote full backup on a single table");
+    String backupId =
+        getBackupClient().create(BackupType.FULL, toList(table1.getNameAsString()),
+          BACKUP_REMOTE_ROOT_DIR);
+    LOG.info("backup complete");
+    assertTrue(checkSucceeded(backupId));
+    TableName[] tableset = new TableName[] { table1 };
+    TableName[] tablemap = new TableName[] { table1_restore };
+    Path path = new Path(BACKUP_REMOTE_ROOT_DIR);
+    HBackupFileSystem hbfs = new HBackupFileSystem(conf1, path, backupId);
+    getRestoreClient().restore(hbfs, BACKUP_REMOTE_ROOT_DIR, backupId, false, false, tableset,
+      tablemap, false);
+    HBaseAdmin hba = TEST_UTIL.getHBaseAdmin();
+    assertTrue(hba.tableExists(table1_restore));
+    TEST_UTIL.deleteTable(table1_restore);
+    hba.close();
+  }
+
+}
\ No newline at end of file
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestRestoreBoundaryTests.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestRestoreBoundaryTests.java
new file mode 100644
index 0000000..16ba991
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestRestoreBoundaryTests.java
@@ -0,0 +1,86 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.backup;
+
+import static org.junit.Assert.assertTrue;
+
+import java.util.List;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.testclassification.LargeTests;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+@Category(LargeTests.class)
+public class TestRestoreBoundaryTests extends TestBackupBase {
+
+  private static final Log LOG = LogFactory.getLog(TestRestoreBoundaryTests.class);
+
+  /**
+   * Verify that a single empty table is restored to a new table.
+   * @throws Exception
+   */
+  @Test
+  public void testFullRestoreSingleEmpty() throws Exception {
+    LOG.info("test full restore on a single empty table");
+    String backupId =
+        getBackupClient().create(BackupType.FULL, toList(table1.getNameAsString()),
+          BACKUP_ROOT_DIR);
+    LOG.info("backup complete");
+    assertTrue(checkSucceeded(backupId));
+    TableName[] tableset = new TableName[] { table1 };
+    TableName[] tablemap = new TableName[] { table1_restore };
+    Path path = new Path(BACKUP_ROOT_DIR);
+    HBackupFileSystem hbfs = new HBackupFileSystem(conf1, path, backupId);
+    getRestoreClient().restore(hbfs, BACKUP_ROOT_DIR, backupId, false, false, tableset, tablemap,
+      false);
+    HBaseAdmin hba = TEST_UTIL.getHBaseAdmin();
+    assertTrue(hba.tableExists(table1_restore));
+    TEST_UTIL.deleteTable(table1_restore);
+  }
+
+  /**
+   * Verify that multiple empty tables are restored to new tables.
+   * @throws Exception
+   */
+  @Test
+  public void testFullRestoreMultipleEmpty() throws Exception {
+    LOG.info("create full backup image on multiple tables");
+
+    List tables = toList(table2.getNameAsString(), table3.getNameAsString());
+    String backupId = getBackupClient().create(BackupType.FULL, tables, BACKUP_ROOT_DIR);
+    assertTrue(checkSucceeded(backupId));
+    TableName[] restore_tableset = new TableName[] { table2, table3 };
+    TableName[] tablemap = new TableName[] { table2_restore, table3_restore };
+    Path path = new Path(BACKUP_ROOT_DIR);
+    HBackupFileSystem hbfs = new HBackupFileSystem(conf1, path, backupId);
+    getRestoreClient().restore(hbfs, BACKUP_ROOT_DIR, backupId, false, false, restore_tableset,
+      tablemap, false);
+    HBaseAdmin hba = TEST_UTIL.getHBaseAdmin();
+    assertTrue(hba.tableExists(table2_restore));
+    assertTrue(hba.tableExists(table3_restore));
+    TEST_UTIL.deleteTable(table2_restore);
+    TEST_UTIL.deleteTable(table3_restore);
+  }
+}
\ No newline at end of file
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/backup/BackupUtility.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/backup/BackupUtility.java
new file mode 100644
index 0000000..3e96f66
--- /dev/null
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/backup/BackupUtility.java
@@ -0,0 +1,169 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.backup;
+
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.net.URLDecoder;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.TreeMap;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.LocatedFileStatus;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.PathFilter;
+import org.apache.hadoop.fs.RemoteIterator;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+
+/**
+ * A collection of methods used by multiple classes to back up HBase tables.
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public final class BackupUtility {
+  protected static final Log LOG = LogFactory.getLog(BackupUtility.class);
+  public static final String LOGNAME_SEPARATOR = ".";
+
+  private BackupUtility() {
+    throw new AssertionError("Instantiating utility class...");
+  }
+
+  /**
+   * Check whether the backup path exists.
+   * @param backupStr backup
+   * @param conf configuration
+   * @return true if the path exists
+   * @throws IOException exception
+   */
+  public static boolean checkPathExist(String backupStr, Configuration conf)
+      throws IOException {
+    boolean isExist = false;
+    Path backupPath = new Path(backupStr);
+    FileSystem fileSys = backupPath.getFileSystem(conf);
+    String targetFsScheme = fileSys.getUri().getScheme();
+    if (LOG.isTraceEnabled()) {
+      LOG.trace("Scheme of given url: " + backupStr + " is: " + targetFsScheme);
+    }
+    if (fileSys.exists(backupPath)) {
+      isExist = true;
+    }
+    return isExist;
+  }
+
+  // check target path first, confirm it doesn't exist before backup
+  public static void checkTargetDir(String backupRootPath, Configuration conf) throws IOException {
+    boolean targetExists = false;
+    try {
+      targetExists = checkPathExist(backupRootPath, conf);
+    } catch (IOException e) {
+      String expMsg = e.getMessage();
+      String newMsg = null;
+      if (expMsg.contains("No FileSystem for scheme")) {
+        newMsg =
+            "Unsupported filesystem scheme found in the backup target url. Error Message: "
+                + expMsg;
+        LOG.error(newMsg);
+        throw new IOException(newMsg);
+      } else {
+        throw e;
+      }
+    } catch (RuntimeException e) {
+      LOG.error(e.getMessage());
+      throw e;
+    }
+
+    if (targetExists) {
+      LOG.info("Using existing backup root dir: " + backupRootPath);
+    } else {
+      LOG.info("Backup root dir " + backupRootPath + " does not exist. Will be created.");
+    }
+  }
+
+  /**
+   * Get the minimum value among all the values in a map.
+   * @param map map
+   * @return the min value
+   */
+  public static Long getMinValue(HashMap<String, Long> map) {
+    Long minTimestamp = null;
+    if (map != null) {
+      ArrayList<Long> timestampList = new ArrayList<>(map.values());
+      Collections.sort(timestampList);
+      // The min among all the RS log timestamps will be kept in the hbase:backup table.
+      minTimestamp = timestampList.get(0);
+    }
+    return minTimestamp;
+  }
+
+  /**
+   * TODO: verify the code
+   * @param p path
+   * @return host name
+   * @throws IOException exception
+   */
+  public static String parseHostFromOldLog(Path p) throws IOException {
+    String n = p.getName();
+    int idx = n.lastIndexOf(LOGNAME_SEPARATOR);
+    String s = URLDecoder.decode(n.substring(0, idx), "UTF8");
+    return ServerName.parseHostname(s) + ":" + ServerName.parsePort(s);
+  }
+
+  /**
+   * Given the log file, parse the timestamp from the file name. The timestamp is the last number.
+   * @param p a path to the log file
+   * @return the timestamp
+   * @throws IOException exception
+   */
+  public static Long getCreationTime(Path p) throws IOException {
+    int idx = p.getName().lastIndexOf(LOGNAME_SEPARATOR);
+    if (idx < 0) {
+      throw new IOException("Cannot parse timestamp from path " + p);
+    }
+    String ts = p.getName().substring(idx + 1);
+    return Long.parseLong(ts);
+  }
+
+  public static List<String> getFiles(FileSystem fs, Path rootDir, List<String> files,
+      PathFilter filter) throws FileNotFoundException, IOException {
+    RemoteIterator<LocatedFileStatus> it = fs.listFiles(rootDir, true);
+
+    while (it.hasNext()) {
+      LocatedFileStatus lfs = it.next();
+      if (lfs.isDirectory()) {
+        continue;
+      }
+      // apply the filter
+      if (filter.accept(lfs.getPath())) {
+        files.add(lfs.getPath().toString());
+      }
+    }
+    return files;
+  }
+}
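Note (not part of the patch): a minimal usage sketch for the BackupUtility helpers defined above, using only the methods shown in this file. The WAL file name and the timestamps are made-up illustration values.

    import java.util.HashMap;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.backup.BackupUtility;

    public class BackupUtilityExample {
      public static void main(String[] args) throws Exception {
        // A WAL-style file name carries its creation timestamp after the last '.' separator.
        Path wal = new Path("hdfs://server/WALs/srv1,101,15555/srv1,101,15555.default.1436998400000");
        Long created = BackupUtility.getCreationTime(wal); // parses 1436998400000 from the name

        // getMinValue picks the smallest log-roll timestamp across region servers.
        HashMap<String, Long> rsTimestamps = new HashMap<>();
        rsTimestamps.put("rs1", 100L);
        rsTimestamps.put("rs2", 102L);
        Long min = BackupUtility.getMinValue(rsTimestamps); // 100

        System.out.println(created + " " + min);
      }
    }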