diff --git a/bin/hbase b/bin/hbase
index 7faaa26..83b91c0 100755
--- a/bin/hbase
+++ b/bin/hbase
@@ -101,6 +101,8 @@ if [ $# = 0 ]; then
echo " ltt Run LoadTestTool"
echo " canary Run the Canary tool"
echo " version Print the version"
+ echo " backup backup tables for recovery"
+ echo " restore restore tables from existing backup image"
echo " CLASSNAME Run the class named CLASSNAME"
exit 1
fi
@@ -313,6 +315,10 @@ elif [ "$COMMAND" = "hfile" ] ; then
CLASS='org.apache.hadoop.hbase.io.hfile.HFilePrettyPrinter'
elif [ "$COMMAND" = "zkcli" ] ; then
CLASS="org.apache.hadoop.hbase.zookeeper.ZooKeeperMainServer"
+elif [ "$COMMAND" = "backup" ] ; then
+ CLASS='org.apache.hadoop.hbase.backup.BackupDriver'
+elif [ "$COMMAND" = "restore" ] ; then
+ CLASS='org.apache.hadoop.hbase.backup.RestoreDriver'
elif [ "$COMMAND" = "upgrade" ] ; then
echo "This command was used to upgrade to HBase 0.96, it was removed in HBase 2.0.0."
echo "Please follow the documentation at http://hbase.apache.org/book.html#upgrading."
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
index 0c6244f..9ce8de3 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
@@ -1262,6 +1262,14 @@ public final class HConstants {
public static final String DEFAULT_TEMPORARY_HDFS_DIRECTORY = "/user/"
+ System.getProperty("user.name") + "/hbase-staging";
+ /**
+ * Backup/Restore constants
+ */
+ public final static String BACKUP_ENABLE_KEY = "hbase.backup.enable";
+ public final static boolean BACKUP_ENABLE_DEFAULT = true;
+ public final static String BACKUP_SYSTEM_TTL_KEY = "hbase.backup.system.ttl";
+ public final static int BACKUP_SYSTEM_TTL_DEFAULT = FOREVER;
+
private HConstants() {
// Can't be instantiated with this ctor.
}
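
The new keys are ordinary Configuration entries; a minimal sketch of how a component could read them (the helper class below is hypothetical, not part of this patch):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;

// Hypothetical helper: reads the backup settings introduced above from the active configuration.
public class BackupConfigSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    boolean backupEnabled = conf.getBoolean(
        HConstants.BACKUP_ENABLE_KEY, HConstants.BACKUP_ENABLE_DEFAULT);
    int systemTableTtl = conf.getInt(
        HConstants.BACKUP_SYSTEM_TTL_KEY, HConstants.BACKUP_SYSTEM_TTL_DEFAULT);
    System.out.println("backup enabled=" + backupEnabled
        + ", backup system table TTL=" + systemTableTtl);
  }
}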
diff --git a/hbase-protocol/pom.xml b/hbase-protocol/pom.xml
index d43b7b9..a1e78dd 100644
--- a/hbase-protocol/pom.xml
+++ b/hbase-protocol/pom.xml
@@ -171,6 +171,7 @@
<include>Admin.proto</include>
<include>Aggregate.proto</include>
<include>Authentication.proto</include>
+ <include>Backup.proto</include>
<include>Cell.proto</include>
<include>Client.proto</include>
<include>ClusterId.proto</include>
diff --git a/hbase-server/pom.xml b/hbase-server/pom.xml
index d5f1e30..e4b296a 100644
--- a/hbase-server/pom.xml
+++ b/hbase-server/pom.xml
@@ -394,6 +394,11 @@
${project.version}
true
+ <dependency>
+ <groupId>org.apache.hadoop</groupId>
+ <artifactId>hadoop-distcp</artifactId>
+ <version>${hadoop-two.version}</version>
+ </dependency>
commons-httpclient
commons-httpclient
@@ -407,6 +412,11 @@
commons-collections
+ <groupId>org.apache.hadoop</groupId>
+ <artifactId>hadoop-distcp</artifactId>
+ <version>${hadoop-two.version}</version>
+ </dependency>
+ <dependency>
org.apache.hbase
hbase-hadoop-compat
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/BaseCoordinatedStateManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/BaseCoordinatedStateManager.java
index ae36f08..3342743 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/BaseCoordinatedStateManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/BaseCoordinatedStateManager.java
@@ -17,7 +17,11 @@
*/
package org.apache.hadoop.hbase.coordination;
+import java.io.IOException;
+
import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.procedure.ProcedureCoordinatorRpcs;
+import org.apache.hadoop.hbase.procedure.ProcedureMemberRpcs;
import org.apache.hadoop.hbase.CoordinatedStateManager;
import org.apache.hadoop.hbase.Server;
@@ -51,8 +55,21 @@ public abstract class BaseCoordinatedStateManager implements CoordinatedStateMan
* Method to retrieve coordination for split log worker
*/
public abstract SplitLogWorkerCoordination getSplitLogWorkerCoordination();
+
/**
* Method to retrieve coordination for split log manager
*/
public abstract SplitLogManagerCoordination getSplitLogManagerCoordination();
+ /**
+ * Method to retrieve {@link org.apache.hadoop.hbase.procedure.ProcedureCoordinatorRpcs}
+ */
+ public abstract ProcedureCoordinatorRpcs
+ getProcedureCoordinatorRpcs(String procType, String coordNode) throws IOException;
+
+ /**
+ * Method to retrieve {@link org.apache.hadoop.hbase.procedure.ProcedureMemberRpcs}
+ */
+ public abstract ProcedureMemberRpcs
+ getProcedureMemberRpcs(String procType) throws IOException;
+
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkCoordinatedStateManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkCoordinatedStateManager.java
index 3e89be7..7cf4aab 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkCoordinatedStateManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkCoordinatedStateManager.java
@@ -17,9 +17,15 @@
*/
package org.apache.hadoop.hbase.coordination;
+import java.io.IOException;
+
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.HBaseInterfaceAudience;
import org.apache.hadoop.hbase.Server;
+import org.apache.hadoop.hbase.procedure.ProcedureCoordinatorRpcs;
+import org.apache.hadoop.hbase.procedure.ProcedureMemberRpcs;
+import org.apache.hadoop.hbase.procedure.ZKProcedureCoordinatorRpcs;
+import org.apache.hadoop.hbase.procedure.ZKProcedureMemberRpcs;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
/**
@@ -49,9 +55,21 @@ public class ZkCoordinatedStateManager extends BaseCoordinatedStateManager {
@Override
public SplitLogWorkerCoordination getSplitLogWorkerCoordination() {
return splitLogWorkerCoordination;
- }
+ }
+
@Override
public SplitLogManagerCoordination getSplitLogManagerCoordination() {
return splitLogManagerCoordination;
}
+
+ @Override
+ public ProcedureCoordinatorRpcs getProcedureCoordinatorRpcs(String procType, String coordNode)
+ throws IOException {
+ return new ZKProcedureCoordinatorRpcs(watcher, procType, coordNode);
+ }
+
+ @Override
+ public ProcedureMemberRpcs getProcedureMemberRpcs(String procType) throws IOException {
+ return new ZKProcedureMemberRpcs(watcher, procType);
+ }
}
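
For context, a minimal sketch of how procedure RPCs can now be obtained through the coordinated state manager instead of constructing the ZooKeeper implementations directly (the wrapper class below is hypothetical, not part of this patch):

import java.io.IOException;

import org.apache.hadoop.hbase.coordination.BaseCoordinatedStateManager;
import org.apache.hadoop.hbase.procedure.ProcedureCoordinatorRpcs;
import org.apache.hadoop.hbase.procedure.ProcedureMemberRpcs;

// Hypothetical caller: resolves procedure RPCs via the new abstract methods, so the
// coordination backend (ZooKeeper in ZkCoordinatedStateManager) stays behind one interface.
public final class ProcedureRpcsSketch {
  private ProcedureRpcsSketch() {
  }

  public static ProcedureCoordinatorRpcs coordinatorRpcs(BaseCoordinatedStateManager csm,
      String procType, String coordNode) throws IOException {
    return csm.getProcedureCoordinatorRpcs(procType, coordNode);
  }

  public static ProcedureMemberRpcs memberRpcs(BaseCoordinatedStateManager csm,
      String procType) throws IOException {
    return csm.getProcedureMemberRpcs(procType);
  }
}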
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/WALPlayer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/WALPlayer.java
index 9d9cee0..2ceeda5 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/WALPlayer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/WALPlayer.java
@@ -85,6 +85,9 @@ public class WALPlayer extends Configured implements Tool {
private final static String JOB_NAME_CONF_KEY = "mapreduce.job.name";
+ public WALPlayer(){
+ }
+
protected WALPlayer(final Configuration c) {
super(c);
}
@@ -94,7 +97,7 @@ public class WALPlayer extends Configured implements Tool {
* This one can be used together with {@link KeyValueSortReducer}
*/
static class WALKeyValueMapper
- extends Mapper {
+ extends Mapper {
private byte[] table;
@Override
@@ -106,7 +109,9 @@ public class WALPlayer extends Configured implements Tool {
if (Bytes.equals(table, key.getTablename().getName())) {
for (Cell cell : value.getCells()) {
KeyValue kv = KeyValueUtil.ensureKeyValue(cell);
- if (WALEdit.isMetaEditFamily(kv)) continue;
+ if (WALEdit.isMetaEditFamily(kv)) {
+ continue;
+ }
context.write(new ImmutableBytesWritable(CellUtil.cloneRow(kv)), kv);
}
}
@@ -132,7 +137,7 @@ public class WALPlayer extends Configured implements Tool {
* a running HBase instance.
*/
protected static class WALMapper
- extends Mapper {
+ extends Mapper {
private Map tables = new TreeMap();
@Override
@@ -149,7 +154,9 @@ public class WALPlayer extends Configured implements Tool {
Cell lastCell = null;
for (Cell cell : value.getCells()) {
// filtering WAL meta entries
- if (WALEdit.isMetaEditFamily(cell)) continue;
+ if (WALEdit.isMetaEditFamily(cell)) {
+ continue;
+ }
// Allow a subclass filter out this cell.
if (filter(context, cell)) {
@@ -160,8 +167,12 @@ public class WALPlayer extends Configured implements Tool {
if (lastCell == null || lastCell.getTypeByte() != cell.getTypeByte()
|| !CellUtil.matchingRow(lastCell, cell)) {
// row or type changed, write out aggregate KVs.
- if (put != null) context.write(tableOut, put);
- if (del != null) context.write(tableOut, del);
+ if (put != null) {
+ context.write(tableOut, put);
+ }
+ if (del != null) {
+ context.write(tableOut, del);
+ }
if (CellUtil.isDelete(cell)) {
del = new Delete(CellUtil.cloneRow(cell));
} else {
@@ -177,8 +188,12 @@ public class WALPlayer extends Configured implements Tool {
lastCell = cell;
}
// write residual KVs
- if (put != null) context.write(tableOut, put);
- if (del != null) context.write(tableOut, del);
+ if (put != null) {
+ context.write(tableOut, put);
+ }
+ if (del != null) {
+ context.write(tableOut, del);
+ }
}
} catch (InterruptedException e) {
e.printStackTrace();
@@ -186,7 +201,8 @@ public class WALPlayer extends Configured implements Tool {
}
/**
- * @param cell
+ * Filter cell
+ * @param cell cell
* @return Return true if we are to emit this cell.
*/
protected boolean filter(Context context, final Cell cell) {
@@ -197,9 +213,7 @@ public class WALPlayer extends Configured implements Tool {
public void setup(Context context) throws IOException {
String[] tableMap = context.getConfiguration().getStrings(TABLE_MAP_KEY);
String[] tablesToUse = context.getConfiguration().getStrings(TABLES_KEY);
- if (tablesToUse == null && tableMap == null) {
- // Then user wants all tables.
- } else if (tablesToUse == null || tableMap == null || tablesToUse.length != tableMap.length) {
+ if (tablesToUse == null || tableMap == null || tablesToUse.length != tableMap.length) {
// this can only happen when WALMapper is used directly by a class other than WALPlayer
throw new IOException("No tables or incorrect table mapping specified.");
}
@@ -215,7 +229,9 @@ public class WALPlayer extends Configured implements Tool {
void setupTime(Configuration conf, String option) throws IOException {
String val = conf.get(option);
- if (null == val) return;
+ if (null == val) {
+ return;
+ }
long ms;
try {
// first try to parse in user friendly form
@@ -295,7 +311,8 @@ public class WALPlayer extends Configured implements Tool {
return job;
}
- /*
+ /**
+ * Print usage
* @param errorMsg Error message. Can be null.
*/
private void usage(final String errorMsg) {
@@ -305,7 +322,8 @@ public class WALPlayer extends Configured implements Tool {
System.err.println("Usage: " + NAME + " [options] []");
System.err.println("Read all WAL entries for .");
System.err.println("If no tables (\"\") are specific, all tables are imported.");
- System.err.println("(Careful, even -ROOT- and hbase:meta entries will be imported in that case.)");
+ System.err.println("(Careful, even -ROOT- and hbase:meta entries will be imported"+
+ " in that case.)");
System.err.println("Otherwise is a comma separated list of tables.\n");
System.err.println("The WAL entries can be mapped to new set of tables via .");
System.err.println(" is a command separated list of targettables.");
@@ -318,10 +336,10 @@ public class WALPlayer extends Configured implements Tool {
System.err.println(" -D" + WALInputFormat.START_TIME_KEY + "=[date|ms]");
System.err.println(" -D" + WALInputFormat.END_TIME_KEY + "=[date|ms]");
System.err.println(" -D " + JOB_NAME_CONF_KEY
- + "=jobName - use the specified mapreduce job name for the wal player");
+ + "=jobName - use the specified mapreduce job name for the wal player");
System.err.println("For performance also consider the following options:\n"
- + " -Dmapreduce.map.speculative=false\n"
- + " -Dmapreduce.reduce.speculative=false");
+ + " -Dmapreduce.map.speculative=false\n"
+ + " -Dmapreduce.reduce.speculative=false");
}
/**
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index 7942b28..e47aca3 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -82,6 +82,7 @@ import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.TableNotDisabledException;
import org.apache.hadoop.hbase.TableNotFoundException;
import org.apache.hadoop.hbase.UnknownRegionException;
+import org.apache.hadoop.hbase.backup.impl.BackupManager;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.RegionReplicaUtil;
@@ -392,6 +393,7 @@ public class HMaster extends HRegionServer implements MasterServices {
this.conf.setBoolean(HConstants.USE_META_REPLICAS, false);
Replication.decorateMasterConfiguration(this.conf);
+ BackupManager.decorateMasterConfiguration(this.conf);
// Hack! Maps DFSClient => Master for logs. HDFS made this
// config param for task trackers, but we can piggyback off of it.
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/RegionServerProcedureManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/RegionServerProcedureManager.java
index 95c3ffe..b6e11ea 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/RegionServerProcedureManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/RegionServerProcedureManager.java
@@ -37,7 +37,7 @@ public abstract class RegionServerProcedureManager extends ProcedureManager {
* @param rss Region Server service interface
* @throws KeeperException
*/
- public abstract void initialize(RegionServerServices rss) throws KeeperException;
+ public abstract void initialize(RegionServerServices rss) throws IOException;
/**
* Start accepting procedure requests.
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/RegionServerProcedureManagerHost.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/RegionServerProcedureManagerHost.java
index 0f4ea64..adb3604 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/RegionServerProcedureManagerHost.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/RegionServerProcedureManagerHost.java
@@ -25,7 +25,6 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.procedure.flush.RegionServerFlushTableProcedureManager;
import org.apache.hadoop.hbase.regionserver.RegionServerServices;
import org.apache.hadoop.hbase.regionserver.snapshot.RegionServerSnapshotManager;
-import org.apache.zookeeper.KeeperException;
/**
* Provides the globally barriered procedure framework and environment
@@ -39,7 +38,7 @@ public class RegionServerProcedureManagerHost extends
private static final Log LOG = LogFactory
.getLog(RegionServerProcedureManagerHost.class);
- public void initialize(RegionServerServices rss) throws KeeperException {
+ public void initialize(RegionServerServices rss) throws IOException {
for (RegionServerProcedureManager proc : procedures) {
LOG.debug("Procedure " + proc.getProcedureSignature() + " is initializing");
proc.initialize(rss);
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ZKProcedureCoordinatorRpcs.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ZKProcedureCoordinatorRpcs.java
index 085d642..3865ba9 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ZKProcedureCoordinatorRpcs.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ZKProcedureCoordinatorRpcs.java
@@ -54,7 +54,7 @@ public class ZKProcedureCoordinatorRpcs implements ProcedureCoordinatorRpcs {
* @throws KeeperException if an unexpected zk error occurs
*/
public ZKProcedureCoordinatorRpcs(ZooKeeperWatcher watcher,
- String procedureClass, String coordName) throws KeeperException {
+ String procedureClass, String coordName) throws IOException {
this.watcher = watcher;
this.procedureType = procedureClass;
this.coordName = coordName;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ZKProcedureMemberRpcs.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ZKProcedureMemberRpcs.java
index 2e03a60..9b491fd 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ZKProcedureMemberRpcs.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ZKProcedureMemberRpcs.java
@@ -68,49 +68,53 @@ public class ZKProcedureMemberRpcs implements ProcedureMemberRpcs {
* @throws KeeperException if we can't reach zookeeper
*/
public ZKProcedureMemberRpcs(final ZooKeeperWatcher watcher, final String procType)
- throws KeeperException {
- this.zkController = new ZKProcedureUtil(watcher, procType) {
- @Override
- public void nodeCreated(String path) {
- if (!isInProcedurePath(path)) {
- return;
- }
+ throws IOException {
+ try {
+ this.zkController = new ZKProcedureUtil(watcher, procType) {
+ @Override
+ public void nodeCreated(String path) {
+ if (!isInProcedurePath(path)) {
+ return;
+ }
- LOG.info("Received created event:" + path);
- // if it is a simple start/end/abort then we just rewatch the node
- if (isAcquiredNode(path)) {
- waitForNewProcedures();
- return;
- } else if (isAbortNode(path)) {
- watchForAbortedProcedures();
- return;
+ LOG.info("Received created event:" + path);
+ // if it is a simple start/end/abort then we just rewatch the node
+ if (isAcquiredNode(path)) {
+ waitForNewProcedures();
+ return;
+ } else if (isAbortNode(path)) {
+ watchForAbortedProcedures();
+ return;
+ }
+ String parent = ZKUtil.getParent(path);
+ // if its the end barrier, the procedure can be completed
+ if (isReachedNode(parent)) {
+ receivedReachedGlobalBarrier(path);
+ return;
+ } else if (isAbortNode(parent)) {
+ abort(path);
+ return;
+ } else if (isAcquiredNode(parent)) {
+ startNewSubprocedure(path);
+ } else {
+ LOG.debug("Ignoring created notification for node:" + path);
+ }
}
- String parent = ZKUtil.getParent(path);
- // if its the end barrier, the procedure can be completed
- if (isReachedNode(parent)) {
- receivedReachedGlobalBarrier(path);
- return;
- } else if (isAbortNode(parent)) {
- abort(path);
- return;
- } else if (isAcquiredNode(parent)) {
- startNewSubprocedure(path);
- } else {
- LOG.debug("Ignoring created notification for node:" + path);
- }
- }
- @Override
- public void nodeChildrenChanged(String path) {
- if (path.equals(this.acquiredZnode)) {
- LOG.info("Received procedure start children changed event: " + path);
- waitForNewProcedures();
- } else if (path.equals(this.abortZnode)) {
- LOG.info("Received procedure abort children changed event: " + path);
- watchForAbortedProcedures();
+ @Override
+ public void nodeChildrenChanged(String path) {
+ if (path.equals(this.acquiredZnode)) {
+ LOG.info("Received procedure start children changed event: " + path);
+ waitForNewProcedures();
+ } else if (path.equals(this.abortZnode)) {
+ LOG.info("Received procedure abort children changed event: " + path);
+ watchForAbortedProcedures();
+ }
}
- }
- };
+ };
+ } catch (KeeperException e) {
+ throw new IOException(e);
+ }
}
public ZKProcedureUtil getZkController() {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/flush/RegionServerFlushTableProcedureManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/flush/RegionServerFlushTableProcedureManager.java
index 1aa959c..bd65cc7 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/flush/RegionServerFlushTableProcedureManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/flush/RegionServerFlushTableProcedureManager.java
@@ -317,7 +317,7 @@ public class RegionServerFlushTableProcedureManager extends RegionServerProcedur
* @throws KeeperException if the zookeeper cannot be reached
*/
@Override
- public void initialize(RegionServerServices rss) throws KeeperException {
+ public void initialize(RegionServerServices rss) throws IOException {
this.rss = rss;
ZooKeeperWatcher zkw = rss.getZooKeeper();
this.memberRpcs = new ZKProcedureMemberRpcs(zkw,
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
index 4ab2693..0ce8ee4 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
@@ -828,8 +828,8 @@ public class HRegionServer extends HasThread implements
rspmHost = new RegionServerProcedureManagerHost();
rspmHost.loadProcedures(conf);
rspmHost.initialize(this);
- } catch (KeeperException e) {
- this.abort("Failed to reach zk cluster when creating procedure handler.", e);
+ } catch (IOException e) {
+ this.abort("Failed to reach coordination cluster when creating procedure handler.", e);
}
// register watcher for recovering regions
this.recoveringRegionWatcher = new RecoveringRegionWatcher(this.zooKeeper, this);
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/snapshot/RegionServerSnapshotManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/snapshot/RegionServerSnapshotManager.java
index 537329a..e56dd28 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/snapshot/RegionServerSnapshotManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/snapshot/RegionServerSnapshotManager.java
@@ -390,7 +390,7 @@ public class RegionServerSnapshotManager extends RegionServerProcedureManager {
* @throws KeeperException if the zookeeper cluster cannot be reached
*/
@Override
- public void initialize(RegionServerServices rss) throws KeeperException {
+ public void initialize(RegionServerServices rss) throws IOException {
this.rss = rss;
ZooKeeperWatcher zkw = rss.getZooKeeper();
this.memberRpcs = new ZKProcedureMemberRpcs(zkw,
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java
index f3f869c..31f05c2 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java
@@ -96,6 +96,8 @@ import com.lmax.disruptor.TimeoutException;
import com.lmax.disruptor.dsl.Disruptor;
import com.lmax.disruptor.dsl.ProducerType;
+
+
/**
* Implementation of {@link WAL} to go against {@link FileSystem}; i.e. keep WALs in HDFS.
* Only one WAL is ever being written at a time. When a WAL hits a configured maximum size,
@@ -356,7 +358,9 @@ public class FSHLog implements WAL {
public int compare(Path o1, Path o2) {
long t1 = getFileNumFromFileName(o1);
long t2 = getFileNumFromFileName(o2);
- if (t1 == t2) return 0;
+ if (t1 == t2) {
+ return 0;
+ }
return (t1 > t2) ? 1 : -1;
}
};
@@ -399,7 +403,7 @@ public class FSHLog implements WAL {
* @param root path for stored and archived wals
* @param logDir dir where wals are stored
* @param conf configuration to use
- * @throws IOException
+ * @throws IOException exception
*/
public FSHLog(final FileSystem fs, final Path root, final String logDir, final Configuration conf)
throws IOException {
@@ -407,7 +411,7 @@ public class FSHLog implements WAL {
}
/**
- * Create an edit log at the given dir location.
+ * Create an edit log at the given directory location.
*
* You should never have to load an existing log. If there is a log at
* startup, it should have already been processed and deleted by the time the
@@ -422,13 +426,13 @@ public class FSHLog implements WAL {
* be registered before we do anything else; e.g. the
* Constructor {@link #rollWriter()}.
* @param failIfWALExists If true IOException will be thrown if files related to this wal
- * already exist.
+ * already exist.
* @param prefix should always be hostname and port in distributed env and
- * it will be URL encoded before being used.
- * If prefix is null, "wal" will be used
+ * it will be URL encoded before being used.
+ * If prefix is null, "wal" will be used
* @param suffix will be url encoded. null is treated as empty. non-empty must start with
- * {@link DefaultWALProvider#WAL_FILE_NAME_DELIMITER}
- * @throws IOException
+ * {@link DefaultWALProvider#WAL_FILE_NAME_DELIMITER}
+ * @throws IOException exception
*/
public FSHLog(final FileSystem fs, final Path rootDir, final String logDir,
final String archiveDir, final Configuration conf,
@@ -590,7 +594,9 @@ public class FSHLog implements WAL {
@VisibleForTesting
OutputStream getOutputStream() {
FSDataOutputStream fsdos = this.hdfs_out;
- if (fsdos == null) return null;
+ if (fsdos == null) {
+ return null;
+ }
return fsdos.getWrappedStream();
}
@@ -625,7 +631,7 @@ public class FSHLog implements WAL {
/**
* Tell listeners about pre log roll.
- * @throws IOException
+ * @throws IOException exception
*/
private void tellListenersAboutPreLogRoll(final Path oldPath, final Path newPath)
throws IOException {
@@ -638,7 +644,7 @@ public class FSHLog implements WAL {
/**
* Tell listeners about post log roll.
- * @throws IOException
+ * @throws IOException exception
*/
private void tellListenersAboutPostLogRoll(final Path oldPath, final Path newPath)
throws IOException {
@@ -651,8 +657,7 @@ public class FSHLog implements WAL {
/**
* Run a sync after opening to set up the pipeline.
- * @param nextWriter
- * @param startTimeNanos
+ * @param nextWriter next writer
*/
private void preemptiveSync(final ProtobufLogWriter nextWriter) {
long startTimeNanos = System.nanoTime();
@@ -670,7 +675,9 @@ public class FSHLog implements WAL {
rollWriterLock.lock();
try {
// Return if nothing to flush.
- if (!force && (this.writer != null && this.numEntries.get() <= 0)) return null;
+ if (!force && (this.writer != null && this.numEntries.get() <= 0)) {
+ return null;
+ }
byte [][] regionsToFlush = null;
if (this.closed) {
LOG.debug("WAL closed. Skipping rolling of writer");
@@ -725,7 +732,7 @@ public class FSHLog implements WAL {
/**
* Archive old logs. A WAL is eligible for archiving if all its WALEdits have been flushed.
- * @throws IOException
+ * @throws IOException exception
*/
private void cleanOldLogs() throws IOException {
List<Path> logsToArchive = null;
@@ -735,9 +742,13 @@ public class FSHLog implements WAL {
Path log = e.getKey();
Map<byte[], Long> sequenceNums = e.getValue();
if (this.sequenceIdAccounting.areAllLower(sequenceNums)) {
- if (logsToArchive == null) logsToArchive = new ArrayList<Path>();
+ if (logsToArchive == null) {
+ logsToArchive = new ArrayList<Path>();
+ }
logsToArchive.add(log);
- if (LOG.isTraceEnabled()) LOG.trace("WAL file ready for archiving " + log);
+ if (LOG.isTraceEnabled()) {
+ LOG.trace("WAL file ready for archiving " + log);
+ }
}
}
if (logsToArchive != null) {
@@ -767,7 +778,9 @@ public class FSHLog implements WAL {
if (regions != null) {
StringBuilder sb = new StringBuilder();
for (int i = 0; i < regions.length; i++) {
- if (i > 0) sb.append(", ");
+ if (i > 0) {
+ sb.append(", ");
+ }
sb.append(Bytes.toStringBinary(regions[i]));
}
LOG.info("Too many WALs; count=" + logCount + ", max=" + this.maxLogs +
@@ -833,7 +846,9 @@ public class FSHLog implements WAL {
}
} catch (FailedSyncBeforeLogCloseException e) {
// If unflushed/unsynced entries on close, it is reason to abort.
- if (isUnflushedEntries()) throw e;
+ if (isUnflushedEntries()) {
+ throw e;
+ }
LOG.warn("Failed sync-before-close but no outstanding appends; closing WAL: " +
e.getMessage());
}
@@ -894,7 +909,9 @@ public class FSHLog implements WAL {
try {
blockOnSync(syncFuture);
} catch (IOException ioe) {
- if (LOG.isTraceEnabled()) LOG.trace("Stale sync exception", ioe);
+ if (LOG.isTraceEnabled()) {
+ LOG.trace("Stale sync exception", ioe);
+ }
}
}
}
@@ -965,7 +982,15 @@ public class FSHLog implements WAL {
public Path getCurrentFileName() {
return computeFilename(this.filenum.get());
}
-
+
+ /**
+ * To support old API compatibility
+ * @return current file number (timestamp)
+ */
+ public long getFilenum() {
+ return filenum.get();
+ }
+
@Override
public String toString() {
return "FSHLog " + logFilePrefix + ":" + logFileSuffix + "(num " + filenum + ")";
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/DefaultWALProvider.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/DefaultWALProvider.java
index 027e7a2..dd4d337 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/DefaultWALProvider.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/DefaultWALProvider.java
@@ -209,13 +209,18 @@ public class DefaultWALProvider implements WALProvider {
@VisibleForTesting
public static long extractFileNumFromWAL(final WAL wal) {
final Path walName = ((FSHLog)wal).getCurrentFileName();
+ return extractFileNumFromWAL(walName);
+ }
+
+ @VisibleForTesting
+ public static long extractFileNumFromWAL(final Path walName) {
if (walName == null) {
throw new IllegalArgumentException("The WAL path couldn't be null");
}
final String[] walPathStrs = walName.toString().split("\\" + WAL_FILE_NAME_DELIMITER);
return Long.parseLong(walPathStrs[walPathStrs.length - (isMetaFile(walName) ? 2:1)]);
}
-
+
/**
* Pattern used to validate a WAL file name
* see {@link #validateWALFilename(String)} for description.
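
A small usage sketch for the new Path-based overload; the class name and the sample WAL path below are made up for illustration:

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.wal.DefaultWALProvider;

// Hypothetical example: extracts the file number (creation timestamp) from a WAL path
// without needing a live FSHLog instance, using the overload added above.
public class WalFileNumSketch {
  public static void main(String[] args) {
    Path walPath = new Path(
        "/hbase/WALs/host1,16020,1449000000000/host1%2C16020%2C1449000000000.1449000123456");
    long fileNum = DefaultWALProvider.extractFileNumFromWAL(walPath);
    System.out.println("WAL file number: " + fileNum); // 1449000123456
  }
}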
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/SimpleRSProcedureManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/SimpleRSProcedureManager.java
index 7620bbb..cd2efad 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/SimpleRSProcedureManager.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/SimpleRSProcedureManager.java
@@ -49,7 +49,7 @@ public class SimpleRSProcedureManager extends RegionServerProcedureManager {
private ProcedureMember member;
@Override
- public void initialize(RegionServerServices rss) throws KeeperException {
+ public void initialize(RegionServerServices rss) throws IOException {
this.rss = rss;
ZooKeeperWatcher zkw = rss.getZooKeeper();
this.memberRpcs = new ZKProcedureMemberRpcs(zkw, getProcedureSignature());
diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/BackupProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/BackupProtos.java
new file mode 100644
index 0000000..1a7a1ba
--- /dev/null
+++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/BackupProtos.java
@@ -0,0 +1,9143 @@
+// Generated by the protocol buffer compiler. DO NOT EDIT!
+// source: Backup.proto
+
+package org.apache.hadoop.hbase.protobuf.generated;
+
+public final class BackupProtos {
+ private BackupProtos() {}
+ public static void registerAllExtensions(
+ com.google.protobuf.ExtensionRegistry registry) {
+ }
+ /**
+ * Protobuf enum {@code hbase.pb.BackupType}
+ */
+ public enum BackupType
+ implements com.google.protobuf.ProtocolMessageEnum {
+ /**
+ * FULL = 0;
+ */
+ FULL(0, 0),
+ /**
+ * INCREMENTAL = 1;
+ */
+ INCREMENTAL(1, 1),
+ ;
+
+ /**
+ * FULL = 0;
+ */
+ public static final int FULL_VALUE = 0;
+ /**
+ * INCREMENTAL = 1;
+ */
+ public static final int INCREMENTAL_VALUE = 1;
+
+
+ public final int getNumber() { return value; }
+
+ public static BackupType valueOf(int value) {
+ switch (value) {
+ case 0: return FULL;
+ case 1: return INCREMENTAL;
+ default: return null;
+ }
+ }
+
+ public static com.google.protobuf.Internal.EnumLiteMap
+ internalGetValueMap() {
+ return internalValueMap;
+ }
+ private static com.google.protobuf.Internal.EnumLiteMap
+ internalValueMap =
+ new com.google.protobuf.Internal.EnumLiteMap() {
+ public BackupType findValueByNumber(int number) {
+ return BackupType.valueOf(number);
+ }
+ };
+
+ public final com.google.protobuf.Descriptors.EnumValueDescriptor
+ getValueDescriptor() {
+ return getDescriptor().getValues().get(index);
+ }
+ public final com.google.protobuf.Descriptors.EnumDescriptor
+ getDescriptorForType() {
+ return getDescriptor();
+ }
+ public static final com.google.protobuf.Descriptors.EnumDescriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.getDescriptor().getEnumTypes().get(0);
+ }
+
+ private static final BackupType[] VALUES = values();
+
+ public static BackupType valueOf(
+ com.google.protobuf.Descriptors.EnumValueDescriptor desc) {
+ if (desc.getType() != getDescriptor()) {
+ throw new java.lang.IllegalArgumentException(
+ "EnumValueDescriptor is not for this type.");
+ }
+ return VALUES[desc.getIndex()];
+ }
+
+ private final int index;
+ private final int value;
+
+ private BackupType(int index, int value) {
+ this.index = index;
+ this.value = value;
+ }
+
+ // @@protoc_insertion_point(enum_scope:hbase.pb.BackupType)
+ }
+
+ public interface BackupImageOrBuilder
+ extends com.google.protobuf.MessageOrBuilder {
+
+ // required string backup_id = 1;
+ /**
+ * required string backup_id = 1;
+ */
+ boolean hasBackupId();
+ /**
+ * required string backup_id = 1;
+ */
+ java.lang.String getBackupId();
+ /**
+ * required string backup_id = 1;
+ */
+ com.google.protobuf.ByteString
+ getBackupIdBytes();
+
+ // required .hbase.pb.BackupType backup_type = 2;
+ /**
+ * required .hbase.pb.BackupType backup_type = 2;
+ */
+ boolean hasBackupType();
+ /**
+ * required .hbase.pb.BackupType backup_type = 2;
+ */
+ org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType getBackupType();
+
+ // required string root_dir = 3;
+ /**
+ * required string root_dir = 3;
+ */
+ boolean hasRootDir();
+ /**
+ * required string root_dir = 3;
+ */
+ java.lang.String getRootDir();
+ /**
+ * required string root_dir = 3;
+ */
+ com.google.protobuf.ByteString
+ getRootDirBytes();
+
+ // repeated .hbase.pb.TableName table_list = 4;
+ /**
+ * repeated .hbase.pb.TableName table_list = 4;
+ */
+ java.util.List
+ getTableListList();
+ /**
+ * repeated .hbase.pb.TableName table_list = 4;
+ */
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableList(int index);
+ /**
+ * repeated .hbase.pb.TableName table_list = 4;
+ */
+ int getTableListCount();
+ /**
+ * repeated .hbase.pb.TableName table_list = 4;
+ */
+ java.util.List extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder>
+ getTableListOrBuilderList();
+ /**
+ * repeated .hbase.pb.TableName table_list = 4;
+ */
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableListOrBuilder(
+ int index);
+
+ // required uint64 start_ts = 5;
+ /**
+ * required uint64 start_ts = 5;
+ */
+ boolean hasStartTs();
+ /**
+ * required uint64 start_ts = 5;
+ */
+ long getStartTs();
+
+ // required uint64 complete_ts = 6;
+ /**
+ * required uint64 complete_ts = 6;
+ */
+ boolean hasCompleteTs();
+ /**
+ * required uint64 complete_ts = 6;
+ */
+ long getCompleteTs();
+
+ // repeated .hbase.pb.BackupImage ancestors = 7;
+ /**
+ * repeated .hbase.pb.BackupImage ancestors = 7;
+ */
+ java.util.List
+ getAncestorsList();
+ /**
+ * repeated .hbase.pb.BackupImage ancestors = 7;
+ */
+ org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage getAncestors(int index);
+ /**
+ * repeated .hbase.pb.BackupImage ancestors = 7;
+ */
+ int getAncestorsCount();
+ /**
+ * repeated .hbase.pb.BackupImage ancestors = 7;
+ */
+ java.util.List extends org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImageOrBuilder>
+ getAncestorsOrBuilderList();
+ /**
+ * repeated .hbase.pb.BackupImage ancestors = 7;
+ */
+ org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImageOrBuilder getAncestorsOrBuilder(
+ int index);
+ }
+ /**
+ * Protobuf type {@code hbase.pb.BackupImage}
+ */
+ public static final class BackupImage extends
+ com.google.protobuf.GeneratedMessage
+ implements BackupImageOrBuilder {
+ // Use BackupImage.newBuilder() to construct.
+ private BackupImage(com.google.protobuf.GeneratedMessage.Builder> builder) {
+ super(builder);
+ this.unknownFields = builder.getUnknownFields();
+ }
+ private BackupImage(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+ private static final BackupImage defaultInstance;
+ public static BackupImage getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public BackupImage getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ private final com.google.protobuf.UnknownFieldSet unknownFields;
+ @java.lang.Override
+ public final com.google.protobuf.UnknownFieldSet
+ getUnknownFields() {
+ return this.unknownFields;
+ }
+ private BackupImage(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ initFields();
+ int mutable_bitField0_ = 0;
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder();
+ try {
+ boolean done = false;
+ while (!done) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ done = true;
+ break;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ done = true;
+ }
+ break;
+ }
+ case 10: {
+ bitField0_ |= 0x00000001;
+ backupId_ = input.readBytes();
+ break;
+ }
+ case 16: {
+ int rawValue = input.readEnum();
+ org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType value = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType.valueOf(rawValue);
+ if (value == null) {
+ unknownFields.mergeVarintField(2, rawValue);
+ } else {
+ bitField0_ |= 0x00000002;
+ backupType_ = value;
+ }
+ break;
+ }
+ case 26: {
+ bitField0_ |= 0x00000004;
+ rootDir_ = input.readBytes();
+ break;
+ }
+ case 34: {
+ if (!((mutable_bitField0_ & 0x00000008) == 0x00000008)) {
+ tableList_ = new java.util.ArrayList();
+ mutable_bitField0_ |= 0x00000008;
+ }
+ tableList_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.PARSER, extensionRegistry));
+ break;
+ }
+ case 40: {
+ bitField0_ |= 0x00000008;
+ startTs_ = input.readUInt64();
+ break;
+ }
+ case 48: {
+ bitField0_ |= 0x00000010;
+ completeTs_ = input.readUInt64();
+ break;
+ }
+ case 58: {
+ if (!((mutable_bitField0_ & 0x00000040) == 0x00000040)) {
+ ancestors_ = new java.util.ArrayList();
+ mutable_bitField0_ |= 0x00000040;
+ }
+ ancestors_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage.PARSER, extensionRegistry));
+ break;
+ }
+ }
+ }
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ throw e.setUnfinishedMessage(this);
+ } catch (java.io.IOException e) {
+ throw new com.google.protobuf.InvalidProtocolBufferException(
+ e.getMessage()).setUnfinishedMessage(this);
+ } finally {
+ if (((mutable_bitField0_ & 0x00000008) == 0x00000008)) {
+ tableList_ = java.util.Collections.unmodifiableList(tableList_);
+ }
+ if (((mutable_bitField0_ & 0x00000040) == 0x00000040)) {
+ ancestors_ = java.util.Collections.unmodifiableList(ancestors_);
+ }
+ this.unknownFields = unknownFields.build();
+ makeExtensionsImmutable();
+ }
+ }
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_BackupImage_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_BackupImage_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage.class, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage.Builder.class);
+ }
+
+ public static com.google.protobuf.Parser PARSER =
+ new com.google.protobuf.AbstractParser() {
+ public BackupImage parsePartialFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return new BackupImage(input, extensionRegistry);
+ }
+ };
+
+ @java.lang.Override
+ public com.google.protobuf.Parser getParserForType() {
+ return PARSER;
+ }
+
+ private int bitField0_;
+ // required string backup_id = 1;
+ public static final int BACKUP_ID_FIELD_NUMBER = 1;
+ private java.lang.Object backupId_;
+ /**
+ * required string backup_id = 1;
+ */
+ public boolean hasBackupId() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * required string backup_id = 1;
+ */
+ public java.lang.String getBackupId() {
+ java.lang.Object ref = backupId_;
+ if (ref instanceof java.lang.String) {
+ return (java.lang.String) ref;
+ } else {
+ com.google.protobuf.ByteString bs =
+ (com.google.protobuf.ByteString) ref;
+ java.lang.String s = bs.toStringUtf8();
+ if (bs.isValidUtf8()) {
+ backupId_ = s;
+ }
+ return s;
+ }
+ }
+ /**
+ * required string backup_id = 1;
+ */
+ public com.google.protobuf.ByteString
+ getBackupIdBytes() {
+ java.lang.Object ref = backupId_;
+ if (ref instanceof java.lang.String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ backupId_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+
+ // required .hbase.pb.BackupType backup_type = 2;
+ public static final int BACKUP_TYPE_FIELD_NUMBER = 2;
+ private org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType backupType_;
+ /**
+ * required .hbase.pb.BackupType backup_type = 2;
+ */
+ public boolean hasBackupType() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ /**
+ * required .hbase.pb.BackupType backup_type = 2;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType getBackupType() {
+ return backupType_;
+ }
+
+ // required string root_dir = 3;
+ public static final int ROOT_DIR_FIELD_NUMBER = 3;
+ private java.lang.Object rootDir_;
+ /**
+ * required string root_dir = 3;
+ */
+ public boolean hasRootDir() {
+ return ((bitField0_ & 0x00000004) == 0x00000004);
+ }
+ /**
+ * required string root_dir = 3;
+ */
+ public java.lang.String getRootDir() {
+ java.lang.Object ref = rootDir_;
+ if (ref instanceof java.lang.String) {
+ return (java.lang.String) ref;
+ } else {
+ com.google.protobuf.ByteString bs =
+ (com.google.protobuf.ByteString) ref;
+ java.lang.String s = bs.toStringUtf8();
+ if (bs.isValidUtf8()) {
+ rootDir_ = s;
+ }
+ return s;
+ }
+ }
+ /**
+ * required string root_dir = 3;
+ */
+ public com.google.protobuf.ByteString
+ getRootDirBytes() {
+ java.lang.Object ref = rootDir_;
+ if (ref instanceof java.lang.String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ rootDir_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+
+ // repeated .hbase.pb.TableName table_list = 4;
+ public static final int TABLE_LIST_FIELD_NUMBER = 4;
+ private java.util.List tableList_;
+ /**
+ * repeated .hbase.pb.TableName table_list = 4;
+ */
+ public java.util.List getTableListList() {
+ return tableList_;
+ }
+ /**
+ * repeated .hbase.pb.TableName table_list = 4;
+ */
+ public java.util.List extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder>
+ getTableListOrBuilderList() {
+ return tableList_;
+ }
+ /**
+ * repeated .hbase.pb.TableName table_list = 4;
+ */
+ public int getTableListCount() {
+ return tableList_.size();
+ }
+ /**
+ * repeated .hbase.pb.TableName table_list = 4;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableList(int index) {
+ return tableList_.get(index);
+ }
+ /**
+ * repeated .hbase.pb.TableName table_list = 4;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableListOrBuilder(
+ int index) {
+ return tableList_.get(index);
+ }
+
+ // required uint64 start_ts = 5;
+ public static final int START_TS_FIELD_NUMBER = 5;
+ private long startTs_;
+ /**
+ * required uint64 start_ts = 5;
+ */
+ public boolean hasStartTs() {
+ return ((bitField0_ & 0x00000008) == 0x00000008);
+ }
+ /**
+ * required uint64 start_ts = 5;
+ */
+ public long getStartTs() {
+ return startTs_;
+ }
+
+ // required uint64 complete_ts = 6;
+ public static final int COMPLETE_TS_FIELD_NUMBER = 6;
+ private long completeTs_;
+ /**
+ * required uint64 complete_ts = 6;
+ */
+ public boolean hasCompleteTs() {
+ return ((bitField0_ & 0x00000010) == 0x00000010);
+ }
+ /**
+ * required uint64 complete_ts = 6;
+ */
+ public long getCompleteTs() {
+ return completeTs_;
+ }
+
+ // repeated .hbase.pb.BackupImage ancestors = 7;
+ public static final int ANCESTORS_FIELD_NUMBER = 7;
+ private java.util.List ancestors_;
+ /**
+ * repeated .hbase.pb.BackupImage ancestors = 7;
+ */
+ public java.util.List getAncestorsList() {
+ return ancestors_;
+ }
+ /**
+ * repeated .hbase.pb.BackupImage ancestors = 7;
+ */
+ public java.util.List extends org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImageOrBuilder>
+ getAncestorsOrBuilderList() {
+ return ancestors_;
+ }
+ /**
+ * repeated .hbase.pb.BackupImage ancestors = 7;
+ */
+ public int getAncestorsCount() {
+ return ancestors_.size();
+ }
+ /**
+ * repeated .hbase.pb.BackupImage ancestors = 7;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage getAncestors(int index) {
+ return ancestors_.get(index);
+ }
+ /**
+ * repeated .hbase.pb.BackupImage ancestors = 7;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImageOrBuilder getAncestorsOrBuilder(
+ int index) {
+ return ancestors_.get(index);
+ }
+
+ private void initFields() {
+ backupId_ = "";
+ backupType_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType.FULL;
+ rootDir_ = "";
+ tableList_ = java.util.Collections.emptyList();
+ startTs_ = 0L;
+ completeTs_ = 0L;
+ ancestors_ = java.util.Collections.emptyList();
+ }
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized != -1) return isInitialized == 1;
+
+ if (!hasBackupId()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!hasBackupType()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!hasRootDir()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!hasStartTs()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!hasCompleteTs()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ for (int i = 0; i < getTableListCount(); i++) {
+ if (!getTableList(i).isInitialized()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ }
+ for (int i = 0; i < getAncestorsCount(); i++) {
+ if (!getAncestors(i).isInitialized()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ }
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ output.writeBytes(1, getBackupIdBytes());
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ output.writeEnum(2, backupType_.getNumber());
+ }
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
+ output.writeBytes(3, getRootDirBytes());
+ }
+ for (int i = 0; i < tableList_.size(); i++) {
+ output.writeMessage(4, tableList_.get(i));
+ }
+ if (((bitField0_ & 0x00000008) == 0x00000008)) {
+ output.writeUInt64(5, startTs_);
+ }
+ if (((bitField0_ & 0x00000010) == 0x00000010)) {
+ output.writeUInt64(6, completeTs_);
+ }
+ for (int i = 0; i < ancestors_.size(); i++) {
+ output.writeMessage(7, ancestors_.get(i));
+ }
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBytesSize(1, getBackupIdBytes());
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeEnumSize(2, backupType_.getNumber());
+ }
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBytesSize(3, getRootDirBytes());
+ }
+ for (int i = 0; i < tableList_.size(); i++) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeMessageSize(4, tableList_.get(i));
+ }
+ if (((bitField0_ & 0x00000008) == 0x00000008)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeUInt64Size(5, startTs_);
+ }
+ if (((bitField0_ & 0x00000010) == 0x00000010)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeUInt64Size(6, completeTs_);
+ }
+ for (int i = 0; i < ancestors_.size(); i++) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeMessageSize(7, ancestors_.get(i));
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ protected java.lang.Object writeReplace()
+ throws java.io.ObjectStreamException {
+ return super.writeReplace();
+ }
+
+ @java.lang.Override
+ public boolean equals(final java.lang.Object obj) {
+ if (obj == this) {
+ return true;
+ }
+ if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage)) {
+ return super.equals(obj);
+ }
+ org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage other = (org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage) obj;
+
+ boolean result = true;
+ result = result && (hasBackupId() == other.hasBackupId());
+ if (hasBackupId()) {
+ result = result && getBackupId()
+ .equals(other.getBackupId());
+ }
+ result = result && (hasBackupType() == other.hasBackupType());
+ if (hasBackupType()) {
+ result = result &&
+ (getBackupType() == other.getBackupType());
+ }
+ result = result && (hasRootDir() == other.hasRootDir());
+ if (hasRootDir()) {
+ result = result && getRootDir()
+ .equals(other.getRootDir());
+ }
+ result = result && getTableListList()
+ .equals(other.getTableListList());
+ result = result && (hasStartTs() == other.hasStartTs());
+ if (hasStartTs()) {
+ result = result && (getStartTs()
+ == other.getStartTs());
+ }
+ result = result && (hasCompleteTs() == other.hasCompleteTs());
+ if (hasCompleteTs()) {
+ result = result && (getCompleteTs()
+ == other.getCompleteTs());
+ }
+ result = result && getAncestorsList()
+ .equals(other.getAncestorsList());
+ result = result &&
+ getUnknownFields().equals(other.getUnknownFields());
+ return result;
+ }
+
+ private int memoizedHashCode = 0;
+ @java.lang.Override
+ public int hashCode() {
+ if (memoizedHashCode != 0) {
+ return memoizedHashCode;
+ }
+ int hash = 41;
+ hash = (19 * hash) + getDescriptorForType().hashCode();
+ if (hasBackupId()) {
+ hash = (37 * hash) + BACKUP_ID_FIELD_NUMBER;
+ hash = (53 * hash) + getBackupId().hashCode();
+ }
+ if (hasBackupType()) {
+ hash = (37 * hash) + BACKUP_TYPE_FIELD_NUMBER;
+ hash = (53 * hash) + hashEnum(getBackupType());
+ }
+ if (hasRootDir()) {
+ hash = (37 * hash) + ROOT_DIR_FIELD_NUMBER;
+ hash = (53 * hash) + getRootDir().hashCode();
+ }
+ if (getTableListCount() > 0) {
+ hash = (37 * hash) + TABLE_LIST_FIELD_NUMBER;
+ hash = (53 * hash) + getTableListList().hashCode();
+ }
+ if (hasStartTs()) {
+ hash = (37 * hash) + START_TS_FIELD_NUMBER;
+ hash = (53 * hash) + hashLong(getStartTs());
+ }
+ if (hasCompleteTs()) {
+ hash = (37 * hash) + COMPLETE_TS_FIELD_NUMBER;
+ hash = (53 * hash) + hashLong(getCompleteTs());
+ }
+ if (getAncestorsCount() > 0) {
+ hash = (37 * hash) + ANCESTORS_FIELD_NUMBER;
+ hash = (53 * hash) + getAncestorsList().hashCode();
+ }
+ hash = (29 * hash) + getUnknownFields().hashCode();
+ memoizedHashCode = hash;
+ return hash;
+ }
+
+ public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+
+ public static Builder newBuilder() { return Builder.create(); }
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage prototype) {
+ return newBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ /**
+ * Protobuf type {@code hbase.pb.BackupImage}
+ */
+ public static final class Builder extends
+ com.google.protobuf.GeneratedMessage.Builder
+ implements org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImageOrBuilder {
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_BackupImage_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_BackupImage_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage.class, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage.Builder.class);
+ }
+
+ // Construct using org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage.newBuilder()
+ private Builder() {
+ maybeForceBuilderInitialization();
+ }
+
+ private Builder(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ super(parent);
+ maybeForceBuilderInitialization();
+ }
+ private void maybeForceBuilderInitialization() {
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+ getTableListFieldBuilder();
+ getAncestorsFieldBuilder();
+ }
+ }
+ private static Builder create() {
+ return new Builder();
+ }
+
+ public Builder clear() {
+ super.clear();
+ backupId_ = "";
+ bitField0_ = (bitField0_ & ~0x00000001);
+ backupType_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType.FULL;
+ bitField0_ = (bitField0_ & ~0x00000002);
+ rootDir_ = "";
+ bitField0_ = (bitField0_ & ~0x00000004);
+ if (tableListBuilder_ == null) {
+ tableList_ = java.util.Collections.emptyList();
+ bitField0_ = (bitField0_ & ~0x00000008);
+ } else {
+ tableListBuilder_.clear();
+ }
+ startTs_ = 0L;
+ bitField0_ = (bitField0_ & ~0x00000010);
+ completeTs_ = 0L;
+ bitField0_ = (bitField0_ & ~0x00000020);
+ if (ancestorsBuilder_ == null) {
+ ancestors_ = java.util.Collections.emptyList();
+ bitField0_ = (bitField0_ & ~0x00000040);
+ } else {
+ ancestorsBuilder_.clear();
+ }
+ return this;
+ }
+
+ public Builder clone() {
+ return create().mergeFrom(buildPartial());
+ }
+
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_BackupImage_descriptor;
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage getDefaultInstanceForType() {
+ return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage.getDefaultInstance();
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage build() {
+ org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return result;
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage buildPartial() {
+ org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage result = new org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage(this);
+ int from_bitField0_ = bitField0_;
+ int to_bitField0_ = 0;
+ if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+ to_bitField0_ |= 0x00000001;
+ }
+ result.backupId_ = backupId_;
+ if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
+ to_bitField0_ |= 0x00000002;
+ }
+ result.backupType_ = backupType_;
+ if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
+ to_bitField0_ |= 0x00000004;
+ }
+ result.rootDir_ = rootDir_;
+ if (tableListBuilder_ == null) {
+ if (((bitField0_ & 0x00000008) == 0x00000008)) {
+ tableList_ = java.util.Collections.unmodifiableList(tableList_);
+ bitField0_ = (bitField0_ & ~0x00000008);
+ }
+ result.tableList_ = tableList_;
+ } else {
+ result.tableList_ = tableListBuilder_.build();
+ }
+ if (((from_bitField0_ & 0x00000010) == 0x00000010)) {
+ to_bitField0_ |= 0x00000008;
+ }
+ result.startTs_ = startTs_;
+ if (((from_bitField0_ & 0x00000020) == 0x00000020)) {
+ to_bitField0_ |= 0x00000010;
+ }
+ result.completeTs_ = completeTs_;
+ if (ancestorsBuilder_ == null) {
+ if (((bitField0_ & 0x00000040) == 0x00000040)) {
+ ancestors_ = java.util.Collections.unmodifiableList(ancestors_);
+ bitField0_ = (bitField0_ & ~0x00000040);
+ }
+ result.ancestors_ = ancestors_;
+ } else {
+ result.ancestors_ = ancestorsBuilder_.build();
+ }
+ result.bitField0_ = to_bitField0_;
+ onBuilt();
+ return result;
+ }
+
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage) {
+ return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage other) {
+ if (other == org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage.getDefaultInstance()) return this;
+ if (other.hasBackupId()) {
+ bitField0_ |= 0x00000001;
+ backupId_ = other.backupId_;
+ onChanged();
+ }
+ if (other.hasBackupType()) {
+ setBackupType(other.getBackupType());
+ }
+ if (other.hasRootDir()) {
+ bitField0_ |= 0x00000004;
+ rootDir_ = other.rootDir_;
+ onChanged();
+ }
+ if (tableListBuilder_ == null) {
+ if (!other.tableList_.isEmpty()) {
+ if (tableList_.isEmpty()) {
+ tableList_ = other.tableList_;
+ bitField0_ = (bitField0_ & ~0x00000008);
+ } else {
+ ensureTableListIsMutable();
+ tableList_.addAll(other.tableList_);
+ }
+ onChanged();
+ }
+ } else {
+ if (!other.tableList_.isEmpty()) {
+ if (tableListBuilder_.isEmpty()) {
+ tableListBuilder_.dispose();
+ tableListBuilder_ = null;
+ tableList_ = other.tableList_;
+ bitField0_ = (bitField0_ & ~0x00000008);
+ tableListBuilder_ =
+ com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
+ getTableListFieldBuilder() : null;
+ } else {
+ tableListBuilder_.addAllMessages(other.tableList_);
+ }
+ }
+ }
+ if (other.hasStartTs()) {
+ setStartTs(other.getStartTs());
+ }
+ if (other.hasCompleteTs()) {
+ setCompleteTs(other.getCompleteTs());
+ }
+ if (ancestorsBuilder_ == null) {
+ if (!other.ancestors_.isEmpty()) {
+ if (ancestors_.isEmpty()) {
+ ancestors_ = other.ancestors_;
+ bitField0_ = (bitField0_ & ~0x00000040);
+ } else {
+ ensureAncestorsIsMutable();
+ ancestors_.addAll(other.ancestors_);
+ }
+ onChanged();
+ }
+ } else {
+ if (!other.ancestors_.isEmpty()) {
+ if (ancestorsBuilder_.isEmpty()) {
+ ancestorsBuilder_.dispose();
+ ancestorsBuilder_ = null;
+ ancestors_ = other.ancestors_;
+ bitField0_ = (bitField0_ & ~0x00000040);
+ ancestorsBuilder_ =
+ com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
+ getAncestorsFieldBuilder() : null;
+ } else {
+ ancestorsBuilder_.addAllMessages(other.ancestors_);
+ }
+ }
+ }
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ public final boolean isInitialized() {
+ if (!hasBackupId()) {
+
+ return false;
+ }
+ if (!hasBackupType()) {
+
+ return false;
+ }
+ if (!hasRootDir()) {
+
+ return false;
+ }
+ if (!hasStartTs()) {
+
+ return false;
+ }
+ if (!hasCompleteTs()) {
+
+ return false;
+ }
+ for (int i = 0; i < getTableListCount(); i++) {
+ if (!getTableList(i).isInitialized()) {
+
+ return false;
+ }
+ }
+ for (int i = 0; i < getAncestorsCount(); i++) {
+ if (!getAncestors(i).isInitialized()) {
+
+ return false;
+ }
+ }
+ return true;
+ }
+
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage parsedMessage = null;
+ try {
+ parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage) e.getUnfinishedMessage();
+ throw e;
+ } finally {
+ if (parsedMessage != null) {
+ mergeFrom(parsedMessage);
+ }
+ }
+ return this;
+ }
+ private int bitField0_;
+
+ // required string backup_id = 1;
+ private java.lang.Object backupId_ = "";
+ /**
+ * required string backup_id = 1;
+ */
+ public boolean hasBackupId() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * required string backup_id = 1;
+ */
+ public java.lang.String getBackupId() {
+ java.lang.Object ref = backupId_;
+ if (!(ref instanceof java.lang.String)) {
+ java.lang.String s = ((com.google.protobuf.ByteString) ref)
+ .toStringUtf8();
+ backupId_ = s;
+ return s;
+ } else {
+ return (java.lang.String) ref;
+ }
+ }
+ /**
+ * required string backup_id = 1;
+ */
+ public com.google.protobuf.ByteString
+ getBackupIdBytes() {
+ java.lang.Object ref = backupId_;
+ if (ref instanceof String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ backupId_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+ /**
+ * required string backup_id = 1;
+ */
+ public Builder setBackupId(
+ java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000001;
+ backupId_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * required string backup_id = 1;
+ */
+ public Builder clearBackupId() {
+ bitField0_ = (bitField0_ & ~0x00000001);
+ backupId_ = getDefaultInstance().getBackupId();
+ onChanged();
+ return this;
+ }
+ /**
+ * required string backup_id = 1;
+ */
+ public Builder setBackupIdBytes(
+ com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000001;
+ backupId_ = value;
+ onChanged();
+ return this;
+ }
+
+ // required .hbase.pb.BackupType backup_type = 2;
+ private org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType backupType_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType.FULL;
+ /**
+ * required .hbase.pb.BackupType backup_type = 2;
+ */
+ public boolean hasBackupType() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ /**
+ * required .hbase.pb.BackupType backup_type = 2;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType getBackupType() {
+ return backupType_;
+ }
+ /**
+ * required .hbase.pb.BackupType backup_type = 2;
+ */
+ public Builder setBackupType(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000002;
+ backupType_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * required .hbase.pb.BackupType backup_type = 2;
+ */
+ public Builder clearBackupType() {
+ bitField0_ = (bitField0_ & ~0x00000002);
+ backupType_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType.FULL;
+ onChanged();
+ return this;
+ }
+
+ // required string root_dir = 3;
+ private java.lang.Object rootDir_ = "";
+ /**
+ * required string root_dir = 3;
+ */
+ public boolean hasRootDir() {
+ return ((bitField0_ & 0x00000004) == 0x00000004);
+ }
+ /**
+ * required string root_dir = 3;
+ */
+ public java.lang.String getRootDir() {
+ java.lang.Object ref = rootDir_;
+ if (!(ref instanceof java.lang.String)) {
+ java.lang.String s = ((com.google.protobuf.ByteString) ref)
+ .toStringUtf8();
+ rootDir_ = s;
+ return s;
+ } else {
+ return (java.lang.String) ref;
+ }
+ }
+ /**
+ * required string root_dir = 3;
+ */
+ public com.google.protobuf.ByteString
+ getRootDirBytes() {
+ java.lang.Object ref = rootDir_;
+ if (ref instanceof String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ rootDir_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+ /**
+ * required string root_dir = 3;
+ */
+ public Builder setRootDir(
+ java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000004;
+ rootDir_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * required string root_dir = 3;
+ */
+ public Builder clearRootDir() {
+ bitField0_ = (bitField0_ & ~0x00000004);
+ rootDir_ = getDefaultInstance().getRootDir();
+ onChanged();
+ return this;
+ }
+ /**
+ * required string root_dir = 3;
+ */
+ public Builder setRootDirBytes(
+ com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000004;
+ rootDir_ = value;
+ onChanged();
+ return this;
+ }
+
+ // repeated .hbase.pb.TableName table_list = 4;
+ private java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName> tableList_ =
+ java.util.Collections.emptyList();
+ private void ensureTableListIsMutable() {
+ if (!((bitField0_ & 0x00000008) == 0x00000008)) {
+ tableList_ = new java.util.ArrayList<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName>(tableList_);
+ bitField0_ |= 0x00000008;
+ }
+ }
+
+ private com.google.protobuf.RepeatedFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> tableListBuilder_;
+
+ /**
+ * repeated .hbase.pb.TableName table_list = 4;
+ */
+ public java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName> getTableListList() {
+ if (tableListBuilder_ == null) {
+ return java.util.Collections.unmodifiableList(tableList_);
+ } else {
+ return tableListBuilder_.getMessageList();
+ }
+ }
+ /**
+ * repeated .hbase.pb.TableName table_list = 4;
+ */
+ public int getTableListCount() {
+ if (tableListBuilder_ == null) {
+ return tableList_.size();
+ } else {
+ return tableListBuilder_.getCount();
+ }
+ }
+ /**
+ * repeated .hbase.pb.TableName table_list = 4;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableList(int index) {
+ if (tableListBuilder_ == null) {
+ return tableList_.get(index);
+ } else {
+ return tableListBuilder_.getMessage(index);
+ }
+ }
+ /**
+ * repeated .hbase.pb.TableName table_list = 4;
+ */
+ public Builder setTableList(
+ int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) {
+ if (tableListBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureTableListIsMutable();
+ tableList_.set(index, value);
+ onChanged();
+ } else {
+ tableListBuilder_.setMessage(index, value);
+ }
+ return this;
+ }
+ /**
+ * repeated .hbase.pb.TableName table_list = 4;
+ */
+ public Builder setTableList(
+ int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) {
+ if (tableListBuilder_ == null) {
+ ensureTableListIsMutable();
+ tableList_.set(index, builderForValue.build());
+ onChanged();
+ } else {
+ tableListBuilder_.setMessage(index, builderForValue.build());
+ }
+ return this;
+ }
+ /**
+ * repeated .hbase.pb.TableName table_list = 4;
+ */
+ public Builder addTableList(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) {
+ if (tableListBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureTableListIsMutable();
+ tableList_.add(value);
+ onChanged();
+ } else {
+ tableListBuilder_.addMessage(value);
+ }
+ return this;
+ }
+ /**
+ * repeated .hbase.pb.TableName table_list = 4;
+ */
+ public Builder addTableList(
+ int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) {
+ if (tableListBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureTableListIsMutable();
+ tableList_.add(index, value);
+ onChanged();
+ } else {
+ tableListBuilder_.addMessage(index, value);
+ }
+ return this;
+ }
+ /**
+ * repeated .hbase.pb.TableName table_list = 4;
+ */
+ public Builder addTableList(
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) {
+ if (tableListBuilder_ == null) {
+ ensureTableListIsMutable();
+ tableList_.add(builderForValue.build());
+ onChanged();
+ } else {
+ tableListBuilder_.addMessage(builderForValue.build());
+ }
+ return this;
+ }
+ /**
+ * repeated .hbase.pb.TableName table_list = 4;
+ */
+ public Builder addTableList(
+ int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) {
+ if (tableListBuilder_ == null) {
+ ensureTableListIsMutable();
+ tableList_.add(index, builderForValue.build());
+ onChanged();
+ } else {
+ tableListBuilder_.addMessage(index, builderForValue.build());
+ }
+ return this;
+ }
+ /**
+ * repeated .hbase.pb.TableName table_list = 4;
+ */
+ public Builder addAllTableList(
+ java.lang.Iterable<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName> values) {
+ if (tableListBuilder_ == null) {
+ ensureTableListIsMutable();
+ super.addAll(values, tableList_);
+ onChanged();
+ } else {
+ tableListBuilder_.addAllMessages(values);
+ }
+ return this;
+ }
+ /**
+ * repeated .hbase.pb.TableName table_list = 4;
+ */
+ public Builder clearTableList() {
+ if (tableListBuilder_ == null) {
+ tableList_ = java.util.Collections.emptyList();
+ bitField0_ = (bitField0_ & ~0x00000008);
+ onChanged();
+ } else {
+ tableListBuilder_.clear();
+ }
+ return this;
+ }
+ /**
+ * repeated .hbase.pb.TableName table_list = 4;
+ */
+ public Builder removeTableList(int index) {
+ if (tableListBuilder_ == null) {
+ ensureTableListIsMutable();
+ tableList_.remove(index);
+ onChanged();
+ } else {
+ tableListBuilder_.remove(index);
+ }
+ return this;
+ }
+ /**
+ * repeated .hbase.pb.TableName table_list = 4;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder getTableListBuilder(
+ int index) {
+ return getTableListFieldBuilder().getBuilder(index);
+ }
+ /**
+ * repeated .hbase.pb.TableName table_list = 4;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableListOrBuilder(
+ int index) {
+ if (tableListBuilder_ == null) {
+ return tableList_.get(index); } else {
+ return tableListBuilder_.getMessageOrBuilder(index);
+ }
+ }
+ /**
+ * repeated .hbase.pb.TableName table_list = 4;
+ */
+ public java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder>
+ getTableListOrBuilderList() {
+ if (tableListBuilder_ != null) {
+ return tableListBuilder_.getMessageOrBuilderList();
+ } else {
+ return java.util.Collections.unmodifiableList(tableList_);
+ }
+ }
+ /**
+ * repeated .hbase.pb.TableName table_list = 4;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder addTableListBuilder() {
+ return getTableListFieldBuilder().addBuilder(
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance());
+ }
+ /**
+ * repeated .hbase.pb.TableName table_list = 4;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder addTableListBuilder(
+ int index) {
+ return getTableListFieldBuilder().addBuilder(
+ index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance());
+ }
+ /**
+ * repeated .hbase.pb.TableName table_list = 4;
+ */
+ public java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder>
+ getTableListBuilderList() {
+ return getTableListFieldBuilder().getBuilderList();
+ }
+ private com.google.protobuf.RepeatedFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder>
+ getTableListFieldBuilder() {
+ if (tableListBuilder_ == null) {
+ tableListBuilder_ = new com.google.protobuf.RepeatedFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder>(
+ tableList_,
+ ((bitField0_ & 0x00000008) == 0x00000008),
+ getParentForChildren(),
+ isClean());
+ tableList_ = null;
+ }
+ return tableListBuilder_;
+ }
+
+ // required uint64 start_ts = 5;
+ private long startTs_ ;
+ /**
+ * required uint64 start_ts = 5;
+ */
+ public boolean hasStartTs() {
+ return ((bitField0_ & 0x00000010) == 0x00000010);
+ }
+ /**
+ * required uint64 start_ts = 5;
+ */
+ public long getStartTs() {
+ return startTs_;
+ }
+ /**
+ * required uint64 start_ts = 5;
+ */
+ public Builder setStartTs(long value) {
+ bitField0_ |= 0x00000010;
+ startTs_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * required uint64 start_ts = 5;
+ */
+ public Builder clearStartTs() {
+ bitField0_ = (bitField0_ & ~0x00000010);
+ startTs_ = 0L;
+ onChanged();
+ return this;
+ }
+
+ // required uint64 complete_ts = 6;
+ private long completeTs_ ;
+ /**
+ * required uint64 complete_ts = 6;
+ */
+ public boolean hasCompleteTs() {
+ return ((bitField0_ & 0x00000020) == 0x00000020);
+ }
+ /**
+ * required uint64 complete_ts = 6;
+ */
+ public long getCompleteTs() {
+ return completeTs_;
+ }
+ /**
+ * required uint64 complete_ts = 6;
+ */
+ public Builder setCompleteTs(long value) {
+ bitField0_ |= 0x00000020;
+ completeTs_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * required uint64 complete_ts = 6;
+ */
+ public Builder clearCompleteTs() {
+ bitField0_ = (bitField0_ & ~0x00000020);
+ completeTs_ = 0L;
+ onChanged();
+ return this;
+ }
+
+ // repeated .hbase.pb.BackupImage ancestors = 7;
+ private java.util.List<org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage> ancestors_ =
+ java.util.Collections.emptyList();
+ private void ensureAncestorsIsMutable() {
+ if (!((bitField0_ & 0x00000040) == 0x00000040)) {
+ ancestors_ = new java.util.ArrayList<org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage>(ancestors_);
+ bitField0_ |= 0x00000040;
+ }
+ }
+
+ private com.google.protobuf.RepeatedFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage.Builder, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImageOrBuilder> ancestorsBuilder_;
+
+ /**
+ * repeated .hbase.pb.BackupImage ancestors = 7;
+ */
+ public java.util.List<org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage> getAncestorsList() {
+ if (ancestorsBuilder_ == null) {
+ return java.util.Collections.unmodifiableList(ancestors_);
+ } else {
+ return ancestorsBuilder_.getMessageList();
+ }
+ }
+ /**
+ * repeated .hbase.pb.BackupImage ancestors = 7;
+ */
+ public int getAncestorsCount() {
+ if (ancestorsBuilder_ == null) {
+ return ancestors_.size();
+ } else {
+ return ancestorsBuilder_.getCount();
+ }
+ }
+ /**
+ * repeated .hbase.pb.BackupImage ancestors = 7;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage getAncestors(int index) {
+ if (ancestorsBuilder_ == null) {
+ return ancestors_.get(index);
+ } else {
+ return ancestorsBuilder_.getMessage(index);
+ }
+ }
+ /**
+ * repeated .hbase.pb.BackupImage ancestors = 7;
+ */
+ public Builder setAncestors(
+ int index, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage value) {
+ if (ancestorsBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureAncestorsIsMutable();
+ ancestors_.set(index, value);
+ onChanged();
+ } else {
+ ancestorsBuilder_.setMessage(index, value);
+ }
+ return this;
+ }
+ /**
+ * repeated .hbase.pb.BackupImage ancestors = 7;
+ */
+ public Builder setAncestors(
+ int index, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage.Builder builderForValue) {
+ if (ancestorsBuilder_ == null) {
+ ensureAncestorsIsMutable();
+ ancestors_.set(index, builderForValue.build());
+ onChanged();
+ } else {
+ ancestorsBuilder_.setMessage(index, builderForValue.build());
+ }
+ return this;
+ }
+ /**
+ * repeated .hbase.pb.BackupImage ancestors = 7;
+ */
+ public Builder addAncestors(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage value) {
+ if (ancestorsBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureAncestorsIsMutable();
+ ancestors_.add(value);
+ onChanged();
+ } else {
+ ancestorsBuilder_.addMessage(value);
+ }
+ return this;
+ }
+ /**
+ * repeated .hbase.pb.BackupImage ancestors = 7;
+ */
+ public Builder addAncestors(
+ int index, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage value) {
+ if (ancestorsBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureAncestorsIsMutable();
+ ancestors_.add(index, value);
+ onChanged();
+ } else {
+ ancestorsBuilder_.addMessage(index, value);
+ }
+ return this;
+ }
+ /**
+ * repeated .hbase.pb.BackupImage ancestors = 7;
+ */
+ public Builder addAncestors(
+ org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage.Builder builderForValue) {
+ if (ancestorsBuilder_ == null) {
+ ensureAncestorsIsMutable();
+ ancestors_.add(builderForValue.build());
+ onChanged();
+ } else {
+ ancestorsBuilder_.addMessage(builderForValue.build());
+ }
+ return this;
+ }
+ /**
+ * repeated .hbase.pb.BackupImage ancestors = 7;
+ */
+ public Builder addAncestors(
+ int index, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage.Builder builderForValue) {
+ if (ancestorsBuilder_ == null) {
+ ensureAncestorsIsMutable();
+ ancestors_.add(index, builderForValue.build());
+ onChanged();
+ } else {
+ ancestorsBuilder_.addMessage(index, builderForValue.build());
+ }
+ return this;
+ }
+ /**
+ * repeated .hbase.pb.BackupImage ancestors = 7;
+ */
+ public Builder addAllAncestors(
+ java.lang.Iterable<? extends org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage> values) {
+ if (ancestorsBuilder_ == null) {
+ ensureAncestorsIsMutable();
+ super.addAll(values, ancestors_);
+ onChanged();
+ } else {
+ ancestorsBuilder_.addAllMessages(values);
+ }
+ return this;
+ }
+ /**
+ * repeated .hbase.pb.BackupImage ancestors = 7;
+ */
+ public Builder clearAncestors() {
+ if (ancestorsBuilder_ == null) {
+ ancestors_ = java.util.Collections.emptyList();
+ bitField0_ = (bitField0_ & ~0x00000040);
+ onChanged();
+ } else {
+ ancestorsBuilder_.clear();
+ }
+ return this;
+ }
+ /**
+ * repeated .hbase.pb.BackupImage ancestors = 7;
+ */
+ public Builder removeAncestors(int index) {
+ if (ancestorsBuilder_ == null) {
+ ensureAncestorsIsMutable();
+ ancestors_.remove(index);
+ onChanged();
+ } else {
+ ancestorsBuilder_.remove(index);
+ }
+ return this;
+ }
+ /**
+ * repeated .hbase.pb.BackupImage ancestors = 7;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage.Builder getAncestorsBuilder(
+ int index) {
+ return getAncestorsFieldBuilder().getBuilder(index);
+ }
+ /**
+ * repeated .hbase.pb.BackupImage ancestors = 7;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImageOrBuilder getAncestorsOrBuilder(
+ int index) {
+ if (ancestorsBuilder_ == null) {
+ return ancestors_.get(index); } else {
+ return ancestorsBuilder_.getMessageOrBuilder(index);
+ }
+ }
+ /**
+ * repeated .hbase.pb.BackupImage ancestors = 7;
+ */
+ public java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImageOrBuilder>
+ getAncestorsOrBuilderList() {
+ if (ancestorsBuilder_ != null) {
+ return ancestorsBuilder_.getMessageOrBuilderList();
+ } else {
+ return java.util.Collections.unmodifiableList(ancestors_);
+ }
+ }
+ /**
+ * repeated .hbase.pb.BackupImage ancestors = 7;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage.Builder addAncestorsBuilder() {
+ return getAncestorsFieldBuilder().addBuilder(
+ org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage.getDefaultInstance());
+ }
+ /**
+ * repeated .hbase.pb.BackupImage ancestors = 7;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage.Builder addAncestorsBuilder(
+ int index) {
+ return getAncestorsFieldBuilder().addBuilder(
+ index, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage.getDefaultInstance());
+ }
+ /**
+ * repeated .hbase.pb.BackupImage ancestors = 7;
+ */
+ public java.util.List<org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage.Builder>
+ getAncestorsBuilderList() {
+ return getAncestorsFieldBuilder().getBuilderList();
+ }
+ private com.google.protobuf.RepeatedFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage.Builder, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImageOrBuilder>
+ getAncestorsFieldBuilder() {
+ if (ancestorsBuilder_ == null) {
+ ancestorsBuilder_ = new com.google.protobuf.RepeatedFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage.Builder, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImageOrBuilder>(
+ ancestors_,
+ ((bitField0_ & 0x00000040) == 0x00000040),
+ getParentForChildren(),
+ isClean());
+ ancestors_ = null;
+ }
+ return ancestorsBuilder_;
+ }
+
+ // @@protoc_insertion_point(builder_scope:hbase.pb.BackupImage)
+ }
+
+ static {
+ defaultInstance = new BackupImage(true);
+ defaultInstance.initFields();
+ }
+
+ // @@protoc_insertion_point(class_scope:hbase.pb.BackupImage)
+ }
+
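A minimal usage sketch for the generated BackupImage API above, assuming the standard protobuf 2.5 builder/parser methods (newBuilder, build, toByteArray, parseFrom); the backup id and root-directory values below are illustrative placeholders, not values taken from this patch:

    import org.apache.hadoop.hbase.protobuf.generated.BackupProtos;

    public class BackupImageRoundTrip {
      public static void main(String[] args) throws Exception {
        // Set all five required fields; table_list and ancestors are repeated
        // fields and may be left empty for this sketch.
        BackupProtos.BackupImage image = BackupProtos.BackupImage.newBuilder()
            .setBackupId("backup_1451000000000")           // placeholder backup id
            .setBackupType(BackupProtos.BackupType.FULL)
            .setRootDir("hdfs://example:8020/backup")      // placeholder root dir
            .setStartTs(System.currentTimeMillis())
            .setCompleteTs(System.currentTimeMillis())
            .build();                                      // throws if a required field is missing

        // Serialize and parse back with the generated parser.
        byte[] wire = image.toByteArray();
        BackupProtos.BackupImage copy = BackupProtos.BackupImage.parseFrom(wire);
        System.out.println(copy.getBackupId() + " " + copy.getBackupType());
      }
    }
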
+ public interface ServerTimestampOrBuilder
+ extends com.google.protobuf.MessageOrBuilder {
+
+ // required string server = 1;
+ /**
+ * required string server = 1;
+ */
+ boolean hasServer();
+ /**
+ * required string server = 1;
+ */
+ java.lang.String getServer();
+ /**
+ * required string server = 1;
+ */
+ com.google.protobuf.ByteString
+ getServerBytes();
+
+ // required uint64 timestamp = 2;
+ /**
+ * required uint64 timestamp = 2;
+ */
+ boolean hasTimestamp();
+ /**
+ * required uint64 timestamp = 2;
+ */
+ long getTimestamp();
+ }
+ /**
+ * Protobuf type {@code hbase.pb.ServerTimestamp}
+ */
+ public static final class ServerTimestamp extends
+ com.google.protobuf.GeneratedMessage
+ implements ServerTimestampOrBuilder {
+ // Use ServerTimestamp.newBuilder() to construct.
+ private ServerTimestamp(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+ super(builder);
+ this.unknownFields = builder.getUnknownFields();
+ }
+ private ServerTimestamp(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+ private static final ServerTimestamp defaultInstance;
+ public static ServerTimestamp getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public ServerTimestamp getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ private final com.google.protobuf.UnknownFieldSet unknownFields;
+ @java.lang.Override
+ public final com.google.protobuf.UnknownFieldSet
+ getUnknownFields() {
+ return this.unknownFields;
+ }
+ private ServerTimestamp(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ initFields();
+ int mutable_bitField0_ = 0;
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder();
+ try {
+ boolean done = false;
+ while (!done) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ done = true;
+ break;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ done = true;
+ }
+ break;
+ }
+ case 10: {
+ bitField0_ |= 0x00000001;
+ server_ = input.readBytes();
+ break;
+ }
+ case 16: {
+ bitField0_ |= 0x00000002;
+ timestamp_ = input.readUInt64();
+ break;
+ }
+ }
+ }
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ throw e.setUnfinishedMessage(this);
+ } catch (java.io.IOException e) {
+ throw new com.google.protobuf.InvalidProtocolBufferException(
+ e.getMessage()).setUnfinishedMessage(this);
+ } finally {
+ this.unknownFields = unknownFields.build();
+ makeExtensionsImmutable();
+ }
+ }
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_ServerTimestamp_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_ServerTimestamp_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp.class, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp.Builder.class);
+ }
+
+ public static com.google.protobuf.Parser<ServerTimestamp> PARSER =
+ new com.google.protobuf.AbstractParser<ServerTimestamp>() {
+ public ServerTimestamp parsePartialFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return new ServerTimestamp(input, extensionRegistry);
+ }
+ };
+
+ @java.lang.Override
+ public com.google.protobuf.Parser<ServerTimestamp> getParserForType() {
+ return PARSER;
+ }
+
+ private int bitField0_;
+ // required string server = 1;
+ public static final int SERVER_FIELD_NUMBER = 1;
+ private java.lang.Object server_;
+ /**
+ * required string server = 1;
+ */
+ public boolean hasServer() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * required string server = 1;
+ */
+ public java.lang.String getServer() {
+ java.lang.Object ref = server_;
+ if (ref instanceof java.lang.String) {
+ return (java.lang.String) ref;
+ } else {
+ com.google.protobuf.ByteString bs =
+ (com.google.protobuf.ByteString) ref;
+ java.lang.String s = bs.toStringUtf8();
+ if (bs.isValidUtf8()) {
+ server_ = s;
+ }
+ return s;
+ }
+ }
+ /**
+ * required string server = 1;
+ */
+ public com.google.protobuf.ByteString
+ getServerBytes() {
+ java.lang.Object ref = server_;
+ if (ref instanceof java.lang.String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ server_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+
+ // required uint64 timestamp = 2;
+ public static final int TIMESTAMP_FIELD_NUMBER = 2;
+ private long timestamp_;
+ /**
+ * required uint64 timestamp = 2;
+ */
+ public boolean hasTimestamp() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ /**
+ * required uint64 timestamp = 2;
+ */
+ public long getTimestamp() {
+ return timestamp_;
+ }
+
+ private void initFields() {
+ server_ = "";
+ timestamp_ = 0L;
+ }
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized != -1) return isInitialized == 1;
+
+ if (!hasServer()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!hasTimestamp()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ output.writeBytes(1, getServerBytes());
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ output.writeUInt64(2, timestamp_);
+ }
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBytesSize(1, getServerBytes());
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeUInt64Size(2, timestamp_);
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ protected java.lang.Object writeReplace()
+ throws java.io.ObjectStreamException {
+ return super.writeReplace();
+ }
+
+ @java.lang.Override
+ public boolean equals(final java.lang.Object obj) {
+ if (obj == this) {
+ return true;
+ }
+ if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp)) {
+ return super.equals(obj);
+ }
+ org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp other = (org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp) obj;
+
+ boolean result = true;
+ result = result && (hasServer() == other.hasServer());
+ if (hasServer()) {
+ result = result && getServer()
+ .equals(other.getServer());
+ }
+ result = result && (hasTimestamp() == other.hasTimestamp());
+ if (hasTimestamp()) {
+ result = result && (getTimestamp()
+ == other.getTimestamp());
+ }
+ result = result &&
+ getUnknownFields().equals(other.getUnknownFields());
+ return result;
+ }
+
+ private int memoizedHashCode = 0;
+ @java.lang.Override
+ public int hashCode() {
+ if (memoizedHashCode != 0) {
+ return memoizedHashCode;
+ }
+ int hash = 41;
+ hash = (19 * hash) + getDescriptorForType().hashCode();
+ if (hasServer()) {
+ hash = (37 * hash) + SERVER_FIELD_NUMBER;
+ hash = (53 * hash) + getServer().hashCode();
+ }
+ if (hasTimestamp()) {
+ hash = (37 * hash) + TIMESTAMP_FIELD_NUMBER;
+ hash = (53 * hash) + hashLong(getTimestamp());
+ }
+ hash = (29 * hash) + getUnknownFields().hashCode();
+ memoizedHashCode = hash;
+ return hash;
+ }
+
+ public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+
+ public static Builder newBuilder() { return Builder.create(); }
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp prototype) {
+ return newBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ /**
+ * Protobuf type {@code hbase.pb.ServerTimestamp}
+ */
+ public static final class Builder extends
+ com.google.protobuf.GeneratedMessage.Builder<Builder>
+ implements org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestampOrBuilder {
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_ServerTimestamp_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_ServerTimestamp_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp.class, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp.Builder.class);
+ }
+
+ // Construct using org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp.newBuilder()
+ private Builder() {
+ maybeForceBuilderInitialization();
+ }
+
+ private Builder(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ super(parent);
+ maybeForceBuilderInitialization();
+ }
+ private void maybeForceBuilderInitialization() {
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+ }
+ }
+ private static Builder create() {
+ return new Builder();
+ }
+
+ public Builder clear() {
+ super.clear();
+ server_ = "";
+ bitField0_ = (bitField0_ & ~0x00000001);
+ timestamp_ = 0L;
+ bitField0_ = (bitField0_ & ~0x00000002);
+ return this;
+ }
+
+ public Builder clone() {
+ return create().mergeFrom(buildPartial());
+ }
+
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_ServerTimestamp_descriptor;
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp getDefaultInstanceForType() {
+ return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp.getDefaultInstance();
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp build() {
+ org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return result;
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp buildPartial() {
+ org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp result = new org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp(this);
+ int from_bitField0_ = bitField0_;
+ int to_bitField0_ = 0;
+ if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+ to_bitField0_ |= 0x00000001;
+ }
+ result.server_ = server_;
+ if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
+ to_bitField0_ |= 0x00000002;
+ }
+ result.timestamp_ = timestamp_;
+ result.bitField0_ = to_bitField0_;
+ onBuilt();
+ return result;
+ }
+
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp) {
+ return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp other) {
+ if (other == org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp.getDefaultInstance()) return this;
+ if (other.hasServer()) {
+ bitField0_ |= 0x00000001;
+ server_ = other.server_;
+ onChanged();
+ }
+ if (other.hasTimestamp()) {
+ setTimestamp(other.getTimestamp());
+ }
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ public final boolean isInitialized() {
+ if (!hasServer()) {
+
+ return false;
+ }
+ if (!hasTimestamp()) {
+
+ return false;
+ }
+ return true;
+ }
+
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp parsedMessage = null;
+ try {
+ parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp) e.getUnfinishedMessage();
+ throw e;
+ } finally {
+ if (parsedMessage != null) {
+ mergeFrom(parsedMessage);
+ }
+ }
+ return this;
+ }
+ private int bitField0_;
+
+ // required string server = 1;
+ private java.lang.Object server_ = "";
+ /**
+ * required string server = 1;
+ */
+ public boolean hasServer() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * required string server = 1;
+ */
+ public java.lang.String getServer() {
+ java.lang.Object ref = server_;
+ if (!(ref instanceof java.lang.String)) {
+ java.lang.String s = ((com.google.protobuf.ByteString) ref)
+ .toStringUtf8();
+ server_ = s;
+ return s;
+ } else {
+ return (java.lang.String) ref;
+ }
+ }
+ /**
+ * required string server = 1;
+ */
+ public com.google.protobuf.ByteString
+ getServerBytes() {
+ java.lang.Object ref = server_;
+ if (ref instanceof String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ server_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+ /**
+ * required string server = 1;
+ */
+ public Builder setServer(
+ java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000001;
+ server_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * required string server = 1;
+ */
+ public Builder clearServer() {
+ bitField0_ = (bitField0_ & ~0x00000001);
+ server_ = getDefaultInstance().getServer();
+ onChanged();
+ return this;
+ }
+ /**
+ * required string server = 1;
+ */
+ public Builder setServerBytes(
+ com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000001;
+ server_ = value;
+ onChanged();
+ return this;
+ }
+
+ // required uint64 timestamp = 2;
+ private long timestamp_ ;
+ /**
+ * required uint64 timestamp = 2;
+ */
+ public boolean hasTimestamp() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ /**
+ * required uint64 timestamp = 2;
+ */
+ public long getTimestamp() {
+ return timestamp_;
+ }
+ /**
+ * required uint64 timestamp = 2;
+ */
+ public Builder setTimestamp(long value) {
+ bitField0_ |= 0x00000002;
+ timestamp_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * required uint64 timestamp = 2;
+ */
+ public Builder clearTimestamp() {
+ bitField0_ = (bitField0_ & ~0x00000002);
+ timestamp_ = 0L;
+ onChanged();
+ return this;
+ }
+
+ // @@protoc_insertion_point(builder_scope:hbase.pb.ServerTimestamp)
+ }
+
+ static {
+ defaultInstance = new ServerTimestamp(true);
+ defaultInstance.initFields();
+ }
+
+ // @@protoc_insertion_point(class_scope:hbase.pb.ServerTimestamp)
+ }
+
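Similarly, a ServerTimestamp pairs a required server name with a required uint64 timestamp; a short hypothetical sketch of building and parsing one with the generated API (the server string and timestamp are placeholder values):

    import org.apache.hadoop.hbase.protobuf.generated.BackupProtos;

    public class ServerTimestampExample {
      public static void main(String[] args) throws Exception {
        BackupProtos.ServerTimestamp st = BackupProtos.ServerTimestamp.newBuilder()
            .setServer("rs1.example.com,16020,1451000000000")  // placeholder server name
            .setTimestamp(1451000000123L)                      // placeholder timestamp
            .build();

        byte[] wire = st.toByteArray();
        BackupProtos.ServerTimestamp parsed = BackupProtos.ServerTimestamp.parseFrom(wire);
        System.out.println(parsed.getServer() + " -> " + parsed.getTimestamp());
      }
    }
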
+ public interface TableServerTimestampOrBuilder
+ extends com.google.protobuf.MessageOrBuilder {
+
+ // required .hbase.pb.TableName table = 1;
+ /**
+ * required .hbase.pb.TableName table = 1;
+ */
+ boolean hasTable();
+ /**
+ * required .hbase.pb.TableName table = 1;
+ */
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTable();
+ /**
+ * required .hbase.pb.TableName table = 1;
+ */
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableOrBuilder();
+
+ // repeated .hbase.pb.ServerTimestamp server_timestamp = 2;
+ /**
+ * repeated .hbase.pb.ServerTimestamp server_timestamp = 2;
+ */
+ java.util.List<org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp>
+ getServerTimestampList();
+ /**
+ * repeated .hbase.pb.ServerTimestamp server_timestamp = 2;
+ */
+ org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp getServerTimestamp(int index);
+ /**
+ * repeated .hbase.pb.ServerTimestamp server_timestamp = 2;
+ */
+ int getServerTimestampCount();
+ /**
+ * repeated .hbase.pb.ServerTimestamp server_timestamp = 2;
+ */
+ java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestampOrBuilder>
+ getServerTimestampOrBuilderList();
+ /**
+ * repeated .hbase.pb.ServerTimestamp server_timestamp = 2;
+ */
+ org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestampOrBuilder getServerTimestampOrBuilder(
+ int index);
+ }
+ /**
+ * Protobuf type {@code hbase.pb.TableServerTimestamp}
+ */
+ public static final class TableServerTimestamp extends
+ com.google.protobuf.GeneratedMessage
+ implements TableServerTimestampOrBuilder {
+ // Use TableServerTimestamp.newBuilder() to construct.
+ private TableServerTimestamp(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+ super(builder);
+ this.unknownFields = builder.getUnknownFields();
+ }
+ private TableServerTimestamp(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+ private static final TableServerTimestamp defaultInstance;
+ public static TableServerTimestamp getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public TableServerTimestamp getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ private final com.google.protobuf.UnknownFieldSet unknownFields;
+ @java.lang.Override
+ public final com.google.protobuf.UnknownFieldSet
+ getUnknownFields() {
+ return this.unknownFields;
+ }
+ private TableServerTimestamp(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ initFields();
+ int mutable_bitField0_ = 0;
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder();
+ try {
+ boolean done = false;
+ while (!done) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ done = true;
+ break;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ done = true;
+ }
+ break;
+ }
+ case 10: {
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder subBuilder = null;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ subBuilder = table_.toBuilder();
+ }
+ table_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.PARSER, extensionRegistry);
+ if (subBuilder != null) {
+ subBuilder.mergeFrom(table_);
+ table_ = subBuilder.buildPartial();
+ }
+ bitField0_ |= 0x00000001;
+ break;
+ }
+ case 18: {
+ if (!((mutable_bitField0_ & 0x00000002) == 0x00000002)) {
+ serverTimestamp_ = new java.util.ArrayList<org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp>();
+ mutable_bitField0_ |= 0x00000002;
+ }
+ serverTimestamp_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp.PARSER, extensionRegistry));
+ break;
+ }
+ }
+ }
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ throw e.setUnfinishedMessage(this);
+ } catch (java.io.IOException e) {
+ throw new com.google.protobuf.InvalidProtocolBufferException(
+ e.getMessage()).setUnfinishedMessage(this);
+ } finally {
+ if (((mutable_bitField0_ & 0x00000002) == 0x00000002)) {
+ serverTimestamp_ = java.util.Collections.unmodifiableList(serverTimestamp_);
+ }
+ this.unknownFields = unknownFields.build();
+ makeExtensionsImmutable();
+ }
+ }
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_TableServerTimestamp_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_TableServerTimestamp_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp.class, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp.Builder.class);
+ }
+
+ public static com.google.protobuf.Parser<TableServerTimestamp> PARSER =
+ new com.google.protobuf.AbstractParser<TableServerTimestamp>() {
+ public TableServerTimestamp parsePartialFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return new TableServerTimestamp(input, extensionRegistry);
+ }
+ };
+
+ @java.lang.Override
+ public com.google.protobuf.Parser<TableServerTimestamp> getParserForType() {
+ return PARSER;
+ }
+
+ private int bitField0_;
+ // required .hbase.pb.TableName table = 1;
+ public static final int TABLE_FIELD_NUMBER = 1;
+ private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName table_;
+ /**
+ * required .hbase.pb.TableName table = 1;
+ */
+ public boolean hasTable() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * required .hbase.pb.TableName table = 1;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTable() {
+ return table_;
+ }
+ /**
+ * required .hbase.pb.TableName table = 1;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableOrBuilder() {
+ return table_;
+ }
+
+ // repeated .hbase.pb.ServerTimestamp server_timestamp = 2;
+ public static final int SERVER_TIMESTAMP_FIELD_NUMBER = 2;
+ private java.util.List<org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp> serverTimestamp_;
+ /**
+ * repeated .hbase.pb.ServerTimestamp server_timestamp = 2;
+ */
+ public java.util.List<org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp> getServerTimestampList() {
+ return serverTimestamp_;
+ }
+ /**
+ * repeated .hbase.pb.ServerTimestamp server_timestamp = 2;
+ */
+ public java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestampOrBuilder>
+ getServerTimestampOrBuilderList() {
+ return serverTimestamp_;
+ }
+ /**
+ * repeated .hbase.pb.ServerTimestamp server_timestamp = 2;
+ */
+ public int getServerTimestampCount() {
+ return serverTimestamp_.size();
+ }
+ /**
+ * repeated .hbase.pb.ServerTimestamp server_timestamp = 2;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp getServerTimestamp(int index) {
+ return serverTimestamp_.get(index);
+ }
+ /**
+ * repeated .hbase.pb.ServerTimestamp server_timestamp = 2;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestampOrBuilder getServerTimestampOrBuilder(
+ int index) {
+ return serverTimestamp_.get(index);
+ }
+
+ private void initFields() {
+ table_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance();
+ serverTimestamp_ = java.util.Collections.emptyList();
+ }
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized != -1) return isInitialized == 1;
+
+ if (!hasTable()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!getTable().isInitialized()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ for (int i = 0; i < getServerTimestampCount(); i++) {
+ if (!getServerTimestamp(i).isInitialized()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ }
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ output.writeMessage(1, table_);
+ }
+ for (int i = 0; i < serverTimestamp_.size(); i++) {
+ output.writeMessage(2, serverTimestamp_.get(i));
+ }
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeMessageSize(1, table_);
+ }
+ for (int i = 0; i < serverTimestamp_.size(); i++) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeMessageSize(2, serverTimestamp_.get(i));
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ protected java.lang.Object writeReplace()
+ throws java.io.ObjectStreamException {
+ return super.writeReplace();
+ }
+
+ @java.lang.Override
+ public boolean equals(final java.lang.Object obj) {
+ if (obj == this) {
+ return true;
+ }
+ if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp)) {
+ return super.equals(obj);
+ }
+ org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp other = (org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp) obj;
+
+ boolean result = true;
+ result = result && (hasTable() == other.hasTable());
+ if (hasTable()) {
+ result = result && getTable()
+ .equals(other.getTable());
+ }
+ result = result && getServerTimestampList()
+ .equals(other.getServerTimestampList());
+ result = result &&
+ getUnknownFields().equals(other.getUnknownFields());
+ return result;
+ }
+
+ private int memoizedHashCode = 0;
+ @java.lang.Override
+ public int hashCode() {
+ if (memoizedHashCode != 0) {
+ return memoizedHashCode;
+ }
+ int hash = 41;
+ hash = (19 * hash) + getDescriptorForType().hashCode();
+ if (hasTable()) {
+ hash = (37 * hash) + TABLE_FIELD_NUMBER;
+ hash = (53 * hash) + getTable().hashCode();
+ }
+ if (getServerTimestampCount() > 0) {
+ hash = (37 * hash) + SERVER_TIMESTAMP_FIELD_NUMBER;
+ hash = (53 * hash) + getServerTimestampList().hashCode();
+ }
+ hash = (29 * hash) + getUnknownFields().hashCode();
+ memoizedHashCode = hash;
+ return hash;
+ }
+
+ public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+
+ public static Builder newBuilder() { return Builder.create(); }
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp prototype) {
+ return newBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ /**
+ * Protobuf type {@code hbase.pb.TableServerTimestamp}
+ */
+ public static final class Builder extends
+ com.google.protobuf.GeneratedMessage.Builder<Builder>
+ implements org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestampOrBuilder {
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_TableServerTimestamp_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_TableServerTimestamp_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp.class, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp.Builder.class);
+ }
+
+ // Construct using org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp.newBuilder()
+ private Builder() {
+ maybeForceBuilderInitialization();
+ }
+
+ private Builder(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ super(parent);
+ maybeForceBuilderInitialization();
+ }
+ private void maybeForceBuilderInitialization() {
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+ getTableFieldBuilder();
+ getServerTimestampFieldBuilder();
+ }
+ }
+ private static Builder create() {
+ return new Builder();
+ }
+
+ public Builder clear() {
+ super.clear();
+ if (tableBuilder_ == null) {
+ table_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance();
+ } else {
+ tableBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000001);
+ if (serverTimestampBuilder_ == null) {
+ serverTimestamp_ = java.util.Collections.emptyList();
+ bitField0_ = (bitField0_ & ~0x00000002);
+ } else {
+ serverTimestampBuilder_.clear();
+ }
+ return this;
+ }
+
+ public Builder clone() {
+ return create().mergeFrom(buildPartial());
+ }
+
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_TableServerTimestamp_descriptor;
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp getDefaultInstanceForType() {
+ return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp.getDefaultInstance();
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp build() {
+ org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return result;
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp buildPartial() {
+ org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp result = new org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp(this);
+ int from_bitField0_ = bitField0_;
+ int to_bitField0_ = 0;
+ if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+ to_bitField0_ |= 0x00000001;
+ }
+ if (tableBuilder_ == null) {
+ result.table_ = table_;
+ } else {
+ result.table_ = tableBuilder_.build();
+ }
+ if (serverTimestampBuilder_ == null) {
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ serverTimestamp_ = java.util.Collections.unmodifiableList(serverTimestamp_);
+ bitField0_ = (bitField0_ & ~0x00000002);
+ }
+ result.serverTimestamp_ = serverTimestamp_;
+ } else {
+ result.serverTimestamp_ = serverTimestampBuilder_.build();
+ }
+ result.bitField0_ = to_bitField0_;
+ onBuilt();
+ return result;
+ }
+
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp) {
+ return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp other) {
+ if (other == org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp.getDefaultInstance()) return this;
+ if (other.hasTable()) {
+ mergeTable(other.getTable());
+ }
+ if (serverTimestampBuilder_ == null) {
+ if (!other.serverTimestamp_.isEmpty()) {
+ if (serverTimestamp_.isEmpty()) {
+ serverTimestamp_ = other.serverTimestamp_;
+ bitField0_ = (bitField0_ & ~0x00000002);
+ } else {
+ ensureServerTimestampIsMutable();
+ serverTimestamp_.addAll(other.serverTimestamp_);
+ }
+ onChanged();
+ }
+ } else {
+ if (!other.serverTimestamp_.isEmpty()) {
+ if (serverTimestampBuilder_.isEmpty()) {
+ serverTimestampBuilder_.dispose();
+ serverTimestampBuilder_ = null;
+ serverTimestamp_ = other.serverTimestamp_;
+ bitField0_ = (bitField0_ & ~0x00000002);
+ serverTimestampBuilder_ =
+ com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
+ getServerTimestampFieldBuilder() : null;
+ } else {
+ serverTimestampBuilder_.addAllMessages(other.serverTimestamp_);
+ }
+ }
+ }
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ public final boolean isInitialized() {
+ if (!hasTable()) {
+
+ return false;
+ }
+ if (!getTable().isInitialized()) {
+
+ return false;
+ }
+ for (int i = 0; i < getServerTimestampCount(); i++) {
+ if (!getServerTimestamp(i).isInitialized()) {
+
+ return false;
+ }
+ }
+ return true;
+ }
+
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp parsedMessage = null;
+ try {
+ parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp) e.getUnfinishedMessage();
+ throw e;
+ } finally {
+ if (parsedMessage != null) {
+ mergeFrom(parsedMessage);
+ }
+ }
+ return this;
+ }
+ private int bitField0_;
+
+ // required .hbase.pb.TableName table = 1;
+ private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName table_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance();
+ private com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> tableBuilder_;
+ /**
+ * required .hbase.pb.TableName table = 1;
+ */
+ public boolean hasTable() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * required .hbase.pb.TableName table = 1;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTable() {
+ if (tableBuilder_ == null) {
+ return table_;
+ } else {
+ return tableBuilder_.getMessage();
+ }
+ }
+ /**
+ * required .hbase.pb.TableName table = 1;
+ */
+ public Builder setTable(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) {
+ if (tableBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ table_ = value;
+ onChanged();
+ } else {
+ tableBuilder_.setMessage(value);
+ }
+ bitField0_ |= 0x00000001;
+ return this;
+ }
+ /**
+ * required .hbase.pb.TableName table = 1;
+ */
+ public Builder setTable(
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) {
+ if (tableBuilder_ == null) {
+ table_ = builderForValue.build();
+ onChanged();
+ } else {
+ tableBuilder_.setMessage(builderForValue.build());
+ }
+ bitField0_ |= 0x00000001;
+ return this;
+ }
+ /**
+ * required .hbase.pb.TableName table = 1;
+ */
+ public Builder mergeTable(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) {
+ if (tableBuilder_ == null) {
+ if (((bitField0_ & 0x00000001) == 0x00000001) &&
+ table_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance()) {
+ table_ =
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.newBuilder(table_).mergeFrom(value).buildPartial();
+ } else {
+ table_ = value;
+ }
+ onChanged();
+ } else {
+ tableBuilder_.mergeFrom(value);
+ }
+ bitField0_ |= 0x00000001;
+ return this;
+ }
+ /**
+ * required .hbase.pb.TableName table = 1;
+ */
+ public Builder clearTable() {
+ if (tableBuilder_ == null) {
+ table_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance();
+ onChanged();
+ } else {
+ tableBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000001);
+ return this;
+ }
+ /**
+ * required .hbase.pb.TableName table = 1;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder getTableBuilder() {
+ bitField0_ |= 0x00000001;
+ onChanged();
+ return getTableFieldBuilder().getBuilder();
+ }
+ /**
+ * required .hbase.pb.TableName table = 1;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableOrBuilder() {
+ if (tableBuilder_ != null) {
+ return tableBuilder_.getMessageOrBuilder();
+ } else {
+ return table_;
+ }
+ }
+ /**
+ * required .hbase.pb.TableName table = 1;
+ */
+ private com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder>
+ getTableFieldBuilder() {
+ if (tableBuilder_ == null) {
+ tableBuilder_ = new com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder>(
+ table_,
+ getParentForChildren(),
+ isClean());
+ table_ = null;
+ }
+ return tableBuilder_;
+ }
+
+ // repeated .hbase.pb.ServerTimestamp server_timestamp = 2;
+ private java.util.List<org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp> serverTimestamp_ =
+ java.util.Collections.emptyList();
+ private void ensureServerTimestampIsMutable() {
+ if (!((bitField0_ & 0x00000002) == 0x00000002)) {
+ serverTimestamp_ = new java.util.ArrayList<org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp>(serverTimestamp_);
+ bitField0_ |= 0x00000002;
+ }
+ }
+
+ private com.google.protobuf.RepeatedFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp.Builder, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestampOrBuilder> serverTimestampBuilder_;
+
+ /**
+ * repeated .hbase.pb.ServerTimestamp server_timestamp = 2;
+ */
+ public java.util.List<org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp> getServerTimestampList() {
+ if (serverTimestampBuilder_ == null) {
+ return java.util.Collections.unmodifiableList(serverTimestamp_);
+ } else {
+ return serverTimestampBuilder_.getMessageList();
+ }
+ }
+ /**
+ * repeated .hbase.pb.ServerTimestamp server_timestamp = 2;
+ */
+ public int getServerTimestampCount() {
+ if (serverTimestampBuilder_ == null) {
+ return serverTimestamp_.size();
+ } else {
+ return serverTimestampBuilder_.getCount();
+ }
+ }
+ /**
+ * repeated .hbase.pb.ServerTimestamp server_timestamp = 2;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp getServerTimestamp(int index) {
+ if (serverTimestampBuilder_ == null) {
+ return serverTimestamp_.get(index);
+ } else {
+ return serverTimestampBuilder_.getMessage(index);
+ }
+ }
+ /**
+ * repeated .hbase.pb.ServerTimestamp server_timestamp = 2;
+ */
+ public Builder setServerTimestamp(
+ int index, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp value) {
+ if (serverTimestampBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureServerTimestampIsMutable();
+ serverTimestamp_.set(index, value);
+ onChanged();
+ } else {
+ serverTimestampBuilder_.setMessage(index, value);
+ }
+ return this;
+ }
+ /**
+ * repeated .hbase.pb.ServerTimestamp server_timestamp = 2;
+ */
+ public Builder setServerTimestamp(
+ int index, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp.Builder builderForValue) {
+ if (serverTimestampBuilder_ == null) {
+ ensureServerTimestampIsMutable();
+ serverTimestamp_.set(index, builderForValue.build());
+ onChanged();
+ } else {
+ serverTimestampBuilder_.setMessage(index, builderForValue.build());
+ }
+ return this;
+ }
+ /**
+ * repeated .hbase.pb.ServerTimestamp server_timestamp = 2;
+ */
+ public Builder addServerTimestamp(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp value) {
+ if (serverTimestampBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureServerTimestampIsMutable();
+ serverTimestamp_.add(value);
+ onChanged();
+ } else {
+ serverTimestampBuilder_.addMessage(value);
+ }
+ return this;
+ }
+ /**
+ * repeated .hbase.pb.ServerTimestamp server_timestamp = 2;
+ */
+ public Builder addServerTimestamp(
+ int index, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp value) {
+ if (serverTimestampBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureServerTimestampIsMutable();
+ serverTimestamp_.add(index, value);
+ onChanged();
+ } else {
+ serverTimestampBuilder_.addMessage(index, value);
+ }
+ return this;
+ }
+ /**
+ * repeated .hbase.pb.ServerTimestamp server_timestamp = 2;
+ */
+ public Builder addServerTimestamp(
+ org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp.Builder builderForValue) {
+ if (serverTimestampBuilder_ == null) {
+ ensureServerTimestampIsMutable();
+ serverTimestamp_.add(builderForValue.build());
+ onChanged();
+ } else {
+ serverTimestampBuilder_.addMessage(builderForValue.build());
+ }
+ return this;
+ }
+ /**
+ * repeated .hbase.pb.ServerTimestamp server_timestamp = 2;
+ */
+ public Builder addServerTimestamp(
+ int index, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp.Builder builderForValue) {
+ if (serverTimestampBuilder_ == null) {
+ ensureServerTimestampIsMutable();
+ serverTimestamp_.add(index, builderForValue.build());
+ onChanged();
+ } else {
+ serverTimestampBuilder_.addMessage(index, builderForValue.build());
+ }
+ return this;
+ }
+ /**
+ * repeated .hbase.pb.ServerTimestamp server_timestamp = 2;
+ */
+ public Builder addAllServerTimestamp(
+ java.lang.Iterable<? extends org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp> values) {
+ if (serverTimestampBuilder_ == null) {
+ ensureServerTimestampIsMutable();
+ super.addAll(values, serverTimestamp_);
+ onChanged();
+ } else {
+ serverTimestampBuilder_.addAllMessages(values);
+ }
+ return this;
+ }
+ /**
+ * repeated .hbase.pb.ServerTimestamp server_timestamp = 2;
+ */
+ public Builder clearServerTimestamp() {
+ if (serverTimestampBuilder_ == null) {
+ serverTimestamp_ = java.util.Collections.emptyList();
+ bitField0_ = (bitField0_ & ~0x00000002);
+ onChanged();
+ } else {
+ serverTimestampBuilder_.clear();
+ }
+ return this;
+ }
+ /**
+ * repeated .hbase.pb.ServerTimestamp server_timestamp = 2;
+ */
+ public Builder removeServerTimestamp(int index) {
+ if (serverTimestampBuilder_ == null) {
+ ensureServerTimestampIsMutable();
+ serverTimestamp_.remove(index);
+ onChanged();
+ } else {
+ serverTimestampBuilder_.remove(index);
+ }
+ return this;
+ }
+ /**
+ * repeated .hbase.pb.ServerTimestamp server_timestamp = 2;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp.Builder getServerTimestampBuilder(
+ int index) {
+ return getServerTimestampFieldBuilder().getBuilder(index);
+ }
+ /**
+ * repeated .hbase.pb.ServerTimestamp server_timestamp = 2;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestampOrBuilder getServerTimestampOrBuilder(
+ int index) {
+ if (serverTimestampBuilder_ == null) {
+ return serverTimestamp_.get(index); } else {
+ return serverTimestampBuilder_.getMessageOrBuilder(index);
+ }
+ }
+ /**
+ * repeated .hbase.pb.ServerTimestamp server_timestamp = 2;
+ */
+ public java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestampOrBuilder>
+ getServerTimestampOrBuilderList() {
+ if (serverTimestampBuilder_ != null) {
+ return serverTimestampBuilder_.getMessageOrBuilderList();
+ } else {
+ return java.util.Collections.unmodifiableList(serverTimestamp_);
+ }
+ }
+ /**
+ * repeated .hbase.pb.ServerTimestamp server_timestamp = 2;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp.Builder addServerTimestampBuilder() {
+ return getServerTimestampFieldBuilder().addBuilder(
+ org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp.getDefaultInstance());
+ }
+ /**
+ * repeated .hbase.pb.ServerTimestamp server_timestamp = 2;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp.Builder addServerTimestampBuilder(
+ int index) {
+ return getServerTimestampFieldBuilder().addBuilder(
+ index, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp.getDefaultInstance());
+ }
+ /**
+ * repeated .hbase.pb.ServerTimestamp server_timestamp = 2;
+ */
+ public java.util.List<org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp.Builder>
+ getServerTimestampBuilderList() {
+ return getServerTimestampFieldBuilder().getBuilderList();
+ }
+ private com.google.protobuf.RepeatedFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp.Builder, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestampOrBuilder>
+ getServerTimestampFieldBuilder() {
+ if (serverTimestampBuilder_ == null) {
+ serverTimestampBuilder_ = new com.google.protobuf.RepeatedFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp.Builder, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestampOrBuilder>(
+ serverTimestamp_,
+ ((bitField0_ & 0x00000002) == 0x00000002),
+ getParentForChildren(),
+ isClean());
+ serverTimestamp_ = null;
+ }
+ return serverTimestampBuilder_;
+ }
+
+ // @@protoc_insertion_point(builder_scope:hbase.pb.TableServerTimestamp)
+ }
+
+ static {
+ defaultInstance = new TableServerTimestamp(true);
+ defaultInstance.initFields();
+ }
+
+ // @@protoc_insertion_point(class_scope:hbase.pb.TableServerTimestamp)
+ }
+
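+ /*
+ * The BackupManifest message below appears to mirror the following Backup.proto
+ * definition; this sketch is reconstructed from the generated accessors and the
+ * field numbers in their javadoc, not copied from the .proto file itself:
+ *
+ * message BackupManifest {
+ * required string version = 1;
+ * required string backup_id = 2;
+ * required BackupType type = 3;
+ * repeated TableName table_list = 4;
+ * required uint64 start_ts = 5;
+ * required uint64 complete_ts = 6;
+ * required int64 total_bytes = 7;
+ * optional int64 log_bytes = 8;
+ * repeated TableServerTimestamp tst_map = 9;
+ * repeated BackupImage dependent_backup_image = 10;
+ * required bool compacted = 11;
+ * }
+ */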
+ public interface BackupManifestOrBuilder
+ extends com.google.protobuf.MessageOrBuilder {
+
+ // required string version = 1;
+ /**
+ * required string version = 1;
+ */
+ boolean hasVersion();
+ /**
+ * required string version = 1;
+ */
+ java.lang.String getVersion();
+ /**
+ * required string version = 1;
+ */
+ com.google.protobuf.ByteString
+ getVersionBytes();
+
+ // required string backup_id = 2;
+ /**
+ * required string backup_id = 2;
+ */
+ boolean hasBackupId();
+ /**
+ * required string backup_id = 2;
+ */
+ java.lang.String getBackupId();
+ /**
+ * required string backup_id = 2;
+ */
+ com.google.protobuf.ByteString
+ getBackupIdBytes();
+
+ // required .hbase.pb.BackupType type = 3;
+ /**
+ * required .hbase.pb.BackupType type = 3;
+ */
+ boolean hasType();
+ /**
+ * required .hbase.pb.BackupType type = 3;
+ */
+ org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType getType();
+
+ // repeated .hbase.pb.TableName table_list = 4;
+ /**
+ * repeated .hbase.pb.TableName table_list = 4;
+ */
+ java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName>
+ getTableListList();
+ /**
+ * repeated .hbase.pb.TableName table_list = 4;
+ */
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableList(int index);
+ /**
+ * repeated .hbase.pb.TableName table_list = 4;
+ */
+ int getTableListCount();
+ /**
+ * repeated .hbase.pb.TableName table_list = 4;
+ */
+ java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder>
+ getTableListOrBuilderList();
+ /**
+ * repeated .hbase.pb.TableName table_list = 4;
+ */
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableListOrBuilder(
+ int index);
+
+ // required uint64 start_ts = 5;
+ /**
+ * required uint64 start_ts = 5;
+ */
+ boolean hasStartTs();
+ /**
+ * required uint64 start_ts = 5;
+ */
+ long getStartTs();
+
+ // required uint64 complete_ts = 6;
+ /**
+ * required uint64 complete_ts = 6;
+ */
+ boolean hasCompleteTs();
+ /**
+ * required uint64 complete_ts = 6;
+ */
+ long getCompleteTs();
+
+ // required int64 total_bytes = 7;
+ /**
+ * required int64 total_bytes = 7;
+ */
+ boolean hasTotalBytes();
+ /**
+ * required int64 total_bytes = 7;
+ */
+ long getTotalBytes();
+
+ // optional int64 log_bytes = 8;
+ /**
+ * optional int64 log_bytes = 8;
+ */
+ boolean hasLogBytes();
+ /**
+ * optional int64 log_bytes = 8;
+ */
+ long getLogBytes();
+
+ // repeated .hbase.pb.TableServerTimestamp tst_map = 9;
+ /**
+ * repeated .hbase.pb.TableServerTimestamp tst_map = 9;
+ */
+ java.util.List<org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp>
+ getTstMapList();
+ /**
+ * repeated .hbase.pb.TableServerTimestamp tst_map = 9;
+ */
+ org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp getTstMap(int index);
+ /**
+ * repeated .hbase.pb.TableServerTimestamp tst_map = 9;
+ */
+ int getTstMapCount();
+ /**
+ * repeated .hbase.pb.TableServerTimestamp tst_map = 9;
+ */
+ java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestampOrBuilder>
+ getTstMapOrBuilderList();
+ /**
+ * repeated .hbase.pb.TableServerTimestamp tst_map = 9;
+ */
+ org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestampOrBuilder getTstMapOrBuilder(
+ int index);
+
+ // repeated .hbase.pb.BackupImage dependent_backup_image = 10;
+ /**
+ * repeated .hbase.pb.BackupImage dependent_backup_image = 10;
+ */
+ java.util.List<org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage>
+ getDependentBackupImageList();
+ /**
+ * repeated .hbase.pb.BackupImage dependent_backup_image = 10;
+ */
+ org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage getDependentBackupImage(int index);
+ /**
+ * repeated .hbase.pb.BackupImage dependent_backup_image = 10;
+ */
+ int getDependentBackupImageCount();
+ /**
+ * repeated .hbase.pb.BackupImage dependent_backup_image = 10;
+ */
+ java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImageOrBuilder>
+ getDependentBackupImageOrBuilderList();
+ /**
+ * repeated .hbase.pb.BackupImage dependent_backup_image = 10;
+ */
+ org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImageOrBuilder getDependentBackupImageOrBuilder(
+ int index);
+
+ // required bool compacted = 11;
+ /**
+ * required bool compacted = 11;
+ */
+ boolean hasCompacted();
+ /**
+ * required bool compacted = 11;
+ */
+ boolean getCompacted();
+ }
+ /**
+ * Protobuf type {@code hbase.pb.BackupManifest}
+ */
+ public static final class BackupManifest extends
+ com.google.protobuf.GeneratedMessage
+ implements BackupManifestOrBuilder {
+ // Use BackupManifest.newBuilder() to construct.
+ private BackupManifest(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+ super(builder);
+ this.unknownFields = builder.getUnknownFields();
+ }
+ private BackupManifest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+ private static final BackupManifest defaultInstance;
+ public static BackupManifest getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public BackupManifest getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ private final com.google.protobuf.UnknownFieldSet unknownFields;
+ @java.lang.Override
+ public final com.google.protobuf.UnknownFieldSet
+ getUnknownFields() {
+ return this.unknownFields;
+ }
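+ // Wire-format parsing constructor. Each case value in the switch below is the
+ // protobuf tag, i.e. (field_number << 3) | wire_type: for example, case 10 is
+ // field 1 (version, length-delimited), case 24 is field 3 (type, varint) and
+ // case 88 is field 11 (compacted, varint).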
+ private BackupManifest(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ initFields();
+ int mutable_bitField0_ = 0;
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder();
+ try {
+ boolean done = false;
+ while (!done) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ done = true;
+ break;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ done = true;
+ }
+ break;
+ }
+ case 10: {
+ bitField0_ |= 0x00000001;
+ version_ = input.readBytes();
+ break;
+ }
+ case 18: {
+ bitField0_ |= 0x00000002;
+ backupId_ = input.readBytes();
+ break;
+ }
+ case 24: {
+ int rawValue = input.readEnum();
+ org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType value = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType.valueOf(rawValue);
+ if (value == null) {
+ unknownFields.mergeVarintField(3, rawValue);
+ } else {
+ bitField0_ |= 0x00000004;
+ type_ = value;
+ }
+ break;
+ }
+ case 34: {
+ if (!((mutable_bitField0_ & 0x00000008) == 0x00000008)) {
+ tableList_ = new java.util.ArrayList<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName>();
+ mutable_bitField0_ |= 0x00000008;
+ }
+ tableList_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.PARSER, extensionRegistry));
+ break;
+ }
+ case 40: {
+ bitField0_ |= 0x00000008;
+ startTs_ = input.readUInt64();
+ break;
+ }
+ case 48: {
+ bitField0_ |= 0x00000010;
+ completeTs_ = input.readUInt64();
+ break;
+ }
+ case 56: {
+ bitField0_ |= 0x00000020;
+ totalBytes_ = input.readInt64();
+ break;
+ }
+ case 64: {
+ bitField0_ |= 0x00000040;
+ logBytes_ = input.readInt64();
+ break;
+ }
+ case 74: {
+ if (!((mutable_bitField0_ & 0x00000100) == 0x00000100)) {
+ tstMap_ = new java.util.ArrayList<org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp>();
+ mutable_bitField0_ |= 0x00000100;
+ }
+ tstMap_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp.PARSER, extensionRegistry));
+ break;
+ }
+ case 82: {
+ if (!((mutable_bitField0_ & 0x00000200) == 0x00000200)) {
+ dependentBackupImage_ = new java.util.ArrayList<org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage>();
+ mutable_bitField0_ |= 0x00000200;
+ }
+ dependentBackupImage_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage.PARSER, extensionRegistry));
+ break;
+ }
+ case 88: {
+ bitField0_ |= 0x00000080;
+ compacted_ = input.readBool();
+ break;
+ }
+ }
+ }
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ throw e.setUnfinishedMessage(this);
+ } catch (java.io.IOException e) {
+ throw new com.google.protobuf.InvalidProtocolBufferException(
+ e.getMessage()).setUnfinishedMessage(this);
+ } finally {
+ if (((mutable_bitField0_ & 0x00000008) == 0x00000008)) {
+ tableList_ = java.util.Collections.unmodifiableList(tableList_);
+ }
+ if (((mutable_bitField0_ & 0x00000100) == 0x00000100)) {
+ tstMap_ = java.util.Collections.unmodifiableList(tstMap_);
+ }
+ if (((mutable_bitField0_ & 0x00000200) == 0x00000200)) {
+ dependentBackupImage_ = java.util.Collections.unmodifiableList(dependentBackupImage_);
+ }
+ this.unknownFields = unknownFields.build();
+ makeExtensionsImmutable();
+ }
+ }
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_BackupManifest_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_BackupManifest_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest.class, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest.Builder.class);
+ }
+
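+ // PARSER delegates to the wire-format constructor above; the static parseFrom
+ // and parseDelimitedFrom helpers further down all route through it.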
+ public static com.google.protobuf.Parser<BackupManifest> PARSER =
+ new com.google.protobuf.AbstractParser<BackupManifest>() {
+ public BackupManifest parsePartialFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return new BackupManifest(input, extensionRegistry);
+ }
+ };
+
+ @java.lang.Override
+ public com.google.protobuf.Parser<BackupManifest> getParserForType() {
+ return PARSER;
+ }
+
+ private int bitField0_;
+ // required string version = 1;
+ public static final int VERSION_FIELD_NUMBER = 1;
+ private java.lang.Object version_;
+ /**
+ * required string version = 1;
+ */
+ public boolean hasVersion() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * required string version = 1;
+ */
+ public java.lang.String getVersion() {
+ java.lang.Object ref = version_;
+ if (ref instanceof java.lang.String) {
+ return (java.lang.String) ref;
+ } else {
+ com.google.protobuf.ByteString bs =
+ (com.google.protobuf.ByteString) ref;
+ java.lang.String s = bs.toStringUtf8();
+ if (bs.isValidUtf8()) {
+ version_ = s;
+ }
+ return s;
+ }
+ }
+ /**
+ * required string version = 1;
+ */
+ public com.google.protobuf.ByteString
+ getVersionBytes() {
+ java.lang.Object ref = version_;
+ if (ref instanceof java.lang.String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ version_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+
+ // required string backup_id = 2;
+ public static final int BACKUP_ID_FIELD_NUMBER = 2;
+ private java.lang.Object backupId_;
+ /**
+ * required string backup_id = 2;
+ */
+ public boolean hasBackupId() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ /**
+ * required string backup_id = 2;
+ */
+ public java.lang.String getBackupId() {
+ java.lang.Object ref = backupId_;
+ if (ref instanceof java.lang.String) {
+ return (java.lang.String) ref;
+ } else {
+ com.google.protobuf.ByteString bs =
+ (com.google.protobuf.ByteString) ref;
+ java.lang.String s = bs.toStringUtf8();
+ if (bs.isValidUtf8()) {
+ backupId_ = s;
+ }
+ return s;
+ }
+ }
+ /**
+ * required string backup_id = 2;
+ */
+ public com.google.protobuf.ByteString
+ getBackupIdBytes() {
+ java.lang.Object ref = backupId_;
+ if (ref instanceof java.lang.String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ backupId_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+
+ // required .hbase.pb.BackupType type = 3;
+ public static final int TYPE_FIELD_NUMBER = 3;
+ private org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType type_;
+ /**
+ * required .hbase.pb.BackupType type = 3;
+ */
+ public boolean hasType() {
+ return ((bitField0_ & 0x00000004) == 0x00000004);
+ }
+ /**
+ * required .hbase.pb.BackupType type = 3;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType getType() {
+ return type_;
+ }
+
+ // repeated .hbase.pb.TableName table_list = 4;
+ public static final int TABLE_LIST_FIELD_NUMBER = 4;
+ private java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName> tableList_;
+ /**
+ * repeated .hbase.pb.TableName table_list = 4;
+ */
+ public java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName> getTableListList() {
+ return tableList_;
+ }
+ /**
+ * repeated .hbase.pb.TableName table_list = 4;
+ */
+ public java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder>
+ getTableListOrBuilderList() {
+ return tableList_;
+ }
+ /**
+ * repeated .hbase.pb.TableName table_list = 4;
+ */
+ public int getTableListCount() {
+ return tableList_.size();
+ }
+ /**
+ * repeated .hbase.pb.TableName table_list = 4;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableList(int index) {
+ return tableList_.get(index);
+ }
+ /**
+ * repeated .hbase.pb.TableName table_list = 4;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableListOrBuilder(
+ int index) {
+ return tableList_.get(index);
+ }
+
+ // required uint64 start_ts = 5;
+ public static final int START_TS_FIELD_NUMBER = 5;
+ private long startTs_;
+ /**
+ * required uint64 start_ts = 5;
+ */
+ public boolean hasStartTs() {
+ return ((bitField0_ & 0x00000008) == 0x00000008);
+ }
+ /**
+ * required uint64 start_ts = 5;
+ */
+ public long getStartTs() {
+ return startTs_;
+ }
+
+ // required uint64 complete_ts = 6;
+ public static final int COMPLETE_TS_FIELD_NUMBER = 6;
+ private long completeTs_;
+ /**
+ * required uint64 complete_ts = 6;
+ */
+ public boolean hasCompleteTs() {
+ return ((bitField0_ & 0x00000010) == 0x00000010);
+ }
+ /**
+ * required uint64 complete_ts = 6;
+ */
+ public long getCompleteTs() {
+ return completeTs_;
+ }
+
+ // required int64 total_bytes = 7;
+ public static final int TOTAL_BYTES_FIELD_NUMBER = 7;
+ private long totalBytes_;
+ /**
+ * required int64 total_bytes = 7;
+ */
+ public boolean hasTotalBytes() {
+ return ((bitField0_ & 0x00000020) == 0x00000020);
+ }
+ /**
+ * required int64 total_bytes = 7;
+ */
+ public long getTotalBytes() {
+ return totalBytes_;
+ }
+
+ // optional int64 log_bytes = 8;
+ public static final int LOG_BYTES_FIELD_NUMBER = 8;
+ private long logBytes_;
+ /**
+ * optional int64 log_bytes = 8;
+ */
+ public boolean hasLogBytes() {
+ return ((bitField0_ & 0x00000040) == 0x00000040);
+ }
+ /**
+ * optional int64 log_bytes = 8;
+ */
+ public long getLogBytes() {
+ return logBytes_;
+ }
+
+ // repeated .hbase.pb.TableServerTimestamp tst_map = 9;
+ public static final int TST_MAP_FIELD_NUMBER = 9;
+ private java.util.List<org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp> tstMap_;
+ /**
+ * repeated .hbase.pb.TableServerTimestamp tst_map = 9;
+ */
+ public java.util.List<org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp> getTstMapList() {
+ return tstMap_;
+ }
+ /**
+ * repeated .hbase.pb.TableServerTimestamp tst_map = 9;
+ */
+ public java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestampOrBuilder>
+ getTstMapOrBuilderList() {
+ return tstMap_;
+ }
+ /**
+ * repeated .hbase.pb.TableServerTimestamp tst_map = 9;
+ */
+ public int getTstMapCount() {
+ return tstMap_.size();
+ }
+ /**
+ * repeated .hbase.pb.TableServerTimestamp tst_map = 9;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp getTstMap(int index) {
+ return tstMap_.get(index);
+ }
+ /**
+ * repeated .hbase.pb.TableServerTimestamp tst_map = 9;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestampOrBuilder getTstMapOrBuilder(
+ int index) {
+ return tstMap_.get(index);
+ }
+
+ // repeated .hbase.pb.BackupImage dependent_backup_image = 10;
+ public static final int DEPENDENT_BACKUP_IMAGE_FIELD_NUMBER = 10;
+ private java.util.List<org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage> dependentBackupImage_;
+ /**
+ * repeated .hbase.pb.BackupImage dependent_backup_image = 10;
+ */
+ public java.util.List<org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage> getDependentBackupImageList() {
+ return dependentBackupImage_;
+ }
+ /**
+ * repeated .hbase.pb.BackupImage dependent_backup_image = 10;
+ */
+ public java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImageOrBuilder>
+ getDependentBackupImageOrBuilderList() {
+ return dependentBackupImage_;
+ }
+ /**
+ * repeated .hbase.pb.BackupImage dependent_backup_image = 10;
+ */
+ public int getDependentBackupImageCount() {
+ return dependentBackupImage_.size();
+ }
+ /**
+ * repeated .hbase.pb.BackupImage dependent_backup_image = 10;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage getDependentBackupImage(int index) {
+ return dependentBackupImage_.get(index);
+ }
+ /**
+ * repeated .hbase.pb.BackupImage dependent_backup_image = 10;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImageOrBuilder getDependentBackupImageOrBuilder(
+ int index) {
+ return dependentBackupImage_.get(index);
+ }
+
+ // required bool compacted = 11;
+ public static final int COMPACTED_FIELD_NUMBER = 11;
+ private boolean compacted_;
+ /**
+ * required bool compacted = 11;
+ */
+ public boolean hasCompacted() {
+ return ((bitField0_ & 0x00000080) == 0x00000080);
+ }
+ /**
+ * required bool compacted = 11;
+ */
+ public boolean getCompacted() {
+ return compacted_;
+ }
+
+ private void initFields() {
+ version_ = "";
+ backupId_ = "";
+ type_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType.FULL;
+ tableList_ = java.util.Collections.emptyList();
+ startTs_ = 0L;
+ completeTs_ = 0L;
+ totalBytes_ = 0L;
+ logBytes_ = 0L;
+ tstMap_ = java.util.Collections.emptyList();
+ dependentBackupImage_ = java.util.Collections.emptyList();
+ compacted_ = false;
+ }
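+ // isInitialized() below caches its result in memoizedIsInitialized and returns
+ // false if any required field (version, backup_id, type, start_ts, complete_ts,
+ // total_bytes, compacted) is unset, or if any nested TableName, TableServerTimestamp
+ // or BackupImage message is itself uninitialized.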
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized != -1) return isInitialized == 1;
+
+ if (!hasVersion()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!hasBackupId()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!hasType()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!hasStartTs()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!hasCompleteTs()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!hasTotalBytes()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!hasCompacted()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ for (int i = 0; i < getTableListCount(); i++) {
+ if (!getTableList(i).isInitialized()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ }
+ for (int i = 0; i < getTstMapCount(); i++) {
+ if (!getTstMap(i).isInitialized()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ }
+ for (int i = 0; i < getDependentBackupImageCount(); i++) {
+ if (!getDependentBackupImage(i).isInitialized()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ }
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ output.writeBytes(1, getVersionBytes());
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ output.writeBytes(2, getBackupIdBytes());
+ }
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
+ output.writeEnum(3, type_.getNumber());
+ }
+ for (int i = 0; i < tableList_.size(); i++) {
+ output.writeMessage(4, tableList_.get(i));
+ }
+ if (((bitField0_ & 0x00000008) == 0x00000008)) {
+ output.writeUInt64(5, startTs_);
+ }
+ if (((bitField0_ & 0x00000010) == 0x00000010)) {
+ output.writeUInt64(6, completeTs_);
+ }
+ if (((bitField0_ & 0x00000020) == 0x00000020)) {
+ output.writeInt64(7, totalBytes_);
+ }
+ if (((bitField0_ & 0x00000040) == 0x00000040)) {
+ output.writeInt64(8, logBytes_);
+ }
+ for (int i = 0; i < tstMap_.size(); i++) {
+ output.writeMessage(9, tstMap_.get(i));
+ }
+ for (int i = 0; i < dependentBackupImage_.size(); i++) {
+ output.writeMessage(10, dependentBackupImage_.get(i));
+ }
+ if (((bitField0_ & 0x00000080) == 0x00000080)) {
+ output.writeBool(11, compacted_);
+ }
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBytesSize(1, getVersionBytes());
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBytesSize(2, getBackupIdBytes());
+ }
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeEnumSize(3, type_.getNumber());
+ }
+ for (int i = 0; i < tableList_.size(); i++) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeMessageSize(4, tableList_.get(i));
+ }
+ if (((bitField0_ & 0x00000008) == 0x00000008)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeUInt64Size(5, startTs_);
+ }
+ if (((bitField0_ & 0x00000010) == 0x00000010)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeUInt64Size(6, completeTs_);
+ }
+ if (((bitField0_ & 0x00000020) == 0x00000020)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeInt64Size(7, totalBytes_);
+ }
+ if (((bitField0_ & 0x00000040) == 0x00000040)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeInt64Size(8, logBytes_);
+ }
+ for (int i = 0; i < tstMap_.size(); i++) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeMessageSize(9, tstMap_.get(i));
+ }
+ for (int i = 0; i < dependentBackupImage_.size(); i++) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeMessageSize(10, dependentBackupImage_.get(i));
+ }
+ if (((bitField0_ & 0x00000080) == 0x00000080)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBoolSize(11, compacted_);
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ protected java.lang.Object writeReplace()
+ throws java.io.ObjectStreamException {
+ return super.writeReplace();
+ }
+
+ @java.lang.Override
+ public boolean equals(final java.lang.Object obj) {
+ if (obj == this) {
+ return true;
+ }
+ if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest)) {
+ return super.equals(obj);
+ }
+ org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest other = (org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest) obj;
+
+ boolean result = true;
+ result = result && (hasVersion() == other.hasVersion());
+ if (hasVersion()) {
+ result = result && getVersion()
+ .equals(other.getVersion());
+ }
+ result = result && (hasBackupId() == other.hasBackupId());
+ if (hasBackupId()) {
+ result = result && getBackupId()
+ .equals(other.getBackupId());
+ }
+ result = result && (hasType() == other.hasType());
+ if (hasType()) {
+ result = result &&
+ (getType() == other.getType());
+ }
+ result = result && getTableListList()
+ .equals(other.getTableListList());
+ result = result && (hasStartTs() == other.hasStartTs());
+ if (hasStartTs()) {
+ result = result && (getStartTs()
+ == other.getStartTs());
+ }
+ result = result && (hasCompleteTs() == other.hasCompleteTs());
+ if (hasCompleteTs()) {
+ result = result && (getCompleteTs()
+ == other.getCompleteTs());
+ }
+ result = result && (hasTotalBytes() == other.hasTotalBytes());
+ if (hasTotalBytes()) {
+ result = result && (getTotalBytes()
+ == other.getTotalBytes());
+ }
+ result = result && (hasLogBytes() == other.hasLogBytes());
+ if (hasLogBytes()) {
+ result = result && (getLogBytes()
+ == other.getLogBytes());
+ }
+ result = result && getTstMapList()
+ .equals(other.getTstMapList());
+ result = result && getDependentBackupImageList()
+ .equals(other.getDependentBackupImageList());
+ result = result && (hasCompacted() == other.hasCompacted());
+ if (hasCompacted()) {
+ result = result && (getCompacted()
+ == other.getCompacted());
+ }
+ result = result &&
+ getUnknownFields().equals(other.getUnknownFields());
+ return result;
+ }
+
+ private int memoizedHashCode = 0;
+ @java.lang.Override
+ public int hashCode() {
+ if (memoizedHashCode != 0) {
+ return memoizedHashCode;
+ }
+ int hash = 41;
+ hash = (19 * hash) + getDescriptorForType().hashCode();
+ if (hasVersion()) {
+ hash = (37 * hash) + VERSION_FIELD_NUMBER;
+ hash = (53 * hash) + getVersion().hashCode();
+ }
+ if (hasBackupId()) {
+ hash = (37 * hash) + BACKUP_ID_FIELD_NUMBER;
+ hash = (53 * hash) + getBackupId().hashCode();
+ }
+ if (hasType()) {
+ hash = (37 * hash) + TYPE_FIELD_NUMBER;
+ hash = (53 * hash) + hashEnum(getType());
+ }
+ if (getTableListCount() > 0) {
+ hash = (37 * hash) + TABLE_LIST_FIELD_NUMBER;
+ hash = (53 * hash) + getTableListList().hashCode();
+ }
+ if (hasStartTs()) {
+ hash = (37 * hash) + START_TS_FIELD_NUMBER;
+ hash = (53 * hash) + hashLong(getStartTs());
+ }
+ if (hasCompleteTs()) {
+ hash = (37 * hash) + COMPLETE_TS_FIELD_NUMBER;
+ hash = (53 * hash) + hashLong(getCompleteTs());
+ }
+ if (hasTotalBytes()) {
+ hash = (37 * hash) + TOTAL_BYTES_FIELD_NUMBER;
+ hash = (53 * hash) + hashLong(getTotalBytes());
+ }
+ if (hasLogBytes()) {
+ hash = (37 * hash) + LOG_BYTES_FIELD_NUMBER;
+ hash = (53 * hash) + hashLong(getLogBytes());
+ }
+ if (getTstMapCount() > 0) {
+ hash = (37 * hash) + TST_MAP_FIELD_NUMBER;
+ hash = (53 * hash) + getTstMapList().hashCode();
+ }
+ if (getDependentBackupImageCount() > 0) {
+ hash = (37 * hash) + DEPENDENT_BACKUP_IMAGE_FIELD_NUMBER;
+ hash = (53 * hash) + getDependentBackupImageList().hashCode();
+ }
+ if (hasCompacted()) {
+ hash = (37 * hash) + COMPACTED_FIELD_NUMBER;
+ hash = (53 * hash) + hashBoolean(getCompacted());
+ }
+ hash = (29 * hash) + getUnknownFields().hashCode();
+ memoizedHashCode = hash;
+ return hash;
+ }
+
+ public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+
+ public static Builder newBuilder() { return Builder.create(); }
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest prototype) {
+ return newBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ /**
+ * Protobuf type {@code hbase.pb.BackupManifest}
+ */
+ public static final class Builder extends
+ com.google.protobuf.GeneratedMessage.Builder<Builder>
+ implements org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifestOrBuilder {
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_BackupManifest_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_BackupManifest_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest.class, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest.Builder.class);
+ }
+
+ // Construct using org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest.newBuilder()
+ private Builder() {
+ maybeForceBuilderInitialization();
+ }
+
+ private Builder(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ super(parent);
+ maybeForceBuilderInitialization();
+ }
+ private void maybeForceBuilderInitialization() {
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+ getTableListFieldBuilder();
+ getTstMapFieldBuilder();
+ getDependentBackupImageFieldBuilder();
+ }
+ }
+ private static Builder create() {
+ return new Builder();
+ }
+
+ public Builder clear() {
+ super.clear();
+ version_ = "";
+ bitField0_ = (bitField0_ & ~0x00000001);
+ backupId_ = "";
+ bitField0_ = (bitField0_ & ~0x00000002);
+ type_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType.FULL;
+ bitField0_ = (bitField0_ & ~0x00000004);
+ if (tableListBuilder_ == null) {
+ tableList_ = java.util.Collections.emptyList();
+ bitField0_ = (bitField0_ & ~0x00000008);
+ } else {
+ tableListBuilder_.clear();
+ }
+ startTs_ = 0L;
+ bitField0_ = (bitField0_ & ~0x00000010);
+ completeTs_ = 0L;
+ bitField0_ = (bitField0_ & ~0x00000020);
+ totalBytes_ = 0L;
+ bitField0_ = (bitField0_ & ~0x00000040);
+ logBytes_ = 0L;
+ bitField0_ = (bitField0_ & ~0x00000080);
+ if (tstMapBuilder_ == null) {
+ tstMap_ = java.util.Collections.emptyList();
+ bitField0_ = (bitField0_ & ~0x00000100);
+ } else {
+ tstMapBuilder_.clear();
+ }
+ if (dependentBackupImageBuilder_ == null) {
+ dependentBackupImage_ = java.util.Collections.emptyList();
+ bitField0_ = (bitField0_ & ~0x00000200);
+ } else {
+ dependentBackupImageBuilder_.clear();
+ }
+ compacted_ = false;
+ bitField0_ = (bitField0_ & ~0x00000400);
+ return this;
+ }
+
+ public Builder clone() {
+ return create().mergeFrom(buildPartial());
+ }
+
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_BackupManifest_descriptor;
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest getDefaultInstanceForType() {
+ return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest.getDefaultInstance();
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest build() {
+ org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return result;
+ }
+
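+ // buildPartial() copies the builder's has-bits into the message's bitField0_;
+ // the positions shift because repeated fields such as table_list do not occupy
+ // a has-bit in the message. Repeated-field lists are wrapped as unmodifiable
+ // when no nested field builder is in use.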
+ public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest buildPartial() {
+ org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest result = new org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest(this);
+ int from_bitField0_ = bitField0_;
+ int to_bitField0_ = 0;
+ if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+ to_bitField0_ |= 0x00000001;
+ }
+ result.version_ = version_;
+ if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
+ to_bitField0_ |= 0x00000002;
+ }
+ result.backupId_ = backupId_;
+ if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
+ to_bitField0_ |= 0x00000004;
+ }
+ result.type_ = type_;
+ if (tableListBuilder_ == null) {
+ if (((bitField0_ & 0x00000008) == 0x00000008)) {
+ tableList_ = java.util.Collections.unmodifiableList(tableList_);
+ bitField0_ = (bitField0_ & ~0x00000008);
+ }
+ result.tableList_ = tableList_;
+ } else {
+ result.tableList_ = tableListBuilder_.build();
+ }
+ if (((from_bitField0_ & 0x00000010) == 0x00000010)) {
+ to_bitField0_ |= 0x00000008;
+ }
+ result.startTs_ = startTs_;
+ if (((from_bitField0_ & 0x00000020) == 0x00000020)) {
+ to_bitField0_ |= 0x00000010;
+ }
+ result.completeTs_ = completeTs_;
+ if (((from_bitField0_ & 0x00000040) == 0x00000040)) {
+ to_bitField0_ |= 0x00000020;
+ }
+ result.totalBytes_ = totalBytes_;
+ if (((from_bitField0_ & 0x00000080) == 0x00000080)) {
+ to_bitField0_ |= 0x00000040;
+ }
+ result.logBytes_ = logBytes_;
+ if (tstMapBuilder_ == null) {
+ if (((bitField0_ & 0x00000100) == 0x00000100)) {
+ tstMap_ = java.util.Collections.unmodifiableList(tstMap_);
+ bitField0_ = (bitField0_ & ~0x00000100);
+ }
+ result.tstMap_ = tstMap_;
+ } else {
+ result.tstMap_ = tstMapBuilder_.build();
+ }
+ if (dependentBackupImageBuilder_ == null) {
+ if (((bitField0_ & 0x00000200) == 0x00000200)) {
+ dependentBackupImage_ = java.util.Collections.unmodifiableList(dependentBackupImage_);
+ bitField0_ = (bitField0_ & ~0x00000200);
+ }
+ result.dependentBackupImage_ = dependentBackupImage_;
+ } else {
+ result.dependentBackupImage_ = dependentBackupImageBuilder_.build();
+ }
+ if (((from_bitField0_ & 0x00000400) == 0x00000400)) {
+ to_bitField0_ |= 0x00000080;
+ }
+ result.compacted_ = compacted_;
+ result.bitField0_ = to_bitField0_;
+ onBuilt();
+ return result;
+ }
+
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest) {
+ return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest other) {
+ if (other == org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest.getDefaultInstance()) return this;
+ if (other.hasVersion()) {
+ bitField0_ |= 0x00000001;
+ version_ = other.version_;
+ onChanged();
+ }
+ if (other.hasBackupId()) {
+ bitField0_ |= 0x00000002;
+ backupId_ = other.backupId_;
+ onChanged();
+ }
+ if (other.hasType()) {
+ setType(other.getType());
+ }
+ if (tableListBuilder_ == null) {
+ if (!other.tableList_.isEmpty()) {
+ if (tableList_.isEmpty()) {
+ tableList_ = other.tableList_;
+ bitField0_ = (bitField0_ & ~0x00000008);
+ } else {
+ ensureTableListIsMutable();
+ tableList_.addAll(other.tableList_);
+ }
+ onChanged();
+ }
+ } else {
+ if (!other.tableList_.isEmpty()) {
+ if (tableListBuilder_.isEmpty()) {
+ tableListBuilder_.dispose();
+ tableListBuilder_ = null;
+ tableList_ = other.tableList_;
+ bitField0_ = (bitField0_ & ~0x00000008);
+ tableListBuilder_ =
+ com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
+ getTableListFieldBuilder() : null;
+ } else {
+ tableListBuilder_.addAllMessages(other.tableList_);
+ }
+ }
+ }
+ if (other.hasStartTs()) {
+ setStartTs(other.getStartTs());
+ }
+ if (other.hasCompleteTs()) {
+ setCompleteTs(other.getCompleteTs());
+ }
+ if (other.hasTotalBytes()) {
+ setTotalBytes(other.getTotalBytes());
+ }
+ if (other.hasLogBytes()) {
+ setLogBytes(other.getLogBytes());
+ }
+ if (tstMapBuilder_ == null) {
+ if (!other.tstMap_.isEmpty()) {
+ if (tstMap_.isEmpty()) {
+ tstMap_ = other.tstMap_;
+ bitField0_ = (bitField0_ & ~0x00000100);
+ } else {
+ ensureTstMapIsMutable();
+ tstMap_.addAll(other.tstMap_);
+ }
+ onChanged();
+ }
+ } else {
+ if (!other.tstMap_.isEmpty()) {
+ if (tstMapBuilder_.isEmpty()) {
+ tstMapBuilder_.dispose();
+ tstMapBuilder_ = null;
+ tstMap_ = other.tstMap_;
+ bitField0_ = (bitField0_ & ~0x00000100);
+ tstMapBuilder_ =
+ com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
+ getTstMapFieldBuilder() : null;
+ } else {
+ tstMapBuilder_.addAllMessages(other.tstMap_);
+ }
+ }
+ }
+ if (dependentBackupImageBuilder_ == null) {
+ if (!other.dependentBackupImage_.isEmpty()) {
+ if (dependentBackupImage_.isEmpty()) {
+ dependentBackupImage_ = other.dependentBackupImage_;
+ bitField0_ = (bitField0_ & ~0x00000200);
+ } else {
+ ensureDependentBackupImageIsMutable();
+ dependentBackupImage_.addAll(other.dependentBackupImage_);
+ }
+ onChanged();
+ }
+ } else {
+ if (!other.dependentBackupImage_.isEmpty()) {
+ if (dependentBackupImageBuilder_.isEmpty()) {
+ dependentBackupImageBuilder_.dispose();
+ dependentBackupImageBuilder_ = null;
+ dependentBackupImage_ = other.dependentBackupImage_;
+ bitField0_ = (bitField0_ & ~0x00000200);
+ dependentBackupImageBuilder_ =
+ com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
+ getDependentBackupImageFieldBuilder() : null;
+ } else {
+ dependentBackupImageBuilder_.addAllMessages(other.dependentBackupImage_);
+ }
+ }
+ }
+ if (other.hasCompacted()) {
+ setCompacted(other.getCompacted());
+ }
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ public final boolean isInitialized() {
+ if (!hasVersion()) {
+
+ return false;
+ }
+ if (!hasBackupId()) {
+
+ return false;
+ }
+ if (!hasType()) {
+
+ return false;
+ }
+ if (!hasStartTs()) {
+
+ return false;
+ }
+ if (!hasCompleteTs()) {
+
+ return false;
+ }
+ if (!hasTotalBytes()) {
+
+ return false;
+ }
+ if (!hasCompacted()) {
+
+ return false;
+ }
+ for (int i = 0; i < getTableListCount(); i++) {
+ if (!getTableList(i).isInitialized()) {
+
+ return false;
+ }
+ }
+ for (int i = 0; i < getTstMapCount(); i++) {
+ if (!getTstMap(i).isInitialized()) {
+
+ return false;
+ }
+ }
+ for (int i = 0; i < getDependentBackupImageCount(); i++) {
+ if (!getDependentBackupImage(i).isInitialized()) {
+
+ return false;
+ }
+ }
+ return true;
+ }
+
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest parsedMessage = null;
+ try {
+ parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest) e.getUnfinishedMessage();
+ throw e;
+ } finally {
+ if (parsedMessage != null) {
+ mergeFrom(parsedMessage);
+ }
+ }
+ return this;
+ }
+ private int bitField0_;
+
+ // required string version = 1;
+ private java.lang.Object version_ = "";
+ /**
+ * required string version = 1;
+ */
+ public boolean hasVersion() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * required string version = 1;
+ */
+ public java.lang.String getVersion() {
+ java.lang.Object ref = version_;
+ if (!(ref instanceof java.lang.String)) {
+ java.lang.String s = ((com.google.protobuf.ByteString) ref)
+ .toStringUtf8();
+ version_ = s;
+ return s;
+ } else {
+ return (java.lang.String) ref;
+ }
+ }
+ /**
+ * required string version = 1;
+ */
+ public com.google.protobuf.ByteString
+ getVersionBytes() {
+ java.lang.Object ref = version_;
+ if (ref instanceof String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ version_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+ /**
+ * required string version = 1;
+ */
+ public Builder setVersion(
+ java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000001;
+ version_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * required string version = 1;
+ */
+ public Builder clearVersion() {
+ bitField0_ = (bitField0_ & ~0x00000001);
+ version_ = getDefaultInstance().getVersion();
+ onChanged();
+ return this;
+ }
+ /**
+ * required string version = 1;
+ */
+ public Builder setVersionBytes(
+ com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000001;
+ version_ = value;
+ onChanged();
+ return this;
+ }
+
+ // required string backup_id = 2;
+ private java.lang.Object backupId_ = "";
+ /**
+ * required string backup_id = 2;
+ */
+ public boolean hasBackupId() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ /**
+ * required string backup_id = 2;
+ */
+ public java.lang.String getBackupId() {
+ java.lang.Object ref = backupId_;
+ if (!(ref instanceof java.lang.String)) {
+ java.lang.String s = ((com.google.protobuf.ByteString) ref)
+ .toStringUtf8();
+ backupId_ = s;
+ return s;
+ } else {
+ return (java.lang.String) ref;
+ }
+ }
+ /**
+ * required string backup_id = 2;
+ */
+ public com.google.protobuf.ByteString
+ getBackupIdBytes() {
+ java.lang.Object ref = backupId_;
+ if (ref instanceof String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ backupId_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+ /**
+ * required string backup_id = 2;
+ */
+ public Builder setBackupId(
+ java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000002;
+ backupId_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * required string backup_id = 2;
+ */
+ public Builder clearBackupId() {
+ bitField0_ = (bitField0_ & ~0x00000002);
+ backupId_ = getDefaultInstance().getBackupId();
+ onChanged();
+ return this;
+ }
+ /**
+ * required string backup_id = 2;
+ */
+ public Builder setBackupIdBytes(
+ com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000002;
+ backupId_ = value;
+ onChanged();
+ return this;
+ }
+
+ // required .hbase.pb.BackupType type = 3;
+ private org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType type_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType.FULL;
+ /**
+ * required .hbase.pb.BackupType type = 3;
+ */
+ public boolean hasType() {
+ return ((bitField0_ & 0x00000004) == 0x00000004);
+ }
+ /**
+ * required .hbase.pb.BackupType type = 3;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType getType() {
+ return type_;
+ }
+ /**
+ * required .hbase.pb.BackupType type = 3;
+ */
+ public Builder setType(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000004;
+ type_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * required .hbase.pb.BackupType type = 3;
+ */
+ public Builder clearType() {
+ bitField0_ = (bitField0_ & ~0x00000004);
+ type_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType.FULL;
+ onChanged();
+ return this;
+ }
+
+ // repeated .hbase.pb.TableName table_list = 4;
+ private java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName> tableList_ =
+ java.util.Collections.emptyList();
+ private void ensureTableListIsMutable() {
+ if (!((bitField0_ & 0x00000008) == 0x00000008)) {
+ tableList_ = new java.util.ArrayList<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName>(tableList_);
+ bitField0_ |= 0x00000008;
+ }
+ }
+
+ private com.google.protobuf.RepeatedFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> tableListBuilder_;
+
+ /**
+ * repeated .hbase.pb.TableName table_list = 4;
+ */
+ public java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName> getTableListList() {
+ if (tableListBuilder_ == null) {
+ return java.util.Collections.unmodifiableList(tableList_);
+ } else {
+ return tableListBuilder_.getMessageList();
+ }
+ }
+ /**
+ * repeated .hbase.pb.TableName table_list = 4;
+ */
+ public int getTableListCount() {
+ if (tableListBuilder_ == null) {
+ return tableList_.size();
+ } else {
+ return tableListBuilder_.getCount();
+ }
+ }
+ /**
+ * repeated .hbase.pb.TableName table_list = 4;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableList(int index) {
+ if (tableListBuilder_ == null) {
+ return tableList_.get(index);
+ } else {
+ return tableListBuilder_.getMessage(index);
+ }
+ }
+ /**
+ * repeated .hbase.pb.TableName table_list = 4;
+ */
+ public Builder setTableList(
+ int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) {
+ if (tableListBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureTableListIsMutable();
+ tableList_.set(index, value);
+ onChanged();
+ } else {
+ tableListBuilder_.setMessage(index, value);
+ }
+ return this;
+ }
+ /**
+ * repeated .hbase.pb.TableName table_list = 4;
+ */
+ public Builder setTableList(
+ int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) {
+ if (tableListBuilder_ == null) {
+ ensureTableListIsMutable();
+ tableList_.set(index, builderForValue.build());
+ onChanged();
+ } else {
+ tableListBuilder_.setMessage(index, builderForValue.build());
+ }
+ return this;
+ }
+ /**
+ * repeated .hbase.pb.TableName table_list = 4;
+ */
+ public Builder addTableList(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) {
+ if (tableListBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureTableListIsMutable();
+ tableList_.add(value);
+ onChanged();
+ } else {
+ tableListBuilder_.addMessage(value);
+ }
+ return this;
+ }
+ /**
+ * repeated .hbase.pb.TableName table_list = 4;
+ */
+ public Builder addTableList(
+ int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) {
+ if (tableListBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureTableListIsMutable();
+ tableList_.add(index, value);
+ onChanged();
+ } else {
+ tableListBuilder_.addMessage(index, value);
+ }
+ return this;
+ }
+ /**
+ * repeated .hbase.pb.TableName table_list = 4;
+ */
+ public Builder addTableList(
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) {
+ if (tableListBuilder_ == null) {
+ ensureTableListIsMutable();
+ tableList_.add(builderForValue.build());
+ onChanged();
+ } else {
+ tableListBuilder_.addMessage(builderForValue.build());
+ }
+ return this;
+ }
+ /**
+ * repeated .hbase.pb.TableName table_list = 4;
+ */
+ public Builder addTableList(
+ int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) {
+ if (tableListBuilder_ == null) {
+ ensureTableListIsMutable();
+ tableList_.add(index, builderForValue.build());
+ onChanged();
+ } else {
+ tableListBuilder_.addMessage(index, builderForValue.build());
+ }
+ return this;
+ }
+ /**
+ * repeated .hbase.pb.TableName table_list = 4;
+ */
+ public Builder addAllTableList(
+ java.lang.Iterable<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName> values) {
+ if (tableListBuilder_ == null) {
+ ensureTableListIsMutable();
+ super.addAll(values, tableList_);
+ onChanged();
+ } else {
+ tableListBuilder_.addAllMessages(values);
+ }
+ return this;
+ }
+ /**
+ * repeated .hbase.pb.TableName table_list = 4;
+ */
+ public Builder clearTableList() {
+ if (tableListBuilder_ == null) {
+ tableList_ = java.util.Collections.emptyList();
+ bitField0_ = (bitField0_ & ~0x00000008);
+ onChanged();
+ } else {
+ tableListBuilder_.clear();
+ }
+ return this;
+ }
+ /**
+ * repeated .hbase.pb.TableName table_list = 4;
+ */
+ public Builder removeTableList(int index) {
+ if (tableListBuilder_ == null) {
+ ensureTableListIsMutable();
+ tableList_.remove(index);
+ onChanged();
+ } else {
+ tableListBuilder_.remove(index);
+ }
+ return this;
+ }
+ /**
+ * repeated .hbase.pb.TableName table_list = 4;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder getTableListBuilder(
+ int index) {
+ return getTableListFieldBuilder().getBuilder(index);
+ }
+ /**
+ * repeated .hbase.pb.TableName table_list = 4;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableListOrBuilder(
+ int index) {
+ if (tableListBuilder_ == null) {
+ return tableList_.get(index); } else {
+ return tableListBuilder_.getMessageOrBuilder(index);
+ }
+ }
+ /**
+ * repeated .hbase.pb.TableName table_list = 4;
+ */
+ public java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder>
+ getTableListOrBuilderList() {
+ if (tableListBuilder_ != null) {
+ return tableListBuilder_.getMessageOrBuilderList();
+ } else {
+ return java.util.Collections.unmodifiableList(tableList_);
+ }
+ }
+ /**
+ * repeated .hbase.pb.TableName table_list = 4;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder addTableListBuilder() {
+ return getTableListFieldBuilder().addBuilder(
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance());
+ }
+ /**
+ * repeated .hbase.pb.TableName table_list = 4;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder addTableListBuilder(
+ int index) {
+ return getTableListFieldBuilder().addBuilder(
+ index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance());
+ }
+ /**
+ * repeated .hbase.pb.TableName table_list = 4;
+ */
+ public java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder>
+ getTableListBuilderList() {
+ return getTableListFieldBuilder().getBuilderList();
+ }
+ private com.google.protobuf.RepeatedFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder>
+ getTableListFieldBuilder() {
+ if (tableListBuilder_ == null) {
+ tableListBuilder_ = new com.google.protobuf.RepeatedFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder>(
+ tableList_,
+ ((bitField0_ & 0x00000008) == 0x00000008),
+ getParentForChildren(),
+ isClean());
+ tableList_ = null;
+ }
+ return tableListBuilder_;
+ }
+
+ // required uint64 start_ts = 5;
+ private long startTs_ ;
+ /**
+ * required uint64 start_ts = 5;
+ */
+ public boolean hasStartTs() {
+ return ((bitField0_ & 0x00000010) == 0x00000010);
+ }
+ /**
+ * required uint64 start_ts = 5;
+ */
+ public long getStartTs() {
+ return startTs_;
+ }
+ /**
+ * required uint64 start_ts = 5;
+ */
+ public Builder setStartTs(long value) {
+ bitField0_ |= 0x00000010;
+ startTs_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * required uint64 start_ts = 5;
+ */
+ public Builder clearStartTs() {
+ bitField0_ = (bitField0_ & ~0x00000010);
+ startTs_ = 0L;
+ onChanged();
+ return this;
+ }
+
+ // required uint64 complete_ts = 6;
+ private long completeTs_ ;
+ /**
+ * required uint64 complete_ts = 6;
+ */
+ public boolean hasCompleteTs() {
+ return ((bitField0_ & 0x00000020) == 0x00000020);
+ }
+ /**
+ * required uint64 complete_ts = 6;
+ */
+ public long getCompleteTs() {
+ return completeTs_;
+ }
+ /**
+ * required uint64 complete_ts = 6;
+ */
+ public Builder setCompleteTs(long value) {
+ bitField0_ |= 0x00000020;
+ completeTs_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * required uint64 complete_ts = 6;
+ */
+ public Builder clearCompleteTs() {
+ bitField0_ = (bitField0_ & ~0x00000020);
+ completeTs_ = 0L;
+ onChanged();
+ return this;
+ }
+
+ // required int64 total_bytes = 7;
+ private long totalBytes_ ;
+ /**
+ * required int64 total_bytes = 7;
+ */
+ public boolean hasTotalBytes() {
+ return ((bitField0_ & 0x00000040) == 0x00000040);
+ }
+ /**
+ * required int64 total_bytes = 7;
+ */
+ public long getTotalBytes() {
+ return totalBytes_;
+ }
+ /**
+ * required int64 total_bytes = 7;
+ */
+ public Builder setTotalBytes(long value) {
+ bitField0_ |= 0x00000040;
+ totalBytes_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * required int64 total_bytes = 7;
+ */
+ public Builder clearTotalBytes() {
+ bitField0_ = (bitField0_ & ~0x00000040);
+ totalBytes_ = 0L;
+ onChanged();
+ return this;
+ }
+
+ // optional int64 log_bytes = 8;
+ private long logBytes_ ;
+ /**
+ * optional int64 log_bytes = 8;
+ */
+ public boolean hasLogBytes() {
+ return ((bitField0_ & 0x00000080) == 0x00000080);
+ }
+ /**
+ * optional int64 log_bytes = 8;
+ */
+ public long getLogBytes() {
+ return logBytes_;
+ }
+ /**
+ * optional int64 log_bytes = 8;
+ */
+ public Builder setLogBytes(long value) {
+ bitField0_ |= 0x00000080;
+ logBytes_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * optional int64 log_bytes = 8;
+ */
+ public Builder clearLogBytes() {
+ bitField0_ = (bitField0_ & ~0x00000080);
+ logBytes_ = 0L;
+ onChanged();
+ return this;
+ }
+
+ // repeated .hbase.pb.TableServerTimestamp tst_map = 9;
+ private java.util.List<org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp> tstMap_ =
+ java.util.Collections.emptyList();
+ private void ensureTstMapIsMutable() {
+ if (!((bitField0_ & 0x00000100) == 0x00000100)) {
+ tstMap_ = new java.util.ArrayList<org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp>(tstMap_);
+ bitField0_ |= 0x00000100;
+ }
+ }
+
+ private com.google.protobuf.RepeatedFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp.Builder, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestampOrBuilder> tstMapBuilder_;
+
+ /**
+ * repeated .hbase.pb.TableServerTimestamp tst_map = 9;
+ */
+ public java.util.List<org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp> getTstMapList() {
+ if (tstMapBuilder_ == null) {
+ return java.util.Collections.unmodifiableList(tstMap_);
+ } else {
+ return tstMapBuilder_.getMessageList();
+ }
+ }
+ /**
+ * repeated .hbase.pb.TableServerTimestamp tst_map = 9;
+ */
+ public int getTstMapCount() {
+ if (tstMapBuilder_ == null) {
+ return tstMap_.size();
+ } else {
+ return tstMapBuilder_.getCount();
+ }
+ }
+ /**
+ * repeated .hbase.pb.TableServerTimestamp tst_map = 9;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp getTstMap(int index) {
+ if (tstMapBuilder_ == null) {
+ return tstMap_.get(index);
+ } else {
+ return tstMapBuilder_.getMessage(index);
+ }
+ }
+ /**
+ * repeated .hbase.pb.TableServerTimestamp tst_map = 9;
+ */
+ public Builder setTstMap(
+ int index, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp value) {
+ if (tstMapBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureTstMapIsMutable();
+ tstMap_.set(index, value);
+ onChanged();
+ } else {
+ tstMapBuilder_.setMessage(index, value);
+ }
+ return this;
+ }
+ /**
+ * repeated .hbase.pb.TableServerTimestamp tst_map = 9;
+ */
+ public Builder setTstMap(
+ int index, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp.Builder builderForValue) {
+ if (tstMapBuilder_ == null) {
+ ensureTstMapIsMutable();
+ tstMap_.set(index, builderForValue.build());
+ onChanged();
+ } else {
+ tstMapBuilder_.setMessage(index, builderForValue.build());
+ }
+ return this;
+ }
+ /**
+ * repeated .hbase.pb.TableServerTimestamp tst_map = 9;
+ */
+ public Builder addTstMap(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp value) {
+ if (tstMapBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureTstMapIsMutable();
+ tstMap_.add(value);
+ onChanged();
+ } else {
+ tstMapBuilder_.addMessage(value);
+ }
+ return this;
+ }
+ /**
+ * repeated .hbase.pb.TableServerTimestamp tst_map = 9;
+ */
+ public Builder addTstMap(
+ int index, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp value) {
+ if (tstMapBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureTstMapIsMutable();
+ tstMap_.add(index, value);
+ onChanged();
+ } else {
+ tstMapBuilder_.addMessage(index, value);
+ }
+ return this;
+ }
+ /**
+ * repeated .hbase.pb.TableServerTimestamp tst_map = 9;
+ */
+ public Builder addTstMap(
+ org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp.Builder builderForValue) {
+ if (tstMapBuilder_ == null) {
+ ensureTstMapIsMutable();
+ tstMap_.add(builderForValue.build());
+ onChanged();
+ } else {
+ tstMapBuilder_.addMessage(builderForValue.build());
+ }
+ return this;
+ }
+ /**
+ * repeated .hbase.pb.TableServerTimestamp tst_map = 9;
+ */
+ public Builder addTstMap(
+ int index, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp.Builder builderForValue) {
+ if (tstMapBuilder_ == null) {
+ ensureTstMapIsMutable();
+ tstMap_.add(index, builderForValue.build());
+ onChanged();
+ } else {
+ tstMapBuilder_.addMessage(index, builderForValue.build());
+ }
+ return this;
+ }
+ /**
+ * repeated .hbase.pb.TableServerTimestamp tst_map = 9;
+ */
+ public Builder addAllTstMap(
+ java.lang.Iterable<? extends org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp> values) {
+ if (tstMapBuilder_ == null) {
+ ensureTstMapIsMutable();
+ super.addAll(values, tstMap_);
+ onChanged();
+ } else {
+ tstMapBuilder_.addAllMessages(values);
+ }
+ return this;
+ }
+ /**
+ * repeated .hbase.pb.TableServerTimestamp tst_map = 9;
+ */
+ public Builder clearTstMap() {
+ if (tstMapBuilder_ == null) {
+ tstMap_ = java.util.Collections.emptyList();
+ bitField0_ = (bitField0_ & ~0x00000100);
+ onChanged();
+ } else {
+ tstMapBuilder_.clear();
+ }
+ return this;
+ }
+ /**
+ * repeated .hbase.pb.TableServerTimestamp tst_map = 9;
+ */
+ public Builder removeTstMap(int index) {
+ if (tstMapBuilder_ == null) {
+ ensureTstMapIsMutable();
+ tstMap_.remove(index);
+ onChanged();
+ } else {
+ tstMapBuilder_.remove(index);
+ }
+ return this;
+ }
+ /**
+ * repeated .hbase.pb.TableServerTimestamp tst_map = 9;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp.Builder getTstMapBuilder(
+ int index) {
+ return getTstMapFieldBuilder().getBuilder(index);
+ }
+ /**
+ * repeated .hbase.pb.TableServerTimestamp tst_map = 9;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestampOrBuilder getTstMapOrBuilder(
+ int index) {
+ if (tstMapBuilder_ == null) {
+ return tstMap_.get(index); } else {
+ return tstMapBuilder_.getMessageOrBuilder(index);
+ }
+ }
+ /**
+ * repeated .hbase.pb.TableServerTimestamp tst_map = 9;
+ */
+ public java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestampOrBuilder>
+ getTstMapOrBuilderList() {
+ if (tstMapBuilder_ != null) {
+ return tstMapBuilder_.getMessageOrBuilderList();
+ } else {
+ return java.util.Collections.unmodifiableList(tstMap_);
+ }
+ }
+ /**
+ * repeated .hbase.pb.TableServerTimestamp tst_map = 9;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp.Builder addTstMapBuilder() {
+ return getTstMapFieldBuilder().addBuilder(
+ org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp.getDefaultInstance());
+ }
+ /**
+ * repeated .hbase.pb.TableServerTimestamp tst_map = 9;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp.Builder addTstMapBuilder(
+ int index) {
+ return getTstMapFieldBuilder().addBuilder(
+ index, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp.getDefaultInstance());
+ }
+ /**
+ * repeated .hbase.pb.TableServerTimestamp tst_map = 9;
+ */
+ public java.util.List<org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp.Builder>
+ getTstMapBuilderList() {
+ return getTstMapFieldBuilder().getBuilderList();
+ }
+ private com.google.protobuf.RepeatedFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp.Builder, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestampOrBuilder>
+ getTstMapFieldBuilder() {
+ if (tstMapBuilder_ == null) {
+ tstMapBuilder_ = new com.google.protobuf.RepeatedFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp.Builder, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestampOrBuilder>(
+ tstMap_,
+ ((bitField0_ & 0x00000100) == 0x00000100),
+ getParentForChildren(),
+ isClean());
+ tstMap_ = null;
+ }
+ return tstMapBuilder_;
+ }
+
+ // repeated .hbase.pb.BackupImage dependent_backup_image = 10;
+ private java.util.List<org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage> dependentBackupImage_ =
+ java.util.Collections.emptyList();
+ private void ensureDependentBackupImageIsMutable() {
+ if (!((bitField0_ & 0x00000200) == 0x00000200)) {
+ dependentBackupImage_ = new java.util.ArrayList<org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage>(dependentBackupImage_);
+ bitField0_ |= 0x00000200;
+ }
+ }
+
+ private com.google.protobuf.RepeatedFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage.Builder, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImageOrBuilder> dependentBackupImageBuilder_;
+
+ /**
+ * repeated .hbase.pb.BackupImage dependent_backup_image = 10;
+ */
+ public java.util.List<org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage> getDependentBackupImageList() {
+ if (dependentBackupImageBuilder_ == null) {
+ return java.util.Collections.unmodifiableList(dependentBackupImage_);
+ } else {
+ return dependentBackupImageBuilder_.getMessageList();
+ }
+ }
+ /**
+ * repeated .hbase.pb.BackupImage dependent_backup_image = 10;
+ */
+ public int getDependentBackupImageCount() {
+ if (dependentBackupImageBuilder_ == null) {
+ return dependentBackupImage_.size();
+ } else {
+ return dependentBackupImageBuilder_.getCount();
+ }
+ }
+ /**
+ * repeated .hbase.pb.BackupImage dependent_backup_image = 10;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage getDependentBackupImage(int index) {
+ if (dependentBackupImageBuilder_ == null) {
+ return dependentBackupImage_.get(index);
+ } else {
+ return dependentBackupImageBuilder_.getMessage(index);
+ }
+ }
+ /**
+ * repeated .hbase.pb.BackupImage dependent_backup_image = 10;
+ */
+ public Builder setDependentBackupImage(
+ int index, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage value) {
+ if (dependentBackupImageBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureDependentBackupImageIsMutable();
+ dependentBackupImage_.set(index, value);
+ onChanged();
+ } else {
+ dependentBackupImageBuilder_.setMessage(index, value);
+ }
+ return this;
+ }
+ /**
+ * repeated .hbase.pb.BackupImage dependent_backup_image = 10;
+ */
+ public Builder setDependentBackupImage(
+ int index, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage.Builder builderForValue) {
+ if (dependentBackupImageBuilder_ == null) {
+ ensureDependentBackupImageIsMutable();
+ dependentBackupImage_.set(index, builderForValue.build());
+ onChanged();
+ } else {
+ dependentBackupImageBuilder_.setMessage(index, builderForValue.build());
+ }
+ return this;
+ }
+ /**
+ * repeated .hbase.pb.BackupImage dependent_backup_image = 10;
+ */
+ public Builder addDependentBackupImage(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage value) {
+ if (dependentBackupImageBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureDependentBackupImageIsMutable();
+ dependentBackupImage_.add(value);
+ onChanged();
+ } else {
+ dependentBackupImageBuilder_.addMessage(value);
+ }
+ return this;
+ }
+ /**
+ * repeated .hbase.pb.BackupImage dependent_backup_image = 10;
+ */
+ public Builder addDependentBackupImage(
+ int index, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage value) {
+ if (dependentBackupImageBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureDependentBackupImageIsMutable();
+ dependentBackupImage_.add(index, value);
+ onChanged();
+ } else {
+ dependentBackupImageBuilder_.addMessage(index, value);
+ }
+ return this;
+ }
+ /**
+ * repeated .hbase.pb.BackupImage dependent_backup_image = 10;
+ */
+ public Builder addDependentBackupImage(
+ org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage.Builder builderForValue) {
+ if (dependentBackupImageBuilder_ == null) {
+ ensureDependentBackupImageIsMutable();
+ dependentBackupImage_.add(builderForValue.build());
+ onChanged();
+ } else {
+ dependentBackupImageBuilder_.addMessage(builderForValue.build());
+ }
+ return this;
+ }
+ /**
+ * repeated .hbase.pb.BackupImage dependent_backup_image = 10;
+ */
+ public Builder addDependentBackupImage(
+ int index, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage.Builder builderForValue) {
+ if (dependentBackupImageBuilder_ == null) {
+ ensureDependentBackupImageIsMutable();
+ dependentBackupImage_.add(index, builderForValue.build());
+ onChanged();
+ } else {
+ dependentBackupImageBuilder_.addMessage(index, builderForValue.build());
+ }
+ return this;
+ }
+ /**
+ * repeated .hbase.pb.BackupImage dependent_backup_image = 10;
+ */
+ public Builder addAllDependentBackupImage(
+ java.lang.Iterable<? extends org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage> values) {
+ if (dependentBackupImageBuilder_ == null) {
+ ensureDependentBackupImageIsMutable();
+ super.addAll(values, dependentBackupImage_);
+ onChanged();
+ } else {
+ dependentBackupImageBuilder_.addAllMessages(values);
+ }
+ return this;
+ }
+ /**
+ * repeated .hbase.pb.BackupImage dependent_backup_image = 10;
+ */
+ public Builder clearDependentBackupImage() {
+ if (dependentBackupImageBuilder_ == null) {
+ dependentBackupImage_ = java.util.Collections.emptyList();
+ bitField0_ = (bitField0_ & ~0x00000200);
+ onChanged();
+ } else {
+ dependentBackupImageBuilder_.clear();
+ }
+ return this;
+ }
+ /**
+ * repeated .hbase.pb.BackupImage dependent_backup_image = 10;
+ */
+ public Builder removeDependentBackupImage(int index) {
+ if (dependentBackupImageBuilder_ == null) {
+ ensureDependentBackupImageIsMutable();
+ dependentBackupImage_.remove(index);
+ onChanged();
+ } else {
+ dependentBackupImageBuilder_.remove(index);
+ }
+ return this;
+ }
+ /**
+ * repeated .hbase.pb.BackupImage dependent_backup_image = 10;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage.Builder getDependentBackupImageBuilder(
+ int index) {
+ return getDependentBackupImageFieldBuilder().getBuilder(index);
+ }
+ /**
+ * repeated .hbase.pb.BackupImage dependent_backup_image = 10;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImageOrBuilder getDependentBackupImageOrBuilder(
+ int index) {
+ if (dependentBackupImageBuilder_ == null) {
+ return dependentBackupImage_.get(index); } else {
+ return dependentBackupImageBuilder_.getMessageOrBuilder(index);
+ }
+ }
+ /**
+ * repeated .hbase.pb.BackupImage dependent_backup_image = 10;
+ */
+ public java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImageOrBuilder>
+ getDependentBackupImageOrBuilderList() {
+ if (dependentBackupImageBuilder_ != null) {
+ return dependentBackupImageBuilder_.getMessageOrBuilderList();
+ } else {
+ return java.util.Collections.unmodifiableList(dependentBackupImage_);
+ }
+ }
+ /**
+ * repeated .hbase.pb.BackupImage dependent_backup_image = 10;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage.Builder addDependentBackupImageBuilder() {
+ return getDependentBackupImageFieldBuilder().addBuilder(
+ org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage.getDefaultInstance());
+ }
+ /**
+ * repeated .hbase.pb.BackupImage dependent_backup_image = 10;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage.Builder addDependentBackupImageBuilder(
+ int index) {
+ return getDependentBackupImageFieldBuilder().addBuilder(
+ index, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage.getDefaultInstance());
+ }
+ /**
+ * repeated .hbase.pb.BackupImage dependent_backup_image = 10;
+ */
+ public java.util.List<org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage.Builder>
+ getDependentBackupImageBuilderList() {
+ return getDependentBackupImageFieldBuilder().getBuilderList();
+ }
+ private com.google.protobuf.RepeatedFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage.Builder, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImageOrBuilder>
+ getDependentBackupImageFieldBuilder() {
+ if (dependentBackupImageBuilder_ == null) {
+ dependentBackupImageBuilder_ = new com.google.protobuf.RepeatedFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage.Builder, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImageOrBuilder>(
+ dependentBackupImage_,
+ ((bitField0_ & 0x00000200) == 0x00000200),
+ getParentForChildren(),
+ isClean());
+ dependentBackupImage_ = null;
+ }
+ return dependentBackupImageBuilder_;
+ }
+
+ // required bool compacted = 11;
+ private boolean compacted_ ;
+ /**
+ * required bool compacted = 11;
+ */
+ public boolean hasCompacted() {
+ return ((bitField0_ & 0x00000400) == 0x00000400);
+ }
+ /**
+ * required bool compacted = 11;
+ */
+ public boolean getCompacted() {
+ return compacted_;
+ }
+ /**
+ * required bool compacted = 11;
+ */
+ public Builder setCompacted(boolean value) {
+ bitField0_ |= 0x00000400;
+ compacted_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * required bool compacted = 11;
+ */
+ public Builder clearCompacted() {
+ bitField0_ = (bitField0_ & ~0x00000400);
+ compacted_ = false;
+ onChanged();
+ return this;
+ }
+
+ // @@protoc_insertion_point(builder_scope:hbase.pb.BackupManifest)
+ }
+
+ static {
+ defaultInstance = new BackupManifest(true);
+ defaultInstance.initFields();
+ }
+
+ // @@protoc_insertion_point(class_scope:hbase.pb.BackupManifest)
+ }
+
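For orientation while reading this generated code, here is a minimal, hypothetical sketch of how the BackupManifest builder above could be exercised. The backup id, timestamps, sizes and table name are illustrative only and are not taken from this patch; the TableName namespace/qualifier setters are assumed from the hbase.pb.TableName message, which is defined elsewhere.

    import com.google.protobuf.ByteString;
    import org.apache.hadoop.hbase.protobuf.generated.BackupProtos;
    import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;

    public class BackupManifestSketch {
      public static void main(String[] args) throws Exception {
        // Assumed shape of hbase.pb.TableName (namespace/qualifier as bytes fields).
        HBaseProtos.TableName table = HBaseProtos.TableName.newBuilder()
            .setNamespace(ByteString.copyFromUtf8("default"))
            .setQualifier(ByteString.copyFromUtf8("usertable"))
            .build();

        // Every field checked in Builder.isInitialized() above must be set before build().
        BackupProtos.BackupManifest manifest = BackupProtos.BackupManifest.newBuilder()
            .setVersion("1.0")                           // illustrative value
            .setBackupId("backup_1462384825520")         // illustrative value
            .setType(BackupProtos.BackupType.FULL)
            .addTableList(table)
            .setStartTs(1462384825520L)
            .setCompleteTs(1462384830000L)
            .setTotalBytes(1024L)
            .setCompacted(false)
            .build();

        // Standard protobuf round trip of the manifest bytes.
        byte[] bytes = manifest.toByteArray();
        BackupProtos.BackupManifest parsed = BackupProtos.BackupManifest.parseFrom(bytes);
        System.out.println(parsed.getBackupId());
      }
    }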
+ public interface TableBackupStatusOrBuilder
+ extends com.google.protobuf.MessageOrBuilder {
+
+ // required .hbase.pb.TableName table = 1;
+ /**
+ * required .hbase.pb.TableName table = 1;
+ */
+ boolean hasTable();
+ /**
+ * required .hbase.pb.TableName table = 1;
+ */
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTable();
+ /**
+ * required .hbase.pb.TableName table = 1;
+ */
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableOrBuilder();
+
+ // required string target_dir = 2;
+ /**
+ * required string target_dir = 2;
+ */
+ boolean hasTargetDir();
+ /**
+ * required string target_dir = 2;
+ */
+ java.lang.String getTargetDir();
+ /**
+ * required string target_dir = 2;
+ */
+ com.google.protobuf.ByteString
+ getTargetDirBytes();
+
+ // optional string snapshot = 3;
+ /**
+ * optional string snapshot = 3;
+ */
+ boolean hasSnapshot();
+ /**
+ * optional string snapshot = 3;
+ */
+ java.lang.String getSnapshot();
+ /**
+ * optional string snapshot = 3;
+ */
+ com.google.protobuf.ByteString
+ getSnapshotBytes();
+ }
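A similarly hedged sketch for the TableBackupStatus message generated below: table and target_dir are required (mirroring its isInitialized() checks), snapshot is optional, and the directory and snapshot names here are made up for illustration. The delimited write/read pair matches the parseDelimitedFrom() overloads generated further down.

    import com.google.protobuf.ByteString;
    import org.apache.hadoop.hbase.protobuf.generated.BackupProtos;
    import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;

    public class TableBackupStatusSketch {
      public static void main(String[] args) throws Exception {
        BackupProtos.TableBackupStatus status = BackupProtos.TableBackupStatus.newBuilder()
            .setTable(HBaseProtos.TableName.newBuilder()            // required
                .setNamespace(ByteString.copyFromUtf8("default"))
                .setQualifier(ByteString.copyFromUtf8("usertable"))
                .build())
            .setTargetDir("hdfs://nn:8020/backup/backup_1462384825520")      // required, illustrative
            .setSnapshot("snapshot_usertable_1462384825520")                  // optional, illustrative
            .build();

        // Length-delimited round trip through an in-memory stream.
        java.io.ByteArrayOutputStream out = new java.io.ByteArrayOutputStream();
        status.writeDelimitedTo(out);
        BackupProtos.TableBackupStatus readBack = BackupProtos.TableBackupStatus
            .parseDelimitedFrom(new java.io.ByteArrayInputStream(out.toByteArray()));
        System.out.println(readBack.getTargetDir());
      }
    }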
+ /**
+ * Protobuf type {@code hbase.pb.TableBackupStatus}
+ */
+ public static final class TableBackupStatus extends
+ com.google.protobuf.GeneratedMessage
+ implements TableBackupStatusOrBuilder {
+ // Use TableBackupStatus.newBuilder() to construct.
+ private TableBackupStatus(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+ super(builder);
+ this.unknownFields = builder.getUnknownFields();
+ }
+ private TableBackupStatus(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+ private static final TableBackupStatus defaultInstance;
+ public static TableBackupStatus getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public TableBackupStatus getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ private final com.google.protobuf.UnknownFieldSet unknownFields;
+ @java.lang.Override
+ public final com.google.protobuf.UnknownFieldSet
+ getUnknownFields() {
+ return this.unknownFields;
+ }
+ private TableBackupStatus(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ initFields();
+ int mutable_bitField0_ = 0;
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder();
+ try {
+ boolean done = false;
+ while (!done) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ done = true;
+ break;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ done = true;
+ }
+ break;
+ }
+ case 10: {
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder subBuilder = null;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ subBuilder = table_.toBuilder();
+ }
+ table_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.PARSER, extensionRegistry);
+ if (subBuilder != null) {
+ subBuilder.mergeFrom(table_);
+ table_ = subBuilder.buildPartial();
+ }
+ bitField0_ |= 0x00000001;
+ break;
+ }
+ case 18: {
+ bitField0_ |= 0x00000002;
+ targetDir_ = input.readBytes();
+ break;
+ }
+ case 26: {
+ bitField0_ |= 0x00000004;
+ snapshot_ = input.readBytes();
+ break;
+ }
+ }
+ }
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ throw e.setUnfinishedMessage(this);
+ } catch (java.io.IOException e) {
+ throw new com.google.protobuf.InvalidProtocolBufferException(
+ e.getMessage()).setUnfinishedMessage(this);
+ } finally {
+ this.unknownFields = unknownFields.build();
+ makeExtensionsImmutable();
+ }
+ }
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_TableBackupStatus_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_TableBackupStatus_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus.class, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus.Builder.class);
+ }
+
+ public static com.google.protobuf.Parser<TableBackupStatus> PARSER =
+ new com.google.protobuf.AbstractParser<TableBackupStatus>() {
+ public TableBackupStatus parsePartialFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return new TableBackupStatus(input, extensionRegistry);
+ }
+ };
+
+ @java.lang.Override
+ public com.google.protobuf.Parser<TableBackupStatus> getParserForType() {
+ return PARSER;
+ }
+
+ private int bitField0_;
+ // required .hbase.pb.TableName table = 1;
+ public static final int TABLE_FIELD_NUMBER = 1;
+ private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName table_;
+ /**
+ * required .hbase.pb.TableName table = 1;
+ */
+ public boolean hasTable() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * required .hbase.pb.TableName table = 1;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTable() {
+ return table_;
+ }
+ /**
+ * required .hbase.pb.TableName table = 1;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableOrBuilder() {
+ return table_;
+ }
+
+ // required string target_dir = 2;
+ public static final int TARGET_DIR_FIELD_NUMBER = 2;
+ private java.lang.Object targetDir_;
+ /**
+ * required string target_dir = 2;
+ */
+ public boolean hasTargetDir() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ /**
+ * required string target_dir = 2;
+ */
+ public java.lang.String getTargetDir() {
+ java.lang.Object ref = targetDir_;
+ if (ref instanceof java.lang.String) {
+ return (java.lang.String) ref;
+ } else {
+ com.google.protobuf.ByteString bs =
+ (com.google.protobuf.ByteString) ref;
+ java.lang.String s = bs.toStringUtf8();
+ if (bs.isValidUtf8()) {
+ targetDir_ = s;
+ }
+ return s;
+ }
+ }
+ /**
+ * required string target_dir = 2;
+ */
+ public com.google.protobuf.ByteString
+ getTargetDirBytes() {
+ java.lang.Object ref = targetDir_;
+ if (ref instanceof java.lang.String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ targetDir_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+
+ // optional string snapshot = 3;
+ public static final int SNAPSHOT_FIELD_NUMBER = 3;
+ private java.lang.Object snapshot_;
+ /**
+ * optional string snapshot = 3;
+ */
+ public boolean hasSnapshot() {
+ return ((bitField0_ & 0x00000004) == 0x00000004);
+ }
+ /**
+ * optional string snapshot = 3;
+ */
+ public java.lang.String getSnapshot() {
+ java.lang.Object ref = snapshot_;
+ if (ref instanceof java.lang.String) {
+ return (java.lang.String) ref;
+ } else {
+ com.google.protobuf.ByteString bs =
+ (com.google.protobuf.ByteString) ref;
+ java.lang.String s = bs.toStringUtf8();
+ if (bs.isValidUtf8()) {
+ snapshot_ = s;
+ }
+ return s;
+ }
+ }
+ /**
+ * optional string snapshot = 3;
+ */
+ public com.google.protobuf.ByteString
+ getSnapshotBytes() {
+ java.lang.Object ref = snapshot_;
+ if (ref instanceof java.lang.String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ snapshot_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+
+ private void initFields() {
+ table_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance();
+ targetDir_ = "";
+ snapshot_ = "";
+ }
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized != -1) return isInitialized == 1;
+
+ if (!hasTable()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!hasTargetDir()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!getTable().isInitialized()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ output.writeMessage(1, table_);
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ output.writeBytes(2, getTargetDirBytes());
+ }
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
+ output.writeBytes(3, getSnapshotBytes());
+ }
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeMessageSize(1, table_);
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBytesSize(2, getTargetDirBytes());
+ }
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBytesSize(3, getSnapshotBytes());
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ protected java.lang.Object writeReplace()
+ throws java.io.ObjectStreamException {
+ return super.writeReplace();
+ }
+
+ @java.lang.Override
+ public boolean equals(final java.lang.Object obj) {
+ if (obj == this) {
+ return true;
+ }
+ if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus)) {
+ return super.equals(obj);
+ }
+ org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus other = (org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus) obj;
+
+ boolean result = true;
+ result = result && (hasTable() == other.hasTable());
+ if (hasTable()) {
+ result = result && getTable()
+ .equals(other.getTable());
+ }
+ result = result && (hasTargetDir() == other.hasTargetDir());
+ if (hasTargetDir()) {
+ result = result && getTargetDir()
+ .equals(other.getTargetDir());
+ }
+ result = result && (hasSnapshot() == other.hasSnapshot());
+ if (hasSnapshot()) {
+ result = result && getSnapshot()
+ .equals(other.getSnapshot());
+ }
+ result = result &&
+ getUnknownFields().equals(other.getUnknownFields());
+ return result;
+ }
+
+ private int memoizedHashCode = 0;
+ @java.lang.Override
+ public int hashCode() {
+ if (memoizedHashCode != 0) {
+ return memoizedHashCode;
+ }
+ int hash = 41;
+ hash = (19 * hash) + getDescriptorForType().hashCode();
+ if (hasTable()) {
+ hash = (37 * hash) + TABLE_FIELD_NUMBER;
+ hash = (53 * hash) + getTable().hashCode();
+ }
+ if (hasTargetDir()) {
+ hash = (37 * hash) + TARGET_DIR_FIELD_NUMBER;
+ hash = (53 * hash) + getTargetDir().hashCode();
+ }
+ if (hasSnapshot()) {
+ hash = (37 * hash) + SNAPSHOT_FIELD_NUMBER;
+ hash = (53 * hash) + getSnapshot().hashCode();
+ }
+ hash = (29 * hash) + getUnknownFields().hashCode();
+ memoizedHashCode = hash;
+ return hash;
+ }
+
+ public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+
+ public static Builder newBuilder() { return Builder.create(); }
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus prototype) {
+ return newBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ /**
+ * Protobuf type {@code hbase.pb.TableBackupStatus}
+ */
+ public static final class Builder extends
+ com.google.protobuf.GeneratedMessage.Builder<Builder>
+ implements org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatusOrBuilder {
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_TableBackupStatus_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_TableBackupStatus_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus.class, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus.Builder.class);
+ }
+
+ // Construct using org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus.newBuilder()
+ private Builder() {
+ maybeForceBuilderInitialization();
+ }
+
+ private Builder(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ super(parent);
+ maybeForceBuilderInitialization();
+ }
+ private void maybeForceBuilderInitialization() {
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+ getTableFieldBuilder();
+ }
+ }
+ private static Builder create() {
+ return new Builder();
+ }
+
+ public Builder clear() {
+ super.clear();
+ if (tableBuilder_ == null) {
+ table_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance();
+ } else {
+ tableBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000001);
+ targetDir_ = "";
+ bitField0_ = (bitField0_ & ~0x00000002);
+ snapshot_ = "";
+ bitField0_ = (bitField0_ & ~0x00000004);
+ return this;
+ }
+
+ public Builder clone() {
+ return create().mergeFrom(buildPartial());
+ }
+
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_TableBackupStatus_descriptor;
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus getDefaultInstanceForType() {
+ return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus.getDefaultInstance();
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus build() {
+ org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return result;
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus buildPartial() {
+ org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus result = new org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus(this);
+ int from_bitField0_ = bitField0_;
+ int to_bitField0_ = 0;
+ if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+ to_bitField0_ |= 0x00000001;
+ }
+ if (tableBuilder_ == null) {
+ result.table_ = table_;
+ } else {
+ result.table_ = tableBuilder_.build();
+ }
+ if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
+ to_bitField0_ |= 0x00000002;
+ }
+ result.targetDir_ = targetDir_;
+ if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
+ to_bitField0_ |= 0x00000004;
+ }
+ result.snapshot_ = snapshot_;
+ result.bitField0_ = to_bitField0_;
+ onBuilt();
+ return result;
+ }
+
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus) {
+ return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus other) {
+ if (other == org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus.getDefaultInstance()) return this;
+ if (other.hasTable()) {
+ mergeTable(other.getTable());
+ }
+ if (other.hasTargetDir()) {
+ bitField0_ |= 0x00000002;
+ targetDir_ = other.targetDir_;
+ onChanged();
+ }
+ if (other.hasSnapshot()) {
+ bitField0_ |= 0x00000004;
+ snapshot_ = other.snapshot_;
+ onChanged();
+ }
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ public final boolean isInitialized() {
+ if (!hasTable()) {
+
+ return false;
+ }
+ if (!hasTargetDir()) {
+
+ return false;
+ }
+ if (!getTable().isInitialized()) {
+
+ return false;
+ }
+ return true;
+ }
+
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus parsedMessage = null;
+ try {
+ parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus) e.getUnfinishedMessage();
+ throw e;
+ } finally {
+ if (parsedMessage != null) {
+ mergeFrom(parsedMessage);
+ }
+ }
+ return this;
+ }
+ private int bitField0_;
+
+ // required .hbase.pb.TableName table = 1;
+ private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName table_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance();
+ private com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> tableBuilder_;
+ /**
+ * required .hbase.pb.TableName table = 1;
+ */
+ public boolean hasTable() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * required .hbase.pb.TableName table = 1;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTable() {
+ if (tableBuilder_ == null) {
+ return table_;
+ } else {
+ return tableBuilder_.getMessage();
+ }
+ }
+ /**
+ * required .hbase.pb.TableName table = 1;
+ */
+ public Builder setTable(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) {
+ if (tableBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ table_ = value;
+ onChanged();
+ } else {
+ tableBuilder_.setMessage(value);
+ }
+ bitField0_ |= 0x00000001;
+ return this;
+ }
+ /**
+ * required .hbase.pb.TableName table = 1;
+ */
+ public Builder setTable(
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) {
+ if (tableBuilder_ == null) {
+ table_ = builderForValue.build();
+ onChanged();
+ } else {
+ tableBuilder_.setMessage(builderForValue.build());
+ }
+ bitField0_ |= 0x00000001;
+ return this;
+ }
+ /**
+ * required .hbase.pb.TableName table = 1;
+ */
+ public Builder mergeTable(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) {
+ if (tableBuilder_ == null) {
+ if (((bitField0_ & 0x00000001) == 0x00000001) &&
+ table_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance()) {
+ table_ =
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.newBuilder(table_).mergeFrom(value).buildPartial();
+ } else {
+ table_ = value;
+ }
+ onChanged();
+ } else {
+ tableBuilder_.mergeFrom(value);
+ }
+ bitField0_ |= 0x00000001;
+ return this;
+ }
+ /**
+ * required .hbase.pb.TableName table = 1;
+ */
+ public Builder clearTable() {
+ if (tableBuilder_ == null) {
+ table_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance();
+ onChanged();
+ } else {
+ tableBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000001);
+ return this;
+ }
+ /**
+ * required .hbase.pb.TableName table = 1;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder getTableBuilder() {
+ bitField0_ |= 0x00000001;
+ onChanged();
+ return getTableFieldBuilder().getBuilder();
+ }
+ /**
+ * required .hbase.pb.TableName table = 1;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableOrBuilder() {
+ if (tableBuilder_ != null) {
+ return tableBuilder_.getMessageOrBuilder();
+ } else {
+ return table_;
+ }
+ }
+ /**
+ * required .hbase.pb.TableName table = 1;
+ */
+ private com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder>
+ getTableFieldBuilder() {
+ if (tableBuilder_ == null) {
+ tableBuilder_ = new com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder>(
+ table_,
+ getParentForChildren(),
+ isClean());
+ table_ = null;
+ }
+ return tableBuilder_;
+ }
+
+ // required string target_dir = 2;
+ private java.lang.Object targetDir_ = "";
+ /**
+ * required string target_dir = 2;
+ */
+ public boolean hasTargetDir() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ /**
+ * required string target_dir = 2;
+ */
+ public java.lang.String getTargetDir() {
+ java.lang.Object ref = targetDir_;
+ if (!(ref instanceof java.lang.String)) {
+ java.lang.String s = ((com.google.protobuf.ByteString) ref)
+ .toStringUtf8();
+ targetDir_ = s;
+ return s;
+ } else {
+ return (java.lang.String) ref;
+ }
+ }
+ /**
+ * required string target_dir = 2;
+ */
+ public com.google.protobuf.ByteString
+ getTargetDirBytes() {
+ java.lang.Object ref = targetDir_;
+ if (ref instanceof String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ targetDir_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+ /**
+ * required string target_dir = 2;
+ */
+ public Builder setTargetDir(
+ java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000002;
+ targetDir_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * required string target_dir = 2;
+ */
+ public Builder clearTargetDir() {
+ bitField0_ = (bitField0_ & ~0x00000002);
+ targetDir_ = getDefaultInstance().getTargetDir();
+ onChanged();
+ return this;
+ }
+ /**
+ * required string target_dir = 2;
+ */
+ public Builder setTargetDirBytes(
+ com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000002;
+ targetDir_ = value;
+ onChanged();
+ return this;
+ }
+
+ // optional string snapshot = 3;
+ private java.lang.Object snapshot_ = "";
+ /**
+ * optional string snapshot = 3;
+ */
+ public boolean hasSnapshot() {
+ return ((bitField0_ & 0x00000004) == 0x00000004);
+ }
+ /**
+ * optional string snapshot = 3;
+ */
+ public java.lang.String getSnapshot() {
+ java.lang.Object ref = snapshot_;
+ if (!(ref instanceof java.lang.String)) {
+ java.lang.String s = ((com.google.protobuf.ByteString) ref)
+ .toStringUtf8();
+ snapshot_ = s;
+ return s;
+ } else {
+ return (java.lang.String) ref;
+ }
+ }
+ /**
+ * optional string snapshot = 3;
+ */
+ public com.google.protobuf.ByteString
+ getSnapshotBytes() {
+ java.lang.Object ref = snapshot_;
+ if (ref instanceof String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ snapshot_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+ /**
+ * optional string snapshot = 3;
+ */
+ public Builder setSnapshot(
+ java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000004;
+ snapshot_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * optional string snapshot = 3;
+ */
+ public Builder clearSnapshot() {
+ bitField0_ = (bitField0_ & ~0x00000004);
+ snapshot_ = getDefaultInstance().getSnapshot();
+ onChanged();
+ return this;
+ }
+ /**
+ * optional string snapshot = 3;
+ */
+ public Builder setSnapshotBytes(
+ com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000004;
+ snapshot_ = value;
+ onChanged();
+ return this;
+ }
+
+ // @@protoc_insertion_point(builder_scope:hbase.pb.TableBackupStatus)
+ }
+
+ static {
+ defaultInstance = new TableBackupStatus(true);
+ defaultInstance.initFields();
+ }
+
+ // @@protoc_insertion_point(class_scope:hbase.pb.TableBackupStatus)
+ }
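
A minimal usage sketch of the generated TableBackupStatus API above (illustrative only, not part of the generated file): the builder enforces the required table and target_dir fields at build() time, while snapshot remains optional. The TableName setters (setNamespace/setQualifier taking UTF-8 ByteStrings) are assumed from the existing HBaseProtos.TableName message; the HDFS path and snapshot name below are placeholders.

    // Editorial sketch, assuming the standard protobuf 2.5 generated API.
    import com.google.protobuf.ByteString;
    import org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus;
    import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName;

    public class TableBackupStatusExample {
      public static void main(String[] args) throws Exception {
        TableName table = TableName.newBuilder()
            .setNamespace(ByteString.copyFromUtf8("default"))
            .setQualifier(ByteString.copyFromUtf8("usertable"))
            .build();

        // 'table' and 'target_dir' are required; 'snapshot' is optional.
        TableBackupStatus status = TableBackupStatus.newBuilder()
            .setTable(table)
            .setTargetDir("hdfs://backup/root/backup_001/default/usertable")
            .setSnapshot("snapshot_usertable_001")
            .build();

        // Round-trip through the wire format.
        byte[] bytes = status.toByteArray();
        TableBackupStatus parsed = TableBackupStatus.parseFrom(bytes);
        assert parsed.getTargetDir().equals(status.getTargetDir());
      }
    }
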
+
+ public interface BackupContextOrBuilder
+ extends com.google.protobuf.MessageOrBuilder {
+
+ // required string backup_id = 1;
+ /**
+ * required string backup_id = 1;
+ */
+ boolean hasBackupId();
+ /**
+ * required string backup_id = 1;
+ */
+ java.lang.String getBackupId();
+ /**
+ * required string backup_id = 1;
+ */
+ com.google.protobuf.ByteString
+ getBackupIdBytes();
+
+ // required .hbase.pb.BackupType type = 2;
+ /**
+ * required .hbase.pb.BackupType type = 2;
+ */
+ boolean hasType();
+ /**
+ * required .hbase.pb.BackupType type = 2;
+ */
+ org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType getType();
+
+ // required string target_root_dir = 3;
+ /**
+ * required string target_root_dir = 3;
+ */
+ boolean hasTargetRootDir();
+ /**
+ * required string target_root_dir = 3;
+ */
+ java.lang.String getTargetRootDir();
+ /**
+ * required string target_root_dir = 3;
+ */
+ com.google.protobuf.ByteString
+ getTargetRootDirBytes();
+
+ // optional .hbase.pb.BackupContext.BackupState state = 4;
+ /**
+ * optional .hbase.pb.BackupContext.BackupState state = 4;
+ */
+ boolean hasState();
+ /**
+ * optional .hbase.pb.BackupContext.BackupState state = 4;
+ */
+ org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext.BackupState getState();
+
+ // optional .hbase.pb.BackupContext.BackupPhase phase = 5;
+ /**
+ * optional .hbase.pb.BackupContext.BackupPhase phase = 5;
+ */
+ boolean hasPhase();
+ /**
+ * optional .hbase.pb.BackupContext.BackupPhase phase = 5;
+ */
+ org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext.BackupPhase getPhase();
+
+ // optional string failed_message = 6;
+ /**
+ * optional string failed_message = 6;
+ */
+ boolean hasFailedMessage();
+ /**
+ * optional string failed_message = 6;
+ */
+ java.lang.String getFailedMessage();
+ /**
+ * optional string failed_message = 6;
+ */
+ com.google.protobuf.ByteString
+ getFailedMessageBytes();
+
+ // repeated .hbase.pb.TableBackupStatus table_backup_status = 7;
+ /**
+ * repeated .hbase.pb.TableBackupStatus table_backup_status = 7;
+ */
+ java.util.List<org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus>
+ getTableBackupStatusList();
+ /**
+ * repeated .hbase.pb.TableBackupStatus table_backup_status = 7;
+ */
+ org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus getTableBackupStatus(int index);
+ /**
+ * repeated .hbase.pb.TableBackupStatus table_backup_status = 7;
+ */
+ int getTableBackupStatusCount();
+ /**
+ * repeated .hbase.pb.TableBackupStatus table_backup_status = 7;
+ */
+ java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatusOrBuilder>
+ getTableBackupStatusOrBuilderList();
+ /**
+ * repeated .hbase.pb.TableBackupStatus table_backup_status = 7;
+ */
+ org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatusOrBuilder getTableBackupStatusOrBuilder(
+ int index);
+
+ // optional uint64 start_ts = 8;
+ /**
+ * optional uint64 start_ts = 8;
+ */
+ boolean hasStartTs();
+ /**
+ * optional uint64 start_ts = 8;
+ */
+ long getStartTs();
+
+ // optional uint64 end_ts = 9;
+ /**
+ * optional uint64 end_ts = 9;
+ */
+ boolean hasEndTs();
+ /**
+ * optional uint64 end_ts = 9;
+ */
+ long getEndTs();
+
+ // optional int64 total_bytes_copied = 10;
+ /**
+ * optional int64 total_bytes_copied = 10;
+ */
+ boolean hasTotalBytesCopied();
+ /**
+ * optional int64 total_bytes_copied = 10;
+ */
+ long getTotalBytesCopied();
+
+ // optional string hlog_target_dir = 11;
+ /**
+ * optional string hlog_target_dir = 11;
+ */
+ boolean hasHlogTargetDir();
+ /**
+ * optional string hlog_target_dir = 11;
+ */
+ java.lang.String getHlogTargetDir();
+ /**
+ * optional string hlog_target_dir = 11;
+ */
+ com.google.protobuf.ByteString
+ getHlogTargetDirBytes();
+
+ // optional uint32 progress = 12;
+ /**
+ * optional uint32 progress = 12;
+ */
+ boolean hasProgress();
+ /**
+ * optional uint32 progress = 12;
+ */
+ int getProgress();
+ }
+ /**
+ * Protobuf type {@code hbase.pb.BackupContext}
+ */
+ public static final class BackupContext extends
+ com.google.protobuf.GeneratedMessage
+ implements BackupContextOrBuilder {
+ // Use BackupContext.newBuilder() to construct.
+ private BackupContext(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+ super(builder);
+ this.unknownFields = builder.getUnknownFields();
+ }
+ private BackupContext(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+ private static final BackupContext defaultInstance;
+ public static BackupContext getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public BackupContext getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ private final com.google.protobuf.UnknownFieldSet unknownFields;
+ @java.lang.Override
+ public final com.google.protobuf.UnknownFieldSet
+ getUnknownFields() {
+ return this.unknownFields;
+ }
+ private BackupContext(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ initFields();
+ int mutable_bitField0_ = 0;
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder();
+ try {
+ boolean done = false;
+ while (!done) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ done = true;
+ break;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ done = true;
+ }
+ break;
+ }
+ case 10: {
+ bitField0_ |= 0x00000001;
+ backupId_ = input.readBytes();
+ break;
+ }
+ case 16: {
+ int rawValue = input.readEnum();
+ org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType value = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType.valueOf(rawValue);
+ if (value == null) {
+ unknownFields.mergeVarintField(2, rawValue);
+ } else {
+ bitField0_ |= 0x00000002;
+ type_ = value;
+ }
+ break;
+ }
+ case 26: {
+ bitField0_ |= 0x00000004;
+ targetRootDir_ = input.readBytes();
+ break;
+ }
+ case 32: {
+ int rawValue = input.readEnum();
+ org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext.BackupState value = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext.BackupState.valueOf(rawValue);
+ if (value == null) {
+ unknownFields.mergeVarintField(4, rawValue);
+ } else {
+ bitField0_ |= 0x00000008;
+ state_ = value;
+ }
+ break;
+ }
+ case 40: {
+ int rawValue = input.readEnum();
+ org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext.BackupPhase value = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext.BackupPhase.valueOf(rawValue);
+ if (value == null) {
+ unknownFields.mergeVarintField(5, rawValue);
+ } else {
+ bitField0_ |= 0x00000010;
+ phase_ = value;
+ }
+ break;
+ }
+ case 50: {
+ bitField0_ |= 0x00000020;
+ failedMessage_ = input.readBytes();
+ break;
+ }
+ case 58: {
+ if (!((mutable_bitField0_ & 0x00000040) == 0x00000040)) {
+ tableBackupStatus_ = new java.util.ArrayList<org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus>();
+ mutable_bitField0_ |= 0x00000040;
+ }
+ tableBackupStatus_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus.PARSER, extensionRegistry));
+ break;
+ }
+ case 64: {
+ bitField0_ |= 0x00000040;
+ startTs_ = input.readUInt64();
+ break;
+ }
+ case 72: {
+ bitField0_ |= 0x00000080;
+ endTs_ = input.readUInt64();
+ break;
+ }
+ case 80: {
+ bitField0_ |= 0x00000100;
+ totalBytesCopied_ = input.readInt64();
+ break;
+ }
+ case 90: {
+ bitField0_ |= 0x00000200;
+ hlogTargetDir_ = input.readBytes();
+ break;
+ }
+ case 96: {
+ bitField0_ |= 0x00000400;
+ progress_ = input.readUInt32();
+ break;
+ }
+ }
+ }
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ throw e.setUnfinishedMessage(this);
+ } catch (java.io.IOException e) {
+ throw new com.google.protobuf.InvalidProtocolBufferException(
+ e.getMessage()).setUnfinishedMessage(this);
+ } finally {
+ if (((mutable_bitField0_ & 0x00000040) == 0x00000040)) {
+ tableBackupStatus_ = java.util.Collections.unmodifiableList(tableBackupStatus_);
+ }
+ this.unknownFields = unknownFields.build();
+ makeExtensionsImmutable();
+ }
+ }
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_BackupContext_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_BackupContext_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext.class, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext.Builder.class);
+ }
+
+ public static com.google.protobuf.Parser<BackupContext> PARSER =
+ new com.google.protobuf.AbstractParser<BackupContext>() {
+ public BackupContext parsePartialFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return new BackupContext(input, extensionRegistry);
+ }
+ };
+
+ @java.lang.Override
+ public com.google.protobuf.Parser<BackupContext> getParserForType() {
+ return PARSER;
+ }
+
+ /**
+ * Protobuf enum {@code hbase.pb.BackupContext.BackupState}
+ */
+ public enum BackupState
+ implements com.google.protobuf.ProtocolMessageEnum {
+ /**
+ * WAITING = 0;
+ */
+ WAITING(0, 0),
+ /**
+ * RUNNING = 1;
+ */
+ RUNNING(1, 1),
+ /**
+ * COMPLETE = 2;
+ */
+ COMPLETE(2, 2),
+ /**
+ * FAILED = 3;
+ */
+ FAILED(3, 3),
+ /**
+ * CANCELLED = 4;
+ */
+ CANCELLED(4, 4),
+ ;
+
+ /**
+ * WAITING = 0;
+ */
+ public static final int WAITING_VALUE = 0;
+ /**
+ * RUNNING = 1;
+ */
+ public static final int RUNNING_VALUE = 1;
+ /**
+ * COMPLETE = 2;
+ */
+ public static final int COMPLETE_VALUE = 2;
+ /**
+ * FAILED = 3;
+ */
+ public static final int FAILED_VALUE = 3;
+ /**
+ * CANCELLED = 4;
+ */
+ public static final int CANCELLED_VALUE = 4;
+
+
+ public final int getNumber() { return value; }
+
+ public static BackupState valueOf(int value) {
+ switch (value) {
+ case 0: return WAITING;
+ case 1: return RUNNING;
+ case 2: return COMPLETE;
+ case 3: return FAILED;
+ case 4: return CANCELLED;
+ default: return null;
+ }
+ }
+
+ public static com.google.protobuf.Internal.EnumLiteMap<BackupState>
+ internalGetValueMap() {
+ return internalValueMap;
+ }
+ private static com.google.protobuf.Internal.EnumLiteMap<BackupState>
+ internalValueMap =
+ new com.google.protobuf.Internal.EnumLiteMap<BackupState>() {
+ public BackupState findValueByNumber(int number) {
+ return BackupState.valueOf(number);
+ }
+ };
+
+ public final com.google.protobuf.Descriptors.EnumValueDescriptor
+ getValueDescriptor() {
+ return getDescriptor().getValues().get(index);
+ }
+ public final com.google.protobuf.Descriptors.EnumDescriptor
+ getDescriptorForType() {
+ return getDescriptor();
+ }
+ public static final com.google.protobuf.Descriptors.EnumDescriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext.getDescriptor().getEnumTypes().get(0);
+ }
+
+ private static final BackupState[] VALUES = values();
+
+ public static BackupState valueOf(
+ com.google.protobuf.Descriptors.EnumValueDescriptor desc) {
+ if (desc.getType() != getDescriptor()) {
+ throw new java.lang.IllegalArgumentException(
+ "EnumValueDescriptor is not for this type.");
+ }
+ return VALUES[desc.getIndex()];
+ }
+
+ private final int index;
+ private final int value;
+
+ private BackupState(int index, int value) {
+ this.index = index;
+ this.value = value;
+ }
+
+ // @@protoc_insertion_point(enum_scope:hbase.pb.BackupContext.BackupState)
+ }
+
+ /**
+ * Protobuf enum {@code hbase.pb.BackupContext.BackupPhase}
+ */
+ public enum BackupPhase
+ implements com.google.protobuf.ProtocolMessageEnum {
+ /**
+ * REQUEST = 0;
+ */
+ REQUEST(0, 0),
+ /**
+ * SNAPSHOT = 1;
+ */
+ SNAPSHOT(1, 1),
+ /**
+ * PREPARE_INCREMENTAL = 2;
+ */
+ PREPARE_INCREMENTAL(2, 2),
+ /**
+ * SNAPSHOTCOPY = 3;
+ */
+ SNAPSHOTCOPY(3, 3),
+ /**
+ * INCREMENTAL_COPY = 4;
+ */
+ INCREMENTAL_COPY(4, 4),
+ /**
+ * STORE_MANIFEST = 5;
+ */
+ STORE_MANIFEST(5, 5),
+ ;
+
+ /**
+ * REQUEST = 0;
+ */
+ public static final int REQUEST_VALUE = 0;
+ /**
+ * SNAPSHOT = 1;
+ */
+ public static final int SNAPSHOT_VALUE = 1;
+ /**
+ * PREPARE_INCREMENTAL = 2;
+ */
+ public static final int PREPARE_INCREMENTAL_VALUE = 2;
+ /**
+ * SNAPSHOTCOPY = 3;
+ */
+ public static final int SNAPSHOTCOPY_VALUE = 3;
+ /**
+ * INCREMENTAL_COPY = 4;
+ */
+ public static final int INCREMENTAL_COPY_VALUE = 4;
+ /**
+ * STORE_MANIFEST = 5;
+ */
+ public static final int STORE_MANIFEST_VALUE = 5;
+
+
+ public final int getNumber() { return value; }
+
+ public static BackupPhase valueOf(int value) {
+ switch (value) {
+ case 0: return REQUEST;
+ case 1: return SNAPSHOT;
+ case 2: return PREPARE_INCREMENTAL;
+ case 3: return SNAPSHOTCOPY;
+ case 4: return INCREMENTAL_COPY;
+ case 5: return STORE_MANIFEST;
+ default: return null;
+ }
+ }
+
+ public static com.google.protobuf.Internal.EnumLiteMap<BackupPhase>
+ internalGetValueMap() {
+ return internalValueMap;
+ }
+ private static com.google.protobuf.Internal.EnumLiteMap<BackupPhase>
+ internalValueMap =
+ new com.google.protobuf.Internal.EnumLiteMap<BackupPhase>() {
+ public BackupPhase findValueByNumber(int number) {
+ return BackupPhase.valueOf(number);
+ }
+ };
+
+ public final com.google.protobuf.Descriptors.EnumValueDescriptor
+ getValueDescriptor() {
+ return getDescriptor().getValues().get(index);
+ }
+ public final com.google.protobuf.Descriptors.EnumDescriptor
+ getDescriptorForType() {
+ return getDescriptor();
+ }
+ public static final com.google.protobuf.Descriptors.EnumDescriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext.getDescriptor().getEnumTypes().get(1);
+ }
+
+ private static final BackupPhase[] VALUES = values();
+
+ public static BackupPhase valueOf(
+ com.google.protobuf.Descriptors.EnumValueDescriptor desc) {
+ if (desc.getType() != getDescriptor()) {
+ throw new java.lang.IllegalArgumentException(
+ "EnumValueDescriptor is not for this type.");
+ }
+ return VALUES[desc.getIndex()];
+ }
+
+ private final int index;
+ private final int value;
+
+ private BackupPhase(int index, int value) {
+ this.index = index;
+ this.value = value;
+ }
+
+ // @@protoc_insertion_point(enum_scope:hbase.pb.BackupContext.BackupPhase)
+ }
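
A short sketch of how the BackupState/BackupPhase wire numbers above round-trip through the generated enum API (illustrative, not part of the generated file): valueOf(int) returns null for numbers this client does not recognize, which is why the BackupContext parser above routes unrecognized enum values into the unknown-field set instead of failing.

    // Editorial sketch of the generated enum accessors shown above.
    import org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext.BackupPhase;
    import org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext.BackupState;

    public class BackupEnumExample {
      public static void main(String[] args) {
        BackupState state = BackupState.valueOf(2);      // COMPLETE
        System.out.println(state + " -> " + state.getNumber());

        BackupPhase phase = BackupPhase.valueOf(BackupPhase.SNAPSHOTCOPY_VALUE);
        System.out.println(phase);                       // SNAPSHOTCOPY

        // Numbers outside the declared range map to null rather than throwing.
        System.out.println(BackupState.valueOf(42));     // null
      }
    }
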
+
+ private int bitField0_;
+ // required string backup_id = 1;
+ public static final int BACKUP_ID_FIELD_NUMBER = 1;
+ private java.lang.Object backupId_;
+ /**
+ * required string backup_id = 1;
+ */
+ public boolean hasBackupId() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * required string backup_id = 1;
+ */
+ public java.lang.String getBackupId() {
+ java.lang.Object ref = backupId_;
+ if (ref instanceof java.lang.String) {
+ return (java.lang.String) ref;
+ } else {
+ com.google.protobuf.ByteString bs =
+ (com.google.protobuf.ByteString) ref;
+ java.lang.String s = bs.toStringUtf8();
+ if (bs.isValidUtf8()) {
+ backupId_ = s;
+ }
+ return s;
+ }
+ }
+ /**
+ * required string backup_id = 1;
+ */
+ public com.google.protobuf.ByteString
+ getBackupIdBytes() {
+ java.lang.Object ref = backupId_;
+ if (ref instanceof java.lang.String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ backupId_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+
+ // required .hbase.pb.BackupType type = 2;
+ public static final int TYPE_FIELD_NUMBER = 2;
+ private org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType type_;
+ /**
+ * required .hbase.pb.BackupType type = 2;
+ */
+ public boolean hasType() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ /**
+ * required .hbase.pb.BackupType type = 2;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType getType() {
+ return type_;
+ }
+
+ // required string target_root_dir = 3;
+ public static final int TARGET_ROOT_DIR_FIELD_NUMBER = 3;
+ private java.lang.Object targetRootDir_;
+ /**
+ * required string target_root_dir = 3;
+ */
+ public boolean hasTargetRootDir() {
+ return ((bitField0_ & 0x00000004) == 0x00000004);
+ }
+ /**
+ * required string target_root_dir = 3;
+ */
+ public java.lang.String getTargetRootDir() {
+ java.lang.Object ref = targetRootDir_;
+ if (ref instanceof java.lang.String) {
+ return (java.lang.String) ref;
+ } else {
+ com.google.protobuf.ByteString bs =
+ (com.google.protobuf.ByteString) ref;
+ java.lang.String s = bs.toStringUtf8();
+ if (bs.isValidUtf8()) {
+ targetRootDir_ = s;
+ }
+ return s;
+ }
+ }
+ /**
+ * required string target_root_dir = 3;
+ */
+ public com.google.protobuf.ByteString
+ getTargetRootDirBytes() {
+ java.lang.Object ref = targetRootDir_;
+ if (ref instanceof java.lang.String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ targetRootDir_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+
+ // optional .hbase.pb.BackupContext.BackupState state = 4;
+ public static final int STATE_FIELD_NUMBER = 4;
+ private org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext.BackupState state_;
+ /**
+ * optional .hbase.pb.BackupContext.BackupState state = 4;
+ */
+ public boolean hasState() {
+ return ((bitField0_ & 0x00000008) == 0x00000008);
+ }
+ /**
+ * optional .hbase.pb.BackupContext.BackupState state = 4;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext.BackupState getState() {
+ return state_;
+ }
+
+ // optional .hbase.pb.BackupContext.BackupPhase phase = 5;
+ public static final int PHASE_FIELD_NUMBER = 5;
+ private org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext.BackupPhase phase_;
+ /**
+ * optional .hbase.pb.BackupContext.BackupPhase phase = 5;
+ */
+ public boolean hasPhase() {
+ return ((bitField0_ & 0x00000010) == 0x00000010);
+ }
+ /**
+ * optional .hbase.pb.BackupContext.BackupPhase phase = 5;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext.BackupPhase getPhase() {
+ return phase_;
+ }
+
+ // optional string failed_message = 6;
+ public static final int FAILED_MESSAGE_FIELD_NUMBER = 6;
+ private java.lang.Object failedMessage_;
+ /**
+ * optional string failed_message = 6;
+ */
+ public boolean hasFailedMessage() {
+ return ((bitField0_ & 0x00000020) == 0x00000020);
+ }
+ /**
+ * optional string failed_message = 6;
+ */
+ public java.lang.String getFailedMessage() {
+ java.lang.Object ref = failedMessage_;
+ if (ref instanceof java.lang.String) {
+ return (java.lang.String) ref;
+ } else {
+ com.google.protobuf.ByteString bs =
+ (com.google.protobuf.ByteString) ref;
+ java.lang.String s = bs.toStringUtf8();
+ if (bs.isValidUtf8()) {
+ failedMessage_ = s;
+ }
+ return s;
+ }
+ }
+ /**
+ * optional string failed_message = 6;
+ */
+ public com.google.protobuf.ByteString
+ getFailedMessageBytes() {
+ java.lang.Object ref = failedMessage_;
+ if (ref instanceof java.lang.String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ failedMessage_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+
+ // repeated .hbase.pb.TableBackupStatus table_backup_status = 7;
+ public static final int TABLE_BACKUP_STATUS_FIELD_NUMBER = 7;
+ private java.util.List<org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus> tableBackupStatus_;
+ /**
+ * repeated .hbase.pb.TableBackupStatus table_backup_status = 7;
+ */
+ public java.util.List<org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus> getTableBackupStatusList() {
+ return tableBackupStatus_;
+ }
+ /**
+ * repeated .hbase.pb.TableBackupStatus table_backup_status = 7;
+ */
+ public java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatusOrBuilder>
+ getTableBackupStatusOrBuilderList() {
+ return tableBackupStatus_;
+ }
+ /**
+ * repeated .hbase.pb.TableBackupStatus table_backup_status = 7;
+ */
+ public int getTableBackupStatusCount() {
+ return tableBackupStatus_.size();
+ }
+ /**
+ * repeated .hbase.pb.TableBackupStatus table_backup_status = 7;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus getTableBackupStatus(int index) {
+ return tableBackupStatus_.get(index);
+ }
+ /**
+ * repeated .hbase.pb.TableBackupStatus table_backup_status = 7;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatusOrBuilder getTableBackupStatusOrBuilder(
+ int index) {
+ return tableBackupStatus_.get(index);
+ }
+
+ // optional uint64 start_ts = 8;
+ public static final int START_TS_FIELD_NUMBER = 8;
+ private long startTs_;
+ /**
+ * optional uint64 start_ts = 8;
+ */
+ public boolean hasStartTs() {
+ return ((bitField0_ & 0x00000040) == 0x00000040);
+ }
+ /**
+ * optional uint64 start_ts = 8;
+ */
+ public long getStartTs() {
+ return startTs_;
+ }
+
+ // optional uint64 end_ts = 9;
+ public static final int END_TS_FIELD_NUMBER = 9;
+ private long endTs_;
+ /**
+ * optional uint64 end_ts = 9;
+ */
+ public boolean hasEndTs() {
+ return ((bitField0_ & 0x00000080) == 0x00000080);
+ }
+ /**
+ * optional uint64 end_ts = 9;
+ */
+ public long getEndTs() {
+ return endTs_;
+ }
+
+ // optional int64 total_bytes_copied = 10;
+ public static final int TOTAL_BYTES_COPIED_FIELD_NUMBER = 10;
+ private long totalBytesCopied_;
+ /**
+ * optional int64 total_bytes_copied = 10;
+ */
+ public boolean hasTotalBytesCopied() {
+ return ((bitField0_ & 0x00000100) == 0x00000100);
+ }
+ /**
+ * optional int64 total_bytes_copied = 10;
+ */
+ public long getTotalBytesCopied() {
+ return totalBytesCopied_;
+ }
+
+ // optional string hlog_target_dir = 11;
+ public static final int HLOG_TARGET_DIR_FIELD_NUMBER = 11;
+ private java.lang.Object hlogTargetDir_;
+ /**
+ * optional string hlog_target_dir = 11;
+ */
+ public boolean hasHlogTargetDir() {
+ return ((bitField0_ & 0x00000200) == 0x00000200);
+ }
+ /**
+ * optional string hlog_target_dir = 11;
+ */
+ public java.lang.String getHlogTargetDir() {
+ java.lang.Object ref = hlogTargetDir_;
+ if (ref instanceof java.lang.String) {
+ return (java.lang.String) ref;
+ } else {
+ com.google.protobuf.ByteString bs =
+ (com.google.protobuf.ByteString) ref;
+ java.lang.String s = bs.toStringUtf8();
+ if (bs.isValidUtf8()) {
+ hlogTargetDir_ = s;
+ }
+ return s;
+ }
+ }
+ /**
+ * optional string hlog_target_dir = 11;
+ */
+ public com.google.protobuf.ByteString
+ getHlogTargetDirBytes() {
+ java.lang.Object ref = hlogTargetDir_;
+ if (ref instanceof java.lang.String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ hlogTargetDir_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+
+ // optional uint32 progress = 12;
+ public static final int PROGRESS_FIELD_NUMBER = 12;
+ private int progress_;
+ /**
+ * optional uint32 progress = 12;
+ */
+ public boolean hasProgress() {
+ return ((bitField0_ & 0x00000400) == 0x00000400);
+ }
+ /**
+ * optional uint32 progress = 12;
+ */
+ public int getProgress() {
+ return progress_;
+ }
+
+ private void initFields() {
+ backupId_ = "";
+ type_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType.FULL;
+ targetRootDir_ = "";
+ state_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext.BackupState.WAITING;
+ phase_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext.BackupPhase.REQUEST;
+ failedMessage_ = "";
+ tableBackupStatus_ = java.util.Collections.emptyList();
+ startTs_ = 0L;
+ endTs_ = 0L;
+ totalBytesCopied_ = 0L;
+ hlogTargetDir_ = "";
+ progress_ = 0;
+ }
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized != -1) return isInitialized == 1;
+
+ if (!hasBackupId()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!hasType()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!hasTargetRootDir()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ for (int i = 0; i < getTableBackupStatusCount(); i++) {
+ if (!getTableBackupStatus(i).isInitialized()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ }
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ output.writeBytes(1, getBackupIdBytes());
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ output.writeEnum(2, type_.getNumber());
+ }
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
+ output.writeBytes(3, getTargetRootDirBytes());
+ }
+ if (((bitField0_ & 0x00000008) == 0x00000008)) {
+ output.writeEnum(4, state_.getNumber());
+ }
+ if (((bitField0_ & 0x00000010) == 0x00000010)) {
+ output.writeEnum(5, phase_.getNumber());
+ }
+ if (((bitField0_ & 0x00000020) == 0x00000020)) {
+ output.writeBytes(6, getFailedMessageBytes());
+ }
+ for (int i = 0; i < tableBackupStatus_.size(); i++) {
+ output.writeMessage(7, tableBackupStatus_.get(i));
+ }
+ if (((bitField0_ & 0x00000040) == 0x00000040)) {
+ output.writeUInt64(8, startTs_);
+ }
+ if (((bitField0_ & 0x00000080) == 0x00000080)) {
+ output.writeUInt64(9, endTs_);
+ }
+ if (((bitField0_ & 0x00000100) == 0x00000100)) {
+ output.writeInt64(10, totalBytesCopied_);
+ }
+ if (((bitField0_ & 0x00000200) == 0x00000200)) {
+ output.writeBytes(11, getHlogTargetDirBytes());
+ }
+ if (((bitField0_ & 0x00000400) == 0x00000400)) {
+ output.writeUInt32(12, progress_);
+ }
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBytesSize(1, getBackupIdBytes());
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeEnumSize(2, type_.getNumber());
+ }
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBytesSize(3, getTargetRootDirBytes());
+ }
+ if (((bitField0_ & 0x00000008) == 0x00000008)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeEnumSize(4, state_.getNumber());
+ }
+ if (((bitField0_ & 0x00000010) == 0x00000010)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeEnumSize(5, phase_.getNumber());
+ }
+ if (((bitField0_ & 0x00000020) == 0x00000020)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBytesSize(6, getFailedMessageBytes());
+ }
+ for (int i = 0; i < tableBackupStatus_.size(); i++) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeMessageSize(7, tableBackupStatus_.get(i));
+ }
+ if (((bitField0_ & 0x00000040) == 0x00000040)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeUInt64Size(8, startTs_);
+ }
+ if (((bitField0_ & 0x00000080) == 0x00000080)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeUInt64Size(9, endTs_);
+ }
+ if (((bitField0_ & 0x00000100) == 0x00000100)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeInt64Size(10, totalBytesCopied_);
+ }
+ if (((bitField0_ & 0x00000200) == 0x00000200)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBytesSize(11, getHlogTargetDirBytes());
+ }
+ if (((bitField0_ & 0x00000400) == 0x00000400)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeUInt32Size(12, progress_);
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ protected java.lang.Object writeReplace()
+ throws java.io.ObjectStreamException {
+ return super.writeReplace();
+ }
+
+ @java.lang.Override
+ public boolean equals(final java.lang.Object obj) {
+ if (obj == this) {
+ return true;
+ }
+ if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext)) {
+ return super.equals(obj);
+ }
+ org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext other = (org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext) obj;
+
+ boolean result = true;
+ result = result && (hasBackupId() == other.hasBackupId());
+ if (hasBackupId()) {
+ result = result && getBackupId()
+ .equals(other.getBackupId());
+ }
+ result = result && (hasType() == other.hasType());
+ if (hasType()) {
+ result = result &&
+ (getType() == other.getType());
+ }
+ result = result && (hasTargetRootDir() == other.hasTargetRootDir());
+ if (hasTargetRootDir()) {
+ result = result && getTargetRootDir()
+ .equals(other.getTargetRootDir());
+ }
+ result = result && (hasState() == other.hasState());
+ if (hasState()) {
+ result = result &&
+ (getState() == other.getState());
+ }
+ result = result && (hasPhase() == other.hasPhase());
+ if (hasPhase()) {
+ result = result &&
+ (getPhase() == other.getPhase());
+ }
+ result = result && (hasFailedMessage() == other.hasFailedMessage());
+ if (hasFailedMessage()) {
+ result = result && getFailedMessage()
+ .equals(other.getFailedMessage());
+ }
+ result = result && getTableBackupStatusList()
+ .equals(other.getTableBackupStatusList());
+ result = result && (hasStartTs() == other.hasStartTs());
+ if (hasStartTs()) {
+ result = result && (getStartTs()
+ == other.getStartTs());
+ }
+ result = result && (hasEndTs() == other.hasEndTs());
+ if (hasEndTs()) {
+ result = result && (getEndTs()
+ == other.getEndTs());
+ }
+ result = result && (hasTotalBytesCopied() == other.hasTotalBytesCopied());
+ if (hasTotalBytesCopied()) {
+ result = result && (getTotalBytesCopied()
+ == other.getTotalBytesCopied());
+ }
+ result = result && (hasHlogTargetDir() == other.hasHlogTargetDir());
+ if (hasHlogTargetDir()) {
+ result = result && getHlogTargetDir()
+ .equals(other.getHlogTargetDir());
+ }
+ result = result && (hasProgress() == other.hasProgress());
+ if (hasProgress()) {
+ result = result && (getProgress()
+ == other.getProgress());
+ }
+ result = result &&
+ getUnknownFields().equals(other.getUnknownFields());
+ return result;
+ }
+
+ private int memoizedHashCode = 0;
+ @java.lang.Override
+ public int hashCode() {
+ if (memoizedHashCode != 0) {
+ return memoizedHashCode;
+ }
+ int hash = 41;
+ hash = (19 * hash) + getDescriptorForType().hashCode();
+ if (hasBackupId()) {
+ hash = (37 * hash) + BACKUP_ID_FIELD_NUMBER;
+ hash = (53 * hash) + getBackupId().hashCode();
+ }
+ if (hasType()) {
+ hash = (37 * hash) + TYPE_FIELD_NUMBER;
+ hash = (53 * hash) + hashEnum(getType());
+ }
+ if (hasTargetRootDir()) {
+ hash = (37 * hash) + TARGET_ROOT_DIR_FIELD_NUMBER;
+ hash = (53 * hash) + getTargetRootDir().hashCode();
+ }
+ if (hasState()) {
+ hash = (37 * hash) + STATE_FIELD_NUMBER;
+ hash = (53 * hash) + hashEnum(getState());
+ }
+ if (hasPhase()) {
+ hash = (37 * hash) + PHASE_FIELD_NUMBER;
+ hash = (53 * hash) + hashEnum(getPhase());
+ }
+ if (hasFailedMessage()) {
+ hash = (37 * hash) + FAILED_MESSAGE_FIELD_NUMBER;
+ hash = (53 * hash) + getFailedMessage().hashCode();
+ }
+ if (getTableBackupStatusCount() > 0) {
+ hash = (37 * hash) + TABLE_BACKUP_STATUS_FIELD_NUMBER;
+ hash = (53 * hash) + getTableBackupStatusList().hashCode();
+ }
+ if (hasStartTs()) {
+ hash = (37 * hash) + START_TS_FIELD_NUMBER;
+ hash = (53 * hash) + hashLong(getStartTs());
+ }
+ if (hasEndTs()) {
+ hash = (37 * hash) + END_TS_FIELD_NUMBER;
+ hash = (53 * hash) + hashLong(getEndTs());
+ }
+ if (hasTotalBytesCopied()) {
+ hash = (37 * hash) + TOTAL_BYTES_COPIED_FIELD_NUMBER;
+ hash = (53 * hash) + hashLong(getTotalBytesCopied());
+ }
+ if (hasHlogTargetDir()) {
+ hash = (37 * hash) + HLOG_TARGET_DIR_FIELD_NUMBER;
+ hash = (53 * hash) + getHlogTargetDir().hashCode();
+ }
+ if (hasProgress()) {
+ hash = (37 * hash) + PROGRESS_FIELD_NUMBER;
+ hash = (53 * hash) + getProgress();
+ }
+ hash = (29 * hash) + getUnknownFields().hashCode();
+ memoizedHashCode = hash;
+ return hash;
+ }
+
+ public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+
+ public static Builder newBuilder() { return Builder.create(); }
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext prototype) {
+ return newBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ /**
+ * Protobuf type {@code hbase.pb.BackupContext}
+ */
+ public static final class Builder extends
+ com.google.protobuf.GeneratedMessage.Builder<Builder>
+ implements org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContextOrBuilder {
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_BackupContext_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_BackupContext_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext.class, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext.Builder.class);
+ }
+
+ // Construct using org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext.newBuilder()
+ private Builder() {
+ maybeForceBuilderInitialization();
+ }
+
+ private Builder(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ super(parent);
+ maybeForceBuilderInitialization();
+ }
+ private void maybeForceBuilderInitialization() {
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+ getTableBackupStatusFieldBuilder();
+ }
+ }
+ private static Builder create() {
+ return new Builder();
+ }
+
+ public Builder clear() {
+ super.clear();
+ backupId_ = "";
+ bitField0_ = (bitField0_ & ~0x00000001);
+ type_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType.FULL;
+ bitField0_ = (bitField0_ & ~0x00000002);
+ targetRootDir_ = "";
+ bitField0_ = (bitField0_ & ~0x00000004);
+ state_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext.BackupState.WAITING;
+ bitField0_ = (bitField0_ & ~0x00000008);
+ phase_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext.BackupPhase.REQUEST;
+ bitField0_ = (bitField0_ & ~0x00000010);
+ failedMessage_ = "";
+ bitField0_ = (bitField0_ & ~0x00000020);
+ if (tableBackupStatusBuilder_ == null) {
+ tableBackupStatus_ = java.util.Collections.emptyList();
+ bitField0_ = (bitField0_ & ~0x00000040);
+ } else {
+ tableBackupStatusBuilder_.clear();
+ }
+ startTs_ = 0L;
+ bitField0_ = (bitField0_ & ~0x00000080);
+ endTs_ = 0L;
+ bitField0_ = (bitField0_ & ~0x00000100);
+ totalBytesCopied_ = 0L;
+ bitField0_ = (bitField0_ & ~0x00000200);
+ hlogTargetDir_ = "";
+ bitField0_ = (bitField0_ & ~0x00000400);
+ progress_ = 0;
+ bitField0_ = (bitField0_ & ~0x00000800);
+ return this;
+ }
+
+ public Builder clone() {
+ return create().mergeFrom(buildPartial());
+ }
+
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_BackupContext_descriptor;
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext getDefaultInstanceForType() {
+ return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext.getDefaultInstance();
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext build() {
+ org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return result;
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext buildPartial() {
+ org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext result = new org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext(this);
+ int from_bitField0_ = bitField0_;
+ int to_bitField0_ = 0;
+ if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+ to_bitField0_ |= 0x00000001;
+ }
+ result.backupId_ = backupId_;
+ if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
+ to_bitField0_ |= 0x00000002;
+ }
+ result.type_ = type_;
+ if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
+ to_bitField0_ |= 0x00000004;
+ }
+ result.targetRootDir_ = targetRootDir_;
+ if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
+ to_bitField0_ |= 0x00000008;
+ }
+ result.state_ = state_;
+ if (((from_bitField0_ & 0x00000010) == 0x00000010)) {
+ to_bitField0_ |= 0x00000010;
+ }
+ result.phase_ = phase_;
+ if (((from_bitField0_ & 0x00000020) == 0x00000020)) {
+ to_bitField0_ |= 0x00000020;
+ }
+ result.failedMessage_ = failedMessage_;
+ if (tableBackupStatusBuilder_ == null) {
+ if (((bitField0_ & 0x00000040) == 0x00000040)) {
+ tableBackupStatus_ = java.util.Collections.unmodifiableList(tableBackupStatus_);
+ bitField0_ = (bitField0_ & ~0x00000040);
+ }
+ result.tableBackupStatus_ = tableBackupStatus_;
+ } else {
+ result.tableBackupStatus_ = tableBackupStatusBuilder_.build();
+ }
+ if (((from_bitField0_ & 0x00000080) == 0x00000080)) {
+ to_bitField0_ |= 0x00000040;
+ }
+ result.startTs_ = startTs_;
+ if (((from_bitField0_ & 0x00000100) == 0x00000100)) {
+ to_bitField0_ |= 0x00000080;
+ }
+ result.endTs_ = endTs_;
+ if (((from_bitField0_ & 0x00000200) == 0x00000200)) {
+ to_bitField0_ |= 0x00000100;
+ }
+ result.totalBytesCopied_ = totalBytesCopied_;
+ if (((from_bitField0_ & 0x00000400) == 0x00000400)) {
+ to_bitField0_ |= 0x00000200;
+ }
+ result.hlogTargetDir_ = hlogTargetDir_;
+ if (((from_bitField0_ & 0x00000800) == 0x00000800)) {
+ to_bitField0_ |= 0x00000400;
+ }
+ result.progress_ = progress_;
+ result.bitField0_ = to_bitField0_;
+ onBuilt();
+ return result;
+ }
+
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext) {
+ return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext other) {
+ if (other == org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext.getDefaultInstance()) return this;
+ if (other.hasBackupId()) {
+ bitField0_ |= 0x00000001;
+ backupId_ = other.backupId_;
+ onChanged();
+ }
+ if (other.hasType()) {
+ setType(other.getType());
+ }
+ if (other.hasTargetRootDir()) {
+ bitField0_ |= 0x00000004;
+ targetRootDir_ = other.targetRootDir_;
+ onChanged();
+ }
+ if (other.hasState()) {
+ setState(other.getState());
+ }
+ if (other.hasPhase()) {
+ setPhase(other.getPhase());
+ }
+ if (other.hasFailedMessage()) {
+ bitField0_ |= 0x00000020;
+ failedMessage_ = other.failedMessage_;
+ onChanged();
+ }
+ if (tableBackupStatusBuilder_ == null) {
+ if (!other.tableBackupStatus_.isEmpty()) {
+ if (tableBackupStatus_.isEmpty()) {
+ tableBackupStatus_ = other.tableBackupStatus_;
+ bitField0_ = (bitField0_ & ~0x00000040);
+ } else {
+ ensureTableBackupStatusIsMutable();
+ tableBackupStatus_.addAll(other.tableBackupStatus_);
+ }
+ onChanged();
+ }
+ } else {
+ if (!other.tableBackupStatus_.isEmpty()) {
+ if (tableBackupStatusBuilder_.isEmpty()) {
+ tableBackupStatusBuilder_.dispose();
+ tableBackupStatusBuilder_ = null;
+ tableBackupStatus_ = other.tableBackupStatus_;
+ bitField0_ = (bitField0_ & ~0x00000040);
+ tableBackupStatusBuilder_ =
+ com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
+ getTableBackupStatusFieldBuilder() : null;
+ } else {
+ tableBackupStatusBuilder_.addAllMessages(other.tableBackupStatus_);
+ }
+ }
+ }
+ if (other.hasStartTs()) {
+ setStartTs(other.getStartTs());
+ }
+ if (other.hasEndTs()) {
+ setEndTs(other.getEndTs());
+ }
+ if (other.hasTotalBytesCopied()) {
+ setTotalBytesCopied(other.getTotalBytesCopied());
+ }
+ if (other.hasHlogTargetDir()) {
+ bitField0_ |= 0x00000400;
+ hlogTargetDir_ = other.hlogTargetDir_;
+ onChanged();
+ }
+ if (other.hasProgress()) {
+ setProgress(other.getProgress());
+ }
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ public final boolean isInitialized() {
+ if (!hasBackupId()) {
+
+ return false;
+ }
+ if (!hasType()) {
+
+ return false;
+ }
+ if (!hasTargetRootDir()) {
+
+ return false;
+ }
+ for (int i = 0; i < getTableBackupStatusCount(); i++) {
+ if (!getTableBackupStatus(i).isInitialized()) {
+
+ return false;
+ }
+ }
+ return true;
+ }
+
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext parsedMessage = null;
+ try {
+ parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext) e.getUnfinishedMessage();
+ throw e;
+ } finally {
+ if (parsedMessage != null) {
+ mergeFrom(parsedMessage);
+ }
+ }
+ return this;
+ }
+ private int bitField0_;
+
+ // required string backup_id = 1;
+ private java.lang.Object backupId_ = "";
+ /**
+ * required string backup_id = 1;
+ */
+ public boolean hasBackupId() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * required string backup_id = 1;
+ */
+ public java.lang.String getBackupId() {
+ java.lang.Object ref = backupId_;
+ if (!(ref instanceof java.lang.String)) {
+ java.lang.String s = ((com.google.protobuf.ByteString) ref)
+ .toStringUtf8();
+ backupId_ = s;
+ return s;
+ } else {
+ return (java.lang.String) ref;
+ }
+ }
+ /**
+ * required string backup_id = 1;
+ */
+ public com.google.protobuf.ByteString
+ getBackupIdBytes() {
+ java.lang.Object ref = backupId_;
+ if (ref instanceof String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ backupId_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+ /**
+ * required string backup_id = 1;
+ */
+ public Builder setBackupId(
+ java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000001;
+ backupId_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * required string backup_id = 1;
+ */
+ public Builder clearBackupId() {
+ bitField0_ = (bitField0_ & ~0x00000001);
+ backupId_ = getDefaultInstance().getBackupId();
+ onChanged();
+ return this;
+ }
+ /**
+ * required string backup_id = 1;
+ */
+ public Builder setBackupIdBytes(
+ com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000001;
+ backupId_ = value;
+ onChanged();
+ return this;
+ }
+
+ // required .hbase.pb.BackupType type = 2;
+ private org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType type_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType.FULL;
+ /**
+ * required .hbase.pb.BackupType type = 2;
+ */
+ public boolean hasType() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ /**
+ * required .hbase.pb.BackupType type = 2;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType getType() {
+ return type_;
+ }
+ /**
+ * required .hbase.pb.BackupType type = 2;
+ */
+ public Builder setType(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000002;
+ type_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * required .hbase.pb.BackupType type = 2;
+ */
+ public Builder clearType() {
+ bitField0_ = (bitField0_ & ~0x00000002);
+ type_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType.FULL;
+ onChanged();
+ return this;
+ }
+
+ // required string target_root_dir = 3;
+ private java.lang.Object targetRootDir_ = "";
+ /**
+ * required string target_root_dir = 3;
+ */
+ public boolean hasTargetRootDir() {
+ return ((bitField0_ & 0x00000004) == 0x00000004);
+ }
+ /**
+ * required string target_root_dir = 3;
+ */
+ public java.lang.String getTargetRootDir() {
+ java.lang.Object ref = targetRootDir_;
+ if (!(ref instanceof java.lang.String)) {
+ java.lang.String s = ((com.google.protobuf.ByteString) ref)
+ .toStringUtf8();
+ targetRootDir_ = s;
+ return s;
+ } else {
+ return (java.lang.String) ref;
+ }
+ }
+ /**
+ * required string target_root_dir = 3;
+ */
+ public com.google.protobuf.ByteString
+ getTargetRootDirBytes() {
+ java.lang.Object ref = targetRootDir_;
+ if (ref instanceof String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ targetRootDir_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+ /**
+ * required string target_root_dir = 3;
+ */
+ public Builder setTargetRootDir(
+ java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000004;
+ targetRootDir_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * required string target_root_dir = 3;
+ */
+ public Builder clearTargetRootDir() {
+ bitField0_ = (bitField0_ & ~0x00000004);
+ targetRootDir_ = getDefaultInstance().getTargetRootDir();
+ onChanged();
+ return this;
+ }
+ /**
+ * required string target_root_dir = 3;
+ */
+ public Builder setTargetRootDirBytes(
+ com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000004;
+ targetRootDir_ = value;
+ onChanged();
+ return this;
+ }
+
+ // optional .hbase.pb.BackupContext.BackupState state = 4;
+ private org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext.BackupState state_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext.BackupState.WAITING;
+ /**
+ * optional .hbase.pb.BackupContext.BackupState state = 4;
+ */
+ public boolean hasState() {
+ return ((bitField0_ & 0x00000008) == 0x00000008);
+ }
+ /**
+ * optional .hbase.pb.BackupContext.BackupState state = 4;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext.BackupState getState() {
+ return state_;
+ }
+ /**
+ * optional .hbase.pb.BackupContext.BackupState state = 4;
+ */
+ public Builder setState(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext.BackupState value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000008;
+ state_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * optional .hbase.pb.BackupContext.BackupState state = 4;
+ */
+ public Builder clearState() {
+ bitField0_ = (bitField0_ & ~0x00000008);
+ state_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext.BackupState.WAITING;
+ onChanged();
+ return this;
+ }
+
+ // optional .hbase.pb.BackupContext.BackupPhase phase = 5;
+ private org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext.BackupPhase phase_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext.BackupPhase.REQUEST;
+ /**
+ * optional .hbase.pb.BackupContext.BackupPhase phase = 5;
+ */
+ public boolean hasPhase() {
+ return ((bitField0_ & 0x00000010) == 0x00000010);
+ }
+ /**
+ * optional .hbase.pb.BackupContext.BackupPhase phase = 5;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext.BackupPhase getPhase() {
+ return phase_;
+ }
+ /**
+ * optional .hbase.pb.BackupContext.BackupPhase phase = 5;
+ */
+ public Builder setPhase(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext.BackupPhase value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000010;
+ phase_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * optional .hbase.pb.BackupContext.BackupPhase phase = 5;
+ */
+ public Builder clearPhase() {
+ bitField0_ = (bitField0_ & ~0x00000010);
+ phase_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext.BackupPhase.REQUEST;
+ onChanged();
+ return this;
+ }
+
+ // optional string failed_message = 6;
+ private java.lang.Object failedMessage_ = "";
+ /**
+ * optional string failed_message = 6;
+ */
+ public boolean hasFailedMessage() {
+ return ((bitField0_ & 0x00000020) == 0x00000020);
+ }
+ /**
+ * optional string failed_message = 6;
+ */
+ public java.lang.String getFailedMessage() {
+ java.lang.Object ref = failedMessage_;
+ if (!(ref instanceof java.lang.String)) {
+ java.lang.String s = ((com.google.protobuf.ByteString) ref)
+ .toStringUtf8();
+ failedMessage_ = s;
+ return s;
+ } else {
+ return (java.lang.String) ref;
+ }
+ }
+ /**
+ * optional string failed_message = 6;
+ */
+ public com.google.protobuf.ByteString
+ getFailedMessageBytes() {
+ java.lang.Object ref = failedMessage_;
+ if (ref instanceof String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ failedMessage_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+ /**
+ * optional string failed_message = 6;
+ */
+ public Builder setFailedMessage(
+ java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000020;
+ failedMessage_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * optional string failed_message = 6;
+ */
+ public Builder clearFailedMessage() {
+ bitField0_ = (bitField0_ & ~0x00000020);
+ failedMessage_ = getDefaultInstance().getFailedMessage();
+ onChanged();
+ return this;
+ }
+ /**
+ * optional string failed_message = 6;
+ */
+ public Builder setFailedMessageBytes(
+ com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000020;
+ failedMessage_ = value;
+ onChanged();
+ return this;
+ }
+
+ // repeated .hbase.pb.TableBackupStatus table_backup_status = 7;
+ private java.util.List<org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus> tableBackupStatus_ =
+ java.util.Collections.emptyList();
+ private void ensureTableBackupStatusIsMutable() {
+ if (!((bitField0_ & 0x00000040) == 0x00000040)) {
+ tableBackupStatus_ = new java.util.ArrayList<org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus>(tableBackupStatus_);
+ bitField0_ |= 0x00000040;
+ }
+ }
+
+ private com.google.protobuf.RepeatedFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus.Builder, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatusOrBuilder> tableBackupStatusBuilder_;
+
+ /**
+ * repeated .hbase.pb.TableBackupStatus table_backup_status = 7;
+ */
+ public java.util.List<org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus> getTableBackupStatusList() {
+ if (tableBackupStatusBuilder_ == null) {
+ return java.util.Collections.unmodifiableList(tableBackupStatus_);
+ } else {
+ return tableBackupStatusBuilder_.getMessageList();
+ }
+ }
+ /**
+ * repeated .hbase.pb.TableBackupStatus table_backup_status = 7;
+ */
+ public int getTableBackupStatusCount() {
+ if (tableBackupStatusBuilder_ == null) {
+ return tableBackupStatus_.size();
+ } else {
+ return tableBackupStatusBuilder_.getCount();
+ }
+ }
+ /**
+ * repeated .hbase.pb.TableBackupStatus table_backup_status = 7;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus getTableBackupStatus(int index) {
+ if (tableBackupStatusBuilder_ == null) {
+ return tableBackupStatus_.get(index);
+ } else {
+ return tableBackupStatusBuilder_.getMessage(index);
+ }
+ }
+ /**
+ * repeated .hbase.pb.TableBackupStatus table_backup_status = 7;
+ */
+ public Builder setTableBackupStatus(
+ int index, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus value) {
+ if (tableBackupStatusBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureTableBackupStatusIsMutable();
+ tableBackupStatus_.set(index, value);
+ onChanged();
+ } else {
+ tableBackupStatusBuilder_.setMessage(index, value);
+ }
+ return this;
+ }
+ /**
+ * repeated .hbase.pb.TableBackupStatus table_backup_status = 7;
+ */
+ public Builder setTableBackupStatus(
+ int index, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus.Builder builderForValue) {
+ if (tableBackupStatusBuilder_ == null) {
+ ensureTableBackupStatusIsMutable();
+ tableBackupStatus_.set(index, builderForValue.build());
+ onChanged();
+ } else {
+ tableBackupStatusBuilder_.setMessage(index, builderForValue.build());
+ }
+ return this;
+ }
+ /**
+ * repeated .hbase.pb.TableBackupStatus table_backup_status = 7;
+ */
+ public Builder addTableBackupStatus(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus value) {
+ if (tableBackupStatusBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureTableBackupStatusIsMutable();
+ tableBackupStatus_.add(value);
+ onChanged();
+ } else {
+ tableBackupStatusBuilder_.addMessage(value);
+ }
+ return this;
+ }
+ /**
+ * repeated .hbase.pb.TableBackupStatus table_backup_status = 7;
+ */
+ public Builder addTableBackupStatus(
+ int index, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus value) {
+ if (tableBackupStatusBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureTableBackupStatusIsMutable();
+ tableBackupStatus_.add(index, value);
+ onChanged();
+ } else {
+ tableBackupStatusBuilder_.addMessage(index, value);
+ }
+ return this;
+ }
+ /**
+ * repeated .hbase.pb.TableBackupStatus table_backup_status = 7;
+ */
+ public Builder addTableBackupStatus(
+ org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus.Builder builderForValue) {
+ if (tableBackupStatusBuilder_ == null) {
+ ensureTableBackupStatusIsMutable();
+ tableBackupStatus_.add(builderForValue.build());
+ onChanged();
+ } else {
+ tableBackupStatusBuilder_.addMessage(builderForValue.build());
+ }
+ return this;
+ }
+ /**
+ * repeated .hbase.pb.TableBackupStatus table_backup_status = 7;
+ */
+ public Builder addTableBackupStatus(
+ int index, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus.Builder builderForValue) {
+ if (tableBackupStatusBuilder_ == null) {
+ ensureTableBackupStatusIsMutable();
+ tableBackupStatus_.add(index, builderForValue.build());
+ onChanged();
+ } else {
+ tableBackupStatusBuilder_.addMessage(index, builderForValue.build());
+ }
+ return this;
+ }
+ /**
+ * repeated .hbase.pb.TableBackupStatus table_backup_status = 7;
+ */
+ public Builder addAllTableBackupStatus(
+ java.lang.Iterable<? extends org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus> values) {
+ if (tableBackupStatusBuilder_ == null) {
+ ensureTableBackupStatusIsMutable();
+ super.addAll(values, tableBackupStatus_);
+ onChanged();
+ } else {
+ tableBackupStatusBuilder_.addAllMessages(values);
+ }
+ return this;
+ }
+ /**
+ * repeated .hbase.pb.TableBackupStatus table_backup_status = 7;
+ */
+ public Builder clearTableBackupStatus() {
+ if (tableBackupStatusBuilder_ == null) {
+ tableBackupStatus_ = java.util.Collections.emptyList();
+ bitField0_ = (bitField0_ & ~0x00000040);
+ onChanged();
+ } else {
+ tableBackupStatusBuilder_.clear();
+ }
+ return this;
+ }
+ /**
+ * repeated .hbase.pb.TableBackupStatus table_backup_status = 7;
+ */
+ public Builder removeTableBackupStatus(int index) {
+ if (tableBackupStatusBuilder_ == null) {
+ ensureTableBackupStatusIsMutable();
+ tableBackupStatus_.remove(index);
+ onChanged();
+ } else {
+ tableBackupStatusBuilder_.remove(index);
+ }
+ return this;
+ }
+ /**
+ * repeated .hbase.pb.TableBackupStatus table_backup_status = 7;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus.Builder getTableBackupStatusBuilder(
+ int index) {
+ return getTableBackupStatusFieldBuilder().getBuilder(index);
+ }
+ /**
+ * repeated .hbase.pb.TableBackupStatus table_backup_status = 7;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatusOrBuilder getTableBackupStatusOrBuilder(
+ int index) {
+ if (tableBackupStatusBuilder_ == null) {
+ return tableBackupStatus_.get(index); } else {
+ return tableBackupStatusBuilder_.getMessageOrBuilder(index);
+ }
+ }
+ /**
+ * repeated .hbase.pb.TableBackupStatus table_backup_status = 7;
+ */
+ public java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatusOrBuilder>
+ getTableBackupStatusOrBuilderList() {
+ if (tableBackupStatusBuilder_ != null) {
+ return tableBackupStatusBuilder_.getMessageOrBuilderList();
+ } else {
+ return java.util.Collections.unmodifiableList(tableBackupStatus_);
+ }
+ }
+ /**
+ * repeated .hbase.pb.TableBackupStatus table_backup_status = 7;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus.Builder addTableBackupStatusBuilder() {
+ return getTableBackupStatusFieldBuilder().addBuilder(
+ org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus.getDefaultInstance());
+ }
+ /**
+ * repeated .hbase.pb.TableBackupStatus table_backup_status = 7;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus.Builder addTableBackupStatusBuilder(
+ int index) {
+ return getTableBackupStatusFieldBuilder().addBuilder(
+ index, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus.getDefaultInstance());
+ }
+ /**
+ * repeated .hbase.pb.TableBackupStatus table_backup_status = 7;
+ */
+ public java.util.List<org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus.Builder>
+ getTableBackupStatusBuilderList() {
+ return getTableBackupStatusFieldBuilder().getBuilderList();
+ }
+ private com.google.protobuf.RepeatedFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus.Builder, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatusOrBuilder>
+ getTableBackupStatusFieldBuilder() {
+ if (tableBackupStatusBuilder_ == null) {
+ tableBackupStatusBuilder_ = new com.google.protobuf.RepeatedFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus.Builder, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatusOrBuilder>(
+ tableBackupStatus_,
+ ((bitField0_ & 0x00000040) == 0x00000040),
+ getParentForChildren(),
+ isClean());
+ tableBackupStatus_ = null;
+ }
+ return tableBackupStatusBuilder_;
+ }
+
+ // optional uint64 start_ts = 8;
+ private long startTs_ ;
+ /**
+ * optional uint64 start_ts = 8;
+ */
+ public boolean hasStartTs() {
+ return ((bitField0_ & 0x00000080) == 0x00000080);
+ }
+ /**
+ * optional uint64 start_ts = 8;
+ */
+ public long getStartTs() {
+ return startTs_;
+ }
+ /**
+ * optional uint64 start_ts = 8;
+ */
+ public Builder setStartTs(long value) {
+ bitField0_ |= 0x00000080;
+ startTs_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * optional uint64 start_ts = 8;
+ */
+ public Builder clearStartTs() {
+ bitField0_ = (bitField0_ & ~0x00000080);
+ startTs_ = 0L;
+ onChanged();
+ return this;
+ }
+
+ // optional uint64 end_ts = 9;
+ private long endTs_ ;
+ /**
+ * optional uint64 end_ts = 9;
+ */
+ public boolean hasEndTs() {
+ return ((bitField0_ & 0x00000100) == 0x00000100);
+ }
+ /**
+ * optional uint64 end_ts = 9;
+ */
+ public long getEndTs() {
+ return endTs_;
+ }
+ /**
+ * optional uint64 end_ts = 9;
+ */
+ public Builder setEndTs(long value) {
+ bitField0_ |= 0x00000100;
+ endTs_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * optional uint64 end_ts = 9;
+ */
+ public Builder clearEndTs() {
+ bitField0_ = (bitField0_ & ~0x00000100);
+ endTs_ = 0L;
+ onChanged();
+ return this;
+ }
+
+ // optional int64 total_bytes_copied = 10;
+ private long totalBytesCopied_ ;
+ /**
+ * optional int64 total_bytes_copied = 10;
+ */
+ public boolean hasTotalBytesCopied() {
+ return ((bitField0_ & 0x00000200) == 0x00000200);
+ }
+ /**
+ * optional int64 total_bytes_copied = 10;
+ */
+ public long getTotalBytesCopied() {
+ return totalBytesCopied_;
+ }
+ /**
+ * optional int64 total_bytes_copied = 10;
+ */
+ public Builder setTotalBytesCopied(long value) {
+ bitField0_ |= 0x00000200;
+ totalBytesCopied_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * optional int64 total_bytes_copied = 10;
+ */
+ public Builder clearTotalBytesCopied() {
+ bitField0_ = (bitField0_ & ~0x00000200);
+ totalBytesCopied_ = 0L;
+ onChanged();
+ return this;
+ }
+
+ // optional string hlog_target_dir = 11;
+ private java.lang.Object hlogTargetDir_ = "";
+ /**
+ * optional string hlog_target_dir = 11;
+ */
+ public boolean hasHlogTargetDir() {
+ return ((bitField0_ & 0x00000400) == 0x00000400);
+ }
+ /**
+ * optional string hlog_target_dir = 11;
+ */
+ public java.lang.String getHlogTargetDir() {
+ java.lang.Object ref = hlogTargetDir_;
+ if (!(ref instanceof java.lang.String)) {
+ java.lang.String s = ((com.google.protobuf.ByteString) ref)
+ .toStringUtf8();
+ hlogTargetDir_ = s;
+ return s;
+ } else {
+ return (java.lang.String) ref;
+ }
+ }
+ /**
+ * optional string hlog_target_dir = 11;
+ */
+ public com.google.protobuf.ByteString
+ getHlogTargetDirBytes() {
+ java.lang.Object ref = hlogTargetDir_;
+ if (ref instanceof String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ hlogTargetDir_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+ /**
+ * optional string hlog_target_dir = 11;
+ */
+ public Builder setHlogTargetDir(
+ java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000400;
+ hlogTargetDir_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * optional string hlog_target_dir = 11;
+ */
+ public Builder clearHlogTargetDir() {
+ bitField0_ = (bitField0_ & ~0x00000400);
+ hlogTargetDir_ = getDefaultInstance().getHlogTargetDir();
+ onChanged();
+ return this;
+ }
+ /**
+ * optional string hlog_target_dir = 11;
+ */
+ public Builder setHlogTargetDirBytes(
+ com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000400;
+ hlogTargetDir_ = value;
+ onChanged();
+ return this;
+ }
+
+ // optional uint32 progress = 12;
+ private int progress_ ;
+ /**
+ * optional uint32 progress = 12;
+ */
+ public boolean hasProgress() {
+ return ((bitField0_ & 0x00000800) == 0x00000800);
+ }
+ /**
+ * optional uint32 progress = 12;
+ */
+ public int getProgress() {
+ return progress_;
+ }
+ /**
+ * optional uint32 progress = 12;
+ */
+ public Builder setProgress(int value) {
+ bitField0_ |= 0x00000800;
+ progress_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * optional uint32 progress = 12;
+ */
+ public Builder clearProgress() {
+ bitField0_ = (bitField0_ & ~0x00000800);
+ progress_ = 0;
+ onChanged();
+ return this;
+ }
+
+ // @@protoc_insertion_point(builder_scope:hbase.pb.BackupContext)
+ }
+
+ static {
+ defaultInstance = new BackupContext(true);
+ defaultInstance.initFields();
+ }
+
+ // @@protoc_insertion_point(class_scope:hbase.pb.BackupContext)
+ }
+
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_hbase_pb_BackupImage_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_hbase_pb_BackupImage_fieldAccessorTable;
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_hbase_pb_ServerTimestamp_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_hbase_pb_ServerTimestamp_fieldAccessorTable;
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_hbase_pb_TableServerTimestamp_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_hbase_pb_TableServerTimestamp_fieldAccessorTable;
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_hbase_pb_BackupManifest_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_hbase_pb_BackupManifest_fieldAccessorTable;
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_hbase_pb_TableBackupStatus_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_hbase_pb_TableBackupStatus_fieldAccessorTable;
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_hbase_pb_BackupContext_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_hbase_pb_BackupContext_fieldAccessorTable;
+
+ public static com.google.protobuf.Descriptors.FileDescriptor
+ getDescriptor() {
+ return descriptor;
+ }
+ private static com.google.protobuf.Descriptors.FileDescriptor
+ descriptor;
+ static {
+ java.lang.String[] descriptorData = {
+ "\n\014Backup.proto\022\010hbase.pb\032\013HBase.proto\"\327\001" +
+ "\n\013BackupImage\022\021\n\tbackup_id\030\001 \002(\t\022)\n\013back" +
+ "up_type\030\002 \002(\0162\024.hbase.pb.BackupType\022\020\n\010r" +
+ "oot_dir\030\003 \002(\t\022\'\n\ntable_list\030\004 \003(\0132\023.hbas" +
+ "e.pb.TableName\022\020\n\010start_ts\030\005 \002(\004\022\023\n\013comp" +
+ "lete_ts\030\006 \002(\004\022(\n\tancestors\030\007 \003(\0132\025.hbase" +
+ ".pb.BackupImage\"4\n\017ServerTimestamp\022\016\n\006se" +
+ "rver\030\001 \002(\t\022\021\n\ttimestamp\030\002 \002(\004\"o\n\024TableSe" +
+ "rverTimestamp\022\"\n\005table\030\001 \002(\0132\023.hbase.pb." +
+ "TableName\0223\n\020server_timestamp\030\002 \003(\0132\031.hb",
+ "ase.pb.ServerTimestamp\"\313\002\n\016BackupManifes" +
+ "t\022\017\n\007version\030\001 \002(\t\022\021\n\tbackup_id\030\002 \002(\t\022\"\n" +
+ "\004type\030\003 \002(\0162\024.hbase.pb.BackupType\022\'\n\ntab" +
+ "le_list\030\004 \003(\0132\023.hbase.pb.TableName\022\020\n\010st" +
+ "art_ts\030\005 \002(\004\022\023\n\013complete_ts\030\006 \002(\004\022\023\n\013tot" +
+ "al_bytes\030\007 \002(\003\022\021\n\tlog_bytes\030\010 \001(\003\022/\n\007tst" +
+ "_map\030\t \003(\0132\036.hbase.pb.TableServerTimesta" +
+ "mp\0225\n\026dependent_backup_image\030\n \003(\0132\025.hba" +
+ "se.pb.BackupImage\022\021\n\tcompacted\030\013 \002(\010\"]\n\021" +
+ "TableBackupStatus\022\"\n\005table\030\001 \002(\0132\023.hbase",
+ ".pb.TableName\022\022\n\ntarget_dir\030\002 \002(\t\022\020\n\010sna" +
+ "pshot\030\003 \001(\t\"\323\004\n\rBackupContext\022\021\n\tbackup_" +
+ "id\030\001 \002(\t\022\"\n\004type\030\002 \002(\0162\024.hbase.pb.Backup" +
+ "Type\022\027\n\017target_root_dir\030\003 \002(\t\0222\n\005state\030\004" +
+ " \001(\0162#.hbase.pb.BackupContext.BackupStat" +
+ "e\0222\n\005phase\030\005 \001(\0162#.hbase.pb.BackupContex" +
+ "t.BackupPhase\022\026\n\016failed_message\030\006 \001(\t\0228\n" +
+ "\023table_backup_status\030\007 \003(\0132\033.hbase.pb.Ta" +
+ "bleBackupStatus\022\020\n\010start_ts\030\010 \001(\004\022\016\n\006end" +
+ "_ts\030\t \001(\004\022\032\n\022total_bytes_copied\030\n \001(\003\022\027\n",
+ "\017hlog_target_dir\030\013 \001(\t\022\020\n\010progress\030\014 \001(\r" +
+ "\"P\n\013BackupState\022\013\n\007WAITING\020\000\022\013\n\007RUNNING\020" +
+ "\001\022\014\n\010COMPLETE\020\002\022\n\n\006FAILED\020\003\022\r\n\tCANCELLED" +
+ "\020\004\"}\n\013BackupPhase\022\013\n\007REQUEST\020\000\022\014\n\010SNAPSH" +
+ "OT\020\001\022\027\n\023PREPARE_INCREMENTAL\020\002\022\020\n\014SNAPSHO" +
+ "TCOPY\020\003\022\024\n\020INCREMENTAL_COPY\020\004\022\022\n\016STORE_M" +
+ "ANIFEST\020\005*\'\n\nBackupType\022\010\n\004FULL\020\000\022\017\n\013INC" +
+ "REMENTAL\020\001BB\n*org.apache.hadoop.hbase.pr" +
+ "otobuf.generatedB\014BackupProtosH\001\210\001\001\240\001\001"
+ };
+ com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
+ new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
+ public com.google.protobuf.ExtensionRegistry assignDescriptors(
+ com.google.protobuf.Descriptors.FileDescriptor root) {
+ descriptor = root;
+ internal_static_hbase_pb_BackupImage_descriptor =
+ getDescriptor().getMessageTypes().get(0);
+ internal_static_hbase_pb_BackupImage_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_hbase_pb_BackupImage_descriptor,
+ new java.lang.String[] { "BackupId", "BackupType", "RootDir", "TableList", "StartTs", "CompleteTs", "Ancestors", });
+ internal_static_hbase_pb_ServerTimestamp_descriptor =
+ getDescriptor().getMessageTypes().get(1);
+ internal_static_hbase_pb_ServerTimestamp_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_hbase_pb_ServerTimestamp_descriptor,
+ new java.lang.String[] { "Server", "Timestamp", });
+ internal_static_hbase_pb_TableServerTimestamp_descriptor =
+ getDescriptor().getMessageTypes().get(2);
+ internal_static_hbase_pb_TableServerTimestamp_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_hbase_pb_TableServerTimestamp_descriptor,
+ new java.lang.String[] { "Table", "ServerTimestamp", });
+ internal_static_hbase_pb_BackupManifest_descriptor =
+ getDescriptor().getMessageTypes().get(3);
+ internal_static_hbase_pb_BackupManifest_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_hbase_pb_BackupManifest_descriptor,
+ new java.lang.String[] { "Version", "BackupId", "Type", "TableList", "StartTs", "CompleteTs", "TotalBytes", "LogBytes", "TstMap", "DependentBackupImage", "Compacted", });
+ internal_static_hbase_pb_TableBackupStatus_descriptor =
+ getDescriptor().getMessageTypes().get(4);
+ internal_static_hbase_pb_TableBackupStatus_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_hbase_pb_TableBackupStatus_descriptor,
+ new java.lang.String[] { "Table", "TargetDir", "Snapshot", });
+ internal_static_hbase_pb_BackupContext_descriptor =
+ getDescriptor().getMessageTypes().get(5);
+ internal_static_hbase_pb_BackupContext_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_hbase_pb_BackupContext_descriptor,
+ new java.lang.String[] { "BackupId", "Type", "TargetRootDir", "State", "Phase", "FailedMessage", "TableBackupStatus", "StartTs", "EndTs", "TotalBytesCopied", "HlogTargetDir", "Progress", });
+ return null;
+ }
+ };
+ com.google.protobuf.Descriptors.FileDescriptor
+ .internalBuildGeneratedFileFrom(descriptorData,
+ new com.google.protobuf.Descriptors.FileDescriptor[] {
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.getDescriptor(),
+ }, assigner);
+ }
+
+ // @@protoc_insertion_point(outer_class_scope)
+}
diff --git a/hbase-protocol/src/main/protobuf/Backup.proto b/hbase-protocol/src/main/protobuf/Backup.proto
new file mode 100644
index 0000000..383b990
--- /dev/null
+++ b/hbase-protocol/src/main/protobuf/Backup.proto
@@ -0,0 +1,105 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// This file contains Backup manifest and backup context messages
+package hbase.pb;
+
+option java_package = "org.apache.hadoop.hbase.protobuf.generated";
+option java_outer_classname = "BackupProtos";
+option java_generic_services = true;
+option java_generate_equals_and_hash = true;
+option optimize_for = SPEED;
+
+import "HBase.proto";
+
+enum BackupType {
+ FULL = 0;
+ INCREMENTAL = 1;
+}
+
+message BackupImage {
+ required string backup_id = 1;
+ required BackupType backup_type = 2;
+ required string root_dir = 3;
+ repeated TableName table_list = 4;
+ required uint64 start_ts = 5;
+ required uint64 complete_ts = 6;
+ repeated BackupImage ancestors = 7;
+}
+
+message ServerTimestamp {
+ required string server = 1;
+ required uint64 timestamp = 2;
+}
+
+message TableServerTimestamp {
+ required TableName table = 1;
+ repeated ServerTimestamp server_timestamp = 2;
+}
+
+message BackupManifest {
+ required string version = 1;
+ required string backup_id = 2;
+ required BackupType type = 3;
+ repeated TableName table_list = 4;
+ required uint64 start_ts = 5;
+ required uint64 complete_ts = 6;
+ required int64 total_bytes = 7;
+ optional int64 log_bytes = 8;
+ repeated TableServerTimestamp tst_map = 9;
+ repeated BackupImage dependent_backup_image = 10;
+ required bool compacted = 11;
+}
+
+message TableBackupStatus {
+ required TableName table = 1;
+ required string target_dir = 2;
+ optional string snapshot = 3;
+}
+
+message BackupContext {
+ required string backup_id = 1;
+ required BackupType type = 2;
+ required string target_root_dir = 3;
+ optional BackupState state = 4;
+ optional BackupPhase phase = 5;
+ optional string failed_message = 6;
+ repeated TableBackupStatus table_backup_status = 7;
+ optional uint64 start_ts = 8;
+ optional uint64 end_ts = 9;
+ optional int64 total_bytes_copied = 10;
+ optional string hlog_target_dir = 11;
+ optional uint32 progress = 12;
+
+ enum BackupState {
+ WAITING = 0;
+ RUNNING = 1;
+ COMPLETE = 2;
+ FAILED = 3;
+ CANCELLED = 4;
+ }
+
+ enum BackupPhase {
+ REQUEST = 0;
+ SNAPSHOT = 1;
+ PREPARE_INCREMENTAL = 2;
+ SNAPSHOTCOPY = 3;
+ INCREMENTAL_COPY = 4;
+ STORE_MANIFEST = 5;
+ }
+}
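
For reference, the generated BackupProtos builder shown above can be used to assemble a BackupContext message. This is a minimal sketch, assuming the standard protobuf 2.5 generated entry points (newBuilder, setBackupId, setType, build), which are not all reproduced in this excerpt; the backup id and target directory values are illustrative only.

```java
import org.apache.hadoop.hbase.protobuf.generated.BackupProtos;

public class BackupContextProtoExample {
  public static void main(String[] args) {
    // Assemble a BackupContext message for a full backup (values are illustrative).
    BackupProtos.BackupContext context = BackupProtos.BackupContext.newBuilder()
        .setBackupId("backup_1396650096738")                                    // required field 1
        .setType(BackupProtos.BackupType.FULL)                                  // required field 2
        .setTargetRootDir("hdfs://backup.hbase.org:9000/user/biadmin/backup1")  // required field 3
        .setState(BackupProtos.BackupContext.BackupState.RUNNING)
        .setPhase(BackupProtos.BackupContext.BackupPhase.SNAPSHOT)
        .setStartTs(System.currentTimeMillis())
        .setProgress(0)
        .build();
    // The message can be round-tripped through its wire format.
    byte[] bytes = context.toByteArray();
    System.out.println(bytes.length + " bytes, state=" + context.getState());
  }
}
```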
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupClient.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupClient.java
new file mode 100644
index 0000000..7c8ea39
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupClient.java
@@ -0,0 +1,40 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.backup;
+
+import java.io.IOException;
+import java.util.List;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.TableName;
+
+public interface BackupClient {
+
+ public void setConf(Configuration conf);
+
+ /**
+ * Sends a backup request to the server and monitors the progress if necessary.
+ * @param backupType full or incremental
+ * @param tableList the list of tables specified by the user
+ * @param targetRootDir the root path specified by the user
+ * @return the id of the created backup
+ * @throws IOException if the backup request fails
+ */
+ public String create(BackupType backupType, List<TableName> tableList,
+ String targetRootDir) throws IOException;
+ }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupDriver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupDriver.java
new file mode 100644
index 0000000..015c80b
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupDriver.java
@@ -0,0 +1,119 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.backup;
+
+import java.io.IOException;
+
+import org.apache.commons.cli.CommandLine;
+import org.apache.commons.cli.Options;
+import org.apache.commons.cli.ParseException;
+import org.apache.commons.cli.PosixParser;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.backup.impl.BackupCommands;
+import org.apache.hadoop.hbase.backup.impl.BackupRestoreConstants;
+import org.apache.hadoop.hbase.backup.impl.BackupRestoreConstants.BackupCommand;
+import org.apache.hadoop.hbase.util.AbstractHBaseTool;
+import org.apache.hadoop.hbase.util.LogUtils;
+import org.apache.hadoop.util.ToolRunner;
+import org.apache.log4j.Level;
+import org.apache.log4j.Logger;
+
+public class BackupDriver extends AbstractHBaseTool {
+
+ private static final Log LOG = LogFactory.getLog(BackupDriver.class);
+ private Options opt;
+ private CommandLine cmd;
+
+ protected void init() throws IOException {
+ // define supported options
+ opt = new Options();
+ opt.addOption("debug", false, "Enable debug loggings");
+
+ // disable irrelevant loggers so they do not clutter the command output
+ LogUtils.disableUselessLoggers(LOG);
+ }
+
+ private int parseAndRun(String[] args) throws IOException {
+ String cmd = null;
+ String[] remainArgs = null;
+ if (args == null || args.length == 0) {
+ BackupCommands.createCommand(getConf(),
+ BackupRestoreConstants.BackupCommand.HELP, null).execute();
+ return 0;
+ } else {
+ cmd = args[0];
+ remainArgs = new String[args.length - 1];
+ if (args.length > 1) {
+ System.arraycopy(args, 1, remainArgs, 0, args.length - 1);
+ }
+ }
+ CommandLine cmdline = null;
+ try {
+ cmdline = new PosixParser().parse(opt, remainArgs);
+ } catch (ParseException e) {
+ LOG.error("Could not parse command", e);
+ return -1;
+ }
+
+ BackupCommand type = BackupCommand.HELP;
+ if (BackupCommand.CREATE.name().equalsIgnoreCase(cmd)) {
+ type = BackupCommand.CREATE;
+ } else if (BackupCommand.HELP.name().equalsIgnoreCase(cmd)) {
+ type = BackupCommand.HELP;
+ } else {
+ System.out.println("Unsupported command for backup: " + cmd);
+ return -1;
+ }
+
+ // enable debug logging
+ Logger backupClientLogger = Logger.getLogger("org.apache.hadoop.hbase.backup");
+ if (cmdline.hasOption("debug")) {
+ backupClientLogger.setLevel(Level.DEBUG);
+ } else {
+ backupClientLogger.setLevel(Level.INFO);
+ }
+
+ // TODO: get rid of Command altogether?
+ BackupCommands.createCommand(getConf(), type, cmdline).execute();
+ return 0;
+ }
+
+ @Override
+ protected void addOptions() {
+ }
+
+ @Override
+ protected void processOptions(CommandLine cmd) {
+ this.cmd = cmd;
+ }
+
+ @Override
+ protected int doWork() throws Exception {
+ init();
+ return parseAndRun(cmd.getArgs());
+ }
+
+ public static void main(String[] args) throws Exception {
+ Configuration conf = HBaseConfiguration.create();
+ int ret = ToolRunner.run(conf, new BackupDriver(), args);
+ System.exit(ret);
+ }
+
+}
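
As a usage note, the driver above is wired through ToolRunner, so it can also be invoked programmatically. A minimal sketch that runs the help subcommand (the only argument exercised here is one the parser above recognizes); a real create invocation is handled by BackupCommands, which is outside this excerpt.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.backup.BackupDriver;
import org.apache.hadoop.util.ToolRunner;

public class BackupDriverHelpExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // "help" maps to BackupCommand.HELP in BackupDriver#parseAndRun.
    int exitCode = ToolRunner.run(conf, new BackupDriver(), new String[] { "help" });
    System.out.println("backup driver exited with " + exitCode);
  }
}
```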
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupRestoreFactory.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupRestoreFactory.java
new file mode 100644
index 0000000..6fbfe18
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupRestoreFactory.java
@@ -0,0 +1,95 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.backup;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.backup.impl.BackupClientImpl;
+import org.apache.hadoop.hbase.backup.impl.BackupCopyService;
+import org.apache.hadoop.hbase.backup.impl.IncrementalRestoreService;
+import org.apache.hadoop.hbase.backup.impl.RestoreClientImpl;
+import org.apache.hadoop.hbase.backup.mapreduce.MapReduceBackupCopyService;
+import org.apache.hadoop.hbase.backup.mapreduce.MapReduceRestoreService;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+import org.apache.hadoop.util.ReflectionUtils;
+
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public final class BackupRestoreFactory {
+
+ public final static String HBASE_INCR_RESTORE_IMPL_CLASS = "hbase.incremental.restore.class";
+ public final static String HBASE_BACKUP_COPY_IMPL_CLASS = "hbase.backup.copy.class";
+ public final static String HBASE_BACKUP_CLIENT_IMPL_CLASS = "hbase.backup.client.class";
+ public final static String HBASE_RESTORE_CLIENT_IMPL_CLASS = "hbase.restore.client.class";
+
+ private BackupRestoreFactory(){
+ throw new AssertionError("Instantiating utility class...");
+ }
+
+ /**
+ * Gets incremental restore service
+ * @param conf - configuration
+ * @return incremental backup service instance
+ */
+ public static IncrementalRestoreService getIncrementalRestoreService(Configuration conf) {
+ Class<? extends IncrementalRestoreService> cls =
+ conf.getClass(HBASE_INCR_RESTORE_IMPL_CLASS, MapReduceRestoreService.class,
+ IncrementalRestoreService.class);
+ return ReflectionUtils.newInstance(cls, conf);
+ }
+
+ /**
+ * Gets backup copy service
+ * @param conf - configuration
+ * @return backup copy service
+ */
+ public static BackupCopyService getBackupCopyService(Configuration conf) {
+ Class<? extends BackupCopyService> cls =
+ conf.getClass(HBASE_BACKUP_COPY_IMPL_CLASS, MapReduceBackupCopyService.class,
+ BackupCopyService.class);
+ return ReflectionUtils.newInstance(cls, conf);
+ }
+
+ /**
+ * Gets backup client implementation
+ * @param conf - configuration
+ * @return backup client
+ */
+ public static BackupClient getBackupClient(Configuration conf) {
+ Class<? extends BackupClient> cls =
+ conf.getClass(HBASE_BACKUP_CLIENT_IMPL_CLASS, BackupClientImpl.class,
+ BackupClient.class);
+ BackupClient client = ReflectionUtils.newInstance(cls, conf);
+ client.setConf(conf);
+ return client;
+ }
+
+ /**
+ * Gets restore client implementation
+ * @param conf - configuration
+ * @return restore client
+ */
+ public static RestoreClient getRestoreClient(Configuration conf) {
+ Class<? extends RestoreClient> cls =
+ conf.getClass(HBASE_RESTORE_CLIENT_IMPL_CLASS, RestoreClientImpl.class,
+ RestoreClient.class);
+ RestoreClient client = ReflectionUtils.newInstance(cls, conf);
+ client.setConf(conf);
+ return client;
+ }
+}
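
A minimal sketch of how the factory and the BackupClient interface above fit together. The table names and the HDFS target directory are illustrative values, and the actual semantics of create() live in BackupClientImpl, which is not part of this excerpt.

```java
import java.util.Arrays;
import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.backup.BackupClient;
import org.apache.hadoop.hbase.backup.BackupRestoreFactory;
import org.apache.hadoop.hbase.backup.BackupType;

public class FullBackupExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Resolve the client implementation (BackupClientImpl by default,
    // overridable via hbase.backup.client.class).
    BackupClient client = BackupRestoreFactory.getBackupClient(conf);
    List<TableName> tables = Arrays.asList(TableName.valueOf("t1_dn"), TableName.valueOf("t2_dn"));
    // Request a full backup of the two tables into the given root directory.
    String backupId = client.create(BackupType.FULL, tables,
        "hdfs://backup.hbase.org:9000/user/biadmin/backup1");
    System.out.println("started backup " + backupId);
  }
}
```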
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/backup/BackupType.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/backup/BackupType.java
new file mode 100644
index 0000000..e2e3446
--- /dev/null
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/backup/BackupType.java
@@ -0,0 +1,23 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.backup;
+
+public enum BackupType {
+ FULL, INCREMENTAL
+}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HBackupFileSystem.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HBackupFileSystem.java
new file mode 100644
index 0000000..6e5a355
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HBackupFileSystem.java
@@ -0,0 +1,472 @@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.backup;
+
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.TreeMap;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.backup.impl.BackupManifest;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+import org.apache.hadoop.hbase.io.HFileLink;
+import org.apache.hadoop.hbase.io.hfile.CacheConfig;
+import org.apache.hadoop.hbase.io.hfile.HFile;
+import org.apache.hadoop.hbase.mapreduce.LoadIncrementalHFiles;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
+import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
+import org.apache.hadoop.hbase.regionserver.HStore;
+import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
+import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
+import org.apache.hadoop.hbase.snapshot.SnapshotManifest;
+import org.apache.hadoop.hbase.util.Bytes;
+
+/**
+ * View to an on-disk Backup Image FileSystem.
+ * Provides the set of methods necessary to interact with the on-disk Backup Image data.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public class HBackupFileSystem {
+ public static final Log LOG = LogFactory.getLog(HBackupFileSystem.class);
+
+ private final String RESTORE_TMP_PATH = "/tmp";
+ private final String[] ignoreDirs = { "recovered.edits" };
+
+ private final Configuration conf;
+ private final FileSystem fs;
+ private final Path backupRootPath;
+ private final Path restoreTmpPath;
+ private final String backupId;
+
+ /**
+ * Create a view to the on-disk Backup Image.
+ * @param conf configuration to use
+ * @param backupRootPath root path under which the backup images are stored
+ * @param backupId id of the backup image to view
+ */
+ public HBackupFileSystem(final Configuration conf, final Path backupRootPath, final String backupId)
+ throws IOException {
+ this.conf = conf;
+ this.fs = backupRootPath.getFileSystem(conf);
+ this.backupRootPath = backupRootPath;
+ this.backupId = backupId; // the backup ID for the lead backup Image
+ this.restoreTmpPath = new Path(conf.get("hbase.fs.tmp.dir") != null?
+ conf.get("hbase.fs.tmp.dir"): RESTORE_TMP_PATH,
+ "restore");
+ }
+
+ public Path getBackupRootPath() {
+ return backupRootPath;
+ }
+
+ public String getBackupId() {
+ return backupId;
+ }
+
+ /**
+ * @param tableName is the table backed up
+ * @return {@link HTableDescriptor} saved in backup image of the table
+ */
+ public HTableDescriptor getTableDesc(TableName tableName)
+ throws FileNotFoundException, IOException {
+ Path tableInfoPath = this.getTableInfoPath(tableName);
+ SnapshotDescription desc = SnapshotDescriptionUtils.readSnapshotInfo(fs, tableInfoPath);
+ SnapshotManifest manifest = SnapshotManifest.open(conf, fs, tableInfoPath, desc);
+ HTableDescriptor tableDescriptor = manifest.getTableDescriptor();
+ if (!tableDescriptor.getNameAsString().equals(tableName.getNameAsString())) {
+ LOG.error("couldn't find Table Desc for table: " + tableName + " under tableInfoPath: "
+ + tableInfoPath.toString());
+ LOG.error("tableDescriptor.getNameAsString() = " + tableDescriptor.getNameAsString());
+ }
+ return tableDescriptor;
+ }
+
+ /**
+ * Given the backup root dir, backup id and the table name, return the backup image location,
+ * which is also where the backup manifest file is. The return value looks like:
+ * "hdfs://backup.hbase.org:9000/user/biadmin/backup1/default/t1_dn/backup_1396650096738"
+ * @param backupRootDir backup root directory
+ * @param backupId backup id
+ * @param tableName table name
+ * @return backupPath String for the particular table
+ */
+ public static String getTableBackupDir(String backupRootDir, String backupId,
+ TableName tableName) {
+ return backupRootDir + Path.SEPARATOR + tableName.getNamespaceAsString() + Path.SEPARATOR
+ + tableName.getQualifierAsString() + Path.SEPARATOR + backupId;
+ }
+
+ /**
+ * Given the backup root dir, backup id and the table name, return the backup image location,
+ * which is also where the backup manifest file is. The return value looks like:
+ * "hdfs://backup.hbase.org:9000/user/biadmin/backup1/default/t1_dn/backup_1396650096738"
+ * @param backupRootPath backup root path
+ * @param tableName table name
+ * @param backupId backup Id
+ * @return backupPath for the particular table
+ */
+ public static Path getTableBackupPath(Path backupRootPath, TableName tableName, String backupId) {
+ return new Path(backupRootPath, tableName.getNamespaceAsString() + Path.SEPARATOR
+ + tableName.getQualifierAsString() + Path.SEPARATOR + backupId);
+ }
+
+ /**
+ * The return value represents the path:
+ * ".../user/biadmin/backup1/default/t1_dn/backup_1396650096738/.hbase-snapshot"
+ * @param backupRootPath backup root path
+ * @param tableName table name
+ * @param backupId backup Id
+ * @return path for snapshot
+ */
+ public static Path getTableSnapshotPath(Path backupRootPath, TableName tableName,
+ String backupId) {
+ return new Path(getTableBackupPath(backupRootPath, tableName, backupId),
+ HConstants.SNAPSHOT_DIR_NAME);
+ }
+
+ /**
+ * The return value represents the path:
+ * "..../default/t1_dn/backup_1396650096738/.hbase-snapshot/snapshot_1396650097621_default_t1_dn"
+ * This path contains .snapshotinfo and .tabledesc (0.96 and 0.98), or .snapshotinfo and
+ * .data.manifest (trunk).
+ * @param tableName table name
+ * @return path to table info
+ * @throws FileNotFoundException exception
+ * @throws IOException exception
+ */
+ public Path getTableInfoPath(TableName tableName)
+ throws FileNotFoundException, IOException {
+ Path tableSnapShotPath = getTableSnapshotPath(backupRootPath, tableName, backupId);
+ Path tableInfoPath = null;
+
+ // can't build the path directly as the timestamp values are different
+ FileStatus[] snapshots = fs.listStatus(tableSnapShotPath);
+ for (FileStatus snapshot : snapshots) {
+ tableInfoPath = snapshot.getPath();
+ // SnapshotManifest.DATA_MANIFEST_NAME = "data.manifest";
+ if (tableInfoPath.getName().endsWith("data.manifest")) {
+ break;
+ }
+ }
+ return tableInfoPath;
+ }
+
+ /**
+ * The return value represents the path:
+ * ".../user/biadmin/backup1/default/t1_dn/backup_1396650096738/archive/data/default/t1_dn"
+ * @param tableName table name
+ * @return path to table archive
+ * @throws IOException exception
+ */
+ public Path getTableArchivePath(TableName tableName)
+ throws IOException {
+ Path baseDir = new Path(getTableBackupPath(backupRootPath, tableName, backupId),
+ HConstants.HFILE_ARCHIVE_DIRECTORY);
+ Path dataDir = new Path(baseDir, HConstants.BASE_NAMESPACE_DIR);
+ Path archivePath = new Path(dataDir, tableName.getNamespaceAsString());
+ Path tableArchivePath =
+ new Path(archivePath, tableName.getQualifierAsString());
+ if (!fs.exists(tableArchivePath) || !fs.getFileStatus(tableArchivePath).isDirectory()) {
+ LOG.debug("Folder tableArchivePath: " + tableArchivePath.toString() + " does not exists");
+ tableArchivePath = null; // empty table has no archive
+ }
+ return tableArchivePath;
+ }
+
+ /**
+ * Given the backup root dir and the backup id, return the log file location for an incremental
+ * backup.
+ * @param backupRootDir backup root directory
+ * @param backupId backup id
+ * @return logBackupDir: ".../user/biadmin/backup1/WALs/backup_1396650096738"
+ */
+ public static String getLogBackupDir(String backupRootDir, String backupId) {
+ return backupRootDir + Path.SEPARATOR + HConstants.HREGION_LOGDIR_NAME + Path.SEPARATOR
+ + backupId;
+ }
+
+ public static Path getLogBackupPath(String backupRootDir, String backupId) {
+ return new Path(getLogBackupDir(backupRootDir, backupId));
+ }
+
+ private static Path getManifestPath(TableName tableName, Configuration conf,
+ Path backupRootPath, String backupId) throws IOException {
+ Path manifestPath = new Path(getTableBackupPath(backupRootPath, tableName, backupId),
+ BackupManifest.MANIFEST_FILE_NAME);
+ FileSystem fs = backupRootPath.getFileSystem(conf);
+ if (!fs.exists(manifestPath)) {
+ // check log dir for incremental backup case
+ manifestPath =
+ new Path(getLogBackupDir(backupRootPath.toString(), backupId) + Path.SEPARATOR
+ + BackupManifest.MANIFEST_FILE_NAME);
+ if (!fs.exists(manifestPath)) {
+ String errorMsg =
+ "Could not find backup manifest for " + backupId + " in " + backupRootPath.toString();
+ throw new IOException(errorMsg);
+ }
+ }
+ return manifestPath;
+ }
+
+ public static BackupManifest getManifest(TableName tableName, Configuration conf,
+ Path backupRootPath, String backupId) throws IOException {
+ BackupManifest manifest = new BackupManifest(conf,
+ getManifestPath(tableName, conf, backupRootPath, backupId));
+ return manifest;
+ }
+
+ /**
+ * Gets region list
+ * @param tableName table name
+ * @return RegionList region list
+ * @throws FileNotFoundException exception
+ * @throws IOException exception
+ */
+
+ public ArrayList<Path> getRegionList(TableName tableName)
+ throws FileNotFoundException, IOException {
+ Path tableArchivePath = this.getTableArchivePath(tableName);
+ ArrayList<Path> regionDirList = new ArrayList<Path>();
+ FileStatus[] children = fs.listStatus(tableArchivePath);
+ for (FileStatus childStatus : children) {
+ // here child refer to each region(Name)
+ Path child = childStatus.getPath();
+ regionDirList.add(child);
+ }
+ return regionDirList;
+ }
+
+ /**
+ * Gets region list
+ * @param tableArchivePath table archive path
+ * @return RegionList region list
+ * @throws FileNotFoundException exception
+ * @throws IOException exception
+ */
+ public ArrayList<Path> getRegionList(Path tableArchivePath) throws FileNotFoundException,
+ IOException {
+ ArrayList<Path> regionDirList = new ArrayList<Path>();
+ FileStatus[] children = fs.listStatus(tableArchivePath);
+ for (FileStatus childStatus : children) {
+ // here child refer to each region(Name)
+ Path child = childStatus.getPath();
+ regionDirList.add(child);
+ }
+ return regionDirList;
+ }
+
+ /**
+ * Counts the number of files in all subdirectories of an HBase table, i.e. HFiles, and finds
+ * the maximum number of files in any single region directory.
+ * @param tableArchivePath archive path
+ * @return the maximum number of files found in one region of the table
+ * @throws IOException exception
+ */
+ public int getMaxNumberOfFilesInSubDir(Path tableArchivePath) throws IOException {
+ int result = 1;
+ ArrayList<Path> regionPathList = this.getRegionList(tableArchivePath);
+ // tableArchivePath = this.getTableArchivePath(tableName);
+
+ if (regionPathList == null || regionPathList.size() == 0) {
+ throw new IllegalStateException("Cannot restore hbase table because directory '"
+ + tableArchivePath + "' is not a directory.");
+ }
+
+ for (Path regionPath : regionPathList) {
+ result = Math.max(result, getNumberOfFilesInDir(regionPath));
+ }
+ return result;
+ }
+
+ /**
+ * Counts the number of files (HFiles) in all column family subdirectories of a region.
+ * @param regionPath path to an HBase region directory
+ * @return the number of files in all family directories
+ * @throws IOException exception
+ */
+ public int getNumberOfFilesInDir(Path regionPath) throws IOException {
+ int result = 0;
+
+ if (!fs.exists(regionPath) || !fs.getFileStatus(regionPath).isDirectory()) {
+ throw new IllegalStateException("Cannot restore hbase table because directory '"
+ + regionPath.toString() + "' is not a directory.");
+ }
+
+ FileStatus[] tableDirContent = fs.listStatus(regionPath);
+ for (FileStatus subDirStatus : tableDirContent) {
+ FileStatus[] colFamilies = fs.listStatus(subDirStatus.getPath());
+ for (FileStatus colFamilyStatus : colFamilies) {
+ FileStatus[] colFamilyContent = fs.listStatus(colFamilyStatus.getPath());
+ result += colFamilyContent.length;
+ }
+ }
+ return result;
+ }
+
+ /**
+ * Duplicate the backup image if it's on local cluster
+ * @see HStore#bulkLoadHFile(String, long)
+ * @see HRegionFileSystem#bulkLoadStoreFile(String familyName, Path srcPath, long seqNum)
+ * @param tableArchivePath archive path
+ * @return the new tableArchivePath
+ * @throws IOException exception
+ */
+ public Path checkLocalAndBackup(Path tableArchivePath) throws IOException {
+ // Move the file if it's on local cluster
+ boolean isCopyNeeded = false;
+
+ FileSystem srcFs = tableArchivePath.getFileSystem(conf);
+ FileSystem desFs = FileSystem.get(conf);
+ if (tableArchivePath.getName().startsWith("/")) {
+ isCopyNeeded = true;
+ } else {
+ // This should match what is done in @see HRegionFileSystem#bulkLoadStoreFile(String, Path,
+ // long)
+ if (srcFs.getUri().equals(desFs.getUri())) {
+ LOG.debug("cluster hold the backup image: " + srcFs.getUri() + "; local cluster node: "
+ + desFs.getUri());
+ isCopyNeeded = true;
+ }
+ }
+ if (isCopyNeeded) {
+ LOG.debug("File " + tableArchivePath + " on local cluster, back it up before restore");
+ if (desFs.exists(restoreTmpPath)) {
+ try {
+ desFs.delete(restoreTmpPath, true);
+ } catch (IOException e) {
+ LOG.debug("Failed to delete path: " + restoreTmpPath
+ + ", need to check whether restore target DFS cluster is healthy");
+ }
+ }
+ FileUtil.copy(srcFs, tableArchivePath, desFs, restoreTmpPath, false, conf);
+ LOG.debug("Copied to temporary path on local cluster: " + restoreTmpPath);
+ tableArchivePath = restoreTmpPath;
+ }
+ return tableArchivePath;
+ }
+
+ /**
+ * Calculate region boundaries and add all the column families to the table descriptor
+ * @param regionDirList region dir list
+ * @return a set of keys to store the boundaries
+ */
+ public byte[][] generateBoundaryKeys(ArrayList<Path> regionDirList)
+ throws FileNotFoundException, IOException {
+ TreeMap<byte[], Integer> map = new TreeMap<byte[], Integer>(Bytes.BYTES_COMPARATOR);
+ // Build a set of keys to store the boundaries
+ byte[][] keys = null;
+ // calculate region boundaries and add all the column families to the table descriptor
+ for (Path regionDir : regionDirList) {
+ LOG.debug("Parsing region dir: " + regionDir);
+ Path hfofDir = regionDir;
+
+ if (!fs.exists(hfofDir)) {
+ LOG.warn("HFileOutputFormat dir " + hfofDir + " not found");
+ }
+
+ FileStatus[] familyDirStatuses = fs.listStatus(hfofDir);
+ if (familyDirStatuses == null) {
+ throw new IOException("No families found in " + hfofDir);
+ }
+
+ for (FileStatus stat : familyDirStatuses) {
+ if (!stat.isDirectory()) {
+ LOG.warn("Skipping non-directory " + stat.getPath());
+ continue;
+ }
+ boolean isIgnore = false;
+ String pathName = stat.getPath().getName();
+ for (String ignore : ignoreDirs) {
+ if (pathName.contains(ignore)) {
+ LOG.warn("Skipping non-family directory" + pathName);
+ isIgnore = true;
+ break;
+ }
+ }
+ if (isIgnore) {
+ continue;
+ }
+ Path familyDir = stat.getPath();
+ LOG.debug("Parsing family dir [" + familyDir.toString() + " in region [" + regionDir + "]");
+ // Skip _logs, etc
+ if (familyDir.getName().startsWith("_") || familyDir.getName().startsWith(".")) {
+ continue;
+ }
+
+ // start to parse hfile inside one family dir
+ Path[] hfiles = FileUtil.stat2Paths(fs.listStatus(familyDir));
+ for (Path hfile : hfiles) {
+ if (hfile.getName().startsWith("_") || hfile.getName().startsWith(".")
+ || StoreFileInfo.isReference(hfile.getName())
+ || HFileLink.isHFileLink(hfile.getName())) {
+ continue;
+ }
+ HFile.Reader reader = HFile.createReader(fs, hfile, new CacheConfig(conf), conf);
+ final byte[] first, last;
+ try {
+ reader.loadFileInfo();
+ first = reader.getFirstRowKey();
+ last = reader.getLastRowKey();
+ LOG.debug("Trying to figure out region boundaries hfile=" + hfile + " first="
+ + Bytes.toStringBinary(first) + " last=" + Bytes.toStringBinary(last));
+
+ // To eventually infer start key-end key boundaries
+ Integer value = map.containsKey(first) ? (Integer) map.get(first) : 0;
+ map.put(first, value + 1);
+ value = map.containsKey(last) ? (Integer) map.get(last) : 0;
+ map.put(last, value - 1);
+ } finally {
+ reader.close();
+ }
+ }
+ }
+ }
+ keys = LoadIncrementalHFiles.inferBoundaries(map);
+ return keys;
+ }
+
+ /**
+ * Check whether the backup image exists and whether there is a manifest file under its path.
+ * @param backupManifestMap If all the manifests are found, then they are put into this map
+ * @param tableArray the tables involved
+ * @throws IOException exception
+ */
+ public static void checkImageManifestExist(HashMap<TableName, BackupManifest> backupManifestMap,
+ TableName[] tableArray, Configuration conf,
+ Path backupRootPath, String backupId) throws IOException {
+ for (TableName tableName : tableArray) {
+ BackupManifest manifest = getManifest(tableName, conf, backupRootPath, backupId);
+ backupManifestMap.put(tableName, manifest);
+ }
+ }
+}
\ No newline at end of file
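
To illustrate the path layout that HBackupFileSystem encapsulates, here is a small sketch that resolves the per-table backup directory and reads the table descriptor stored with one backup image. The root directory, backup id and table name are illustrative values taken from the javadoc examples above.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.backup.HBackupFileSystem;

public class BackupImageInspector {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    String rootDir = "hdfs://backup.hbase.org:9000/user/biadmin/backup1";
    String backupId = "backup_1396650096738";
    TableName table = TableName.valueOf("t1_dn");

    // Static helper: .../backup1/default/t1_dn/backup_1396650096738
    System.out.println(HBackupFileSystem.getTableBackupDir(rootDir, backupId, table));

    // View into the on-disk image: read the table descriptor saved with the snapshot.
    HBackupFileSystem backupFs = new HBackupFileSystem(conf, new Path(rootDir), backupId);
    HTableDescriptor desc = backupFs.getTableDesc(table);
    System.out.println("backed-up table descriptor: " + desc);
  }
}
```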
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/RestoreClient.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/RestoreClient.java
new file mode 100644
index 0000000..a3aaa98
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/RestoreClient.java
@@ -0,0 +1,46 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.backup;
+
+import java.io.IOException;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.TableName;
+
+public interface RestoreClient {
+
+ public void setConf(Configuration conf);
+
+ /**
+ * Restore operation.
+ * @param backupRootDir The root dir for backup image
+ * @param backupId The backup id for image to be restored
+ * @param check true to only check the restore sequence and dependencies without restoring
+ * @param autoRestore true to automatically restore all dependent images in the correct order
+ * @param sTableArray the array of tables to be restored
+ * @param tTableArray the array of target tables to restore to
+ * @param isOverwrite if true, overwrite the target table when it already exists; otherwise fail
+ * the request if the target table exists
+ * @return true if only the dependency check is performed
+ * @throws IOException if any failure occurs during restore
+ */
+ public boolean restore(
+ String backupRootDir,
+ String backupId, boolean check, boolean autoRestore, TableName[] sTableArray,
+ TableName[] tTableArray, boolean isOverwrite) throws IOException;
+}
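
A minimal sketch of driving a restore through the interface above, obtained via BackupRestoreFactory.getRestoreClient shown earlier in this patch. The paths, backup id and table mapping are illustrative, and a real invocation would normally come from RestoreDriver below.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.backup.BackupRestoreFactory;
import org.apache.hadoop.hbase.backup.RestoreClient;

public class RestoreExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    RestoreClient client = BackupRestoreFactory.getRestoreClient(conf);
    TableName[] fromTables = new TableName[] { TableName.valueOf("t1_dn") };
    TableName[] toTables = new TableName[] { TableName.valueOf("t1_dn_restored") };
    // check=false: actually restore; autoRestore=true: follow dependent images;
    // isOverwrite=false: fail if the target table already exists.
    boolean ok = client.restore("hdfs://backup.hbase.org:9000/user/biadmin/backup1",
        "backup_1396650096738", false, true, fromTables, toTables, false);
    System.out.println("restore result: " + ok);
  }
}
```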
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/RestoreDriver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/RestoreDriver.java
new file mode 100644
index 0000000..541882a
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/RestoreDriver.java
@@ -0,0 +1,174 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.backup;
+
+import java.io.IOException;
+
+import org.apache.commons.cli.CommandLine;
+import org.apache.commons.cli.Options;
+import org.apache.commons.cli.ParseException;
+import org.apache.commons.cli.PosixParser;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.backup.impl.BackupUtil;
+import org.apache.hadoop.hbase.util.AbstractHBaseTool;
+import org.apache.hadoop.hbase.util.LogUtils;
+import org.apache.hadoop.util.ToolRunner;
+import org.apache.log4j.Level;
+import org.apache.log4j.Logger;
+
+public class RestoreDriver extends AbstractHBaseTool {
+
+ private static final Log LOG = LogFactory.getLog(RestoreDriver.class);
+ private Options opt;
+ private CommandLine cmd;
+
+ private static final String OPTION_OVERWRITE = "overwrite";
+ private static final String OPTION_CHECK = "check";
+ private static final String OPTION_AUTOMATIC = "automatic";
+
+ private static final String USAGE =
+ "Usage: hbase restore [tableMapping] \n"
+ + " [-overwrite] [-check] [-automatic]\n"
+ + " backup_root_path The parent location where the backup images are stored\n"
+ + " backup_id The id identifying the backup image\n"
+ + " table(s) Table(s) from the backup image to be restored.\n"
+ + " Tables are separated by comma.\n"
+ + " Options:\n"
+ + " tableMapping A comma separated list of target tables.\n"
+ + " If specified, each table in must have a mapping.\n"
+ + " -overwrite With this option, restore overwrites to the existing table "
+ + "if there's any in\n"
+ + " restore target. The existing table must be online before restore.\n"
+ + " -check With this option, restore sequence and dependencies are checked\n"
+ + " and verified without executing the restore\n"
+ + " -automatic With this option, all the dependencies are automatically restored\n"
+ + " together with this backup image following the correct order.\n"
+ + " The restore dependencies can be checked by using \"-check\" "
+ + "option,\n"
+ + " or using \"hbase backup describe\" command. Without this option, "
+ + "only\n" + " this backup image is restored\n";
+
+ protected void init() throws IOException {
+ // define supported options
+ opt = new Options();
+ opt.addOption(OPTION_OVERWRITE, false,
+ "Overwrite the data if any of the restore target tables exists");
+ opt.addOption(OPTION_CHECK, false, "Check restore sequence and dependencies");
+ opt.addOption(OPTION_AUTOMATIC, false, "Restore all dependencies");
+ opt.addOption("debug", false, "Enable debug logging");
+
+ // disable irrelevant loggers to avoid cluttering up the command output
+ LogUtils.disableUselessLoggers(LOG);
+ }
+
+ private int parseAndRun(String[] args) {
+ CommandLine cmd = null;
+ try {
+ cmd = new PosixParser().parse(opt, args);
+ } catch (ParseException e) {
+ LOG.error("Could not parse command", e);
+ return -1;
+ }
+
+ // enable debug logging
+ Logger backupClientLogger = Logger.getLogger("org.apache.hadoop.hbase.backup");
+ if (cmd.hasOption("debug")) {
+ backupClientLogger.setLevel(Level.DEBUG);
+ }
+
+ // whether to overwrite to existing table if any, false by default
+ boolean isOverwrite = cmd.hasOption(OPTION_OVERWRITE);
+ if (isOverwrite) {
+ LOG.debug("Found -overwrite option in restore command, "
+ + "will overwrite to existing table if any in the restore target");
+ }
+
+ // whether to only check the dependencies, false by default
+ boolean check = cmd.hasOption(OPTION_CHECK);
+ if (check) {
+ LOG.debug("Found -check option in restore command, "
+ + "will check and verify the dependencies");
+ }
+
+ // whether to restore all dependencies, false by default
+ boolean autoRestore = cmd.hasOption(OPTION_AUTOMATIC);
+ if (autoRestore) {
+ LOG.debug("Found -automatic option in restore command, "
+ + "will automatically retore all the dependencies");
+ }
+
+ // parse main restore command options
+ String[] remainArgs = cmd.getArgs();
+ if (remainArgs.length < 3) {
+ System.out.println("ERROR: missing arguments");
+ System.out.println(USAGE);
+ return -1;
+ }
+
+ String backupRootDir = remainArgs[0];
+ String backupId = remainArgs[1];
+ String tables = remainArgs[2];
+
+ String tableMapping = (remainArgs.length > 3) ? remainArgs[3] : null;
+
+ TableName[] sTableArray = BackupUtil.parseTableNames(tables);
+ TableName[] tTableArray = BackupUtil.parseTableNames(tableMapping);
+
+ if (sTableArray != null && tTableArray != null && (sTableArray.length != tTableArray.length)) {
+ System.err.println("ERROR: table mapping mismatch: " + tables + " : " + tableMapping);
+ System.out.println(USAGE);
+ return -1;
+ }
+
+ try {
+ RestoreClient client = BackupRestoreFactory.getRestoreClient(conf);
+ client.restore(backupRootDir, backupId, check, autoRestore, sTableArray,
+ tTableArray, isOverwrite);
+ } catch (IOException e) {
+ System.err.println("ERROR: " + e.getMessage());
+ return -1;
+ }
+ return 0;
+ }
+
+ @Override
+ protected void addOptions() {
+ }
+
+ @Override
+ protected void processOptions(CommandLine cmd) {
+ this.cmd = cmd;
+ }
+
+ @Override
+ protected int doWork() throws Exception {
+ init();
+ return parseAndRun(cmd.getArgs());
+ }
+
+ public static void main(String[] args) throws Exception {
+ Configuration conf = HBaseConfiguration.create();
+ int ret = ToolRunner.run(conf, new RestoreDriver(), args);
+ System.exit(ret);
+ }
+}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupClientImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupClientImpl.java
new file mode 100644
index 0000000..5b8a151
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupClientImpl.java
@@ -0,0 +1,183 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.backup.impl;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Set;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.DoNotRetryIOException;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.backup.BackupClient;
+import org.apache.hadoop.hbase.backup.BackupType;
+import org.apache.hadoop.hbase.backup.BackupUtility;
+import org.apache.hadoop.hbase.backup.HBackupFileSystem;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
+import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+
+import com.google.common.collect.Lists;
+
+/**
+ * Backup HBase tables locally or on a remote cluster. Serves as the client entry point for the
+ * following features:
+ * - Full backup: provides local and remote backup/restore for a list of tables
+ * - Incremental backup: builds on top of a full backup as a daily/weekly backup
+ * - Convert incremental backup WAL files into HFiles
+ * - Merge several backup images into one (e.g. merge weekly images into a monthly one)
+ * - Add and remove tables to and from a backup image
+ * - Cancel a backup process
+ * - Describe the information of a backup image
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public final class BackupClientImpl implements BackupClient {
+ private static final Log LOG = LogFactory.getLog(BackupClientImpl.class);
+ private Configuration conf;
+ private BackupManager backupManager;
+
+ public BackupClientImpl() {
+ }
+
+ @Override
+ public void setConf(Configuration conf) {
+ this.conf = conf;
+ }
+
+ /**
+ * Prepare and submit Backup request
+ * @param backupId : backup_timestamp (something like backup_1398729212626)
+ * @param backupType : full or incremental
+ * @param tableList : tables to be backed up
+ * @param targetRootDir : specified by user
+ * @throws IOException exception
+ */
+ protected void requestBackup(String backupId, BackupType backupType, List<TableName> tableList,
+ String targetRootDir) throws IOException {
+
+ BackupContext backupContext = null;
+
+ HBaseAdmin hbadmin = null;
+ Connection conn = null;
+ try {
+ backupManager = new BackupManager(conf);
+ if (backupType == BackupType.INCREMENTAL) {
+ Set<TableName> incrTableSet = backupManager.getIncrementalBackupTableSet();
+ if (incrTableSet.isEmpty()) {
+ LOG.warn("Incremental backup table set contains no table.\n"
+ + "Use 'backup create full' or 'backup stop' to \n "
+ + "change the tables covered by incremental backup.");
+ throw new DoNotRetryIOException("No table covered by incremental backup.");
+ }
+
+ LOG.info("Incremental backup for the following table set: " + incrTableSet);
+ tableList = Lists.newArrayList(incrTableSet);
+ }
+
+ // check whether table exists first before starting real request
+ if (tableList != null) {
+ ArrayList<TableName> nonExistingTableList = null;
+ conn = ConnectionFactory.createConnection(conf);
+ hbadmin = (HBaseAdmin) conn.getAdmin();
+ for (TableName tableName : tableList) {
+ if (!hbadmin.tableExists(tableName)) {
+ if (nonExistingTableList == null) {
+ nonExistingTableList = new ArrayList<>();
+ }
+ nonExistingTableList.add(tableName);
+ }
+ }
+ if (nonExistingTableList != null) {
+ if (backupType == BackupType.INCREMENTAL ) {
+ LOG.warn("Incremental backup table set contains non-exising table: "
+ + nonExistingTableList);
+ } else {
+ // Throw exception only in full mode - we tried to back up a non-existing table
+ throw new DoNotRetryIOException("Non-existing tables found in the table list: "
+ + nonExistingTableList);
+ }
+ }
+ }
+
+ // if any target table backup dir already exist, then no backup action taken
+ if (tableList != null) {
+ for (TableName table : tableList) {
+ String targetTableBackupDir =
+ HBackupFileSystem.getTableBackupDir(targetRootDir, backupId, table);
+ Path targetTableBackupDirPath = new Path(targetTableBackupDir);
+ FileSystem outputFs = FileSystem.get(targetTableBackupDirPath.toUri(), conf);
+ if (outputFs.exists(targetTableBackupDirPath)) {
+ throw new DoNotRetryIOException("Target backup directory " + targetTableBackupDir
+ + " exists already.");
+ }
+ }
+ }
+ backupContext =
+ backupManager.createBackupContext(backupId, backupType, tableList, targetRootDir);
+ backupManager.initialize();
+ backupManager.dispatchRequest(backupContext);
+ } catch (BackupException e) {
+ // suppress the backup exception wrapped within #initialize or #dispatchRequest, backup
+ // exception has already been handled normally
+ LOG.error("Backup Exception ", e);
+ } finally {
+ if (hbadmin != null) {
+ hbadmin.close();
+ }
+ if (conn != null) {
+ conn.close();
+ }
+ }
+ }
+
+ @Override
+ public String create(BackupType backupType, List<TableName> tableList, String backupRootPath)
+ throws IOException {
+
+ String backupId = BackupRestoreConstants.BACKUPID_PREFIX + EnvironmentEdgeManager.currentTime();
+ BackupUtility.checkTargetDir(backupRootPath, conf);
+
+ // table list specified for backup, trigger backup on specified tables
+ try {
+ requestBackup(backupId, backupType, tableList, backupRootPath);
+ } catch (RuntimeException e) {
+ String errMsg = e.getMessage();
+ if (errMsg != null
+ && (errMsg.startsWith("Non-existing tables found") || errMsg
+ .startsWith("Snapshot is not found"))) {
+ LOG.error(errMsg + ", please check your command");
+ throw e;
+ } else {
+ throw e;
+ }
+ } finally{
+ if(backupManager != null) {
+ backupManager.close();
+ }
+ }
+ return backupId;
+ }
+
+}
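
A hedged usage sketch of the create() entry point above, mirroring what BackupCommands.CreateCommand does in the next file; the table names and backup root path are hypothetical.

    Configuration conf = HBaseConfiguration.create();
    BackupClient client = BackupRestoreFactory.getBackupClient(conf);
    List<TableName> tables = Lists.newArrayList(TableName.valueOf("t1"), TableName.valueOf("t2"));
    // Full backup of t1 and t2 into the given backup root; returns the generated backup id
    String backupId = client.create(BackupType.FULL, tables, "hdfs://nn:8020/backup");
    System.out.println("Started backup " + backupId);
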
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupCommands.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupCommands.java
new file mode 100644
index 0000000..56e26fa
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupCommands.java
@@ -0,0 +1,158 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.backup.impl;
+
+import java.io.IOException;
+import org.apache.commons.cli.CommandLine;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.conf.Configured;
+import org.apache.hadoop.hbase.backup.BackupClient;
+import org.apache.hadoop.hbase.backup.BackupRestoreFactory;
+import org.apache.hadoop.hbase.backup.BackupType;
+import org.apache.hadoop.hbase.backup.impl.BackupRestoreConstants.BackupCommand;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+
+import com.google.common.collect.Lists;
+
+/**
+ * General backup commands, options and usage messages
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public final class BackupCommands {
+
+ private static final String USAGE = "Usage: hbase backup COMMAND\n"
+ + "where COMMAND is one of:\n" + " create create a new backup image\n"
+ + "Enter \'help COMMAND\' to see help message for each command\n";
+
+ private static final String CREATE_CMD_USAGE =
+ "Usage: hbase backup create [tables] [-convert] "
+ + "\n" + " type \"full\" to create a full backup image;\n"
+ + " \"incremental\" to create an incremental backup image\n"
+ + " backup_root_path The full root path to store the backup image,\n"
+ + " the prefix can be hdfs, webhdfs, gpfs, etc\n" + " Options:\n"
+ + " tables If no tables (\"\") are specified, all tables are backed up. "
+ + "Otherwise it is a\n" + " comma separated list of tables.\n"
+ + " -convert For an incremental backup, convert WAL files to HFiles\n";
+
+ public static abstract class Command extends Configured {
+ Command(Configuration conf) {
+ super(conf);
+ }
+ public abstract void execute() throws IOException;
+ }
+
+ private BackupCommands() {
+ throw new AssertionError("Instantiating utility class...");
+ }
+
+ public static Command createCommand(Configuration conf, BackupCommand type, CommandLine cmdline) {
+ Command cmd = null;
+ switch (type) {
+ case CREATE:
+ cmd = new CreateCommand(conf, cmdline);
+ break;
+ case HELP:
+ default:
+ cmd = new HelpCommand(conf, cmdline);
+ break;
+ }
+ return cmd;
+ }
+
+ private static class CreateCommand extends Command {
+ CommandLine cmdline;
+
+ CreateCommand(Configuration conf, CommandLine cmdline) {
+ super(conf);
+ this.cmdline = cmdline;
+ }
+
+ @Override
+ public void execute() throws IOException {
+ if (cmdline == null || cmdline.getArgs() == null) {
+ System.out.println("ERROR: missing arguments");
+ System.out.println(CREATE_CMD_USAGE);
+ System.exit(-1);
+ }
+ String[] args = cmdline.getArgs();
+ if (args.length < 2 || args.length > 3) {
+ System.out.println("ERROR: wrong number of arguments");
+ System.out.println(CREATE_CMD_USAGE);
+ System.exit(-1);
+ }
+
+ if (!BackupType.FULL.toString().equalsIgnoreCase(args[0])
+ && !BackupType.INCREMENTAL.toString().equalsIgnoreCase(args[0])) {
+ System.out.println("ERROR: invalid backup type");
+ System.out.println(CREATE_CMD_USAGE);
+ System.exit(-1);
+ }
+
+ String tables = (args.length == 3) ? args[2] : null;
+
+ try {
+ BackupClient client = BackupRestoreFactory.getBackupClient(getConf());
+ client.create(BackupType.valueOf(args[0].toUpperCase()),
+ Lists.newArrayList(BackupUtil.parseTableNames(tables)), args[1]);
+ } catch (RuntimeException e) {
+ System.out.println("ERROR: " + e.getMessage());
+ System.exit(-1);
+ }
+ }
+ }
+
+ private static class HelpCommand extends Command {
+ CommandLine cmdline;
+
+ HelpCommand(Configuration conf, CommandLine cmdline) {
+ super(conf);
+ this.cmdline = cmdline;
+ }
+
+ @Override
+ public void execute() throws IOException {
+ if (cmdline == null) {
+ System.out.println(USAGE);
+ System.exit(0);
+ }
+
+ String[] args = cmdline.getArgs();
+ if (args == null || args.length == 0) {
+ System.out.println(USAGE);
+ System.exit(0);
+ }
+
+ if (args.length != 1) {
+ System.out.println("Only support check help message of a single command type");
+ System.out.println(USAGE);
+ System.exit(0);
+ }
+
+ String type = args[0];
+
+ if (BackupCommand.CREATE.name().equalsIgnoreCase(type)) {
+ System.out.println(CREATE_CMD_USAGE);
+ } // other commands will be supported in future jira
+ System.exit(0);
+ }
+ }
+
+}
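
A small sketch of the command dispatch above, assuming the driver has already consumed the leading "create" keyword and parses the remaining arguments with commons-cli; conf is an existing Configuration, BackupCommand is the enum from BackupRestoreConstants, and the path and table name are hypothetical.

    String[] rest = new String[] { "full", "hdfs://nn:8020/backup", "t1" };
    CommandLine cmdline = new PosixParser().parse(new Options(), rest);
    BackupCommands.createCommand(conf, BackupCommand.CREATE, cmdline).execute();
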
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupContext.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupContext.java
new file mode 100644
index 0000000..1be0c3b
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupContext.java
@@ -0,0 +1,382 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.backup.impl;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Set;
+
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.backup.BackupType;
+import org.apache.hadoop.hbase.backup.HBackupFileSystem;
+import org.apache.hadoop.hbase.backup.impl.BackupHandler.BackupState;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.protobuf.generated.BackupProtos;
+import org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext.Builder;
+import org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus;
+
+/**
+ * An object to encapsulate the information for each backup request
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public class BackupContext {
+
+ public Map<TableName, BackupStatus> getBackupStatusMap() {
+ return backupStatusMap;
+ }
+
+ public void setBackupStatusMap(Map<TableName, BackupStatus> backupStatusMap) {
+ this.backupStatusMap = backupStatusMap;
+ }
+
+ public HashMap<TableName, HashMap<String, Long>> getTableSetTimestampMap() {
+ return tableSetTimestampMap;
+ }
+
+ public void setTableSetTimestampMap(
+ HashMap<TableName, HashMap<String, Long>> tableSetTimestampMap) {
+ this.tableSetTimestampMap = tableSetTimestampMap;
+ }
+
+ public String getHlogTargetDir() {
+ return hlogTargetDir;
+ }
+
+ public void setType(BackupType type) {
+ this.type = type;
+ }
+
+ public void setTargetRootDir(String targetRootDir) {
+ this.targetRootDir = targetRootDir;
+ }
+
+ public void setTotalBytesCopied(long totalBytesCopied) {
+ this.totalBytesCopied = totalBytesCopied;
+ }
+
+ public void setCancelled(boolean cancelled) {
+ this.state = BackupState.CANCELLED;
+ }
+
+ // backup id: a timestamp when we request the backup
+ private String backupId;
+
+ // backup type, full or incremental
+ private BackupType type;
+
+ // target root directory for storing the backup files
+ private String targetRootDir;
+
+ // overall backup state
+ private BackupHandler.BackupState state;
+
+ // overall backup phase
+ private BackupHandler.BackupPhase phase;
+
+ // overall backup failure message
+ private String failedMsg;
+
+ // backup status map for all tables
+ private Map<TableName, BackupStatus> backupStatusMap;
+
+ // actual start timestamp of the backup process
+ private long startTs;
+
+ // actual end timestamp of the backup process, could be fail or complete
+ private long endTs;
+
+ // the total bytes of incremental logs copied
+ private long totalBytesCopied;
+
+ // for incremental backup, the location of the backed-up hlogs
+ private String hlogTargetDir = null;
+
+ // incremental backup file list
+ transient private List<String> incrBackupFileList;
+
+ // new region server log timestamps for table set after distributed log roll
+ // key - table name, value - map of RegionServer hostname -> last log rolled timestamp
+ transient private HashMap<TableName, HashMap<String, Long>> tableSetTimestampMap;
+
+ // backup progress in % (0-100)
+ private int progress;
+
+ public BackupContext() {
+ }
+
+ public BackupContext(String backupId, BackupType type, TableName[] tables, String targetRootDir) {
+ backupStatusMap = new HashMap<TableName, BackupStatus>();
+
+ this.backupId = backupId;
+ this.type = type;
+ this.targetRootDir = targetRootDir;
+
+ this.addTables(tables);
+
+ if (type == BackupType.INCREMENTAL) {
+ setHlogTargetDir(HBackupFileSystem.getLogBackupDir(targetRootDir, backupId));
+ }
+
+ this.startTs = 0;
+ this.endTs = 0;
+ }
+
+ /**
+ * Set the current progress.
+ * @param p progress value, in percent
+ */
+ public void setProgress(int p) {
+ this.progress = p;
+ }
+
+ /**
+ * Get current progress
+ */
+ public int getProgress() {
+ return progress;
+ }
+
+
+ /**
+ * Whether this backup has been marked as cancelled.
+ * @return True if marked as cancelled
+ */
+ public boolean isCancelled() {
+ return this.state == BackupState.CANCELLED;
+ }
+
+ public String getBackupId() {
+ return backupId;
+ }
+
+ public void setBackupId(String backupId) {
+ this.backupId = backupId;
+ }
+
+ public BackupStatus getBackupStatus(TableName table) {
+ return this.backupStatusMap.get(table);
+ }
+
+ public String getFailedMsg() {
+ return failedMsg;
+ }
+
+ public void setFailedMsg(String failedMsg) {
+ this.failedMsg = failedMsg;
+ }
+
+ public long getStartTs() {
+ return startTs;
+ }
+
+ public void setStartTs(long startTs) {
+ this.startTs = startTs;
+ }
+
+ public long getEndTs() {
+ return endTs;
+ }
+
+ public void setEndTs(long endTs) {
+ this.endTs = endTs;
+ }
+
+ public long getTotalBytesCopied() {
+ return totalBytesCopied;
+ }
+
+ public BackupHandler.BackupState getState() {
+ return state;
+ }
+
+ public void setState(BackupHandler.BackupState flag) {
+ this.state = flag;
+ }
+
+ public BackupHandler.BackupPhase getPhase() {
+ return phase;
+ }
+
+ public void setPhase(BackupHandler.BackupPhase phase) {
+ this.phase = phase;
+ }
+
+ public BackupType getType() {
+ return type;
+ }
+
+ public void setSnapshotName(TableName table, String snapshotName) {
+ this.backupStatusMap.get(table).setSnapshotName(snapshotName);
+ }
+
+ public String getSnapshotName(TableName table) {
+ return this.backupStatusMap.get(table).getSnapshotName();
+ }
+
+ public List<String> getSnapshotNames() {
+ List<String> snapshotNames = new ArrayList<String>();
+ for (BackupStatus backupStatus : this.backupStatusMap.values()) {
+ snapshotNames.add(backupStatus.getSnapshotName());
+ }
+ return snapshotNames;
+ }
+
+ public Set<TableName> getTables() {
+ return this.backupStatusMap.keySet();
+ }
+
+ public List<TableName> getTableNames() {
+ return new ArrayList<TableName>(backupStatusMap.keySet());
+ }
+
+ public void addTables(TableName[] tables) {
+ for (TableName table : tables) {
+ BackupStatus backupStatus = new BackupStatus(table, this.targetRootDir, this.backupId);
+ this.backupStatusMap.put(table, backupStatus);
+ }
+ }
+
+ public String getTargetRootDir() {
+ return targetRootDir;
+ }
+
+ public void setHlogTargetDir(String hlogTargetDir) {
+ this.hlogTargetDir = hlogTargetDir;
+ }
+
+ public String getHLogTargetDir() {
+ return hlogTargetDir;
+ }
+
+ public List<String> getIncrBackupFileList() {
+ return incrBackupFileList;
+ }
+
+ public List<String> setIncrBackupFileList(List<String> incrBackupFileList) {
+ this.incrBackupFileList = incrBackupFileList;
+ return this.incrBackupFileList;
+ }
+
+ /**
+ * Set the new region server log timestamps after distributed log roll
+ * @param newTableSetTimestampMap table timestamp map
+ */
+ public void setIncrTimestampMap(HashMap<TableName, HashMap<String, Long>> newTableSetTimestampMap) {
+ this.tableSetTimestampMap = newTableSetTimestampMap;
+ }
+
+ /**
+ * Get new region server log timestamps after distributed log roll
+ * @return new region server log timestamps
+ */
+ public HashMap<TableName, HashMap<String, Long>> getIncrTimestampMap() {
+ return this.tableSetTimestampMap;
+ }
+
+ public TableName getTableBySnapshot(String snapshotName) {
+ for (Entry<TableName, BackupStatus> entry : this.backupStatusMap.entrySet()) {
+ if (snapshotName.equals(entry.getValue().getSnapshotName())) {
+ return entry.getKey();
+ }
+ }
+ return null;
+ }
+
+ public byte[] toByteArray() throws IOException {
+ BackupProtos.BackupContext.Builder builder =
+ BackupProtos.BackupContext.newBuilder();
+ builder.setBackupId(getBackupId());
+ setBackupStatusMap(builder);
+ builder.setEndTs(getEndTs());
+ if(getFailedMsg() != null){
+ builder.setFailedMessage(getFailedMsg());
+ }
+ if(getState() != null){
+ builder.setState(BackupProtos.BackupContext.BackupState.valueOf(getState().name()));
+ }
+ if(getPhase() != null){
+ builder.setPhase(BackupProtos.BackupContext.BackupPhase.valueOf(getPhase().name()));
+ }
+ if(getHLogTargetDir() != null){
+ builder.setHlogTargetDir(getHLogTargetDir());
+ }
+
+ builder.setProgress(getProgress());
+ builder.setStartTs(getStartTs());
+ builder.setTargetRootDir(getTargetRootDir());
+ builder.setTotalBytesCopied(getTotalBytesCopied());
+ builder.setType(BackupProtos.BackupType.valueOf(getType().name()));
+ byte[] data = builder.build().toByteArray();
+ return data;
+ }
+
+ private void setBackupStatusMap(Builder builder) {
+ for (Entry<TableName, BackupStatus> entry : backupStatusMap.entrySet()) {
+ builder.addTableBackupStatus(entry.getValue().toProto());
+ }
+ }
+
+ public static BackupContext fromByteArray(byte[] data) throws IOException {
+
+ BackupContext context = new BackupContext();
+ BackupProtos.BackupContext proto = BackupProtos.BackupContext.parseFrom(data);
+ context.setBackupId(proto.getBackupId());
+ context.setBackupStatusMap(toMap(proto.getTableBackupStatusList()));
+ context.setEndTs(proto.getEndTs());
+ if(proto.hasFailedMessage()) {
+ context.setFailedMsg(proto.getFailedMessage());
+ }
+ if(proto.hasState()) {
+ context.setState(BackupHandler.BackupState.valueOf(proto.getState().name()));
+ }
+ if(proto.hasHlogTargetDir()) {
+ context.setHlogTargetDir(proto.getHlogTargetDir());
+ }
+ if(proto.hasPhase()) {
+ context.setPhase(BackupHandler.BackupPhase.valueOf(proto.getPhase().name()));
+ }
+ if(proto.hasProgress()) {
+ context.setProgress(proto.getProgress());
+ }
+ context.setStartTs(proto.getStartTs());
+ context.setTargetRootDir(proto.getTargetRootDir());
+ context.setTotalBytesCopied(proto.getTotalBytesCopied());
+ context.setType(BackupType.valueOf(proto.getType().name()));
+ return context;
+ }
+
+ private static Map<TableName, BackupStatus> toMap(List<TableBackupStatus> list) {
+ HashMap<TableName, BackupStatus> map = new HashMap<>();
+ for (TableBackupStatus tbs : list){
+ map.put(ProtobufUtil.toTableName(tbs.getTable()), BackupStatus.convert(tbs));
+ }
+ return map;
+ }
+
+}
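
A brief sketch of the serialization round-trip above, presumably used when backup state is persisted and read back by the backup manager; the backup id, table, and root dir are hypothetical.

    BackupContext ctx = new BackupContext("backup_1398729212626", BackupType.FULL,
        new TableName[] { TableName.valueOf("t1") }, "hdfs://nn:8020/backup");
    ctx.setState(BackupHandler.BackupState.RUNNING);
    byte[] bytes = ctx.toByteArray();                    // serialize via BackupProtos
    BackupContext roundTripped = BackupContext.fromByteArray(bytes);
    assert roundTripped.getBackupId().equals(ctx.getBackupId());
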
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupCopyService.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupCopyService.java
new file mode 100644
index 0000000..1e8da63
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupCopyService.java
@@ -0,0 +1,37 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.backup.impl;
+
+import java.io.IOException;
+
+import org.apache.hadoop.conf.Configurable;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public interface BackupCopyService extends Configurable {
+ static enum Type {
+ FULL, INCREMENTAL
+ }
+
+ public int copy(BackupContext backupContext, BackupManager backupManager, Configuration conf,
+ BackupCopyService.Type copyType, String[] options) throws IOException;
+}
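
For context, a sketch of how this interface is consumed for a full (snapshot export) copy, mirroring BackupHandler.snapshotCopy further down; backupContext and backupManager stand for objects the caller already holds, and the snapshot name and target dir are hypothetical.

    BackupCopyService copyService = BackupRestoreFactory.getBackupCopyService(conf);
    String[] args = { "-snapshot", "snapshot_1398729212626_default_t1",
        "-copy-to", "hdfs://nn:8020/backup/backup_1398729212626/default/t1" };
    int res = copyService.copy(backupContext, backupManager, conf, BackupCopyService.Type.FULL, args);
    if (res != 0) {
      throw new IOException("Snapshot export failed with return code " + res);
    }
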
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupException.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupException.java
new file mode 100644
index 0000000..af70cc8
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupException.java
@@ -0,0 +1,85 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.backup.impl;
+
+import org.apache.hadoop.hbase.HBaseIOException;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+
+/**
+ * Backup exception
+ */
+@SuppressWarnings("serial")
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public class BackupException extends HBaseIOException {
+ private BackupContext description;
+
+ /**
+ * Some exception happened for a backup and we don't even know which backup it was about
+ * @param msg Full description of the failure
+ */
+ public BackupException(String msg) {
+ super(msg);
+ }
+
+ /**
+ * Some exception happened for a backup with a cause
+ * @param cause the cause
+ */
+ public BackupException(Throwable cause) {
+ super(cause);
+ }
+
+ /**
+ * Exception for the given backup that has no previous root cause
+ * @param msg reason why the backup failed
+ * @param desc description of the backup that is being failed
+ */
+ public BackupException(String msg, BackupContext desc) {
+ super(msg);
+ this.description = desc;
+ }
+
+ /**
+ * Exception for the given backup due to another exception
+ * @param msg reason why the backup failed
+ * @param cause root cause of the failure
+ * @param desc description of the backup that is being failed
+ */
+ public BackupException(String msg, Throwable cause, BackupContext desc) {
+ super(msg, cause);
+ this.description = desc;
+ }
+
+ /**
+ * Exception when the description of the backup cannot be determined, due to some other root
+ * cause
+ * @param message description of what caused the failure
+ * @param e root cause
+ */
+ public BackupException(String message, Exception e) {
+ super(message, e);
+ }
+
+ public BackupContext getBackupContext() {
+ return this.description;
+ }
+
+}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupHandler.java
new file mode 100644
index 0000000..7bd6e99
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupHandler.java
@@ -0,0 +1,702 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.backup.impl;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.concurrent.Callable;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.backup.BackupRestoreFactory;
+import org.apache.hadoop.hbase.backup.BackupType;
+import org.apache.hadoop.hbase.backup.BackupUtility;
+import org.apache.hadoop.hbase.backup.HBackupFileSystem;
+import org.apache.hadoop.hbase.backup.impl.BackupManifest.BackupImage;
+import org.apache.hadoop.hbase.backup.master.LogRollMasterProcedureManager;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+import org.apache.hadoop.hbase.util.FSUtils;
+import org.apache.zookeeper.KeeperException.NoNodeException;
+
+/**
+ * A handler that carries out the backup operations and tracks backup progress
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+ public class BackupHandler implements Callable<Void> {
+ private static final Log LOG = LogFactory.getLog(BackupHandler.class);
+
+ // backup phase
+ // for the overall backup (within a table list, some tables may go online while others go offline)
+ protected static enum BackupPhase {
+ REQUEST, SNAPSHOT, PREPARE_INCREMENTAL, SNAPSHOTCOPY, INCREMENTAL_COPY, STORE_MANIFEST;
+ }
+
+ // backup status flag
+ public static enum BackupState {
+ WAITING, RUNNING, COMPLETE, FAILED, CANCELLED;
+ }
+
+ protected final BackupContext backupContext;
+ private final BackupManager backupManager;
+ private final Configuration conf;
+ private final Connection conn;
+
+ public BackupHandler(BackupContext backupContext,
+ BackupManager backupManager, Configuration conf, Connection connection) {
+ this.backupContext = backupContext;
+ this.backupManager = backupManager;
+ this.conf = conf;
+ this.conn = connection;
+ }
+
+ public BackupContext getBackupContext() {
+ return backupContext;
+ }
+
+ @Override
+ public Void call() throws Exception {
+ try(Admin admin = conn.getAdmin()) {
+ // overall backup begin
+ this.beginBackup(backupContext);
+ HashMap<String, Long> newTimestamps = null;
+ // handle full or incremental backup for table or table list
+ if (backupContext.getType() == BackupType.FULL) {
+ String savedStartCode = null;
+ boolean firstBackup = false;
+ // do snapshot for full table backup
+
+ try {
+ savedStartCode = backupManager.readBackupStartCode();
+ firstBackup = savedStartCode == null;
+ if (firstBackup) {
+ // This is our first backup. Let's put some marker on ZK so that we can hold the logs
+ // while we do the backup.
+ backupManager.writeBackupStartCode(0L);
+ }
+ // We roll log here before we do the snapshot. It is possible there is duplicate data
+ // in the log that is already in the snapshot. But if we do it after the snapshot, we
+ // could have data loss.
+ // A better approach is to do the roll log on each RS in the same global procedure as
+ // the snapshot.
+ LOG.info("Execute roll log procedure for full backup ...");
+ admin.execProcedure(LogRollMasterProcedureManager.ROLLLOG_PROCEDURE_SIGNATURE,
+ LogRollMasterProcedureManager.ROLLLOG_PROCEDURE_NAME, new HashMap<String, String>());
+ newTimestamps = backupManager.readRegionServerLastLogRollResult();
+ if (firstBackup) {
+ // Updates registered log files
+ // We record ALL old WAL files as registered, because
+ // this is a first full backup in the system and these
+ // files are not needed for next incremental backup
+ List<String> logFiles = BackupUtil.getWALFilesOlderThan(conf, newTimestamps);
+ backupManager.recordWALFiles(logFiles);
+ }
+ this.snapshotForFullBackup(backupContext);
+ } catch (BackupException e) {
+ // fail the overall backup and return
+ this.failBackup(backupContext, e, "Unexpected BackupException : ");
+ return null;
+ }
+
+ // update the faked progress currently for snapshot done
+ updateProgress(backupContext, backupManager, 10, 0);
+ // do snapshot copy
+ try {
+ this.snapshotCopy(backupContext);
+ } catch (Exception e) {
+ // fail the overall backup and return
+ this.failBackup(backupContext, e, "Unexpected BackupException : ");
+ return null;
+ }
+ // Updates incremental backup table set
+ backupManager.addIncrementalBackupTableSet(backupContext.getTables());
+
+ } else if (backupContext.getType() == BackupType.INCREMENTAL) {
+ LOG.debug("For incremental backup, current table set is "
+ + backupManager.getIncrementalBackupTableSet());
+ // do incremental table backup preparation
+ backupContext.setPhase(BackupPhase.PREPARE_INCREMENTAL);
+ // avoid action if has been cancelled
+ if (backupContext.isCancelled()) {
+ return null;
+ }
+ try {
+ IncrementalBackupManager incrBackupManager = new IncrementalBackupManager(backupManager);
+
+ newTimestamps = incrBackupManager.getIncrBackupLogFileList(backupContext);
+ } catch (Exception e) {
+ // fail the overall backup and return
+ this.failBackup(backupContext, e, "Unexpected Exception : ");
+ return null;
+ }
+ // update the faked progress currently for incremental preparation done
+ updateProgress(backupContext, backupManager, 10, 0);
+
+ // do incremental copy
+ try {
+ // copy out the table and region info files for each table
+ BackupUtil.copyTableRegionInfo(backupContext, conf);
+ this.incrementalCopy(backupContext);
+ // Save list of WAL files copied
+ backupManager.recordWALFiles(backupContext.getIncrBackupFileList());
+ } catch (Exception e) {
+ // fail the overall backup and return
+ this.failBackup(backupContext, e, "Unexpected exception doing incremental copy : ");
+ return null;
+ }
+ }
+
+ // set overall backup status: complete. Here we make sure to complete the backup. After this
+ // checkpoint, even if the cancel process kicks in, the backup will be allowed to finish
+ backupContext.setState(BackupState.COMPLETE);
+
+ if (backupContext.getType() == BackupType.INCREMENTAL) {
+ // Set the previousTimestampMap which is before this current log roll to the manifest.
+ HashMap<TableName, HashMap<String, Long>> previousTimestampMap =
+ backupManager.readLogTimestampMap();
+ backupContext.setIncrTimestampMap(previousTimestampMap);
+ }
+
+ // The table list in backupContext is good for both full backup and incremental backup.
+ // For incremental backup, it contains the incremental backup table set.
+ backupManager.writeRegionServerLogTimestamp(backupContext.getTables(), newTimestamps);
+
+ HashMap<TableName, HashMap<String, Long>> newTableSetTimestampMap =
+ backupManager.readLogTimestampMap();
+
+ Long newStartCode =
+ BackupUtility.getMinValue(BackupUtil.getRSLogTimestampMins(newTableSetTimestampMap));
+ backupManager.writeBackupStartCode(newStartCode);
+
+ // backup complete
+ this.completeBackup(backupContext);
+ } catch (Exception e) {
+ // even during completing backup (#completeBackup(backupContext)), exception may occur, or
+ // exception occur during other process, fail the backup finally
+ this.failBackup(backupContext, e, "Error caught during backup progress: ");
+ }
+ return null;
+ }
+
+ /**
+ * Begin the overall backup.
+ * @param backupContext backup context
+ * @throws IOException exception
+ */
+ private void beginBackup(BackupContext backupContext) throws IOException {
+ // set the start timestamp of the overall backup
+ long startTs = EnvironmentEdgeManager.currentTime();
+ backupContext.setStartTs(startTs);
+ // set overall backup status: ongoing
+ backupContext.setState(BackupState.RUNNING);
+ LOG.info("Backup " + backupContext.getBackupId() + " started at " + startTs + ".");
+
+ backupManager.updateBackupStatus(backupContext);
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Backup session " + backupContext.getBackupId() + " has been started.");
+ }
+ }
+
+ /**
+ * Snapshot for full table backup.
+ * @param backupContext backup context
+ * @throws IOException exception
+ */
+ private void snapshotForFullBackup(BackupContext backupContext) throws IOException {
+ LOG.info("HBase snapshot full backup for " + backupContext.getBackupId());
+
+ // avoid action if has been cancelled
+ if (backupContext.isCancelled()) {
+ return;
+ }
+
+ try (Admin admin = conn.getAdmin()) {
+ // we do HBase snapshot for tables in the table list one by one currently
+ for (TableName table : backupContext.getTables()) {
+ // avoid action if it has been cancelled
+ if (backupContext.isCancelled()) {
+ return;
+ }
+
+ HBaseProtos.SnapshotDescription backupSnapshot;
+
+ // wrap a SnapshotDescription for offline/online snapshot
+ backupSnapshot = this.wrapSnapshotDescription(table);
+
+ try {
+ // Kick off snapshot for backup
+ admin.snapshot(backupSnapshot);
+ } catch (Exception e) {
+ LOG.error("Snapshot failed to create " + getMessage(e));
+
+ // currently, we fail the overall backup if any table in the list failed, so throw the
+ // exception out for overall backup failing
+ throw new BackupException("Backup snapshot failed on table " + table, e);
+ }
+
+ // set the snapshot name in BackupStatus of this table, only after snapshot success.
+ backupContext.setSnapshotName(table, backupSnapshot.getName());
+ }
+ }
+ }
+
+ /**
+ * Fail the overall backup.
+ * @param backupContext backup context
+ * @param e exception
+ * @throws Exception exception
+ */
+ private void failBackup(BackupContext backupContext, Exception e, String msg) throws Exception {
+ LOG.error(msg + getMessage(e));
+ // If this is a cancel exception, then we've already cleaned.
+
+ if (this.backupContext.getState().equals(BackupState.CANCELLED)) {
+ return;
+ }
+
+ // set the failure timestamp of the overall backup
+ backupContext.setEndTs(EnvironmentEdgeManager.currentTime());
+
+ // set failure message
+ backupContext.setFailedMsg(e.getMessage());
+
+ // set overall backup status: failed
+ backupContext.setState(BackupState.FAILED);
+
+ // compose the backup failed data
+ String backupFailedData =
+ "BackupId=" + backupContext.getBackupId() + ",startts=" + backupContext.getStartTs()
+ + ",failedts=" + backupContext.getEndTs() + ",failedphase=" + backupContext.getPhase()
+ + ",failedmessage=" + backupContext.getFailedMsg();
+ LOG.error(backupFailedData);
+
+ backupManager.updateBackupStatus(backupContext);
+
+ // if full backup, then delete HBase snapshots if there already have snapshots taken
+ // and also clean up export snapshot log files if exist
+ if (backupContext.getType() == BackupType.FULL) {
+ this.deleteSnapshot(backupContext);
+ this.cleanupExportSnapshotLog();
+ } /*
+ * else { // support incremental backup code in future jira // TODO. See HBASE-14124 }
+ */
+
+ // clean up the uncompleted data at target directory if the ongoing backup has already entered
+ // the copy phase
+ // For incremental backup, DistCp logs will be cleaned with the targetDir.
+ this.cleanupTargetDir();
+
+ LOG.info("Backup " + backupContext.getBackupId() + " failed.");
+ }
+
+ /**
+ * Update the ongoing backup status with the new progress.
+ * @param backupContext backup context
+ *
+ * @param newProgress progress
+ * @param bytesCopied bytes copied
+ * @throws IOException exception
+ */
+ public static void updateProgress(BackupContext backupContext, BackupManager backupManager,
+ int newProgress, long bytesCopied) throws IOException {
+ // compose the new backup progress data, using fake number for now
+ String backupProgressData = newProgress + "%";
+
+ backupContext.setProgress(newProgress);
+ backupManager.updateBackupStatus(backupContext);
+ LOG.debug("Backup progress data \"" + backupProgressData
+ + "\" has been updated to hbase:backup for " + backupContext.getBackupId());
+ }
+
+ /**
+ * Complete the overall backup.
+ * @param backupContext backup context
+ * @throws Exception exception
+ */
+ private void completeBackup(BackupContext backupContext) throws Exception {
+
+ // set the complete timestamp of the overall backup
+ backupContext.setEndTs(EnvironmentEdgeManager.currentTime());
+ // set overall backup status: complete
+ backupContext.setState(BackupState.COMPLETE);
+ // add and store the manifest for the backup
+ this.addManifest(backupContext);
+
+ // after major steps done and manifest persisted, do convert if needed for incremental backup
+ /* in-fly convert code here, provided by future jira */
+ LOG.debug("in-fly convert code here, provided by future jira");
+
+ // compose the backup complete data
+ String backupCompleteData =
+ this.obtainBackupMetaDataStr(backupContext) + ",startts=" + backupContext.getStartTs()
+ + ",completets=" + backupContext.getEndTs() + ",bytescopied="
+ + backupContext.getTotalBytesCopied();
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Backup " + backupContext.getBackupId() + " finished: " + backupCompleteData);
+ }
+ backupManager.updateBackupStatus(backupContext);
+
+ // when full backup is done:
+ // - delete HBase snapshot
+ // - clean up directories with prefix "exportSnapshot-", which are generated when exporting
+ // snapshots
+ if (backupContext.getType() == BackupType.FULL) {
+ this.deleteSnapshot(backupContext);
+ this.cleanupExportSnapshotLog();
+ } else if (backupContext.getType() == BackupType.INCREMENTAL) {
+ this.cleanupDistCpLog();
+ }
+
+ LOG.info("Backup " + backupContext.getBackupId() + " completed.");
+ }
+
+ /**
+ * Get backup request meta data dir as string.
+ * @param backupContext backup context
+ * @return meta data dir
+ */
+ private String obtainBackupMetaDataStr(BackupContext backupContext) {
+ StringBuffer sb = new StringBuffer();
+ sb.append("type=" + backupContext.getType() + ",tablelist=");
+ for (TableName table : backupContext.getTables()) {
+ sb.append(table + ";");
+ }
+ if (sb.lastIndexOf(";") > 0) {
+ sb.delete(sb.lastIndexOf(";"), sb.lastIndexOf(";") + 1);
+ }
+ sb.append(",targetRootDir=" + backupContext.getTargetRootDir());
+
+ return sb.toString();
+ }
+
+ /**
+ * Do snapshot copy.
+ * @param backupContext backup context
+ * @throws Exception exception
+ */
+ private void snapshotCopy(BackupContext backupContext) throws Exception {
+ LOG.info("Snapshot copy is starting.");
+
+ // set overall backup phase: snapshot_copy
+ backupContext.setPhase(BackupPhase.SNAPSHOTCOPY);
+
+ // avoid action if has been cancelled
+ if (backupContext.isCancelled()) {
+ return;
+ }
+
+ // call ExportSnapshot to copy files based on hbase snapshot for backup
+ // ExportSnapshot only supports a single snapshot export, so we need to loop for the multiple-table case
+ BackupCopyService copyService = BackupRestoreFactory.getBackupCopyService(conf);
+
+ // number of snapshots matches number of tables
+ float numOfSnapshots = backupContext.getSnapshotNames().size();
+
+ LOG.debug("There are " + (int) numOfSnapshots + " snapshots to be copied.");
+
+ for (TableName table : backupContext.getTables()) {
+ // Currently we simply set the sub copy tasks by counting the table snapshot number, we can
+ // calculate the real files' size for the percentage in the future.
+ // TODO this below
+ // backupCopier.setSubTaskPercntgInWholeTask(1f / numOfSnapshots);
+ int res = 0;
+ String[] args = new String[4];
+ args[0] = "-snapshot";
+ args[1] = backupContext.getSnapshotName(table);
+ args[2] = "-copy-to";
+ args[3] = backupContext.getBackupStatus(table).getTargetDir();
+
+ LOG.debug("Copy snapshot " + args[1] + " to " + args[3]);
+ res = copyService.copy(backupContext, backupManager, conf, BackupCopyService.Type.FULL, args);
+ // if one snapshot export failed, do not continue for the remaining snapshots
+ if (res != 0) {
+ LOG.error("Exporting Snapshot " + args[1] + " failed with return code: " + res + ".");
+
+ throw new IOException("Failed of exporting snapshot " + args[1] + " to " + args[3]
+ + " with reason code " + res);
+ }
+
+ LOG.info("Snapshot copy " + args[1] + " finished.");
+ }
+ }
+
+ /**
+ * Wrap a SnapshotDescription for a target table.
+ * @param tableName the table to snapshot
+ * @return a SnapshotDescription especially for backup.
+ */
+ private SnapshotDescription wrapSnapshotDescription(TableName tableName) {
+ // Mock a SnapshotDescription from backupContext to call SnapshotManager function,
+ // Name it in the format "snapshot__"
+ HBaseProtos.SnapshotDescription.Builder builder = HBaseProtos.SnapshotDescription.newBuilder();
+ builder.setTable(tableName.getNameAsString());
+ builder.setName("snapshot_" + Long.toString(EnvironmentEdgeManager.currentTime()) + "_"
+ + tableName.getNamespaceAsString() + "_" + tableName.getQualifierAsString());
+ HBaseProtos.SnapshotDescription backupSnapshot = builder.build();
+
+ LOG.debug("Wrapped a SnapshotDescription " + backupSnapshot.getName()
+ + " from backupContext to request snapshot for backup.");
+
+ return backupSnapshot;
+ }
+
+ /**
+ * Delete HBase snapshot for backup.
+ * @param backupCtx backup context
+ * @throws Exception exception
+ */
+ private void deleteSnapshot(BackupContext backupCtx) throws IOException {
+
+ LOG.debug("Trying to delete snapshot for full backup.");
+ Connection conn = null;
+ Admin admin = null;
+ try {
+ conn = ConnectionFactory.createConnection(conf);
+ admin = conn.getAdmin();
+ for (String snapshotName : backupCtx.getSnapshotNames()) {
+ if (snapshotName == null) {
+ continue;
+ }
+ LOG.debug("Trying to delete snapshot: " + snapshotName);
+ admin.deleteSnapshot(snapshotName);
+ LOG.debug("Deleting the snapshot " + snapshotName + " for backup "
+ + backupCtx.getBackupId() + " succeeded.");
+ }
+ } finally {
+ if (admin != null) {
+ admin.close();
+ }
+ if (conn != null) {
+ conn.close();
+ }
+ }
+ }
+
+ /**
+ * Clean up directories with prefix "exportSnapshot-", which are generated when exporting
+ * snapshots.
+ * @throws IOException exception
+ */
+ private void cleanupExportSnapshotLog() throws IOException {
+ FileSystem fs = FSUtils.getCurrentFileSystem(conf);
+ Path stagingDir =
+ new Path(conf.get(BackupRestoreConstants.CONF_STAGING_ROOT, fs.getWorkingDirectory()
+ .toString()));
+ FileStatus[] files = FSUtils.listStatus(fs, stagingDir);
+ if (files == null) {
+ return;
+ }
+ for (FileStatus file : files) {
+ if (file.getPath().getName().startsWith("exportSnapshot-")) {
+ LOG.debug("Delete log files of exporting snapshot: " + file.getPath().getName());
+ if (!FSUtils.delete(fs, file.getPath(), true)) {
+ LOG.warn("Can not delete " + file.getPath());
+ }
+ }
+ }
+ }
+
+ /**
+ * Clean up directories with prefix "_distcp_logs-", which are generated when DistCp copying
+ * hlogs.
+ * @throws IOException exception
+ */
+ private void cleanupDistCpLog() throws IOException {
+ Path rootPath = new Path(backupContext.getHLogTargetDir()).getParent();
+ FileSystem fs = FileSystem.get(rootPath.toUri(), conf);
+ FileStatus[] files = FSUtils.listStatus(fs, rootPath);
+ if (files == null) {
+ return;
+ }
+ for (FileStatus file : files) {
+ if (file.getPath().getName().startsWith("_distcp_logs")) {
+ LOG.debug("Delete log files of DistCp: " + file.getPath().getName());
+ FSUtils.delete(fs, file.getPath(), true);
+ }
+ }
+ }
+
+ /**
+ * Clean up the uncompleted data at target directory if the ongoing backup has already entered the
+ * copy phase.
+ */
+ private void cleanupTargetDir() {
+ try {
+ // clean up the uncompleted data at target directory if the ongoing backup has already entered
+ // the copy phase
+ LOG.debug("Trying to cleanup up target dir. Current backup phase: "
+ + backupContext.getPhase());
+ if (backupContext.getPhase().equals(BackupPhase.SNAPSHOTCOPY)
+ || backupContext.getPhase().equals(BackupPhase.INCREMENTAL_COPY)
+ || backupContext.getPhase().equals(BackupPhase.STORE_MANIFEST)) {
+ FileSystem outputFs =
+ FileSystem.get(new Path(backupContext.getTargetRootDir()).toUri(), conf);
+
+ // now treat one backup as a transaction, clean up data that has been partially copied at
+ // table level
+ for (TableName table : backupContext.getTables()) {
+ Path targetDirPath =
+ new Path(HBackupFileSystem.getTableBackupDir(backupContext.getTargetRootDir(),
+ backupContext.getBackupId(), table));
+ if (outputFs.delete(targetDirPath, true)) {
+ LOG.info("Cleaning up uncompleted backup data at " + targetDirPath.toString()
+ + " done.");
+ } else {
+ LOG.info("No data has been copied to " + targetDirPath.toString() + ".");
+ }
+
+ Path tableDir = targetDirPath.getParent();
+ FileStatus[] backups = FSUtils.listStatus(outputFs, tableDir);
+ if (backups == null || backups.length == 0) {
+ outputFs.delete(tableDir, true);
+ LOG.debug(tableDir.toString() + " is empty, remove it.");
+ }
+ }
+ }
+
+ } catch (IOException e1) {
+ LOG.error("Cleaning up uncompleted backup data of " + backupContext.getBackupId() + " at "
+ + backupContext.getTargetRootDir() + " failed due to " + e1.getMessage() + ".");
+ }
+ }
+
+ /**
+ * Add manifest for the current backup. The manifest is stored
+ * within the table backup directory.
+ * @param backupContext The current backup context
+ * @throws IOException exception
+ * @throws BackupException exception
+ */
+ private void addManifest(BackupContext backupContext) throws IOException, BackupException {
+ // set the overall backup phase : store manifest
+ backupContext.setPhase(BackupPhase.STORE_MANIFEST);
+
+    // avoid action if the backup has been cancelled
+ if (backupContext.isCancelled()) {
+ return;
+ }
+
+ BackupManifest manifest;
+
+ // Since we have each table's backup in its own directory structure,
+ // we'll store its manifest with the table directory.
+ for (TableName table : backupContext.getTables()) {
+ manifest = new BackupManifest(backupContext, table);
+      ArrayList<BackupImage> ancestors = this.backupManager.getAncestors(backupContext, table);
+ for (BackupImage image : ancestors) {
+ manifest.addDependentImage(image);
+ }
+
+ if (backupContext.getType() == BackupType.INCREMENTAL) {
+ // We'll store the log timestamps for this table only in its manifest.
+        HashMap<TableName, HashMap<String, Long>> tableTimestampMap =
+            new HashMap<TableName, HashMap<String, Long>>();
+ tableTimestampMap.put(table, backupContext.getIncrTimestampMap().get(table));
+ manifest.setIncrTimestampMap(tableTimestampMap);
+ }
+ manifest.store(conf);
+ }
+
+    // For incremental backup, we store an overall manifest in
+    // <backup-root-dir>/WALs/<backup-id>
+    // This is used when creating the next incremental backup
+ if (backupContext.getType() == BackupType.INCREMENTAL) {
+ manifest = new BackupManifest(backupContext);
+ // set the table region server start and end timestamps for incremental backup
+ manifest.setIncrTimestampMap(backupContext.getIncrTimestampMap());
+      ArrayList<BackupImage> ancestors = this.backupManager.getAncestors(backupContext);
+ for (BackupImage image : ancestors) {
+ manifest.addDependentImage(image);
+ }
+ manifest.store(conf);
+ }
+ }
+
+ /**
+ * Do incremental copy.
+   * @param backupContext backup context
+   * @throws Exception if the incremental copy fails
+   */
+ private void incrementalCopy(BackupContext backupContext) throws Exception {
+
+ LOG.info("Incremental copy is starting.");
+
+ // set overall backup phase: incremental_copy
+ backupContext.setPhase(BackupPhase.INCREMENTAL_COPY);
+
+    // avoid action if the backup has been cancelled
+ if (backupContext.isCancelled()) {
+ return;
+ }
+
+    // get incremental backup file list and prepare params for DistCp
+    List<String> incrBackupFileList = backupContext.getIncrBackupFileList();
+ // filter missing files out (they have been copied by previous backups)
+ incrBackupFileList = filterMissingFiles(incrBackupFileList);
+ String[] strArr = incrBackupFileList.toArray(new String[incrBackupFileList.size() + 1]);
+ strArr[strArr.length - 1] = backupContext.getHLogTargetDir();
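+    // The source WAL paths come first; the last array element is the DistCp target directory.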
+
+ BackupCopyService copyService = BackupRestoreFactory.getBackupCopyService(conf);
+ int res = copyService.copy(backupContext, backupManager, conf,
+ BackupCopyService.Type.INCREMENTAL, strArr);
+
+ if (res != 0) {
+ LOG.error("Copy incremental log files failed with return code: " + res + ".");
+ throw new IOException("Failed of Hadoop Distributed Copy from " + incrBackupFileList + " to "
+ + backupContext.getHLogTargetDir());
+ }
+ LOG.info("Incremental copy from " + incrBackupFileList + " to "
+ + backupContext.getHLogTargetDir() + " finished.");
+
+ }
+
+  private List<String> filterMissingFiles(List<String> incrBackupFileList) throws IOException {
+    FileSystem fs = FileSystem.get(conf);
+    List<String> list = new ArrayList<String>();
+    for (String file : incrBackupFileList) {
+      if (fs.exists(new Path(file))) {
+        list.add(file);
+      } else {
+        LOG.warn("Can't find file: " + file);
+ }
+ }
+ return list;
+ }
+
+ private String getMessage(Exception e) {
+ String msg = e.getMessage();
+ if (msg == null || msg.equals("")) {
+ msg = e.getClass().getName();
+ }
+ return msg;
+ }
+}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManager.java
new file mode 100644
index 0000000..a4b0a0a
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManager.java
@@ -0,0 +1,512 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.backup.impl;
+
+import com.google.common.util.concurrent.ThreadFactoryBuilder;
+
+import java.io.Closeable;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Set;
+import java.util.concurrent.CancellationException;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Future;
+import java.util.concurrent.LinkedBlockingQueue;
+import java.util.concurrent.ThreadPoolExecutor;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.backup.BackupType;
+import org.apache.hadoop.hbase.backup.HBackupFileSystem;
+import org.apache.hadoop.hbase.backup.impl.BackupHandler.BackupState;
+import org.apache.hadoop.hbase.backup.impl.BackupManifest.BackupImage;
+import org.apache.hadoop.hbase.backup.impl.BackupUtil.BackupCompleteData;
+import org.apache.hadoop.hbase.backup.master.BackupLogCleaner;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
+
+/**
+ * Handles backup requests on the server side, and creates backup context records in hbase:backup
+ * to keep track of backups. The timestamps kept in the hbase:backup table will be used for future
+ * incremental backups. Creates BackupContext instances and dispatches requests.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public class BackupManager implements Closeable {
+ private static final Log LOG = LogFactory.getLog(BackupManager.class);
+
+ private Configuration conf = null;
+ private BackupContext backupContext = null;
+
+ private ExecutorService pool = null;
+
+ private boolean backupComplete = false;
+
+ private BackupSystemTable systemTable;
+
+ private final Connection conn;
+
+ /**
+ * Backup manager constructor.
+ * @param conf configuration
+ * @throws IOException exception
+ */
+ public BackupManager(Configuration conf) throws IOException {
+ if (!conf.getBoolean(HConstants.BACKUP_ENABLE_KEY, HConstants.BACKUP_ENABLE_DEFAULT)) {
+ throw new BackupException("HBase backup is not enabled. Check your " +
+ HConstants.BACKUP_ENABLE_KEY + " setting.");
+ }
+ this.conf = conf;
+ this.conn = ConnectionFactory.createConnection(conf); // TODO: get Connection from elsewhere?
+ this.systemTable = new BackupSystemTable(conn);
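+    // Register a shutdown hook so that an abrupt JVM exit cancels any in-flight backup (see ExitHandler).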
+ Runtime.getRuntime().addShutdownHook(new ExitHandler());
+ }
+
+ /**
+ * This method modifies the master's configuration in order to inject backup-related features
+ * @param conf configuration
+ */
+ public static void decorateMasterConfiguration(Configuration conf) {
+ if (!isBackupEnabled(conf)) {
+ return;
+ }
+ String plugins = conf.get(HConstants.HBASE_MASTER_LOGCLEANER_PLUGINS);
+ String cleanerClass = BackupLogCleaner.class.getCanonicalName();
+ if (!plugins.contains(cleanerClass)) {
+ conf.set(HConstants.HBASE_MASTER_LOGCLEANER_PLUGINS, plugins + "," + cleanerClass);
+ }
+ if (LOG.isTraceEnabled()) {
+ LOG.trace("Added log cleaner: " + cleanerClass);
+ }
+ }
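+
+  // Hypothetical usage sketch (assumed caller, not part of this patch): the master can invoke
+  //   BackupManager.decorateMasterConfiguration(conf);
+  // at startup so that BackupLogCleaner is appended to the log cleaner plugin chain.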
+
+ private static boolean isBackupEnabled(Configuration conf) {
+ return conf.getBoolean(HConstants.BACKUP_ENABLE_KEY, HConstants.BACKUP_ENABLE_DEFAULT);
+ }
+
+ // TODO: remove this on the server side
+ private class ExitHandler extends Thread {
+ public ExitHandler() {
+ super("Backup Manager Exit Handler");
+ }
+
+ @Override
+ public void run() {
+ if (backupContext != null && !backupComplete) {
+
+ // program exit and backup is not complete, then mark as cancelled to avoid submitted backup
+ // handler's taking further action
+ backupContext.setCancelled(true);
+
+ LOG.debug("Backup is cancelled due to force program exiting.");
+ try {
+ cancelBackup(backupContext.getBackupId());
+ } catch (Exception e) {
+ String msg = e.getMessage();
+ if (msg == null || msg.equals("")) {
+ msg = e.getClass().getName();
+ }
+ LOG.error("Failed to cancel backup " + backupContext.getBackupId() + " due to " + msg);
+ }
+ }
+ close();
+ }
+ }
+
+ /**
+ * Get configuration
+ * @return configuration
+ */
+ Configuration getConf() {
+ return conf;
+ }
+
+ /**
+ * Cancel the ongoing backup via backup id.
+ * @param backupId The id of the ongoing backup to be cancelled
+ * @throws Exception exception
+ */
+ private void cancelBackup(String backupId) throws Exception {
+ // TODO: will be implemented in Phase 2: HBASE-14125
+ LOG.debug("Try to cancel the backup " + backupId + ". the feature is NOT implemented yet");
+
+ }
+
+ /**
+ * Stop all the work of backup.
+ */
+ @Override
+ public void close() {
+    // currently, we shut down all ongoing backup handlers immediately; we may need to do
+    // something like recording the failed list somewhere later
+ if (this.pool != null) {
+ this.pool.shutdownNow();
+ }
+ if (systemTable != null) {
+ try{
+ systemTable.close();
+ } catch(Exception e){
+ LOG.error(e);
+ }
+ }
+ if (conn != null) {
+ try {
+ conn.close();
+ } catch (IOException e) {
+ LOG.error(e);
+ }
+ }
+ }
+
+ /**
+ * Create a BackupContext based on input backup request.
+ * @param backupId backup id
+ * @param type type
+ * @param tablelist table list
+ * @param targetRootDir root dir
+ * @return BackupContext context
+ * @throws BackupException exception
+ */
+  protected BackupContext createBackupContext(String backupId, BackupType type,
+      List<TableName> tableList, String targetRootDir) throws BackupException {
+
+ if (targetRootDir == null) {
+ throw new BackupException("Wrong backup request parameter: target backup root directory");
+ }
+
+ if (type == BackupType.FULL && tableList == null) {
+      // A null table list for a full backup means backing up all tables. Fill the table list
+      // with all user tables from meta. If no table is available, throw an exception.
+
+ HTableDescriptor[] htds = null;
+ try (Admin hbadmin = conn.getAdmin()) {
+ htds = hbadmin.listTables();
+ } catch (Exception e) {
+ throw new BackupException(e);
+ }
+
+ if (htds == null) {
+ throw new BackupException("No table exists for full backup of all tables.");
+ } else {
+ tableList = new ArrayList<>();
+ for (HTableDescriptor hTableDescriptor : htds) {
+ tableList.add(hTableDescriptor.getTableName());
+ }
+
+ LOG.info("Full backup all the tables available in the cluster: " + tableList);
+ }
+ }
+
+ // there are one or more tables in the table list
+ return new BackupContext(backupId, type, tableList.toArray(new TableName[tableList.size()]),
+ targetRootDir);
+ }
+
+ /**
+   * Check if there is any ongoing backup. Currently, we only rely on the status recorded in
+   * hbase:backup. We need to consider handling the case of orphan records in the future;
+   * otherwise, all incoming requests will fail.
+ * @return the ongoing backup id if on going backup exists, otherwise null
+ * @throws IOException exception
+ */
+ private String getOngoingBackupId() throws IOException {
+
+    ArrayList<BackupContext> sessions = systemTable.getBackupContexts(BackupState.RUNNING);
+ if (sessions.size() == 0) {
+ return null;
+ }
+ return sessions.get(0).getBackupId();
+ }
+
+ /**
+ * Start the backup manager service.
+ * @throws IOException exception
+ */
+ public void initialize() throws IOException {
+ String ongoingBackupId = this.getOngoingBackupId();
+ if (ongoingBackupId != null) {
+ LOG.info("There is a ongoing backup " + ongoingBackupId
+ + ". Can not launch new backup until no ongoing backup remains.");
+ throw new BackupException("There is ongoing backup.");
+ }
+
+ // Initialize thread pools
+ int nrThreads = this.conf.getInt("hbase.backup.threads.max", 1);
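+    // Defaults to a single backup handler thread unless "hbase.backup.threads.max" is set higher.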
+ ThreadFactoryBuilder builder = new ThreadFactoryBuilder();
+ builder.setNameFormat("BackupHandler-%1$d");
+ this.pool =
+ new ThreadPoolExecutor(nrThreads, nrThreads, 60, TimeUnit.SECONDS,
+            new LinkedBlockingQueue<Runnable>(), builder.build());
+ ((ThreadPoolExecutor) pool).allowCoreThreadTimeOut(true);
+ }
+
+ /**
+ * Dispatch and handle a backup request.
+ * @param backupContext backup context
+ * @throws BackupException exception
+ */
+ public void dispatchRequest(BackupContext backupContext) throws BackupException {
+
+ this.backupContext = backupContext;
+
+ LOG.info("Got a backup request: " + "Type: " + backupContext.getType() + "; Tables: "
+ + backupContext.getTableNames() + "; TargetRootDir: " + backupContext.getTargetRootDir());
+
+    // dispatch the request to a backup handler and put it in the handler map
+
+ BackupHandler handler = new BackupHandler(this.backupContext, this, conf, this.conn);
+    Future<?> future = this.pool.submit(handler);
+ // wait for the execution to complete
+ try {
+ future.get();
+ } catch (InterruptedException e) {
+ throw new BackupException(e);
+ } catch (CancellationException e) {
+ throw new BackupException(e);
+ } catch (ExecutionException e) {
+ throw new BackupException(e);
+ }
+
+ // mark the backup complete for exit handler's processing
+ backupComplete = true;
+
+ LOG.info("Backup request " + backupContext.getBackupId() + " has been executed.");
+ }
+
+ /**
+ * Get direct ancestors of the current backup.
+ * @param backupCtx The backup context for the current backup
+ * @return The ancestors for the current backup
+ * @throws IOException exception
+ * @throws BackupException exception
+ */
+  protected ArrayList<BackupImage> getAncestors(BackupContext backupCtx) throws IOException,
+      BackupException {
+    LOG.debug("Getting the direct ancestors of the current backup ...");
+
+    ArrayList<BackupImage> ancestors = new ArrayList<BackupImage>();
+
+ // full backup does not have ancestor
+ if (backupCtx.getType() == BackupType.FULL) {
+ LOG.debug("Current backup is a full backup, no direct ancestor for it.");
+ return ancestors;
+ }
+
+ // get all backup history list in descending order
+
+    ArrayList<BackupCompleteData> allHistoryList = getBackupHistory();
+ for (BackupCompleteData backup : allHistoryList) {
+ BackupImage image =
+ new BackupImage(backup.getBackupToken(), BackupType.valueOf(backup.getType()),
+ backup.getBackupRootPath(),
+ backup.getTableList(), Long.parseLong(backup.getStartTime()), Long.parseLong(backup
+ .getEndTime()));
+ // add the full backup image as an ancestor until the last incremental backup
+ if (backup.getType().equals(BackupType.FULL.toString())) {
+ // check the backup image coverage, if previous image could be covered by the newer ones,
+ // then no need to add
+ if (!BackupManifest.canCoverImage(ancestors, image)) {
+ ancestors.add(image);
+ }
+ } else {
+ // found last incremental backup, if previously added full backup ancestor images can cover
+ // it, then this incremental ancestor is not the dependent of the current incremental
+ // backup, that is to say, this is the backup scope boundary of current table set.
+ // Otherwise, this incremental backup ancestor is the dependent ancestor of the ongoing
+ // incremental backup
+ if (BackupManifest.canCoverImage(ancestors, image)) {
+ LOG.debug("Met the backup boundary of the current table set. "
+ + "The root full backup images for the current backup scope:");
+ for (BackupImage image1 : ancestors) {
+ LOG.debug(" BackupId: " + image1.getBackupId() + ", Backup directory: "
+ + image1.getRootDir());
+ }
+ } else {
+ Path logBackupPath =
+ HBackupFileSystem.getLogBackupPath(backup.getBackupRootPath(),
+ backup.getBackupToken());
+ LOG.debug("Current backup has an incremental backup ancestor, "
+ + "touching its image manifest in " + logBackupPath.toString()
+ + " to construct the dependency.");
+
+ BackupManifest lastIncrImgManifest = new BackupManifest(conf, logBackupPath);
+ BackupImage lastIncrImage = lastIncrImgManifest.getBackupImage();
+ ancestors.add(lastIncrImage);
+
+ LOG.debug("Last dependent incremental backup image information:");
+ LOG.debug(" Token: " + lastIncrImage.getBackupId());
+ LOG.debug(" Backup directory: " + lastIncrImage.getRootDir());
+ }
+ }
+ }
+ LOG.debug("Got " + ancestors.size() + " ancestors for the current backup.");
+ return ancestors;
+ }
+
+ /**
+ * Get the direct ancestors of this backup for one table involved.
+ * @param backupContext backup context
+ * @param table table
+ * @return backupImages on the dependency list
+ * @throws BackupException exception
+ * @throws IOException exception
+ */
+  protected ArrayList<BackupImage> getAncestors(BackupContext backupContext, TableName table)
+      throws BackupException, IOException {
+    ArrayList<BackupImage> ancestors = getAncestors(backupContext);
+    ArrayList<BackupImage> tableAncestors = new ArrayList<BackupImage>();
+ for (BackupImage image : ancestors) {
+ if (image.hasTable(table)) {
+ tableAncestors.add(image);
+ if (image.getType() == BackupType.FULL) {
+ break;
+ }
+ }
+ }
+ return tableAncestors;
+ }
+
+ /*
+ * hbase:backup operations
+ */
+
+ /**
+ * Updates status (state) of a backup session in a persistent store
+ * @param context context
+ * @throws IOException exception
+ */
+ public void updateBackupStatus(BackupContext context) throws IOException {
+ systemTable.updateBackupStatus(context);
+ }
+
+ /**
+   * Read the last backup start code (timestamp) of the last successful backup. Will return null
+ * if there is no startcode stored in hbase:backup or the value is of length 0. These two
+ * cases indicate there is no successful backup completed so far.
+ * @return the timestamp of a last successful backup
+ * @throws IOException exception
+ */
+ public String readBackupStartCode() throws IOException {
+ return systemTable.readBackupStartCode();
+ }
+
+ /**
+ * Write the start code (timestamp) to hbase:backup. If passed in null, then write 0 byte.
+ * @param startCode start code
+ * @throws IOException exception
+ */
+ public void writeBackupStartCode(Long startCode) throws IOException {
+ systemTable.writeBackupStartCode(startCode);
+ }
+
+ /**
+ * Get the RS log information after the last log roll from hbase:backup.
+ * @return RS log info
+ * @throws IOException exception
+ */
+  public HashMap<String, Long> readRegionServerLastLogRollResult() throws IOException {
+ return systemTable.readRegionServerLastLogRollResult();
+ }
+
+ /**
+ * Get all completed backup information (in desc order by time)
+ * @return history info of BackupCompleteData
+ * @throws IOException exception
+ */
+  public ArrayList<BackupCompleteData> getBackupHistory() throws IOException {
+ return systemTable.getBackupHistory();
+ }
+
+ /**
+ * Write the current timestamps for each regionserver to hbase:backup after a successful full or
+ * incremental backup. Each table may have a different set of log timestamps. The saved timestamp
+ * is of the last log file that was backed up already.
+ * @param tables tables
+ * @throws IOException exception
+ */
+  public void writeRegionServerLogTimestamp(Set<TableName> tables,
+      HashMap<String, Long> newTimestamps) throws IOException {
+ systemTable.writeRegionServerLogTimestamp(tables, newTimestamps);
+ }
+
+ /**
+ * Read the timestamp for each region server log after the last successful backup. Each table has
+ * its own set of the timestamps.
+   * @return the timestamp map; key: tableName, value: map of RegionServer to PreviousTimeStamp
+ * @throws IOException exception
+ */
+  public HashMap<TableName, HashMap<String, Long>> readLogTimestampMap() throws IOException {
+ return systemTable.readLogTimestampMap();
+ }
+
+ /**
+ * Return the current tables covered by incremental backup.
+ * @return set of tableNames
+ * @throws IOException exception
+ */
+  public Set<TableName> getIncrementalBackupTableSet() throws IOException {
+ return systemTable.getIncrementalBackupTableSet();
+ }
+
+ /**
+ * Adds set of tables to overall incremental backup table set
+ * @param tables tables
+ * @throws IOException exception
+ */
+  public void addIncrementalBackupTableSet(Set<TableName> tables) throws IOException {
+ systemTable.addIncrementalBackupTableSet(tables);
+ }
+
+ /**
+ * Saves list of WAL files after incremental backup operation. These files will be stored until
+ * TTL expiration and are used by Backup Log Cleaner plugin to determine which WAL files can be
+ * safely purged.
+ */
+  public void recordWALFiles(List<String> files) throws IOException {
+ systemTable.addWALFiles(files, backupContext.getBackupId());
+ }
+
+ /**
+ * Get WAL files iterator
+ * @return WAL files iterator from hbase:backup
+ * @throws IOException
+ */
+ public Iterator getWALFilesFromBackupSystem() throws IOException {
+ return systemTable.getWALFilesIterator();
+ }
+
+ public Connection getConnection() {
+ return conn;
+ }
+}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManifest.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManifest.java
new file mode 100644
index 0000000..6264fc5
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManifest.java
@@ -0,0 +1,762 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.backup.impl;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.TreeMap;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.backup.BackupType;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+import org.apache.hadoop.hbase.exceptions.DeserializationException;
+import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.protobuf.generated.BackupProtos;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
+import org.apache.hadoop.hbase.util.FSUtils;
+
+import com.google.protobuf.InvalidProtocolBufferException;
+
+
+/**
+ * Backup manifest contains all the metadata of a backup image. The manifest info is bundled as a
+ * manifest file together with the data, so that each backup image contains all the info needed
+ * for restore.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public class BackupManifest {
+
+ private static final Log LOG = LogFactory.getLog(BackupManifest.class);
+
+ // manifest file name
+ public static final String MANIFEST_FILE_NAME = ".backup.manifest";
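+  // A manifest with this name is written into each table backup directory (and, for incremental
+  // backups, into the WAL backup directory) when store() is called.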
+
+ // manifest file version, current is 1.0
+ public static final String MANIFEST_VERSION = "1.0";
+
+  // backup image: the dependency graph is made up of a series of backup images
+
+  public static class BackupImage implements Comparable<BackupImage> {
+
+ private String backupId;
+ private BackupType type;
+ private String rootDir;
+    private List<TableName> tableList;
+ private long startTs;
+ private long completeTs;
+    private ArrayList<BackupImage> ancestors;
+
+ public BackupImage() {
+ super();
+ }
+
+ public BackupImage(String backupId, BackupType type, String rootDir,
+        List<TableName> tableList, long startTs, long completeTs) {
+ this.backupId = backupId;
+ this.type = type;
+ this.rootDir = rootDir;
+ this.tableList = tableList;
+ this.startTs = startTs;
+ this.completeTs = completeTs;
+ }
+
+ static BackupImage fromProto(BackupProtos.BackupImage im) {
+ String backupId = im.getBackupId();
+ String rootDir = im.getRootDir();
+ long startTs = im.getStartTs();
+ long completeTs = im.getCompleteTs();
+      List<HBaseProtos.TableName> tableListList = im.getTableListList();
+      List<TableName> tableList = new ArrayList<TableName>();
+ for(HBaseProtos.TableName tn : tableListList) {
+ tableList.add(ProtobufUtil.toTableName(tn));
+ }
+ BackupType type =
+ im.getBackupType() == BackupProtos.BackupType.FULL ? BackupType.FULL:
+ BackupType.INCREMENTAL;
+
+ return new BackupImage(backupId, type, rootDir, tableList, startTs, completeTs);
+ }
+
+ BackupProtos.BackupImage toProto() {
+ BackupProtos.BackupImage.Builder builder = BackupProtos.BackupImage.newBuilder();
+ builder.setBackupId(backupId);
+ builder.setCompleteTs(completeTs);
+ builder.setStartTs(startTs);
+ builder.setRootDir(rootDir);
+ if (type == BackupType.FULL) {
+ builder.setBackupType(BackupProtos.BackupType.FULL);
+ } else{
+ builder.setBackupType(BackupProtos.BackupType.INCREMENTAL);
+ }
+
+ for (TableName name: tableList) {
+ builder.addTableList(ProtobufUtil.toProtoTableName(name));
+ }
+
+ if (ancestors != null){
+ for (BackupImage im: ancestors){
+ builder.addAncestors(im.toProto());
+ }
+ }
+
+ return builder.build();
+ }
+
+ public String getBackupId() {
+ return backupId;
+ }
+
+ public void setBackupId(String backupId) {
+ this.backupId = backupId;
+ }
+
+ public BackupType getType() {
+ return type;
+ }
+
+ public void setType(BackupType type) {
+ this.type = type;
+ }
+
+ public String getRootDir() {
+ return rootDir;
+ }
+
+ public void setRootDir(String rootDir) {
+ this.rootDir = rootDir;
+ }
+
+    public List<TableName> getTableNames() {
+ return tableList;
+ }
+
+    public void setTableList(List<TableName> tableList) {
+ this.tableList = tableList;
+ }
+
+ public long getStartTs() {
+ return startTs;
+ }
+
+ public void setStartTs(long startTs) {
+ this.startTs = startTs;
+ }
+
+ public long getCompleteTs() {
+ return completeTs;
+ }
+
+ public void setCompleteTs(long completeTs) {
+ this.completeTs = completeTs;
+ }
+
+    public ArrayList<BackupImage> getAncestors() {
+      if (this.ancestors == null) {
+        this.ancestors = new ArrayList<BackupImage>();
+ }
+ return this.ancestors;
+ }
+
+ public void addAncestor(BackupImage backupImage) {
+ this.getAncestors().add(backupImage);
+ }
+
+ public boolean hasAncestor(String token) {
+ for (BackupImage image : this.getAncestors()) {
+ if (image.getBackupId().equals(token)) {
+ return true;
+ }
+ }
+ return false;
+ }
+
+ public boolean hasTable(TableName table) {
+ for (TableName t : tableList) {
+        if (t.equals(table)) {
+ return true;
+ }
+ }
+ return false;
+ }
+
+ @Override
+ public int compareTo(BackupImage other) {
+ String thisBackupId = this.getBackupId();
+ String otherBackupId = other.getBackupId();
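+      // Backup ids end with a "_<timestamp>" suffix, so comparing the trailing timestamps orders
+      // images chronologically.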
+ Long thisTS = new Long(thisBackupId.substring(thisBackupId.lastIndexOf("_") + 1));
+ Long otherTS = new Long(otherBackupId.substring(otherBackupId.lastIndexOf("_") + 1));
+ return thisTS.compareTo(otherTS);
+ }
+ }
+
+ // manifest version
+ private String version = MANIFEST_VERSION;
+
+ // hadoop hbase configuration
+ protected Configuration config = null;
+
+ // backup root directory
+ private String rootDir = null;
+
+ // backup image directory
+ private String tableBackupDir = null;
+
+ // backup log directory if this is an incremental backup
+ private String logBackupDir = null;
+
+ // backup token
+ private String backupId;
+
+ // backup type, full or incremental
+ private BackupType type;
+
+ // the table list for the backup
+  private ArrayList<TableName> tableList;
+
+ // actual start timestamp of the backup process
+ private long startTs;
+
+ // actual complete timestamp of the backup process
+ private long completeTs;
+
+ // total bytes for table backup image
+ private long totalBytes;
+
+ // total bytes for the backed-up logs for incremental backup
+ private long logBytes;
+
+  // the region server timestamps for tables:
+  // <table, <rs, timestamp>>
+  private Map<TableName, HashMap<String, Long>> incrTimeRanges;
+
+ // dependency of this backup, including all the dependent images to do PIT recovery
+  private Map<String, BackupImage> dependency;
+
+ // the indicator of the image compaction
+  private boolean isCompacted = false;
+
+  /**
+   * Construct manifest for an ongoing backup.
+ * @param backupCtx The ongoing backup context
+ */
+ public BackupManifest(BackupContext backupCtx) {
+ this.backupId = backupCtx.getBackupId();
+ this.type = backupCtx.getType();
+ this.rootDir = backupCtx.getTargetRootDir();
+ if (this.type == BackupType.INCREMENTAL) {
+ this.logBackupDir = backupCtx.getHLogTargetDir();
+ this.logBytes = backupCtx.getTotalBytesCopied();
+ }
+ this.startTs = backupCtx.getStartTs();
+ this.completeTs = backupCtx.getEndTs();
+ this.loadTableList(backupCtx.getTableNames());
+ }
+
+ /**
+ * Construct a table level manifest for a backup of the named table.
+   * @param backupCtx The ongoing backup context
+   * @param table the table being backed up
+   */
+ public BackupManifest(BackupContext backupCtx, TableName table) {
+ this.backupId = backupCtx.getBackupId();
+ this.type = backupCtx.getType();
+ this.rootDir = backupCtx.getTargetRootDir();
+ this.tableBackupDir = backupCtx.getBackupStatus(table).getTargetDir();
+ if (this.type == BackupType.INCREMENTAL) {
+ this.logBackupDir = backupCtx.getHLogTargetDir();
+ this.logBytes = backupCtx.getTotalBytesCopied();
+ }
+ this.startTs = backupCtx.getStartTs();
+ this.completeTs = backupCtx.getEndTs();
+    List<TableName> tables = new ArrayList<TableName>();
+ tables.add(table);
+ this.loadTableList(tables);
+ }
+
+ /**
+ * Construct manifest from a backup directory.
+ * @param conf configuration
+ * @param backupPath backup path
+ * @throws BackupException exception
+ */
+
+ public BackupManifest(Configuration conf, Path backupPath) throws BackupException {
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Loading manifest from: " + backupPath.toString());
+ }
+ // The input backupDir may not exactly be the backup table dir.
+ // It could be the backup log dir where there is also a manifest file stored.
+ // This variable's purpose is to keep the correct and original location so
+ // that we can store/persist it.
+ this.tableBackupDir = backupPath.toString();
+ this.config = conf;
+ try {
+
+ FileSystem fs = backupPath.getFileSystem(conf);
+ FileStatus[] subFiles = FSUtils.listStatus(fs, backupPath);
+ if (subFiles == null) {
+ String errorMsg = backupPath.toString() + " does not exist";
+ LOG.error(errorMsg);
+ throw new IOException(errorMsg);
+ }
+ for (FileStatus subFile : subFiles) {
+ if (subFile.getPath().getName().equals(MANIFEST_FILE_NAME)) {
+
+ // load and set manifest field from file content
+ FSDataInputStream in = fs.open(subFile.getPath());
+ long len = subFile.getLen();
+ byte[] pbBytes = new byte[(int) len];
+ in.readFully(pbBytes);
+ BackupProtos.BackupManifest proto = null;
+ try{
+ proto = parseFrom(pbBytes);
+ } catch(Exception e){
+ throw new BackupException(e);
+ }
+ this.version = proto.getVersion();
+ this.backupId = proto.getBackupId();
+ this.type = BackupType.valueOf(proto.getType().name());
+ // Here the parameter backupDir is where the manifest file is.
+ // There should always be a manifest file under:
+ // backupRootDir/namespace/table/backupId/.backup.manifest
+ this.rootDir = backupPath.getParent().getParent().getParent().toString();
+
+ Path p = backupPath.getParent();
+ if (p.getName().equals(HConstants.HREGION_LOGDIR_NAME)) {
+ this.rootDir = p.getParent().toString();
+ } else {
+ this.rootDir = p.getParent().getParent().toString();
+ }
+
+ loadTableList(proto);
+ this.startTs = proto.getStartTs();
+ this.completeTs = proto.getCompleteTs();
+ this.totalBytes = proto.getTotalBytes();
+ if (this.type == BackupType.INCREMENTAL) {
+ this.logBytes = proto.getLogBytes();
+ //TODO: convert will be implemented by future jira
+ }
+
+ loadIncrementalTimestampMap(proto);
+ loadDependency(proto);
+ this.isCompacted = proto.getCompacted();
+ //TODO: merge will be implemented by future jira
+ LOG.debug("Loaded manifest instance from manifest file: "
+ + FSUtils.getPath(subFile.getPath()));
+ return;
+ }
+ }
+ String errorMsg = "No manifest file found in: " + backupPath.toString();
+ LOG.error(errorMsg);
+ throw new IOException(errorMsg);
+
+ } catch (IOException e) {
+ LOG.error(e);
+ throw new BackupException(e.getMessage());
+ }
+ }
+
+ private void loadIncrementalTimestampMap(BackupProtos.BackupManifest proto) {
+    List<BackupProtos.TableServerTimestamp> list = proto.getTstMapList();
+    if (list == null || list.size() == 0) return;
+    this.incrTimeRanges = new HashMap<TableName, HashMap<String, Long>>();
+    for (BackupProtos.TableServerTimestamp tst : list) {
+      TableName tn = ProtobufUtil.toTableName(tst.getTable());
+      HashMap<String, Long> map = this.incrTimeRanges.get(tn);
+      if (map == null) {
+        map = new HashMap<String, Long>();
+        this.incrTimeRanges.put(tn, map);
+      }
+      List<BackupProtos.ServerTimestamp> listSt = tst.getServerTimestampList();
+      for (BackupProtos.ServerTimestamp stm : listSt) {
+ map.put(stm.getServer(), stm.getTimestamp());
+ }
+ }
+ }
+
+ private void loadDependency(BackupProtos.BackupManifest proto) {
+    dependency = new HashMap<String, BackupImage>();
+    List<BackupProtos.BackupImage> list = proto.getDependentBackupImageList();
+ for (BackupProtos.BackupImage im : list) {
+ dependency.put(im.getBackupId(), BackupImage.fromProto(im));
+ }
+ }
+
+ private void loadTableList(BackupProtos.BackupManifest proto) {
+    this.tableList = new ArrayList<TableName>();
+    List<HBaseProtos.TableName> list = proto.getTableListList();
+ for (HBaseProtos.TableName name: list) {
+ this.tableList.add(ProtobufUtil.toTableName(name));
+ }
+ }
+
+ public BackupType getType() {
+ return type;
+ }
+
+ public void setType(BackupType type) {
+ this.type = type;
+ }
+
+ /**
+ * Loads table list.
+ * @param tableList Table list
+ */
+  private void loadTableList(List<TableName> tableList) {
+
+ this.tableList = this.getTableList();
+ if (this.tableList.size() > 0) {
+ this.tableList.clear();
+ }
+ for (int i = 0; i < tableList.size(); i++) {
+ this.tableList.add(tableList.get(i));
+ }
+
+ LOG.debug(tableList.size() + " tables exist in table set.");
+ }
+
+ /**
+ * Get the table set of this image.
+ * @return The table set list
+ */
+  public ArrayList<TableName> getTableList() {
+    if (this.tableList == null) {
+      this.tableList = new ArrayList<TableName>();
+ }
+ return this.tableList;
+ }
+
+ /**
+ * Persist the manifest file.
+   * @throws BackupException if the manifest file cannot be stored
+ */
+
+ public void store(Configuration conf) throws BackupException {
+ byte[] data = toByteArray();
+ // write the file, overwrite if already exist
+ Path manifestFilePath =
+ new Path(new Path((this.tableBackupDir != null ? this.tableBackupDir : this.logBackupDir))
+ ,MANIFEST_FILE_NAME);
+ try {
+ FSDataOutputStream out =
+ manifestFilePath.getFileSystem(conf).create(manifestFilePath, true);
+ out.write(data);
+ out.close();
+ } catch (IOException e) {
+ LOG.error(e);
+ throw new BackupException(e.getMessage());
+ }
+
+ LOG.debug("Manifestfilestored_to " + this.tableBackupDir != null ? this.tableBackupDir
+ : this.logBackupDir + Path.SEPARATOR + MANIFEST_FILE_NAME);
+ }
+
+ /**
+ * Protobuf serialization
+   * @return The manifest serialized using protobuf
+ */
+ public byte[] toByteArray() {
+ BackupProtos.BackupManifest.Builder builder = BackupProtos.BackupManifest.newBuilder();
+ builder.setVersion(this.version);
+ builder.setBackupId(this.backupId);
+ builder.setType(BackupProtos.BackupType.valueOf(this.type.name()));
+ setTableList(builder);
+ builder.setStartTs(this.startTs);
+ builder.setCompleteTs(this.completeTs);
+ builder.setTotalBytes(this.totalBytes);
+ if (this.type == BackupType.INCREMENTAL) {
+ builder.setLogBytes(this.logBytes);
+ }
+ setIncrementalTimestampMap(builder);
+ setDependencyMap(builder);
+ builder.setCompacted(this.isCompacted);
+ return builder.build().toByteArray();
+ }
+
+ private void setIncrementalTimestampMap(BackupProtos.BackupManifest.Builder builder) {
+ if (this.incrTimeRanges == null) return;
+    for (Entry<TableName, HashMap<String, Long>> entry : this.incrTimeRanges.entrySet()) {
+      TableName key = entry.getKey();
+      HashMap<String, Long> value = entry.getValue();
+ BackupProtos.TableServerTimestamp.Builder tstBuilder =
+ BackupProtos.TableServerTimestamp.newBuilder();
+ tstBuilder.setTable(ProtobufUtil.toProtoTableName(key));
+
+ for (String s : value.keySet()) {
+ BackupProtos.ServerTimestamp.Builder stBuilder = BackupProtos.ServerTimestamp.newBuilder();
+ stBuilder.setServer(s);
+ stBuilder.setTimestamp(value.get(s));
+ tstBuilder.addServerTimestamp(stBuilder.build());
+ }
+ builder.addTstMap(tstBuilder.build());
+ }
+ }
+
+ private void setDependencyMap(BackupProtos.BackupManifest.Builder builder) {
+ for (BackupImage image: getDependency().values()) {
+ builder.addDependentBackupImage(image.toProto());
+ }
+ }
+
+ private void setTableList(BackupProtos.BackupManifest.Builder builder) {
+ for(TableName name: tableList){
+ builder.addTableList(ProtobufUtil.toProtoTableName(name));
+ }
+ }
+
+ /**
+ * Parse protobuf from byte array
+ * @param pbBytes A pb serialized BackupManifest instance
+   * @return An instance of BackupProtos.BackupManifest made from bytes
+ * @throws DeserializationException
+ */
+ private static BackupProtos.BackupManifest parseFrom(final byte[] pbBytes)
+ throws DeserializationException {
+ BackupProtos.BackupManifest proto;
+ try {
+ proto = BackupProtos.BackupManifest.parseFrom(pbBytes);
+ } catch (InvalidProtocolBufferException e) {
+ throw new DeserializationException(e);
+ }
+ return proto;
+ }
+
+ /**
+ * Get manifest file version
+ * @return version
+ */
+ public String getVersion() {
+ return version;
+ }
+
+ /**
+ * Get this backup image.
+ * @return the backup image.
+ */
+ public BackupImage getBackupImage() {
+ return this.getDependency().get(this.backupId);
+ }
+
+ /**
+ * Add dependent backup image for this backup.
+ * @param image The direct dependent backup image
+ */
+ public void addDependentImage(BackupImage image) {
+ this.getDependency().get(this.backupId).addAncestor(image);
+ this.setDependencyMap(this.getDependency(), image);
+ }
+
+
+
+ /**
+ * Get all dependent backup images. The image of this backup is also contained.
+ * @return The dependent backup images map
+ */
+  public Map<String, BackupImage> getDependency() {
+    if (this.dependency == null) {
+      this.dependency = new HashMap<String, BackupImage>();
+ LOG.debug(this.rootDir + " " + this.backupId + " " + this.type);
+ this.dependency.put(this.backupId,
+ new BackupImage(this.backupId, this.type, this.rootDir, tableList, this.startTs,
+ this.completeTs));
+ }
+ return this.dependency;
+ }
+
+ /**
+ * Set the incremental timestamp map directly.
+ * @param incrTimestampMap timestamp map
+ */
+  public void setIncrTimestampMap(HashMap<TableName, HashMap<String, Long>> incrTimestampMap) {
+ this.incrTimeRanges = incrTimestampMap;
+ }
+
+
+  public Map<TableName, HashMap<String, Long>> getIncrTimestampMap() {
+    if (this.incrTimeRanges == null) {
+      this.incrTimeRanges = new HashMap<TableName, HashMap<String, Long>>();
+ }
+ return this.incrTimeRanges;
+ }
+
+
+ /**
+ * Get the image list of this backup for restore in time order.
+ * @param reverse If true, then output in reverse order, otherwise in time order from old to new
+ * @return the backup image list for restore in time order
+ */
+  public ArrayList<BackupImage> getRestoreDependentList(boolean reverse) {
+    TreeMap<Long, BackupImage> restoreImages = new TreeMap<Long, BackupImage>();
+    for (BackupImage image : this.getDependency().values()) {
+      restoreImages.put(Long.valueOf(image.startTs), image);
+    }
+    return new ArrayList<BackupImage>(reverse ? (restoreImages.descendingMap().values())
+        : (restoreImages.values()));
+ }
+
+ /**
+ * Get the dependent image list for a specific table of this backup in time order from old to new
+   * if one wants to restore to this backup image level.
+ * @param table table
+ * @return the backup image list for a table in time order
+ */
+  public ArrayList<BackupImage> getDependentListByTable(TableName table) {
+    ArrayList<BackupImage> tableImageList = new ArrayList<BackupImage>();
+    ArrayList<BackupImage> imageList = getRestoreDependentList(true);
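+    // Walk images from newest to oldest and stop at the first FULL image that contains the table;
+    // the list is then reversed so it is returned from oldest to newest.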
+ for (BackupImage image : imageList) {
+ if (image.hasTable(table)) {
+ tableImageList.add(image);
+ if (image.getType() == BackupType.FULL) {
+ break;
+ }
+ }
+ }
+ Collections.reverse(tableImageList);
+ return tableImageList;
+ }
+
+ /**
+ * Get the full dependent image list in the whole dependency scope for a specific table of this
+ * backup in time order from old to new.
+ * @param table table
+ * @return the full backup image list for a table in time order in the whole scope of the
+ * dependency of this image
+ */
+  public ArrayList<BackupImage> getAllDependentListByTable(TableName table) {
+    ArrayList<BackupImage> tableImageList = new ArrayList<BackupImage>();
+    ArrayList<BackupImage> imageList = getRestoreDependentList(false);
+ for (BackupImage image : imageList) {
+ if (image.hasTable(table)) {
+ tableImageList.add(image);
+ }
+ }
+ return tableImageList;
+ }
+
+
+ /**
+ * Recursively set the dependency map of the backup images.
+ * @param map The dependency map
+ * @param image The backup image
+ */
+  private void setDependencyMap(Map<String, BackupImage> map, BackupImage image) {
+ if (image == null) {
+ return;
+ } else {
+ map.put(image.getBackupId(), image);
+ for (BackupImage img : image.getAncestors()) {
+ setDependencyMap(map, img);
+ }
+ }
+ }
+
+ /**
+ * Check whether backup image1 could cover backup image2 or not.
+ * @param image1 backup image 1
+ * @param image2 backup image 2
+ * @return true if image1 can cover image2, otherwise false
+ */
+ public static boolean canCoverImage(BackupImage image1, BackupImage image2) {
+ // image1 can cover image2 only when the following conditions are satisfied:
+ // - image1 must not be an incremental image;
+ // - image1 must be taken after image2 has been taken;
+ // - table set of image1 must cover the table set of image2.
+ if (image1.getType() == BackupType.INCREMENTAL) {
+ return false;
+ }
+ if (image1.getStartTs() < image2.getStartTs()) {
+ return false;
+ }
+    List<TableName> image1TableList = image1.getTableNames();
+    List<TableName> image2TableList = image2.getTableNames();
+ boolean found = false;
+ for (int i = 0; i < image2TableList.size(); i++) {
+ found = false;
+ for (int j = 0; j < image1TableList.size(); j++) {
+ if (image2TableList.get(i).equals(image1TableList.get(j))) {
+ found = true;
+ break;
+ }
+ }
+ if (!found) {
+ return false;
+ }
+ }
+
+ LOG.debug("Backup image " + image1.getBackupId() + " can cover " + image2.getBackupId());
+ return true;
+ }
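+
+  // Example: a FULL image of {t1, t2} with a later start timestamp covers an earlier image of
+  // {t1}; an INCREMENTAL image never covers another image.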
+
+ /**
+ * Check whether backup image set could cover a backup image or not.
+ * @param fullImages The backup image set
+ * @param image The target backup image
+ * @return true if fullImages can cover image, otherwise false
+ */
+  public static boolean canCoverImage(ArrayList<BackupImage> fullImages, BackupImage image) {
+ // fullImages can cover image only when the following conditions are satisfied:
+ // - each image of fullImages must not be an incremental image;
+ // - each image of fullImages must be taken after image has been taken;
+ // - sum table set of fullImages must cover the table set of image.
+ for (BackupImage image1 : fullImages) {
+ if (image1.getType() == BackupType.INCREMENTAL) {
+ return false;
+ }
+ if (image1.getStartTs() < image.getStartTs()) {
+ return false;
+ }
+ }
+
+    ArrayList<String> image1TableList = new ArrayList<String>();
+    for (BackupImage image1 : fullImages) {
+      List<TableName> tableList = image1.getTableNames();
+      for (TableName table : tableList) {
+        image1TableList.add(table.getNameAsString());
+      }
+    }
+    ArrayList<String> image2TableList = new ArrayList<String>();
+    List<TableName> tableList = image.getTableNames();
+ for (TableName table : tableList) {
+ image2TableList.add(table.getNameAsString());
+ }
+
+ for (int i = 0; i < image2TableList.size(); i++) {
+      if (!image1TableList.contains(image2TableList.get(i))) {
+ return false;
+ }
+ }
+
+ LOG.debug("Full image set can cover image " + image.getBackupId());
+ return true;
+ }
+}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupRestoreConstants.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupRestoreConstants.java
new file mode 100644
index 0000000..d0ce059
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupRestoreConstants.java
@@ -0,0 +1,46 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.backup.impl;
+
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+
+/**
+ * BackupRestoreConstants holds a set of HBase backup and restore constants
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Stable
+public final class BackupRestoreConstants {
+
+
+ // delimiter in tablename list in restore command
+ public static final String TABLENAME_DELIMITER_IN_COMMAND = ",";
+
+ public static final String CONF_STAGING_ROOT = "snapshot.export.staging.root";
+
+ public static final String BACKUPID_PREFIX = "backup_";
+
+ public static enum BackupCommand {
+ CREATE, CANCEL, DELETE, DESCRIBE, HISTORY, STATUS, CONVERT, MERGE, STOP, SHOW, HELP,
+ }
+
+ private BackupRestoreConstants() {
+ // Can't be instantiated with this ctor.
+ }
+}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSnapshotCopy.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSnapshotCopy.java
new file mode 100644
index 0000000..8b8a83f
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSnapshotCopy.java
@@ -0,0 +1,42 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.backup.impl;
+
+import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
+
+/* This class will be extended in a future JIRA to support progress reporting. */
+public class BackupSnapshotCopy extends ExportSnapshot {
+ private BackupHandler backupHandler;
+ private String table;
+
+ public BackupSnapshotCopy(BackupHandler backupHandler, String table) {
+ super();
+ this.backupHandler = backupHandler;
+ this.table = table;
+ }
+
+ public BackupHandler getBackupHandler() {
+ return this.backupHandler;
+ }
+
+ public String getTable() {
+ return this.table;
+ }
+
+}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupStatus.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupStatus.java
new file mode 100644
index 0000000..6e54994
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupStatus.java
@@ -0,0 +1,105 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.backup.impl;
+
+import java.io.Serializable;
+
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.backup.HBackupFileSystem;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.protobuf.generated.BackupProtos;
+
+/**
+ * Backup status and related information encapsulated for a table.
+ * At this moment only the target directory and the snapshot name are encapsulated here.
+ * Progress, bytes copied, phase, etc. will be added in a future JIRA.
+ */
+
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public class BackupStatus implements Serializable {
+
+ private static final long serialVersionUID = -5968397963548535982L;
+
+ // table name for backup
+ private TableName table;
+
+ // target directory of the backup image for this table
+ private String targetDir;
+
+ // snapshot name for offline/online snapshot
+ private String snapshotName = null;
+
+ public BackupStatus() {
+
+ }
+
+ public BackupStatus(TableName table, String targetRootDir, String backupId) {
+ this.table = table;
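+    // The per-table target directory is derived from the backup root dir, backup id and table name.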
+ this.targetDir = HBackupFileSystem.getTableBackupDir(targetRootDir, backupId, table);
+ }
+
+ public String getSnapshotName() {
+ return snapshotName;
+ }
+
+ public void setSnapshotName(String snapshotName) {
+ this.snapshotName = snapshotName;
+ }
+
+ public String getTargetDir() {
+ return targetDir;
+ }
+
+ public TableName getTable() {
+ return table;
+ }
+
+ public void setTable(TableName table) {
+ this.table = table;
+ }
+
+ public void setTargetDir(String targetDir) {
+ this.targetDir = targetDir;
+ }
+
+  public static BackupStatus convert(BackupProtos.TableBackupStatus proto) {
+ BackupStatus bs = new BackupStatus();
+ bs.setTable(ProtobufUtil.toTableName(proto.getTable()));
+ bs.setTargetDir(proto.getTargetDir());
+ if(proto.hasSnapshot()){
+ bs.setSnapshotName(proto.getSnapshot());
+ }
+ return bs;
+ }
+
+ public BackupProtos.TableBackupStatus toProto() {
+ BackupProtos.TableBackupStatus.Builder builder =
+ BackupProtos.TableBackupStatus.newBuilder();
+ if(snapshotName != null) {
+ builder.setSnapshot(snapshotName);
+ }
+ builder.setTable(ProtobufUtil.toProtoTableName(table));
+ builder.setTargetDir(targetDir);
+ return builder.build();
+ }
+}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java
new file mode 100644
index 0000000..18a0f06
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java
@@ -0,0 +1,571 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.backup.impl;
+
+import java.io.Closeable;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Set;
+import java.util.TreeSet;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellUtil;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.backup.impl.BackupHandler.BackupState;
+import org.apache.hadoop.hbase.backup.impl.BackupUtil.BackupCompleteData;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.Delete;
+import org.apache.hadoop.hbase.client.Get;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.ResultScanner;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.protobuf.generated.BackupProtos;
+
+/**
+ * This class provides the API for the 'hbase:backup' system table
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public final class BackupSystemTable implements Closeable {
+
+ private static final Log LOG = LogFactory.getLog(BackupSystemTable.class);
+ private final static String TABLE_NAMESPACE = "hbase";
+ private final static String TABLE_NAME = "backup";
+ private final static TableName tableName = TableName.valueOf(TABLE_NAMESPACE, TABLE_NAME);
+ final static byte[] familyName = "f".getBytes();
+
+ // Connection to HBase cluster, shared
+ // among all instances
+ private final Connection connection;
+ // Cluster configuration
+ private final Configuration conf;
+
+ /**
+   * Create a BackupSystemTable object for the given Connection. The Connection is NOT owned by
+   * this instance and has to be closed explicitly by the caller.
+   * @param connection active connection to the HBase cluster
+   * @throws IOException if the backup system table cannot be created
+ */
+ public BackupSystemTable(Connection connection) throws IOException {
+ this.connection = connection;
+ this.conf = connection.getConfiguration();
+
+ createSystemTableIfNotExists();
+ }
+
+ @Override
+ public void close() {
+ }
+
+ /**
+ * Gets table name
+ * @return table name
+ */
+ public static TableName getTableName() {
+ return tableName;
+ }
+
+ private void createSystemTableIfNotExists() throws IOException {
+    try (Admin admin = connection.getAdmin()) {
+      if (!admin.tableExists(tableName)) {
+ HTableDescriptor tableDesc = new HTableDescriptor(tableName);
+ HColumnDescriptor colDesc = new HColumnDescriptor(familyName);
+ colDesc.setMaxVersions(1);
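+        // The column family TTL for hbase:backup is configurable via HConstants.BACKUP_SYSTEM_TTL_KEY.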
+ int ttl =
+ conf.getInt(HConstants.BACKUP_SYSTEM_TTL_KEY, HConstants.BACKUP_SYSTEM_TTL_DEFAULT);
+ colDesc.setTimeToLive(ttl);
+ tableDesc.addFamily(colDesc);
+ admin.createTable(tableDesc);
+ }
+ } catch (IOException e) {
+ LOG.error(e);
+ throw e;
+ }
+ }
+
+ /**
+ * Updates status (state) of a backup session in hbase:backup table
+ * @param context context
+ * @throws IOException exception
+ */
+ public void updateBackupStatus(BackupContext context) throws IOException {
+
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("update backup status in hbase:backup for: " + context.getBackupId()
+ + " set status=" + context.getState());
+ }
+ try (Table table = connection.getTable(tableName)) {
+ Put put = BackupSystemTableHelper.createPutForBackupContext(context);
+ table.put(put);
+ }
+ }
+
+ /**
+ * Deletes backup status from hbase:backup table
+ * @param backupId backup id
+ * @throws IOException exception
+ */
+ public void deleteBackupStatus(String backupId) throws IOException {
+
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("delete backup status in hbase:backup for " + backupId);
+ }
+ try (Table table = connection.getTable(tableName)) {
+ Delete del = BackupSystemTableHelper.createDeletForBackupContext(backupId);
+ table.delete(del);
+ }
+ }
+
+ /**
+ * Reads backup status object (instance of BackupContext) from hbase:backup table
+ * @param backupId backup id
+ * @return current status of the backup session, or null if not found
+ * @throws IOException exception
+ */
+ public BackupContext readBackupStatus(String backupId) throws IOException {
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("read backup status from hbase:backup for: " + backupId);
+ }
+
+ try (Table table = connection.getTable(tableName)) {
+ Get get = BackupSystemTableHelper.createGetForBackupContext(backupId);
+ Result res = table.get(get);
+ if (res.isEmpty()) {
+ return null;
+ }
+ return BackupSystemTableHelper.resultToBackupContext(res);
+ }
+ }
+
+ /**
+ * Reads the start code (timestamp) of the last successful backup. Returns null if no start code
+ * is stored in hbase:backup or the stored value has zero length; either case means no backup has
+ * completed successfully so far.
+ * @return the timestamp of last successful backup
+ * @throws IOException exception
+ */
+ public String readBackupStartCode() throws IOException {
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("read backup start code from hbase:backup");
+ }
+ try (Table table = connection.getTable(tableName)) {
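+ // the start code is stored under a fixed row defined by BackupSystemTableHelper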
+ Get get = BackupSystemTableHelper.createGetForStartCode();
+ Result res = table.get(get);
+ if (res.isEmpty()) {
+ return null;
+ }
+ Cell cell = res.listCells().get(0);
+ byte[] val = CellUtil.cloneValue(cell);
+ if (val.length == 0) {
+ return null;
+ }
+ return new String(val);
+ }
+ }
+
+ /**
+ * Writes the start code (timestamp) to hbase:backup. If null is passed, a zero-length value is written.
+ * @param startCode start code
+ * @throws IOException exception
+ */
+ public void writeBackupStartCode(Long startCode) throws IOException {
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("write backup start code to hbase:backup " + startCode);
+ }
+ try (Table table = connection.getTable(tableName)) {
+ String code = (startCode == null) ? "" : startCode.toString();
+ Put put = BackupSystemTableHelper.createPutForStartCode(code);
+ table.put(put);
+ }
+ }
+
+ /**
+ * Gets the Region Servers' log information after the last log roll from hbase:backup.
+ * @return map from region server name to the timestamp of its last log roll
+ * @throws IOException exception
+ */
+ public HashMap<String, Long> readRegionServerLastLogRollResult()
+ throws IOException {
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("read region server last roll log result to hbase:backup");
+ }
+
+ Scan scan = BackupSystemTableHelper.createScanForReadRegionServerLastLogRollResult();
+ scan.setMaxVersions(1);
+
+ try (Table table = connection.getTable(tableName);
+ ResultScanner scanner = table.getScanner(scan)) {
+ Result res = null;
+ HashMap<String, Long> rsTimestampMap = new HashMap<String, Long>();
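+ // each matching row holds one region server's last log roll timestamp; the row key encodes the server name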
+ while ((res = scanner.next()) != null) {
+ res.advance();
+ Cell cell = res.current();
+ byte[] row = CellUtil.cloneRow(cell);
+ String server =
+ BackupSystemTableHelper.getServerNameForReadRegionServerLastLogRollResult(row);
+
+ byte[] data = CellUtil.cloneValue(cell);
+ rsTimestampMap.put(server, Long.parseLong(new String(data)));
+ }
+ return rsTimestampMap;
+ }
+ }
+
+ /**
+ * Writes a Region Server's last log roll result (timestamp) to the hbase:backup table
+ * @param server - Region Server name
+ * @param timestamp - last log timestamp
+ * @throws IOException exception
+ */
+ public void writeRegionServerLastLogRollResult(String server, Long timestamp)
+ throws IOException {
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("write region server last roll log result to hbase:backup");
+ }
+ try (Table table = connection.getTable(tableName)) {
+ Put put =
+ BackupSystemTableHelper.createPutForRegionServerLastLogRollResult(server, timestamp);
+ table.put(put);
+ }
+ }
+
+ /**
+ * Gets all completed backup information, in descending order by completion time
+ * @return list of BackupCompleteData for all completed backups
+ * @throws IOException exception
+ */
+ public ArrayList<BackupCompleteData> getBackupHistory() throws IOException {
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("get backup history from hbase:backup");
+ }
+ Scan scan = BackupSystemTableHelper.createScanForBackupHistory();
+ scan.setMaxVersions(1);
+
+ ArrayList