diff --git bin/hbase bin/hbase
index 5064451..9fd903a 100755
--- bin/hbase
+++ bin/hbase
@@ -99,6 +99,8 @@ if [ $# = 0 ]; then
echo " pe Run PerformanceEvaluation"
echo " ltt Run LoadTestTool"
echo " version Print the version"
+ echo "  backup          Backup tables for recovery"
+ echo "  restore         Restore tables from existing backup image"
echo " CLASSNAME Run the class named CLASSNAME"
exit 1
fi
@@ -303,6 +305,10 @@ elif [ "$COMMAND" = "hfile" ] ; then
CLASS='org.apache.hadoop.hbase.io.hfile.HFilePrettyPrinter'
elif [ "$COMMAND" = "zkcli" ] ; then
CLASS="org.apache.hadoop.hbase.zookeeper.ZooKeeperMainServer"
+elif [ "$COMMAND" = "backup" ] ; then
+ CLASS='org.apache.hadoop.hbase.backup.BackupClient'
+elif [ "$COMMAND" = "restore" ] ; then
+ CLASS='org.apache.hadoop.hbase.backup.RestoreClient'
elif [ "$COMMAND" = "upgrade" ] ; then
echo "This command was used to upgrade to HBase 0.96, it was removed in HBase 2.0.0."
echo "Please follow the documentation at http://hbase.apache.org/book.html#upgrading."
diff --git hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
index 6fafad3..c106831 100644
--- hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
+++ hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
@@ -1263,6 +1263,15 @@ public final class HConstants {
public static final String ZK_SERVER_KEYTAB_FILE = "hbase.zookeeper.server.keytab.file";
public static final String ZK_SERVER_KERBEROS_PRINCIPAL =
"hbase.zookeeper.server.kerberos.principal";
+
+ /**
+ * Backup/Restore constants
+ */
+  public static final String BACKUP_ENABLE_KEY = "hbase.backup.enable";
+  public static final boolean BACKUP_ENABLE_DEFAULT = true;
+  public static final String BACKUP_SYSTEM_TTL_KEY = "hbase.backup.system.ttl";
+  // Default TTL = forever (FOREVER, i.e. Integer.MAX_VALUE seconds)
+  public static final int BACKUP_SYSTEM_TTL_DEFAULT = FOREVER;
private HConstants() {
// Can't be instantiated with this ctor.
diff --git hbase-protocol/pom.xml hbase-protocol/pom.xml
index 8034576..9098944 100644
--- hbase-protocol/pom.xml
+++ hbase-protocol/pom.xml
@@ -171,6 +171,7 @@
Admin.proto
Aggregate.proto
Authentication.proto
+ Backup.proto
Cell.proto
Client.proto
ClusterId.proto
diff --git hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/BackupProtos.java hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/BackupProtos.java
new file mode 100644
index 0000000..e3f3fe1
--- /dev/null
+++ hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/BackupProtos.java
@@ -0,0 +1,5803 @@
+// Generated by the protocol buffer compiler. DO NOT EDIT!
+// source: Backup.proto
+
+package org.apache.hadoop.hbase.protobuf.generated;
+
+public final class BackupProtos {
+ private BackupProtos() {}
+ public static void registerAllExtensions(
+ com.google.protobuf.ExtensionRegistry registry) {
+ }
+ public interface BackupManifestOrBuilder
+ extends com.google.protobuf.MessageOrBuilder {
+
+ // required string version = 1;
+ /**
+ * required string version = 1;
+ */
+ boolean hasVersion();
+ /**
+ * required string version = 1;
+ */
+ java.lang.String getVersion();
+ /**
+ * required string version = 1;
+ */
+ com.google.protobuf.ByteString
+ getVersionBytes();
+
+ // required string token = 2;
+ /**
+ * required string token = 2;
+ */
+ boolean hasToken();
+ /**
+ * required string token = 2;
+ */
+ java.lang.String getToken();
+ /**
+ * required string token = 2;
+ */
+ com.google.protobuf.ByteString
+ getTokenBytes();
+
+ // required string type = 3;
+ /**
+ * required string type = 3;
+ */
+ boolean hasType();
+ /**
+ * required string type = 3;
+ */
+ java.lang.String getType();
+ /**
+ * required string type = 3;
+ */
+ com.google.protobuf.ByteString
+ getTypeBytes();
+
+ // required string tableSet = 4;
+ /**
+ * required string tableSet = 4;
+ */
+ boolean hasTableSet();
+ /**
+ * required string tableSet = 4;
+ */
+ java.lang.String getTableSet();
+ /**
+ * required string tableSet = 4;
+ */
+ com.google.protobuf.ByteString
+ getTableSetBytes();
+
+ // required int64 startTs = 5;
+ /**
+ * required int64 startTs = 5;
+ */
+ boolean hasStartTs();
+ /**
+ * required int64 startTs = 5;
+ */
+ long getStartTs();
+
+ // required int64 completeTs = 6;
+ /**
+ * required int64 completeTs = 6;
+ */
+ boolean hasCompleteTs();
+ /**
+ * required int64 completeTs = 6;
+ */
+ long getCompleteTs();
+
+ // required int64 tableBytes = 7;
+ /**
+ * required int64 tableBytes = 7;
+ */
+ boolean hasTableBytes();
+ /**
+ * required int64 tableBytes = 7;
+ */
+ long getTableBytes();
+
+ // optional int64 logBytes = 8;
+ /**
+ * optional int64 logBytes = 8;
+ */
+ boolean hasLogBytes();
+ /**
+ * optional int64 logBytes = 8;
+ */
+ long getLogBytes();
+
+ // required string incrTimerange = 9;
+ /**
+ * required string incrTimerange = 9;
+ */
+ boolean hasIncrTimerange();
+ /**
+ * required string incrTimerange = 9;
+ */
+ java.lang.String getIncrTimerange();
+ /**
+ * required string incrTimerange = 9;
+ */
+ com.google.protobuf.ByteString
+ getIncrTimerangeBytes();
+
+ // required string dependency = 10;
+ /**
+ * required string dependency = 10;
+ */
+ boolean hasDependency();
+ /**
+ * required string dependency = 10;
+ */
+ java.lang.String getDependency();
+ /**
+ * required string dependency = 10;
+ */
+ com.google.protobuf.ByteString
+ getDependencyBytes();
+
+ // required string imageState = 11;
+ /**
+ * required string imageState = 11;
+ */
+ boolean hasImageState();
+ /**
+ * required string imageState = 11;
+ */
+ java.lang.String getImageState();
+ /**
+ * required string imageState = 11;
+ */
+ com.google.protobuf.ByteString
+ getImageStateBytes();
+
+ // required bool compacted = 12;
+ /**
+ * required bool compacted = 12;
+ */
+ boolean hasCompacted();
+ /**
+ * required bool compacted = 12;
+ */
+ boolean getCompacted();
+ }
+ /**
+ * Protobuf type {@code hbase.pb.BackupManifest}
+ */
+ public static final class BackupManifest extends
+ com.google.protobuf.GeneratedMessage
+ implements BackupManifestOrBuilder {
+ // Use BackupManifest.newBuilder() to construct.
+ private BackupManifest(com.google.protobuf.GeneratedMessage.Builder> builder) {
+ super(builder);
+ this.unknownFields = builder.getUnknownFields();
+ }
+ private BackupManifest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+ private static final BackupManifest defaultInstance;
+ public static BackupManifest getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public BackupManifest getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ private final com.google.protobuf.UnknownFieldSet unknownFields;
+ @java.lang.Override
+ public final com.google.protobuf.UnknownFieldSet
+ getUnknownFields() {
+ return this.unknownFields;
+ }
+ private BackupManifest(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ initFields();
+ int mutable_bitField0_ = 0;
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder();
+ try {
+ boolean done = false;
+ while (!done) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ done = true;
+ break;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ done = true;
+ }
+ break;
+ }
+ case 10: {
+ bitField0_ |= 0x00000001;
+ version_ = input.readBytes();
+ break;
+ }
+ case 18: {
+ bitField0_ |= 0x00000002;
+ token_ = input.readBytes();
+ break;
+ }
+ case 26: {
+ bitField0_ |= 0x00000004;
+ type_ = input.readBytes();
+ break;
+ }
+ case 34: {
+ bitField0_ |= 0x00000008;
+ tableSet_ = input.readBytes();
+ break;
+ }
+ case 40: {
+ bitField0_ |= 0x00000010;
+ startTs_ = input.readInt64();
+ break;
+ }
+ case 48: {
+ bitField0_ |= 0x00000020;
+ completeTs_ = input.readInt64();
+ break;
+ }
+ case 56: {
+ bitField0_ |= 0x00000040;
+ tableBytes_ = input.readInt64();
+ break;
+ }
+ case 64: {
+ bitField0_ |= 0x00000080;
+ logBytes_ = input.readInt64();
+ break;
+ }
+ case 74: {
+ bitField0_ |= 0x00000100;
+ incrTimerange_ = input.readBytes();
+ break;
+ }
+ case 82: {
+ bitField0_ |= 0x00000200;
+ dependency_ = input.readBytes();
+ break;
+ }
+ case 90: {
+ bitField0_ |= 0x00000400;
+ imageState_ = input.readBytes();
+ break;
+ }
+ case 96: {
+ bitField0_ |= 0x00000800;
+ compacted_ = input.readBool();
+ break;
+ }
+ }
+ }
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ throw e.setUnfinishedMessage(this);
+ } catch (java.io.IOException e) {
+ throw new com.google.protobuf.InvalidProtocolBufferException(
+ e.getMessage()).setUnfinishedMessage(this);
+ } finally {
+ this.unknownFields = unknownFields.build();
+ makeExtensionsImmutable();
+ }
+ }
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_BackupManifest_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_BackupManifest_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest.class, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest.Builder.class);
+ }
+
+ public static com.google.protobuf.Parser PARSER =
+ new com.google.protobuf.AbstractParser() {
+ public BackupManifest parsePartialFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return new BackupManifest(input, extensionRegistry);
+ }
+ };
+
+ @java.lang.Override
+ public com.google.protobuf.Parser getParserForType() {
+ return PARSER;
+ }
+
+ private int bitField0_;
+ // required string version = 1;
+ public static final int VERSION_FIELD_NUMBER = 1;
+ private java.lang.Object version_;
+ /**
+ * required string version = 1;
+ */
+ public boolean hasVersion() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * required string version = 1;
+ */
+ public java.lang.String getVersion() {
+ java.lang.Object ref = version_;
+ if (ref instanceof java.lang.String) {
+ return (java.lang.String) ref;
+ } else {
+ com.google.protobuf.ByteString bs =
+ (com.google.protobuf.ByteString) ref;
+ java.lang.String s = bs.toStringUtf8();
+ if (bs.isValidUtf8()) {
+ version_ = s;
+ }
+ return s;
+ }
+ }
+ /**
+ * required string version = 1;
+ */
+ public com.google.protobuf.ByteString
+ getVersionBytes() {
+ java.lang.Object ref = version_;
+ if (ref instanceof java.lang.String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ version_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+
+ // required string token = 2;
+ public static final int TOKEN_FIELD_NUMBER = 2;
+ private java.lang.Object token_;
+ /**
+ * required string token = 2;
+ */
+ public boolean hasToken() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ /**
+ * required string token = 2;
+ */
+ public java.lang.String getToken() {
+ java.lang.Object ref = token_;
+ if (ref instanceof java.lang.String) {
+ return (java.lang.String) ref;
+ } else {
+ com.google.protobuf.ByteString bs =
+ (com.google.protobuf.ByteString) ref;
+ java.lang.String s = bs.toStringUtf8();
+ if (bs.isValidUtf8()) {
+ token_ = s;
+ }
+ return s;
+ }
+ }
+ /**
+ * required string token = 2;
+ */
+ public com.google.protobuf.ByteString
+ getTokenBytes() {
+ java.lang.Object ref = token_;
+ if (ref instanceof java.lang.String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ token_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+
+ // required string type = 3;
+ public static final int TYPE_FIELD_NUMBER = 3;
+ private java.lang.Object type_;
+ /**
+ * required string type = 3;
+ */
+ public boolean hasType() {
+ return ((bitField0_ & 0x00000004) == 0x00000004);
+ }
+ /**
+ * required string type = 3;
+ */
+ public java.lang.String getType() {
+ java.lang.Object ref = type_;
+ if (ref instanceof java.lang.String) {
+ return (java.lang.String) ref;
+ } else {
+ com.google.protobuf.ByteString bs =
+ (com.google.protobuf.ByteString) ref;
+ java.lang.String s = bs.toStringUtf8();
+ if (bs.isValidUtf8()) {
+ type_ = s;
+ }
+ return s;
+ }
+ }
+ /**
+ * required string type = 3;
+ */
+ public com.google.protobuf.ByteString
+ getTypeBytes() {
+ java.lang.Object ref = type_;
+ if (ref instanceof java.lang.String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ type_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+
+ // required string tableSet = 4;
+ public static final int TABLESET_FIELD_NUMBER = 4;
+ private java.lang.Object tableSet_;
+ /**
+ * required string tableSet = 4;
+ */
+ public boolean hasTableSet() {
+ return ((bitField0_ & 0x00000008) == 0x00000008);
+ }
+ /**
+ * required string tableSet = 4;
+ */
+ public java.lang.String getTableSet() {
+ java.lang.Object ref = tableSet_;
+ if (ref instanceof java.lang.String) {
+ return (java.lang.String) ref;
+ } else {
+ com.google.protobuf.ByteString bs =
+ (com.google.protobuf.ByteString) ref;
+ java.lang.String s = bs.toStringUtf8();
+ if (bs.isValidUtf8()) {
+ tableSet_ = s;
+ }
+ return s;
+ }
+ }
+ /**
+ * required string tableSet = 4;
+ */
+ public com.google.protobuf.ByteString
+ getTableSetBytes() {
+ java.lang.Object ref = tableSet_;
+ if (ref instanceof java.lang.String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ tableSet_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+
+ // required int64 startTs = 5;
+ public static final int STARTTS_FIELD_NUMBER = 5;
+ private long startTs_;
+ /**
+ * required int64 startTs = 5;
+ */
+ public boolean hasStartTs() {
+ return ((bitField0_ & 0x00000010) == 0x00000010);
+ }
+ /**
+ * required int64 startTs = 5;
+ */
+ public long getStartTs() {
+ return startTs_;
+ }
+
+ // required int64 completeTs = 6;
+ public static final int COMPLETETS_FIELD_NUMBER = 6;
+ private long completeTs_;
+ /**
+ * required int64 completeTs = 6;
+ */
+ public boolean hasCompleteTs() {
+ return ((bitField0_ & 0x00000020) == 0x00000020);
+ }
+ /**
+ * required int64 completeTs = 6;
+ */
+ public long getCompleteTs() {
+ return completeTs_;
+ }
+
+ // required int64 tableBytes = 7;
+ public static final int TABLEBYTES_FIELD_NUMBER = 7;
+ private long tableBytes_;
+ /**
+ * required int64 tableBytes = 7;
+ */
+ public boolean hasTableBytes() {
+ return ((bitField0_ & 0x00000040) == 0x00000040);
+ }
+ /**
+ * required int64 tableBytes = 7;
+ */
+ public long getTableBytes() {
+ return tableBytes_;
+ }
+
+ // optional int64 logBytes = 8;
+ public static final int LOGBYTES_FIELD_NUMBER = 8;
+ private long logBytes_;
+ /**
+ * optional int64 logBytes = 8;
+ */
+ public boolean hasLogBytes() {
+ return ((bitField0_ & 0x00000080) == 0x00000080);
+ }
+ /**
+ * optional int64 logBytes = 8;
+ */
+ public long getLogBytes() {
+ return logBytes_;
+ }
+
+ // required string incrTimerange = 9;
+ public static final int INCRTIMERANGE_FIELD_NUMBER = 9;
+ private java.lang.Object incrTimerange_;
+ /**
+ * required string incrTimerange = 9;
+ */
+ public boolean hasIncrTimerange() {
+ return ((bitField0_ & 0x00000100) == 0x00000100);
+ }
+ /**
+ * required string incrTimerange = 9;
+ */
+ public java.lang.String getIncrTimerange() {
+ java.lang.Object ref = incrTimerange_;
+ if (ref instanceof java.lang.String) {
+ return (java.lang.String) ref;
+ } else {
+ com.google.protobuf.ByteString bs =
+ (com.google.protobuf.ByteString) ref;
+ java.lang.String s = bs.toStringUtf8();
+ if (bs.isValidUtf8()) {
+ incrTimerange_ = s;
+ }
+ return s;
+ }
+ }
+ /**
+ * required string incrTimerange = 9;
+ */
+ public com.google.protobuf.ByteString
+ getIncrTimerangeBytes() {
+ java.lang.Object ref = incrTimerange_;
+ if (ref instanceof java.lang.String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ incrTimerange_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+
+ // required string dependency = 10;
+ public static final int DEPENDENCY_FIELD_NUMBER = 10;
+ private java.lang.Object dependency_;
+ /**
+ * required string dependency = 10;
+ */
+ public boolean hasDependency() {
+ return ((bitField0_ & 0x00000200) == 0x00000200);
+ }
+ /**
+ * required string dependency = 10;
+ */
+ public java.lang.String getDependency() {
+ java.lang.Object ref = dependency_;
+ if (ref instanceof java.lang.String) {
+ return (java.lang.String) ref;
+ } else {
+ com.google.protobuf.ByteString bs =
+ (com.google.protobuf.ByteString) ref;
+ java.lang.String s = bs.toStringUtf8();
+ if (bs.isValidUtf8()) {
+ dependency_ = s;
+ }
+ return s;
+ }
+ }
+ /**
+ * required string dependency = 10;
+ */
+ public com.google.protobuf.ByteString
+ getDependencyBytes() {
+ java.lang.Object ref = dependency_;
+ if (ref instanceof java.lang.String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ dependency_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+
+ // required string imageState = 11;
+ public static final int IMAGESTATE_FIELD_NUMBER = 11;
+ private java.lang.Object imageState_;
+ /**
+ * required string imageState = 11;
+ */
+ public boolean hasImageState() {
+ return ((bitField0_ & 0x00000400) == 0x00000400);
+ }
+ /**
+ * required string imageState = 11;
+ */
+ public java.lang.String getImageState() {
+ java.lang.Object ref = imageState_;
+ if (ref instanceof java.lang.String) {
+ return (java.lang.String) ref;
+ } else {
+ com.google.protobuf.ByteString bs =
+ (com.google.protobuf.ByteString) ref;
+ java.lang.String s = bs.toStringUtf8();
+ if (bs.isValidUtf8()) {
+ imageState_ = s;
+ }
+ return s;
+ }
+ }
+ /**
+ * required string imageState = 11;
+ */
+ public com.google.protobuf.ByteString
+ getImageStateBytes() {
+ java.lang.Object ref = imageState_;
+ if (ref instanceof java.lang.String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ imageState_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+
+ // required bool compacted = 12;
+ public static final int COMPACTED_FIELD_NUMBER = 12;
+ private boolean compacted_;
+ /**
+ * required bool compacted = 12;
+ */
+ public boolean hasCompacted() {
+ return ((bitField0_ & 0x00000800) == 0x00000800);
+ }
+ /**
+ * required bool compacted = 12;
+ */
+ public boolean getCompacted() {
+ return compacted_;
+ }
+
+ private void initFields() {
+ version_ = "";
+ token_ = "";
+ type_ = "";
+ tableSet_ = "";
+ startTs_ = 0L;
+ completeTs_ = 0L;
+ tableBytes_ = 0L;
+ logBytes_ = 0L;
+ incrTimerange_ = "";
+ dependency_ = "";
+ imageState_ = "";
+ compacted_ = false;
+ }
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized != -1) return isInitialized == 1;
+
+ if (!hasVersion()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!hasToken()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!hasType()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!hasTableSet()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!hasStartTs()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!hasCompleteTs()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!hasTableBytes()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!hasIncrTimerange()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!hasDependency()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!hasImageState()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!hasCompacted()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ output.writeBytes(1, getVersionBytes());
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ output.writeBytes(2, getTokenBytes());
+ }
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
+ output.writeBytes(3, getTypeBytes());
+ }
+ if (((bitField0_ & 0x00000008) == 0x00000008)) {
+ output.writeBytes(4, getTableSetBytes());
+ }
+ if (((bitField0_ & 0x00000010) == 0x00000010)) {
+ output.writeInt64(5, startTs_);
+ }
+ if (((bitField0_ & 0x00000020) == 0x00000020)) {
+ output.writeInt64(6, completeTs_);
+ }
+ if (((bitField0_ & 0x00000040) == 0x00000040)) {
+ output.writeInt64(7, tableBytes_);
+ }
+ if (((bitField0_ & 0x00000080) == 0x00000080)) {
+ output.writeInt64(8, logBytes_);
+ }
+ if (((bitField0_ & 0x00000100) == 0x00000100)) {
+ output.writeBytes(9, getIncrTimerangeBytes());
+ }
+ if (((bitField0_ & 0x00000200) == 0x00000200)) {
+ output.writeBytes(10, getDependencyBytes());
+ }
+ if (((bitField0_ & 0x00000400) == 0x00000400)) {
+ output.writeBytes(11, getImageStateBytes());
+ }
+ if (((bitField0_ & 0x00000800) == 0x00000800)) {
+ output.writeBool(12, compacted_);
+ }
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBytesSize(1, getVersionBytes());
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBytesSize(2, getTokenBytes());
+ }
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBytesSize(3, getTypeBytes());
+ }
+ if (((bitField0_ & 0x00000008) == 0x00000008)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBytesSize(4, getTableSetBytes());
+ }
+ if (((bitField0_ & 0x00000010) == 0x00000010)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeInt64Size(5, startTs_);
+ }
+ if (((bitField0_ & 0x00000020) == 0x00000020)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeInt64Size(6, completeTs_);
+ }
+ if (((bitField0_ & 0x00000040) == 0x00000040)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeInt64Size(7, tableBytes_);
+ }
+ if (((bitField0_ & 0x00000080) == 0x00000080)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeInt64Size(8, logBytes_);
+ }
+ if (((bitField0_ & 0x00000100) == 0x00000100)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBytesSize(9, getIncrTimerangeBytes());
+ }
+ if (((bitField0_ & 0x00000200) == 0x00000200)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBytesSize(10, getDependencyBytes());
+ }
+ if (((bitField0_ & 0x00000400) == 0x00000400)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBytesSize(11, getImageStateBytes());
+ }
+ if (((bitField0_ & 0x00000800) == 0x00000800)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBoolSize(12, compacted_);
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ protected java.lang.Object writeReplace()
+ throws java.io.ObjectStreamException {
+ return super.writeReplace();
+ }
+
+ public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+
+ public static Builder newBuilder() { return Builder.create(); }
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest prototype) {
+ return newBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ /**
+ * Protobuf type {@code hbase.pb.BackupManifest}
+ */
+ public static final class Builder extends
+ com.google.protobuf.GeneratedMessage.Builder
+ implements org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifestOrBuilder {
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_BackupManifest_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_BackupManifest_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest.class, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest.Builder.class);
+ }
+
+ // Construct using org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest.newBuilder()
+ private Builder() {
+ maybeForceBuilderInitialization();
+ }
+
+ private Builder(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ super(parent);
+ maybeForceBuilderInitialization();
+ }
+ private void maybeForceBuilderInitialization() {
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+ }
+ }
+ private static Builder create() {
+ return new Builder();
+ }
+
+ public Builder clear() {
+ super.clear();
+ version_ = "";
+ bitField0_ = (bitField0_ & ~0x00000001);
+ token_ = "";
+ bitField0_ = (bitField0_ & ~0x00000002);
+ type_ = "";
+ bitField0_ = (bitField0_ & ~0x00000004);
+ tableSet_ = "";
+ bitField0_ = (bitField0_ & ~0x00000008);
+ startTs_ = 0L;
+ bitField0_ = (bitField0_ & ~0x00000010);
+ completeTs_ = 0L;
+ bitField0_ = (bitField0_ & ~0x00000020);
+ tableBytes_ = 0L;
+ bitField0_ = (bitField0_ & ~0x00000040);
+ logBytes_ = 0L;
+ bitField0_ = (bitField0_ & ~0x00000080);
+ incrTimerange_ = "";
+ bitField0_ = (bitField0_ & ~0x00000100);
+ dependency_ = "";
+ bitField0_ = (bitField0_ & ~0x00000200);
+ imageState_ = "";
+ bitField0_ = (bitField0_ & ~0x00000400);
+ compacted_ = false;
+ bitField0_ = (bitField0_ & ~0x00000800);
+ return this;
+ }
+
+ public Builder clone() {
+ return create().mergeFrom(buildPartial());
+ }
+
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_BackupManifest_descriptor;
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest getDefaultInstanceForType() {
+ return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest.getDefaultInstance();
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest build() {
+ org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return result;
+ }
+
+ // Builds the message without enforcing required fields: each builder
+ // has-bit (from_bitField0_) is copied one-for-one into the message's
+ // bitField0_, and field values are copied unconditionally (unset fields
+ // just carry their defaults).
+ public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest buildPartial() {
+ org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest result = new org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest(this);
+ int from_bitField0_ = bitField0_;
+ int to_bitField0_ = 0;
+ if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+ to_bitField0_ |= 0x00000001;
+ }
+ result.version_ = version_;
+ if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
+ to_bitField0_ |= 0x00000002;
+ }
+ result.token_ = token_;
+ if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
+ to_bitField0_ |= 0x00000004;
+ }
+ result.type_ = type_;
+ if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
+ to_bitField0_ |= 0x00000008;
+ }
+ result.tableSet_ = tableSet_;
+ if (((from_bitField0_ & 0x00000010) == 0x00000010)) {
+ to_bitField0_ |= 0x00000010;
+ }
+ result.startTs_ = startTs_;
+ if (((from_bitField0_ & 0x00000020) == 0x00000020)) {
+ to_bitField0_ |= 0x00000020;
+ }
+ result.completeTs_ = completeTs_;
+ if (((from_bitField0_ & 0x00000040) == 0x00000040)) {
+ to_bitField0_ |= 0x00000040;
+ }
+ result.tableBytes_ = tableBytes_;
+ if (((from_bitField0_ & 0x00000080) == 0x00000080)) {
+ to_bitField0_ |= 0x00000080;
+ }
+ result.logBytes_ = logBytes_;
+ if (((from_bitField0_ & 0x00000100) == 0x00000100)) {
+ to_bitField0_ |= 0x00000100;
+ }
+ result.incrTimerange_ = incrTimerange_;
+ if (((from_bitField0_ & 0x00000200) == 0x00000200)) {
+ to_bitField0_ |= 0x00000200;
+ }
+ result.dependency_ = dependency_;
+ if (((from_bitField0_ & 0x00000400) == 0x00000400)) {
+ to_bitField0_ |= 0x00000400;
+ }
+ result.imageState_ = imageState_;
+ if (((from_bitField0_ & 0x00000800) == 0x00000800)) {
+ to_bitField0_ |= 0x00000800;
+ }
+ result.compacted_ = compacted_;
+ result.bitField0_ = to_bitField0_;
+ onBuilt();
+ return result;
+ }
+
+ // Generic dispatch: delegates to the typed overload when 'other' is a
+ // BackupManifest, otherwise falls back to reflective field-by-field merge.
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest) {
+ return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ // Typed merge: for each field set on 'other', overwrite this builder's
+ // value and set the corresponding has-bit. String-typed fields are copied
+ // by reference (the Object slot may hold a String or a lazily-decoded
+ // ByteString); scalar fields go through the public setters.
+ public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest other) {
+ if (other == org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest.getDefaultInstance()) return this;
+ if (other.hasVersion()) {
+ bitField0_ |= 0x00000001;
+ version_ = other.version_;
+ onChanged();
+ }
+ if (other.hasToken()) {
+ bitField0_ |= 0x00000002;
+ token_ = other.token_;
+ onChanged();
+ }
+ if (other.hasType()) {
+ bitField0_ |= 0x00000004;
+ type_ = other.type_;
+ onChanged();
+ }
+ if (other.hasTableSet()) {
+ bitField0_ |= 0x00000008;
+ tableSet_ = other.tableSet_;
+ onChanged();
+ }
+ if (other.hasStartTs()) {
+ setStartTs(other.getStartTs());
+ }
+ if (other.hasCompleteTs()) {
+ setCompleteTs(other.getCompleteTs());
+ }
+ if (other.hasTableBytes()) {
+ setTableBytes(other.getTableBytes());
+ }
+ if (other.hasLogBytes()) {
+ setLogBytes(other.getLogBytes());
+ }
+ if (other.hasIncrTimerange()) {
+ bitField0_ |= 0x00000100;
+ incrTimerange_ = other.incrTimerange_;
+ onChanged();
+ }
+ if (other.hasDependency()) {
+ bitField0_ |= 0x00000200;
+ dependency_ = other.dependency_;
+ onChanged();
+ }
+ if (other.hasImageState()) {
+ bitField0_ |= 0x00000400;
+ imageState_ = other.imageState_;
+ onChanged();
+ }
+ if (other.hasCompacted()) {
+ setCompacted(other.getCompacted());
+ }
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ // True only when every required field has been set. Field 8 (logBytes)
+ // is optional in the .proto and is deliberately not checked here.
+ public final boolean isInitialized() {
+ if (!hasVersion()) {
+
+ return false;
+ }
+ if (!hasToken()) {
+
+ return false;
+ }
+ if (!hasType()) {
+
+ return false;
+ }
+ if (!hasTableSet()) {
+
+ return false;
+ }
+ if (!hasStartTs()) {
+
+ return false;
+ }
+ if (!hasCompleteTs()) {
+
+ return false;
+ }
+ if (!hasTableBytes()) {
+
+ return false;
+ }
+ if (!hasIncrTimerange()) {
+
+ return false;
+ }
+ if (!hasDependency()) {
+
+ return false;
+ }
+ if (!hasImageState()) {
+
+ return false;
+ }
+ if (!hasCompacted()) {
+
+ return false;
+ }
+ return true;
+ }
+
+ // Wire-format merge. On a parse failure the partially-parsed message is
+ // still merged in (finally block) before the exception propagates, so the
+ // builder keeps whatever fields were read before the error.
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest parsedMessage = null;
+ try {
+ parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest) e.getUnfinishedMessage();
+ throw e;
+ } finally {
+ if (parsedMessage != null) {
+ mergeFrom(parsedMessage);
+ }
+ }
+ return this;
+ }
+ // Has-bits for the builder's twelve fields (bit n-1 <=> field n is set).
+ private int bitField0_;
+
+ // Builder state + accessors for fields 1-4. Each string field is stored
+ // as an Object that may hold either a String or a utf8 ByteString; the
+ // getter decodes and caches the String form lazily.
+ // required string version = 1;
+ private java.lang.Object version_ = "";
+ /**
+ * required string version = 1;
+ */
+ public boolean hasVersion() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * required string version = 1;
+ */
+ public java.lang.String getVersion() {
+ java.lang.Object ref = version_;
+ if (!(ref instanceof java.lang.String)) {
+ java.lang.String s = ((com.google.protobuf.ByteString) ref)
+ .toStringUtf8();
+ version_ = s;
+ return s;
+ } else {
+ return (java.lang.String) ref;
+ }
+ }
+ /**
+ * required string version = 1;
+ */
+ public com.google.protobuf.ByteString
+ getVersionBytes() {
+ java.lang.Object ref = version_;
+ if (ref instanceof String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ version_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+ /**
+ * required string version = 1;
+ */
+ public Builder setVersion(
+ java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000001;
+ version_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * required string version = 1;
+ */
+ public Builder clearVersion() {
+ bitField0_ = (bitField0_ & ~0x00000001);
+ version_ = getDefaultInstance().getVersion();
+ onChanged();
+ return this;
+ }
+ /**
+ * required string version = 1;
+ */
+ public Builder setVersionBytes(
+ com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000001;
+ version_ = value;
+ onChanged();
+ return this;
+ }
+
+ // required string token = 2;
+ private java.lang.Object token_ = "";
+ /**
+ * required string token = 2;
+ */
+ public boolean hasToken() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ /**
+ * required string token = 2;
+ */
+ public java.lang.String getToken() {
+ java.lang.Object ref = token_;
+ if (!(ref instanceof java.lang.String)) {
+ java.lang.String s = ((com.google.protobuf.ByteString) ref)
+ .toStringUtf8();
+ token_ = s;
+ return s;
+ } else {
+ return (java.lang.String) ref;
+ }
+ }
+ /**
+ * required string token = 2;
+ */
+ public com.google.protobuf.ByteString
+ getTokenBytes() {
+ java.lang.Object ref = token_;
+ if (ref instanceof String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ token_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+ /**
+ * required string token = 2;
+ */
+ public Builder setToken(
+ java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000002;
+ token_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * required string token = 2;
+ */
+ public Builder clearToken() {
+ bitField0_ = (bitField0_ & ~0x00000002);
+ token_ = getDefaultInstance().getToken();
+ onChanged();
+ return this;
+ }
+ /**
+ * required string token = 2;
+ */
+ public Builder setTokenBytes(
+ com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000002;
+ token_ = value;
+ onChanged();
+ return this;
+ }
+
+ // required string type = 3;
+ private java.lang.Object type_ = "";
+ /**
+ * required string type = 3;
+ */
+ public boolean hasType() {
+ return ((bitField0_ & 0x00000004) == 0x00000004);
+ }
+ /**
+ * required string type = 3;
+ */
+ public java.lang.String getType() {
+ java.lang.Object ref = type_;
+ if (!(ref instanceof java.lang.String)) {
+ java.lang.String s = ((com.google.protobuf.ByteString) ref)
+ .toStringUtf8();
+ type_ = s;
+ return s;
+ } else {
+ return (java.lang.String) ref;
+ }
+ }
+ /**
+ * required string type = 3;
+ */
+ public com.google.protobuf.ByteString
+ getTypeBytes() {
+ java.lang.Object ref = type_;
+ if (ref instanceof String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ type_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+ /**
+ * required string type = 3;
+ */
+ public Builder setType(
+ java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000004;
+ type_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * required string type = 3;
+ */
+ public Builder clearType() {
+ bitField0_ = (bitField0_ & ~0x00000004);
+ type_ = getDefaultInstance().getType();
+ onChanged();
+ return this;
+ }
+ /**
+ * required string type = 3;
+ */
+ public Builder setTypeBytes(
+ com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000004;
+ type_ = value;
+ onChanged();
+ return this;
+ }
+
+ // required string tableSet = 4;
+ private java.lang.Object tableSet_ = "";
+ /**
+ * required string tableSet = 4;
+ */
+ public boolean hasTableSet() {
+ return ((bitField0_ & 0x00000008) == 0x00000008);
+ }
+ /**
+ * required string tableSet = 4;
+ */
+ public java.lang.String getTableSet() {
+ java.lang.Object ref = tableSet_;
+ if (!(ref instanceof java.lang.String)) {
+ java.lang.String s = ((com.google.protobuf.ByteString) ref)
+ .toStringUtf8();
+ tableSet_ = s;
+ return s;
+ } else {
+ return (java.lang.String) ref;
+ }
+ }
+ /**
+ * required string tableSet = 4;
+ */
+ public com.google.protobuf.ByteString
+ getTableSetBytes() {
+ java.lang.Object ref = tableSet_;
+ if (ref instanceof String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ tableSet_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+ /**
+ * required string tableSet = 4;
+ */
+ public Builder setTableSet(
+ java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000008;
+ tableSet_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * required string tableSet = 4;
+ */
+ public Builder clearTableSet() {
+ bitField0_ = (bitField0_ & ~0x00000008);
+ tableSet_ = getDefaultInstance().getTableSet();
+ onChanged();
+ return this;
+ }
+ /**
+ * required string tableSet = 4;
+ */
+ public Builder setTableSetBytes(
+ com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000008;
+ tableSet_ = value;
+ onChanged();
+ return this;
+ }
+
+ // Builder state + accessors for scalar fields 5-8. clearXxx() resets the
+ // value to the proto default (0L) and drops the has-bit.
+ // required int64 startTs = 5;
+ private long startTs_ ;
+ /**
+ * required int64 startTs = 5;
+ */
+ public boolean hasStartTs() {
+ return ((bitField0_ & 0x00000010) == 0x00000010);
+ }
+ /**
+ * required int64 startTs = 5;
+ */
+ public long getStartTs() {
+ return startTs_;
+ }
+ /**
+ * required int64 startTs = 5;
+ */
+ public Builder setStartTs(long value) {
+ bitField0_ |= 0x00000010;
+ startTs_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * required int64 startTs = 5;
+ */
+ public Builder clearStartTs() {
+ bitField0_ = (bitField0_ & ~0x00000010);
+ startTs_ = 0L;
+ onChanged();
+ return this;
+ }
+
+ // required int64 completeTs = 6;
+ private long completeTs_ ;
+ /**
+ * required int64 completeTs = 6;
+ */
+ public boolean hasCompleteTs() {
+ return ((bitField0_ & 0x00000020) == 0x00000020);
+ }
+ /**
+ * required int64 completeTs = 6;
+ */
+ public long getCompleteTs() {
+ return completeTs_;
+ }
+ /**
+ * required int64 completeTs = 6;
+ */
+ public Builder setCompleteTs(long value) {
+ bitField0_ |= 0x00000020;
+ completeTs_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * required int64 completeTs = 6;
+ */
+ public Builder clearCompleteTs() {
+ bitField0_ = (bitField0_ & ~0x00000020);
+ completeTs_ = 0L;
+ onChanged();
+ return this;
+ }
+
+ // required int64 tableBytes = 7;
+ private long tableBytes_ ;
+ /**
+ * required int64 tableBytes = 7;
+ */
+ public boolean hasTableBytes() {
+ return ((bitField0_ & 0x00000040) == 0x00000040);
+ }
+ /**
+ * required int64 tableBytes = 7;
+ */
+ public long getTableBytes() {
+ return tableBytes_;
+ }
+ /**
+ * required int64 tableBytes = 7;
+ */
+ public Builder setTableBytes(long value) {
+ bitField0_ |= 0x00000040;
+ tableBytes_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * required int64 tableBytes = 7;
+ */
+ public Builder clearTableBytes() {
+ bitField0_ = (bitField0_ & ~0x00000040);
+ tableBytes_ = 0L;
+ onChanged();
+ return this;
+ }
+
+ // optional int64 logBytes = 8; (the only optional field in this message)
+ private long logBytes_ ;
+ /**
+ * optional int64 logBytes = 8;
+ */
+ public boolean hasLogBytes() {
+ return ((bitField0_ & 0x00000080) == 0x00000080);
+ }
+ /**
+ * optional int64 logBytes = 8;
+ */
+ public long getLogBytes() {
+ return logBytes_;
+ }
+ /**
+ * optional int64 logBytes = 8;
+ */
+ public Builder setLogBytes(long value) {
+ bitField0_ |= 0x00000080;
+ logBytes_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * optional int64 logBytes = 8;
+ */
+ public Builder clearLogBytes() {
+ bitField0_ = (bitField0_ & ~0x00000080);
+ logBytes_ = 0L;
+ onChanged();
+ return this;
+ }
+
+ // Builder state + accessors for fields 9-12; same lazy String/ByteString
+ // caching pattern as fields 1-4.
+ // required string incrTimerange = 9;
+ private java.lang.Object incrTimerange_ = "";
+ /**
+ * required string incrTimerange = 9;
+ */
+ public boolean hasIncrTimerange() {
+ return ((bitField0_ & 0x00000100) == 0x00000100);
+ }
+ /**
+ * required string incrTimerange = 9;
+ */
+ public java.lang.String getIncrTimerange() {
+ java.lang.Object ref = incrTimerange_;
+ if (!(ref instanceof java.lang.String)) {
+ java.lang.String s = ((com.google.protobuf.ByteString) ref)
+ .toStringUtf8();
+ incrTimerange_ = s;
+ return s;
+ } else {
+ return (java.lang.String) ref;
+ }
+ }
+ /**
+ * required string incrTimerange = 9;
+ */
+ public com.google.protobuf.ByteString
+ getIncrTimerangeBytes() {
+ java.lang.Object ref = incrTimerange_;
+ if (ref instanceof String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ incrTimerange_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+ /**
+ * required string incrTimerange = 9;
+ */
+ public Builder setIncrTimerange(
+ java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000100;
+ incrTimerange_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * required string incrTimerange = 9;
+ */
+ public Builder clearIncrTimerange() {
+ bitField0_ = (bitField0_ & ~0x00000100);
+ incrTimerange_ = getDefaultInstance().getIncrTimerange();
+ onChanged();
+ return this;
+ }
+ /**
+ * required string incrTimerange = 9;
+ */
+ public Builder setIncrTimerangeBytes(
+ com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000100;
+ incrTimerange_ = value;
+ onChanged();
+ return this;
+ }
+
+ // required string dependency = 10;
+ private java.lang.Object dependency_ = "";
+ /**
+ * required string dependency = 10;
+ */
+ public boolean hasDependency() {
+ return ((bitField0_ & 0x00000200) == 0x00000200);
+ }
+ /**
+ * required string dependency = 10;
+ */
+ public java.lang.String getDependency() {
+ java.lang.Object ref = dependency_;
+ if (!(ref instanceof java.lang.String)) {
+ java.lang.String s = ((com.google.protobuf.ByteString) ref)
+ .toStringUtf8();
+ dependency_ = s;
+ return s;
+ } else {
+ return (java.lang.String) ref;
+ }
+ }
+ /**
+ * required string dependency = 10;
+ */
+ public com.google.protobuf.ByteString
+ getDependencyBytes() {
+ java.lang.Object ref = dependency_;
+ if (ref instanceof String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ dependency_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+ /**
+ * required string dependency = 10;
+ */
+ public Builder setDependency(
+ java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000200;
+ dependency_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * required string dependency = 10;
+ */
+ public Builder clearDependency() {
+ bitField0_ = (bitField0_ & ~0x00000200);
+ dependency_ = getDefaultInstance().getDependency();
+ onChanged();
+ return this;
+ }
+ /**
+ * required string dependency = 10;
+ */
+ public Builder setDependencyBytes(
+ com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000200;
+ dependency_ = value;
+ onChanged();
+ return this;
+ }
+
+ // required string imageState = 11;
+ private java.lang.Object imageState_ = "";
+ /**
+ * required string imageState = 11;
+ */
+ public boolean hasImageState() {
+ return ((bitField0_ & 0x00000400) == 0x00000400);
+ }
+ /**
+ * required string imageState = 11;
+ */
+ public java.lang.String getImageState() {
+ java.lang.Object ref = imageState_;
+ if (!(ref instanceof java.lang.String)) {
+ java.lang.String s = ((com.google.protobuf.ByteString) ref)
+ .toStringUtf8();
+ imageState_ = s;
+ return s;
+ } else {
+ return (java.lang.String) ref;
+ }
+ }
+ /**
+ * required string imageState = 11;
+ */
+ public com.google.protobuf.ByteString
+ getImageStateBytes() {
+ java.lang.Object ref = imageState_;
+ if (ref instanceof String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ imageState_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+ /**
+ * required string imageState = 11;
+ */
+ public Builder setImageState(
+ java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000400;
+ imageState_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * required string imageState = 11;
+ */
+ public Builder clearImageState() {
+ bitField0_ = (bitField0_ & ~0x00000400);
+ imageState_ = getDefaultInstance().getImageState();
+ onChanged();
+ return this;
+ }
+ /**
+ * required string imageState = 11;
+ */
+ public Builder setImageStateBytes(
+ com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000400;
+ imageState_ = value;
+ onChanged();
+ return this;
+ }
+
+ // required bool compacted = 12;
+ private boolean compacted_ ;
+ /**
+ * required bool compacted = 12;
+ */
+ public boolean hasCompacted() {
+ return ((bitField0_ & 0x00000800) == 0x00000800);
+ }
+ /**
+ * required bool compacted = 12;
+ */
+ public boolean getCompacted() {
+ return compacted_;
+ }
+ /**
+ * required bool compacted = 12;
+ */
+ public Builder setCompacted(boolean value) {
+ bitField0_ |= 0x00000800;
+ compacted_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * required bool compacted = 12;
+ */
+ public Builder clearCompacted() {
+ bitField0_ = (bitField0_ & ~0x00000800);
+ compacted_ = false;
+ onChanged();
+ return this;
+ }
+
+ // @@protoc_insertion_point(builder_scope:hbase.pb.BackupManifest)
+ }
+
+ // Eagerly create the shared default instance (noInit=true skips the
+ // descriptor-backed constructor) and seed its field defaults.
+ static {
+ defaultInstance = new BackupManifest(true);
+ defaultInstance.initFields();
+ }
+
+ // @@protoc_insertion_point(class_scope:hbase.pb.BackupManifest)
+ }
+
+ // Read-only view of a BackupStatus message or its builder: has/get pairs
+ // for table (1, required), targetDir (2, required) and snapshot (3, optional),
+ // each with a raw-bytes variant.
+ public interface BackupStatusOrBuilder
+ extends com.google.protobuf.MessageOrBuilder {
+
+ // required string table = 1;
+ /**
+ * required string table = 1;
+ */
+ boolean hasTable();
+ /**
+ * required string table = 1;
+ */
+ java.lang.String getTable();
+ /**
+ * required string table = 1;
+ */
+ com.google.protobuf.ByteString
+ getTableBytes();
+
+ // required string targetDir = 2;
+ /**
+ * required string targetDir = 2;
+ */
+ boolean hasTargetDir();
+ /**
+ * required string targetDir = 2;
+ */
+ java.lang.String getTargetDir();
+ /**
+ * required string targetDir = 2;
+ */
+ com.google.protobuf.ByteString
+ getTargetDirBytes();
+
+ // optional string snapshot = 3;
+ /**
+ * optional string snapshot = 3;
+ */
+ boolean hasSnapshot();
+ /**
+ * optional string snapshot = 3;
+ */
+ java.lang.String getSnapshot();
+ /**
+ * optional string snapshot = 3;
+ */
+ com.google.protobuf.ByteString
+ getSnapshotBytes();
+ }
+ /**
+ * Protobuf type {@code hbase.pb.BackupStatus}
+ */
+ public static final class BackupStatus extends
+ com.google.protobuf.GeneratedMessage
+ implements BackupStatusOrBuilder {
+ // Use BackupStatus.newBuilder() to construct.
+ // NOTE(review): the wildcard type argument below was lost when this patch
+ // was extracted ('<?' stripped as markup, leaving the invalid
+ // 'GeneratedMessage.Builder> builder'); restored to the form protoc emits.
+ private BackupStatus(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+ super(builder);
+ this.unknownFields = builder.getUnknownFields();
+ }
+ // Lightweight constructor for the shared default instance: no parsing,
+ // empty unknown-field set.
+ private BackupStatus(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+ // Shared singleton default instance, created in the class's static block.
+ private static final BackupStatus defaultInstance;
+ public static BackupStatus getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public BackupStatus getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ // Fields received on the wire that this schema does not know about.
+ private final com.google.protobuf.UnknownFieldSet unknownFields;
+ @java.lang.Override
+ public final com.google.protobuf.UnknownFieldSet
+ getUnknownFields() {
+ return this.unknownFields;
+ }
+ // Wire-format parsing constructor. Reads tags until EOF (tag 0) or an
+ // unparseable unknown field. Note the 'default:' arm precedes the field
+ // cases; this is the ordering protoc emits and is harmless in Java, since
+ // 'default' only matches when no other case label does.
+ private BackupStatus(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ initFields();
+ int mutable_bitField0_ = 0;
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder();
+ try {
+ boolean done = false;
+ while (!done) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ done = true;
+ break;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ done = true;
+ }
+ break;
+ }
+ case 10: {
+ bitField0_ |= 0x00000001;
+ table_ = input.readBytes();
+ break;
+ }
+ case 18: {
+ bitField0_ |= 0x00000002;
+ targetDir_ = input.readBytes();
+ break;
+ }
+ case 26: {
+ bitField0_ |= 0x00000004;
+ snapshot_ = input.readBytes();
+ break;
+ }
+ }
+ }
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ throw e.setUnfinishedMessage(this);
+ } catch (java.io.IOException e) {
+ throw new com.google.protobuf.InvalidProtocolBufferException(
+ e.getMessage()).setUnfinishedMessage(this);
+ } finally {
+ // Always capture whatever unknown fields were collected, even on error.
+ this.unknownFields = unknownFields.build();
+ makeExtensionsImmutable();
+ }
+ }
+ // Reflection plumbing: descriptor and field-accessor table registered in
+ // the outer BackupProtos class.
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_BackupStatus_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_BackupStatus_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupStatus.class, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupStatus.Builder.class);
+ }
+
+ // Parser singleton delegating to the parsing constructor.
+ // NOTE(review): the '<BackupStatus>' type arguments were lost in patch
+ // extraction (stripped as markup), leaving raw Parser/AbstractParser types;
+ // restored to the generic form protoc emits so the anonymous class's
+ // parsePartialFrom correctly overrides the abstract method.
+ public static com.google.protobuf.Parser<BackupStatus> PARSER =
+ new com.google.protobuf.AbstractParser<BackupStatus>() {
+ public BackupStatus parsePartialFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return new BackupStatus(input, extensionRegistry);
+ }
+ };
+
+ @java.lang.Override
+ public com.google.protobuf.Parser<BackupStatus> getParserForType() {
+ return PARSER;
+ }
+
+ // Message-side field storage and read accessors. Strings are stored as an
+ // Object holding either a String or a ByteString; the getter caches the
+ // decoded String only when the bytes are valid UTF-8.
+ private int bitField0_;
+ // required string table = 1;
+ public static final int TABLE_FIELD_NUMBER = 1;
+ private java.lang.Object table_;
+ /**
+ * required string table = 1;
+ */
+ public boolean hasTable() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * required string table = 1;
+ */
+ public java.lang.String getTable() {
+ java.lang.Object ref = table_;
+ if (ref instanceof java.lang.String) {
+ return (java.lang.String) ref;
+ } else {
+ com.google.protobuf.ByteString bs =
+ (com.google.protobuf.ByteString) ref;
+ java.lang.String s = bs.toStringUtf8();
+ if (bs.isValidUtf8()) {
+ table_ = s;
+ }
+ return s;
+ }
+ }
+ /**
+ * required string table = 1;
+ */
+ public com.google.protobuf.ByteString
+ getTableBytes() {
+ java.lang.Object ref = table_;
+ if (ref instanceof java.lang.String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ table_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+
+ // required string targetDir = 2;
+ public static final int TARGETDIR_FIELD_NUMBER = 2;
+ private java.lang.Object targetDir_;
+ /**
+ * required string targetDir = 2;
+ */
+ public boolean hasTargetDir() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ /**
+ * required string targetDir = 2;
+ */
+ public java.lang.String getTargetDir() {
+ java.lang.Object ref = targetDir_;
+ if (ref instanceof java.lang.String) {
+ return (java.lang.String) ref;
+ } else {
+ com.google.protobuf.ByteString bs =
+ (com.google.protobuf.ByteString) ref;
+ java.lang.String s = bs.toStringUtf8();
+ if (bs.isValidUtf8()) {
+ targetDir_ = s;
+ }
+ return s;
+ }
+ }
+ /**
+ * required string targetDir = 2;
+ */
+ public com.google.protobuf.ByteString
+ getTargetDirBytes() {
+ java.lang.Object ref = targetDir_;
+ if (ref instanceof java.lang.String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ targetDir_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+
+ // optional string snapshot = 3;
+ public static final int SNAPSHOT_FIELD_NUMBER = 3;
+ private java.lang.Object snapshot_;
+ /**
+ * optional string snapshot = 3;
+ */
+ public boolean hasSnapshot() {
+ return ((bitField0_ & 0x00000004) == 0x00000004);
+ }
+ /**
+ * optional string snapshot = 3;
+ */
+ public java.lang.String getSnapshot() {
+ java.lang.Object ref = snapshot_;
+ if (ref instanceof java.lang.String) {
+ return (java.lang.String) ref;
+ } else {
+ com.google.protobuf.ByteString bs =
+ (com.google.protobuf.ByteString) ref;
+ java.lang.String s = bs.toStringUtf8();
+ if (bs.isValidUtf8()) {
+ snapshot_ = s;
+ }
+ return s;
+ }
+ }
+ /**
+ * optional string snapshot = 3;
+ */
+ public com.google.protobuf.ByteString
+ getSnapshotBytes() {
+ java.lang.Object ref = snapshot_;
+ if (ref instanceof java.lang.String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ snapshot_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+
+ // Field defaults used by both constructors.
+ private void initFields() {
+ table_ = "";
+ targetDir_ = "";
+ snapshot_ = "";
+ }
+ // Memoized initialization check: -1 unknown, 0 false, 1 true. Only the
+ // two required fields (table, targetDir) are verified; snapshot is optional.
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized != -1) return isInitialized == 1;
+
+ if (!hasTable()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!hasTargetDir()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ // Serializes only fields whose has-bit is set, then any unknown fields.
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ output.writeBytes(1, getTableBytes());
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ output.writeBytes(2, getTargetDirBytes());
+ }
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
+ output.writeBytes(3, getSnapshotBytes());
+ }
+ getUnknownFields().writeTo(output);
+ }
+
+ // Memoized wire size; mirrors writeTo's field selection exactly.
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBytesSize(1, getTableBytes());
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBytesSize(2, getTargetDirBytes());
+ }
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBytesSize(3, getSnapshotBytes());
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ protected java.lang.Object writeReplace()
+ throws java.io.ObjectStreamException {
+ return super.writeReplace();
+ }
+
+ // Static parse entry points; all delegate to PARSER.
+ public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupStatus parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupStatus parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupStatus parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupStatus parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupStatus parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupStatus parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupStatus parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupStatus parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupStatus parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupStatus parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+
+ // Builder factory methods; toBuilder() seeds a new builder with this
+ // message's fields.
+ public static Builder newBuilder() { return Builder.create(); }
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupStatus prototype) {
+ return newBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ /**
+ * Protobuf type {@code hbase.pb.BackupStatus}
+ */
+ public static final class Builder extends
+ com.google.protobuf.GeneratedMessage.Builder
+ implements org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupStatusOrBuilder {
+ // Reflection plumbing for the builder, mirroring the message's.
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_BackupStatus_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_BackupStatus_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupStatus.class, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupStatus.Builder.class);
+ }
+
+ // Construct using org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupStatus.newBuilder()
+ private Builder() {
+ maybeForceBuilderInitialization();
+ }
+
+ private Builder(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ super(parent);
+ maybeForceBuilderInitialization();
+ }
+ // No-op here: this message has no sub-message fields needing eager
+ // field-builder initialization.
+ private void maybeForceBuilderInitialization() {
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+ }
+ }
+ private static Builder create() {
+ return new Builder();
+ }
+
+ // Resets all three fields to their defaults and clears their has-bits.
+ public Builder clear() {
+ super.clear();
+ table_ = "";
+ bitField0_ = (bitField0_ & ~0x00000001);
+ targetDir_ = "";
+ bitField0_ = (bitField0_ & ~0x00000002);
+ snapshot_ = "";
+ bitField0_ = (bitField0_ & ~0x00000004);
+ return this;
+ }
+
+ public Builder clone() {
+ return create().mergeFrom(buildPartial());
+ }
+
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_BackupStatus_descriptor;
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupStatus getDefaultInstanceForType() {
+ return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupStatus.getDefaultInstance();
+ }
+
+ // build() enforces required fields; buildPartial() below does not.
+ public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupStatus build() {
+ org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupStatus result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return result;
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupStatus buildPartial() {
+ org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupStatus result = new org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupStatus(this);
+ int from_bitField0_ = bitField0_;
+ int to_bitField0_ = 0;
+ if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+ to_bitField0_ |= 0x00000001;
+ }
+ result.table_ = table_;
+ if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
+ to_bitField0_ |= 0x00000002;
+ }
+ result.targetDir_ = targetDir_;
+ if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
+ to_bitField0_ |= 0x00000004;
+ }
+ result.snapshot_ = snapshot_;
+ result.bitField0_ = to_bitField0_;
+ onBuilt();
+ return result;
+ }
+
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupStatus) {
+ return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupStatus)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupStatus other) {
+ if (other == org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupStatus.getDefaultInstance()) return this;
+ if (other.hasTable()) {
+ bitField0_ |= 0x00000001;
+ table_ = other.table_;
+ onChanged();
+ }
+ if (other.hasTargetDir()) {
+ bitField0_ |= 0x00000002;
+ targetDir_ = other.targetDir_;
+ onChanged();
+ }
+ if (other.hasSnapshot()) {
+ bitField0_ |= 0x00000004;
+ snapshot_ = other.snapshot_;
+ onChanged();
+ }
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ public final boolean isInitialized() {
+ if (!hasTable()) {
+
+ return false;
+ }
+ if (!hasTargetDir()) {
+
+ return false;
+ }
+ return true;
+ }
+
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupStatus parsedMessage = null;
+ try {
+ parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupStatus) e.getUnfinishedMessage();
+ throw e;
+ } finally {
+ if (parsedMessage != null) {
+ mergeFrom(parsedMessage);
+ }
+ }
+ return this;
+ }
+ private int bitField0_;
+
+ // required string table = 1;
+ private java.lang.Object table_ = "";
+ /**
+ * required string table = 1;
+ */
+ public boolean hasTable() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * required string table = 1;
+ */
+ public java.lang.String getTable() {
+ java.lang.Object ref = table_;
+ if (!(ref instanceof java.lang.String)) {
+ java.lang.String s = ((com.google.protobuf.ByteString) ref)
+ .toStringUtf8();
+ table_ = s;
+ return s;
+ } else {
+ return (java.lang.String) ref;
+ }
+ }
+ /**
+ * required string table = 1;
+ */
+ public com.google.protobuf.ByteString
+ getTableBytes() {
+ java.lang.Object ref = table_;
+ if (ref instanceof String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ table_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+ /**
+ * required string table = 1;
+ */
+ public Builder setTable(
+ java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000001;
+ table_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * required string table = 1;
+ */
+ public Builder clearTable() {
+ bitField0_ = (bitField0_ & ~0x00000001);
+ table_ = getDefaultInstance().getTable();
+ onChanged();
+ return this;
+ }
+ /**
+ * required string table = 1;
+ */
+ public Builder setTableBytes(
+ com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000001;
+ table_ = value;
+ onChanged();
+ return this;
+ }
+
+ // required string targetDir = 2;
+ private java.lang.Object targetDir_ = "";
+ /**
+ * required string targetDir = 2;
+ */
+ public boolean hasTargetDir() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ /**
+ * required string targetDir = 2;
+ */
+ public java.lang.String getTargetDir() {
+ java.lang.Object ref = targetDir_;
+ if (!(ref instanceof java.lang.String)) {
+ java.lang.String s = ((com.google.protobuf.ByteString) ref)
+ .toStringUtf8();
+ targetDir_ = s;
+ return s;
+ } else {
+ return (java.lang.String) ref;
+ }
+ }
+ /**
+ * required string targetDir = 2;
+ */
+ public com.google.protobuf.ByteString
+ getTargetDirBytes() {
+ java.lang.Object ref = targetDir_;
+ if (ref instanceof String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ targetDir_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+ /**
+ * required string targetDir = 2;
+ */
+ public Builder setTargetDir(
+ java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000002;
+ targetDir_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * required string targetDir = 2;
+ */
+ public Builder clearTargetDir() {
+ bitField0_ = (bitField0_ & ~0x00000002);
+ targetDir_ = getDefaultInstance().getTargetDir();
+ onChanged();
+ return this;
+ }
+ /**
+ * required string targetDir = 2;
+ */
+ public Builder setTargetDirBytes(
+ com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000002;
+ targetDir_ = value;
+ onChanged();
+ return this;
+ }
+
+ // optional string snapshot = 3;
+ private java.lang.Object snapshot_ = "";
+ /**
+ * optional string snapshot = 3;
+ */
+ public boolean hasSnapshot() {
+ return ((bitField0_ & 0x00000004) == 0x00000004);
+ }
+ /**
+ * optional string snapshot = 3;
+ */
+ public java.lang.String getSnapshot() {
+ java.lang.Object ref = snapshot_;
+ if (!(ref instanceof java.lang.String)) {
+ java.lang.String s = ((com.google.protobuf.ByteString) ref)
+ .toStringUtf8();
+ snapshot_ = s;
+ return s;
+ } else {
+ return (java.lang.String) ref;
+ }
+ }
+ /**
+ * optional string snapshot = 3;
+ */
+ public com.google.protobuf.ByteString
+ getSnapshotBytes() {
+ java.lang.Object ref = snapshot_;
+ if (ref instanceof String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ snapshot_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+ /**
+ * optional string snapshot = 3;
+ */
+ public Builder setSnapshot(
+ java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000004;
+ snapshot_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * optional string snapshot = 3;
+ */
+ public Builder clearSnapshot() {
+ bitField0_ = (bitField0_ & ~0x00000004);
+ snapshot_ = getDefaultInstance().getSnapshot();
+ onChanged();
+ return this;
+ }
+ /**
+ * optional string snapshot = 3;
+ */
+ public Builder setSnapshotBytes(
+ com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000004;
+ snapshot_ = value;
+ onChanged();
+ return this;
+ }
+
+ // @@protoc_insertion_point(builder_scope:hbase.pb.BackupStatus)
+ }
+
+ // Static initializer: build and initialize the shared immutable default
+ // instance once at class-load time (standard protoc-generated singleton).
+ static {
+ defaultInstance = new BackupStatus(true);
+ defaultInstance.initFields();
+ }
+
+ // @@protoc_insertion_point(class_scope:hbase.pb.BackupStatus)
+ }
+
+ // NOTE(review): protoc-generated accessor interface for TableBackupStatus --
+ // implemented by both the message class and its Builder. Do not hand-edit;
+ // regenerate from the .proto file.
+ public interface TableBackupStatusOrBuilder
+ extends com.google.protobuf.MessageOrBuilder {
+
+ // required string tableName = 1;
+ /**
+ * required string tableName = 1;
+ */
+ boolean hasTableName();
+ /**
+ * required string tableName = 1;
+ */
+ java.lang.String getTableName();
+ /**
+ * required string tableName = 1;
+ */
+ com.google.protobuf.ByteString
+ getTableNameBytes();
+
+ // required .hbase.pb.BackupStatus backupStatus = 2;
+ /**
+ * required .hbase.pb.BackupStatus backupStatus = 2;
+ */
+ boolean hasBackupStatus();
+ /**
+ * required .hbase.pb.BackupStatus backupStatus = 2;
+ */
+ org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupStatus getBackupStatus();
+ /**
+ * required .hbase.pb.BackupStatus backupStatus = 2;
+ */
+ org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupStatusOrBuilder getBackupStatusOrBuilder();
+ }
+ // NOTE(review): protoc-generated message class -- do not hand-edit;
+ // regenerate from the .proto file.
+ // NOTE(review): several generic type arguments appear stripped in this patch
+ // (e.g. "GeneratedMessage.Builder>" in the private constructor, the raw
+ // "com.google.protobuf.Parser PARSER" field that protoc emits as
+ // Parser<TableBackupStatus>, and the Builder superclass missing <Builder>);
+ // presumably mangled in transit -- verify against the original protoc
+ // output before applying, as this copy will not compile as shown.
+ /**
+ * Protobuf type {@code hbase.pb.TableBackupStatus}
+ */
+ public static final class TableBackupStatus extends
+ com.google.protobuf.GeneratedMessage
+ implements TableBackupStatusOrBuilder {
+ // Use TableBackupStatus.newBuilder() to construct.
+ private TableBackupStatus(com.google.protobuf.GeneratedMessage.Builder> builder) {
+ super(builder);
+ this.unknownFields = builder.getUnknownFields();
+ }
+ private TableBackupStatus(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+ private static final TableBackupStatus defaultInstance;
+ public static TableBackupStatus getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public TableBackupStatus getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ private final com.google.protobuf.UnknownFieldSet unknownFields;
+ @java.lang.Override
+ public final com.google.protobuf.UnknownFieldSet
+ getUnknownFields() {
+ return this.unknownFields;
+ }
+ // Wire-format parsing constructor: reads tag/value pairs until EOF (tag 0),
+ // preserving unrecognized fields in unknownFields.
+ private TableBackupStatus(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ initFields();
+ int mutable_bitField0_ = 0;
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder();
+ try {
+ boolean done = false;
+ while (!done) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ done = true;
+ break;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ done = true;
+ }
+ break;
+ }
+ case 10: {
+ bitField0_ |= 0x00000001;
+ tableName_ = input.readBytes();
+ break;
+ }
+ case 18: {
+ org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupStatus.Builder subBuilder = null;
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ subBuilder = backupStatus_.toBuilder();
+ }
+ backupStatus_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupStatus.PARSER, extensionRegistry);
+ if (subBuilder != null) {
+ subBuilder.mergeFrom(backupStatus_);
+ backupStatus_ = subBuilder.buildPartial();
+ }
+ bitField0_ |= 0x00000002;
+ break;
+ }
+ }
+ }
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ throw e.setUnfinishedMessage(this);
+ } catch (java.io.IOException e) {
+ throw new com.google.protobuf.InvalidProtocolBufferException(
+ e.getMessage()).setUnfinishedMessage(this);
+ } finally {
+ this.unknownFields = unknownFields.build();
+ makeExtensionsImmutable();
+ }
+ }
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_TableBackupStatus_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_TableBackupStatus_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus.class, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus.Builder.class);
+ }
+
+ // NOTE(review): raw Parser / AbstractParser here -- protoc emits
+ // Parser<TableBackupStatus>; the type argument was presumably stripped
+ // in transit.
+ public static com.google.protobuf.Parser PARSER =
+ new com.google.protobuf.AbstractParser() {
+ public TableBackupStatus parsePartialFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return new TableBackupStatus(input, extensionRegistry);
+ }
+ };
+
+ @java.lang.Override
+ public com.google.protobuf.Parser getParserForType() {
+ return PARSER;
+ }
+
+ private int bitField0_;
+ // required string tableName = 1;
+ public static final int TABLENAME_FIELD_NUMBER = 1;
+ private java.lang.Object tableName_;
+ /**
+ * required string tableName = 1;
+ */
+ public boolean hasTableName() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * required string tableName = 1;
+ */
+ public java.lang.String getTableName() {
+ java.lang.Object ref = tableName_;
+ if (ref instanceof java.lang.String) {
+ return (java.lang.String) ref;
+ } else {
+ com.google.protobuf.ByteString bs =
+ (com.google.protobuf.ByteString) ref;
+ java.lang.String s = bs.toStringUtf8();
+ if (bs.isValidUtf8()) {
+ tableName_ = s;
+ }
+ return s;
+ }
+ }
+ /**
+ * required string tableName = 1;
+ */
+ public com.google.protobuf.ByteString
+ getTableNameBytes() {
+ java.lang.Object ref = tableName_;
+ if (ref instanceof java.lang.String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ tableName_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+
+ // required .hbase.pb.BackupStatus backupStatus = 2;
+ public static final int BACKUPSTATUS_FIELD_NUMBER = 2;
+ private org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupStatus backupStatus_;
+ /**
+ * required .hbase.pb.BackupStatus backupStatus = 2;
+ */
+ public boolean hasBackupStatus() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ /**
+ * required .hbase.pb.BackupStatus backupStatus = 2;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupStatus getBackupStatus() {
+ return backupStatus_;
+ }
+ /**
+ * required .hbase.pb.BackupStatus backupStatus = 2;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupStatusOrBuilder getBackupStatusOrBuilder() {
+ return backupStatus_;
+ }
+
+ private void initFields() {
+ tableName_ = "";
+ backupStatus_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupStatus.getDefaultInstance();
+ }
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized != -1) return isInitialized == 1;
+
+ if (!hasTableName()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!hasBackupStatus()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!getBackupStatus().isInitialized()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ output.writeBytes(1, getTableNameBytes());
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ output.writeMessage(2, backupStatus_);
+ }
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBytesSize(1, getTableNameBytes());
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeMessageSize(2, backupStatus_);
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ protected java.lang.Object writeReplace()
+ throws java.io.ObjectStreamException {
+ return super.writeReplace();
+ }
+
+ public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+
+ public static Builder newBuilder() { return Builder.create(); }
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus prototype) {
+ return newBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ /**
+ * Protobuf type {@code hbase.pb.TableBackupStatus}
+ */
+ public static final class Builder extends
+ com.google.protobuf.GeneratedMessage.Builder
+ implements org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatusOrBuilder {
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_TableBackupStatus_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_TableBackupStatus_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus.class, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus.Builder.class);
+ }
+
+ // Construct using org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus.newBuilder()
+ private Builder() {
+ maybeForceBuilderInitialization();
+ }
+
+ private Builder(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ super(parent);
+ maybeForceBuilderInitialization();
+ }
+ private void maybeForceBuilderInitialization() {
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+ getBackupStatusFieldBuilder();
+ }
+ }
+ private static Builder create() {
+ return new Builder();
+ }
+
+ public Builder clear() {
+ super.clear();
+ tableName_ = "";
+ bitField0_ = (bitField0_ & ~0x00000001);
+ if (backupStatusBuilder_ == null) {
+ backupStatus_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupStatus.getDefaultInstance();
+ } else {
+ backupStatusBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000002);
+ return this;
+ }
+
+ public Builder clone() {
+ return create().mergeFrom(buildPartial());
+ }
+
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_TableBackupStatus_descriptor;
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus getDefaultInstanceForType() {
+ return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus.getDefaultInstance();
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus build() {
+ org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return result;
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus buildPartial() {
+ org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus result = new org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus(this);
+ int from_bitField0_ = bitField0_;
+ int to_bitField0_ = 0;
+ if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+ to_bitField0_ |= 0x00000001;
+ }
+ result.tableName_ = tableName_;
+ if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
+ to_bitField0_ |= 0x00000002;
+ }
+ if (backupStatusBuilder_ == null) {
+ result.backupStatus_ = backupStatus_;
+ } else {
+ result.backupStatus_ = backupStatusBuilder_.build();
+ }
+ result.bitField0_ = to_bitField0_;
+ onBuilt();
+ return result;
+ }
+
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus) {
+ return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus other) {
+ if (other == org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus.getDefaultInstance()) return this;
+ if (other.hasTableName()) {
+ bitField0_ |= 0x00000001;
+ tableName_ = other.tableName_;
+ onChanged();
+ }
+ if (other.hasBackupStatus()) {
+ mergeBackupStatus(other.getBackupStatus());
+ }
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ public final boolean isInitialized() {
+ if (!hasTableName()) {
+
+ return false;
+ }
+ if (!hasBackupStatus()) {
+
+ return false;
+ }
+ if (!getBackupStatus().isInitialized()) {
+
+ return false;
+ }
+ return true;
+ }
+
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus parsedMessage = null;
+ try {
+ parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus) e.getUnfinishedMessage();
+ throw e;
+ } finally {
+ if (parsedMessage != null) {
+ mergeFrom(parsedMessage);
+ }
+ }
+ return this;
+ }
+ private int bitField0_;
+
+ // required string tableName = 1;
+ private java.lang.Object tableName_ = "";
+ /**
+ * required string tableName = 1;
+ */
+ public boolean hasTableName() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * required string tableName = 1;
+ */
+ public java.lang.String getTableName() {
+ java.lang.Object ref = tableName_;
+ if (!(ref instanceof java.lang.String)) {
+ java.lang.String s = ((com.google.protobuf.ByteString) ref)
+ .toStringUtf8();
+ tableName_ = s;
+ return s;
+ } else {
+ return (java.lang.String) ref;
+ }
+ }
+ /**
+ * required string tableName = 1;
+ */
+ public com.google.protobuf.ByteString
+ getTableNameBytes() {
+ java.lang.Object ref = tableName_;
+ if (ref instanceof String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ tableName_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+ /**
+ * required string tableName = 1;
+ */
+ public Builder setTableName(
+ java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000001;
+ tableName_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * required string tableName = 1;
+ */
+ public Builder clearTableName() {
+ bitField0_ = (bitField0_ & ~0x00000001);
+ tableName_ = getDefaultInstance().getTableName();
+ onChanged();
+ return this;
+ }
+ /**
+ * required string tableName = 1;
+ */
+ public Builder setTableNameBytes(
+ com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000001;
+ tableName_ = value;
+ onChanged();
+ return this;
+ }
+
+ // required .hbase.pb.BackupStatus backupStatus = 2;
+ private org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupStatus backupStatus_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupStatus.getDefaultInstance();
+ private com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupStatus, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupStatus.Builder, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupStatusOrBuilder> backupStatusBuilder_;
+ /**
+ * required .hbase.pb.BackupStatus backupStatus = 2;
+ */
+ public boolean hasBackupStatus() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ /**
+ * required .hbase.pb.BackupStatus backupStatus = 2;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupStatus getBackupStatus() {
+ if (backupStatusBuilder_ == null) {
+ return backupStatus_;
+ } else {
+ return backupStatusBuilder_.getMessage();
+ }
+ }
+ /**
+ * required .hbase.pb.BackupStatus backupStatus = 2;
+ */
+ public Builder setBackupStatus(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupStatus value) {
+ if (backupStatusBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ backupStatus_ = value;
+ onChanged();
+ } else {
+ backupStatusBuilder_.setMessage(value);
+ }
+ bitField0_ |= 0x00000002;
+ return this;
+ }
+ /**
+ * required .hbase.pb.BackupStatus backupStatus = 2;
+ */
+ public Builder setBackupStatus(
+ org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupStatus.Builder builderForValue) {
+ if (backupStatusBuilder_ == null) {
+ backupStatus_ = builderForValue.build();
+ onChanged();
+ } else {
+ backupStatusBuilder_.setMessage(builderForValue.build());
+ }
+ bitField0_ |= 0x00000002;
+ return this;
+ }
+ /**
+ * required .hbase.pb.BackupStatus backupStatus = 2;
+ */
+ public Builder mergeBackupStatus(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupStatus value) {
+ if (backupStatusBuilder_ == null) {
+ if (((bitField0_ & 0x00000002) == 0x00000002) &&
+ backupStatus_ != org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupStatus.getDefaultInstance()) {
+ backupStatus_ =
+ org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupStatus.newBuilder(backupStatus_).mergeFrom(value).buildPartial();
+ } else {
+ backupStatus_ = value;
+ }
+ onChanged();
+ } else {
+ backupStatusBuilder_.mergeFrom(value);
+ }
+ bitField0_ |= 0x00000002;
+ return this;
+ }
+ /**
+ * required .hbase.pb.BackupStatus backupStatus = 2;
+ */
+ public Builder clearBackupStatus() {
+ if (backupStatusBuilder_ == null) {
+ backupStatus_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupStatus.getDefaultInstance();
+ onChanged();
+ } else {
+ backupStatusBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000002);
+ return this;
+ }
+ /**
+ * required .hbase.pb.BackupStatus backupStatus = 2;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupStatus.Builder getBackupStatusBuilder() {
+ bitField0_ |= 0x00000002;
+ onChanged();
+ return getBackupStatusFieldBuilder().getBuilder();
+ }
+ /**
+ * required .hbase.pb.BackupStatus backupStatus = 2;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupStatusOrBuilder getBackupStatusOrBuilder() {
+ if (backupStatusBuilder_ != null) {
+ return backupStatusBuilder_.getMessageOrBuilder();
+ } else {
+ return backupStatus_;
+ }
+ }
+ /**
+ * required .hbase.pb.BackupStatus backupStatus = 2;
+ */
+ private com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupStatus, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupStatus.Builder, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupStatusOrBuilder>
+ getBackupStatusFieldBuilder() {
+ if (backupStatusBuilder_ == null) {
+ backupStatusBuilder_ = new com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupStatus, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupStatus.Builder, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupStatusOrBuilder>(
+ backupStatus_,
+ getParentForChildren(),
+ isClean());
+ backupStatus_ = null;
+ }
+ return backupStatusBuilder_;
+ }
+
+ // @@protoc_insertion_point(builder_scope:hbase.pb.TableBackupStatus)
+ }
+
+ static {
+ defaultInstance = new TableBackupStatus(true);
+ defaultInstance.initFields();
+ }
+
+ // @@protoc_insertion_point(class_scope:hbase.pb.TableBackupStatus)
+ }
+
+ // NOTE(review): protoc-generated accessor interface for hbase.pb.BackupContext —
+ // do not hand-edit; regenerate from the BackupProtos .proto definition instead.
+ // NOTE(review): generic type parameters appear to have been stripped from this
+ // patch text (e.g. "java.util.List" below lacks its element type, and
+ // "java.util.List extends ...>" is missing "<? "); verify this hunk against
+ // fresh protoc output before applying.
+ public interface BackupContextOrBuilder
+ extends com.google.protobuf.MessageOrBuilder {
+
+ // required string backupId = 1;
+ /**
+ * required string backupId = 1;
+ */
+ boolean hasBackupId();
+ /**
+ * required string backupId = 1;
+ */
+ java.lang.String getBackupId();
+ /**
+ * required string backupId = 1;
+ */
+ com.google.protobuf.ByteString
+ getBackupIdBytes();
+
+ // required string type = 2;
+ /**
+ * required string type = 2;
+ */
+ boolean hasType();
+ /**
+ * required string type = 2;
+ */
+ java.lang.String getType();
+ /**
+ * required string type = 2;
+ */
+ com.google.protobuf.ByteString
+ getTypeBytes();
+
+ // required string targetRootDir = 3;
+ /**
+ * required string targetRootDir = 3;
+ */
+ boolean hasTargetRootDir();
+ /**
+ * required string targetRootDir = 3;
+ */
+ java.lang.String getTargetRootDir();
+ /**
+ * required string targetRootDir = 3;
+ */
+ com.google.protobuf.ByteString
+ getTargetRootDirBytes();
+
+ // optional string flag = 4;
+ /**
+ * optional string flag = 4;
+ */
+ boolean hasFlag();
+ /**
+ * optional string flag = 4;
+ */
+ java.lang.String getFlag();
+ /**
+ * optional string flag = 4;
+ */
+ com.google.protobuf.ByteString
+ getFlagBytes();
+
+ // optional string phase = 5;
+ /**
+ * optional string phase = 5;
+ */
+ boolean hasPhase();
+ /**
+ * optional string phase = 5;
+ */
+ java.lang.String getPhase();
+ /**
+ * optional string phase = 5;
+ */
+ com.google.protobuf.ByteString
+ getPhaseBytes();
+
+ // optional string failedMessage = 6;
+ /**
+ * optional string failedMessage = 6;
+ */
+ boolean hasFailedMessage();
+ /**
+ * optional string failedMessage = 6;
+ */
+ java.lang.String getFailedMessage();
+ /**
+ * optional string failedMessage = 6;
+ */
+ com.google.protobuf.ByteString
+ getFailedMessageBytes();
+
+ // repeated .hbase.pb.TableBackupStatus backupStatusMap = 7;
+ /**
+ * repeated .hbase.pb.TableBackupStatus backupStatusMap = 7;
+ */
+ java.util.List
+ getBackupStatusMapList();
+ /**
+ * repeated .hbase.pb.TableBackupStatus backupStatusMap = 7;
+ */
+ org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus getBackupStatusMap(int index);
+ /**
+ * repeated .hbase.pb.TableBackupStatus backupStatusMap = 7;
+ */
+ int getBackupStatusMapCount();
+ /**
+ * repeated .hbase.pb.TableBackupStatus backupStatusMap = 7;
+ */
+ java.util.List extends org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatusOrBuilder>
+ getBackupStatusMapOrBuilderList();
+ /**
+ * repeated .hbase.pb.TableBackupStatus backupStatusMap = 7;
+ */
+ org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatusOrBuilder getBackupStatusMapOrBuilder(
+ int index);
+
+ // required int64 startTs = 8;
+ /**
+ * required int64 startTs = 8;
+ */
+ boolean hasStartTs();
+ /**
+ * required int64 startTs = 8;
+ */
+ long getStartTs();
+
+ // required int64 endTs = 9;
+ /**
+ * required int64 endTs = 9;
+ */
+ boolean hasEndTs();
+ /**
+ * required int64 endTs = 9;
+ */
+ long getEndTs();
+
+ // required int64 totalBytesCopied = 10;
+ /**
+ * required int64 totalBytesCopied = 10;
+ */
+ boolean hasTotalBytesCopied();
+ /**
+ * required int64 totalBytesCopied = 10;
+ */
+ long getTotalBytesCopied();
+
+ // optional string hLogTargeDir = 11;
+ // NOTE(review): "hLogTargeDir" (sic, likely "hLogTargetDir") mirrors the field
+ // name in the .proto file; fixing the typo requires changing the .proto and
+ // regenerating, not editing here.
+ /**
+ * optional string hLogTargeDir = 11;
+ */
+ boolean hasHLogTargeDir();
+ /**
+ * optional string hLogTargeDir = 11;
+ */
+ java.lang.String getHLogTargeDir();
+ /**
+ * optional string hLogTargeDir = 11;
+ */
+ com.google.protobuf.ByteString
+ getHLogTargeDirBytes();
+
+ // required bool cancelled = 12;
+ /**
+ * required bool cancelled = 12;
+ */
+ boolean hasCancelled();
+ /**
+ * required bool cancelled = 12;
+ */
+ boolean getCancelled();
+
+ // optional string progress = 13;
+ /**
+ * optional string progress = 13;
+ */
+ boolean hasProgress();
+ /**
+ * optional string progress = 13;
+ */
+ java.lang.String getProgress();
+ /**
+ * optional string progress = 13;
+ */
+ com.google.protobuf.ByteString
+ getProgressBytes();
+ }
+ /**
+ * Protobuf type {@code hbase.pb.BackupContext}
+ */
+ public static final class BackupContext extends
+ com.google.protobuf.GeneratedMessage
+ implements BackupContextOrBuilder {
+ // Use BackupContext.newBuilder() to construct.
+ private BackupContext(com.google.protobuf.GeneratedMessage.Builder> builder) {
+ super(builder);
+ this.unknownFields = builder.getUnknownFields();
+ }
+ private BackupContext(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+ private static final BackupContext defaultInstance;
+ public static BackupContext getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public BackupContext getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ private final com.google.protobuf.UnknownFieldSet unknownFields;
+ @java.lang.Override
+ public final com.google.protobuf.UnknownFieldSet
+ getUnknownFields() {
+ return this.unknownFields;
+ }
+ private BackupContext(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ initFields();
+ int mutable_bitField0_ = 0;
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder();
+ try {
+ boolean done = false;
+ while (!done) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ done = true;
+ break;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ done = true;
+ }
+ break;
+ }
+ case 10: {
+ bitField0_ |= 0x00000001;
+ backupId_ = input.readBytes();
+ break;
+ }
+ case 18: {
+ bitField0_ |= 0x00000002;
+ type_ = input.readBytes();
+ break;
+ }
+ case 26: {
+ bitField0_ |= 0x00000004;
+ targetRootDir_ = input.readBytes();
+ break;
+ }
+ case 34: {
+ bitField0_ |= 0x00000008;
+ flag_ = input.readBytes();
+ break;
+ }
+ case 42: {
+ bitField0_ |= 0x00000010;
+ phase_ = input.readBytes();
+ break;
+ }
+ case 50: {
+ bitField0_ |= 0x00000020;
+ failedMessage_ = input.readBytes();
+ break;
+ }
+ case 58: {
+ if (!((mutable_bitField0_ & 0x00000040) == 0x00000040)) {
+ backupStatusMap_ = new java.util.ArrayList();
+ mutable_bitField0_ |= 0x00000040;
+ }
+ backupStatusMap_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus.PARSER, extensionRegistry));
+ break;
+ }
+ case 64: {
+ bitField0_ |= 0x00000040;
+ startTs_ = input.readInt64();
+ break;
+ }
+ case 72: {
+ bitField0_ |= 0x00000080;
+ endTs_ = input.readInt64();
+ break;
+ }
+ case 80: {
+ bitField0_ |= 0x00000100;
+ totalBytesCopied_ = input.readInt64();
+ break;
+ }
+ case 90: {
+ bitField0_ |= 0x00000200;
+ hLogTargeDir_ = input.readBytes();
+ break;
+ }
+ case 96: {
+ bitField0_ |= 0x00000400;
+ cancelled_ = input.readBool();
+ break;
+ }
+ case 106: {
+ bitField0_ |= 0x00000800;
+ progress_ = input.readBytes();
+ break;
+ }
+ }
+ }
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ throw e.setUnfinishedMessage(this);
+ } catch (java.io.IOException e) {
+ throw new com.google.protobuf.InvalidProtocolBufferException(
+ e.getMessage()).setUnfinishedMessage(this);
+ } finally {
+ if (((mutable_bitField0_ & 0x00000040) == 0x00000040)) {
+ backupStatusMap_ = java.util.Collections.unmodifiableList(backupStatusMap_);
+ }
+ this.unknownFields = unknownFields.build();
+ makeExtensionsImmutable();
+ }
+ }
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_BackupContext_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_BackupContext_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext.class, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext.Builder.class);
+ }
+
+ public static com.google.protobuf.Parser PARSER =
+ new com.google.protobuf.AbstractParser() {
+ public BackupContext parsePartialFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return new BackupContext(input, extensionRegistry);
+ }
+ };
+
+ @java.lang.Override
+ public com.google.protobuf.Parser getParserForType() {
+ return PARSER;
+ }
+
+ private int bitField0_;
+ // required string backupId = 1;
+ public static final int BACKUPID_FIELD_NUMBER = 1;
+ private java.lang.Object backupId_;
+ /**
+ * required string backupId = 1;
+ */
+ public boolean hasBackupId() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * required string backupId = 1;
+ */
+ public java.lang.String getBackupId() {
+ java.lang.Object ref = backupId_;
+ if (ref instanceof java.lang.String) {
+ return (java.lang.String) ref;
+ } else {
+ com.google.protobuf.ByteString bs =
+ (com.google.protobuf.ByteString) ref;
+ java.lang.String s = bs.toStringUtf8();
+ if (bs.isValidUtf8()) {
+ backupId_ = s;
+ }
+ return s;
+ }
+ }
+ /**
+ * required string backupId = 1;
+ */
+ public com.google.protobuf.ByteString
+ getBackupIdBytes() {
+ java.lang.Object ref = backupId_;
+ if (ref instanceof java.lang.String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ backupId_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+
+ // required string type = 2;
+ public static final int TYPE_FIELD_NUMBER = 2;
+ private java.lang.Object type_;
+ /**
+ * required string type = 2;
+ */
+ public boolean hasType() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ /**
+ * required string type = 2;
+ */
+ public java.lang.String getType() {
+ java.lang.Object ref = type_;
+ if (ref instanceof java.lang.String) {
+ return (java.lang.String) ref;
+ } else {
+ com.google.protobuf.ByteString bs =
+ (com.google.protobuf.ByteString) ref;
+ java.lang.String s = bs.toStringUtf8();
+ if (bs.isValidUtf8()) {
+ type_ = s;
+ }
+ return s;
+ }
+ }
+ /**
+ * required string type = 2;
+ */
+ public com.google.protobuf.ByteString
+ getTypeBytes() {
+ java.lang.Object ref = type_;
+ if (ref instanceof java.lang.String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ type_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+
+ // required string targetRootDir = 3;
+ public static final int TARGETROOTDIR_FIELD_NUMBER = 3;
+ private java.lang.Object targetRootDir_;
+ /**
+ * required string targetRootDir = 3;
+ */
+ public boolean hasTargetRootDir() {
+ return ((bitField0_ & 0x00000004) == 0x00000004);
+ }
+ /**
+ * required string targetRootDir = 3;
+ */
+ public java.lang.String getTargetRootDir() {
+ java.lang.Object ref = targetRootDir_;
+ if (ref instanceof java.lang.String) {
+ return (java.lang.String) ref;
+ } else {
+ com.google.protobuf.ByteString bs =
+ (com.google.protobuf.ByteString) ref;
+ java.lang.String s = bs.toStringUtf8();
+ if (bs.isValidUtf8()) {
+ targetRootDir_ = s;
+ }
+ return s;
+ }
+ }
+ /**
+ * required string targetRootDir = 3;
+ */
+ public com.google.protobuf.ByteString
+ getTargetRootDirBytes() {
+ java.lang.Object ref = targetRootDir_;
+ if (ref instanceof java.lang.String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ targetRootDir_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+
+ // optional string flag = 4;
+ public static final int FLAG_FIELD_NUMBER = 4;
+ private java.lang.Object flag_;
+ /**
+ * optional string flag = 4;
+ */
+ public boolean hasFlag() {
+ return ((bitField0_ & 0x00000008) == 0x00000008);
+ }
+ /**
+ * optional string flag = 4;
+ */
+ public java.lang.String getFlag() {
+ java.lang.Object ref = flag_;
+ if (ref instanceof java.lang.String) {
+ return (java.lang.String) ref;
+ } else {
+ com.google.protobuf.ByteString bs =
+ (com.google.protobuf.ByteString) ref;
+ java.lang.String s = bs.toStringUtf8();
+ if (bs.isValidUtf8()) {
+ flag_ = s;
+ }
+ return s;
+ }
+ }
+ /**
+ * optional string flag = 4;
+ */
+ public com.google.protobuf.ByteString
+ getFlagBytes() {
+ java.lang.Object ref = flag_;
+ if (ref instanceof java.lang.String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ flag_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+
+ // optional string phase = 5;
+ public static final int PHASE_FIELD_NUMBER = 5;
+ private java.lang.Object phase_;
+ /**
+ * optional string phase = 5;
+ */
+ public boolean hasPhase() {
+ return ((bitField0_ & 0x00000010) == 0x00000010);
+ }
+ /**
+ * optional string phase = 5;
+ */
+ public java.lang.String getPhase() {
+ java.lang.Object ref = phase_;
+ if (ref instanceof java.lang.String) {
+ return (java.lang.String) ref;
+ } else {
+ com.google.protobuf.ByteString bs =
+ (com.google.protobuf.ByteString) ref;
+ java.lang.String s = bs.toStringUtf8();
+ if (bs.isValidUtf8()) {
+ phase_ = s;
+ }
+ return s;
+ }
+ }
+ /**
+ * optional string phase = 5;
+ */
+ public com.google.protobuf.ByteString
+ getPhaseBytes() {
+ java.lang.Object ref = phase_;
+ if (ref instanceof java.lang.String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ phase_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+
+ // optional string failedMessage = 6;
+ public static final int FAILEDMESSAGE_FIELD_NUMBER = 6;
+ private java.lang.Object failedMessage_;
+ /**
+ * optional string failedMessage = 6;
+ */
+ public boolean hasFailedMessage() {
+ return ((bitField0_ & 0x00000020) == 0x00000020);
+ }
+ /**
+ * optional string failedMessage = 6;
+ */
+ public java.lang.String getFailedMessage() {
+ java.lang.Object ref = failedMessage_;
+ if (ref instanceof java.lang.String) {
+ return (java.lang.String) ref;
+ } else {
+ com.google.protobuf.ByteString bs =
+ (com.google.protobuf.ByteString) ref;
+ java.lang.String s = bs.toStringUtf8();
+ if (bs.isValidUtf8()) {
+ failedMessage_ = s;
+ }
+ return s;
+ }
+ }
+ /**
+ * optional string failedMessage = 6;
+ */
+ public com.google.protobuf.ByteString
+ getFailedMessageBytes() {
+ java.lang.Object ref = failedMessage_;
+ if (ref instanceof java.lang.String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ failedMessage_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+
+ // repeated .hbase.pb.TableBackupStatus backupStatusMap = 7;
+ public static final int BACKUPSTATUSMAP_FIELD_NUMBER = 7;
+ private java.util.List backupStatusMap_;
+ /**
+ * repeated .hbase.pb.TableBackupStatus backupStatusMap = 7;
+ */
+ public java.util.List getBackupStatusMapList() {
+ return backupStatusMap_;
+ }
+ /**
+ * repeated .hbase.pb.TableBackupStatus backupStatusMap = 7;
+ */
+ public java.util.List extends org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatusOrBuilder>
+ getBackupStatusMapOrBuilderList() {
+ return backupStatusMap_;
+ }
+ /**
+ * repeated .hbase.pb.TableBackupStatus backupStatusMap = 7;
+ */
+ public int getBackupStatusMapCount() {
+ return backupStatusMap_.size();
+ }
+ /**
+ * repeated .hbase.pb.TableBackupStatus backupStatusMap = 7;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus getBackupStatusMap(int index) {
+ return backupStatusMap_.get(index);
+ }
+ /**
+ * repeated .hbase.pb.TableBackupStatus backupStatusMap = 7;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatusOrBuilder getBackupStatusMapOrBuilder(
+ int index) {
+ return backupStatusMap_.get(index);
+ }
+
+ // required int64 startTs = 8;
+ public static final int STARTTS_FIELD_NUMBER = 8;
+ private long startTs_;
+ /**
+ * required int64 startTs = 8;
+ */
+ public boolean hasStartTs() {
+ return ((bitField0_ & 0x00000040) == 0x00000040);
+ }
+ /**
+ * required int64 startTs = 8;
+ */
+ public long getStartTs() {
+ return startTs_;
+ }
+
+ // required int64 endTs = 9;
+ public static final int ENDTS_FIELD_NUMBER = 9;
+ private long endTs_;
+ /**
+ * required int64 endTs = 9;
+ */
+ public boolean hasEndTs() {
+ return ((bitField0_ & 0x00000080) == 0x00000080);
+ }
+ /**
+ * required int64 endTs = 9;
+ */
+ public long getEndTs() {
+ return endTs_;
+ }
+
+ // required int64 totalBytesCopied = 10;
+ public static final int TOTALBYTESCOPIED_FIELD_NUMBER = 10;
+ private long totalBytesCopied_;
+ /**
+ * required int64 totalBytesCopied = 10;
+ */
+ public boolean hasTotalBytesCopied() {
+ return ((bitField0_ & 0x00000100) == 0x00000100);
+ }
+ /**
+ * required int64 totalBytesCopied = 10;
+ */
+ public long getTotalBytesCopied() {
+ return totalBytesCopied_;
+ }
+
+ // optional string hLogTargeDir = 11;
+ public static final int HLOGTARGEDIR_FIELD_NUMBER = 11;
+ private java.lang.Object hLogTargeDir_;
+ /**
+ * optional string hLogTargeDir = 11;
+ */
+ public boolean hasHLogTargeDir() {
+ return ((bitField0_ & 0x00000200) == 0x00000200);
+ }
+ /**
+ * optional string hLogTargeDir = 11;
+ */
+ public java.lang.String getHLogTargeDir() {
+ java.lang.Object ref = hLogTargeDir_;
+ if (ref instanceof java.lang.String) {
+ return (java.lang.String) ref;
+ } else {
+ com.google.protobuf.ByteString bs =
+ (com.google.protobuf.ByteString) ref;
+ java.lang.String s = bs.toStringUtf8();
+ if (bs.isValidUtf8()) {
+ hLogTargeDir_ = s;
+ }
+ return s;
+ }
+ }
+ /**
+ * optional string hLogTargeDir = 11;
+ */
+ public com.google.protobuf.ByteString
+ getHLogTargeDirBytes() {
+ java.lang.Object ref = hLogTargeDir_;
+ if (ref instanceof java.lang.String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ hLogTargeDir_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+
+ // required bool cancelled = 12;
+ public static final int CANCELLED_FIELD_NUMBER = 12;
+ private boolean cancelled_;
+ /**
+ * required bool cancelled = 12;
+ */
+ public boolean hasCancelled() {
+ return ((bitField0_ & 0x00000400) == 0x00000400);
+ }
+ /**
+ * required bool cancelled = 12;
+ */
+ public boolean getCancelled() {
+ return cancelled_;
+ }
+
+ // optional string progress = 13;
+ public static final int PROGRESS_FIELD_NUMBER = 13;
+ private java.lang.Object progress_;
+ /**
+ * optional string progress = 13;
+ */
+ public boolean hasProgress() {
+ return ((bitField0_ & 0x00000800) == 0x00000800);
+ }
+ /**
+ * optional string progress = 13;
+ */
+ public java.lang.String getProgress() {
+ java.lang.Object ref = progress_;
+ if (ref instanceof java.lang.String) {
+ return (java.lang.String) ref;
+ } else {
+ com.google.protobuf.ByteString bs =
+ (com.google.protobuf.ByteString) ref;
+ java.lang.String s = bs.toStringUtf8();
+ if (bs.isValidUtf8()) {
+ progress_ = s;
+ }
+ return s;
+ }
+ }
+ /**
+ * optional string progress = 13;
+ */
+ public com.google.protobuf.ByteString
+ getProgressBytes() {
+ java.lang.Object ref = progress_;
+ if (ref instanceof java.lang.String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ progress_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+
+ private void initFields() {
+ backupId_ = "";
+ type_ = "";
+ targetRootDir_ = "";
+ flag_ = "";
+ phase_ = "";
+ failedMessage_ = "";
+ backupStatusMap_ = java.util.Collections.emptyList();
+ startTs_ = 0L;
+ endTs_ = 0L;
+ totalBytesCopied_ = 0L;
+ hLogTargeDir_ = "";
+ cancelled_ = false;
+ progress_ = "";
+ }
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized != -1) return isInitialized == 1;
+
+ if (!hasBackupId()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!hasType()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!hasTargetRootDir()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!hasStartTs()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!hasEndTs()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!hasTotalBytesCopied()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!hasCancelled()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ for (int i = 0; i < getBackupStatusMapCount(); i++) {
+ if (!getBackupStatusMap(i).isInitialized()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ }
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ output.writeBytes(1, getBackupIdBytes());
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ output.writeBytes(2, getTypeBytes());
+ }
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
+ output.writeBytes(3, getTargetRootDirBytes());
+ }
+ if (((bitField0_ & 0x00000008) == 0x00000008)) {
+ output.writeBytes(4, getFlagBytes());
+ }
+ if (((bitField0_ & 0x00000010) == 0x00000010)) {
+ output.writeBytes(5, getPhaseBytes());
+ }
+ if (((bitField0_ & 0x00000020) == 0x00000020)) {
+ output.writeBytes(6, getFailedMessageBytes());
+ }
+ for (int i = 0; i < backupStatusMap_.size(); i++) {
+ output.writeMessage(7, backupStatusMap_.get(i));
+ }
+ if (((bitField0_ & 0x00000040) == 0x00000040)) {
+ output.writeInt64(8, startTs_);
+ }
+ if (((bitField0_ & 0x00000080) == 0x00000080)) {
+ output.writeInt64(9, endTs_);
+ }
+ if (((bitField0_ & 0x00000100) == 0x00000100)) {
+ output.writeInt64(10, totalBytesCopied_);
+ }
+ if (((bitField0_ & 0x00000200) == 0x00000200)) {
+ output.writeBytes(11, getHLogTargeDirBytes());
+ }
+ if (((bitField0_ & 0x00000400) == 0x00000400)) {
+ output.writeBool(12, cancelled_);
+ }
+ if (((bitField0_ & 0x00000800) == 0x00000800)) {
+ output.writeBytes(13, getProgressBytes());
+ }
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBytesSize(1, getBackupIdBytes());
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBytesSize(2, getTypeBytes());
+ }
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBytesSize(3, getTargetRootDirBytes());
+ }
+ if (((bitField0_ & 0x00000008) == 0x00000008)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBytesSize(4, getFlagBytes());
+ }
+ if (((bitField0_ & 0x00000010) == 0x00000010)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBytesSize(5, getPhaseBytes());
+ }
+ if (((bitField0_ & 0x00000020) == 0x00000020)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBytesSize(6, getFailedMessageBytes());
+ }
+ for (int i = 0; i < backupStatusMap_.size(); i++) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeMessageSize(7, backupStatusMap_.get(i));
+ }
+ if (((bitField0_ & 0x00000040) == 0x00000040)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeInt64Size(8, startTs_);
+ }
+ if (((bitField0_ & 0x00000080) == 0x00000080)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeInt64Size(9, endTs_);
+ }
+ if (((bitField0_ & 0x00000100) == 0x00000100)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeInt64Size(10, totalBytesCopied_);
+ }
+ if (((bitField0_ & 0x00000200) == 0x00000200)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBytesSize(11, getHLogTargeDirBytes());
+ }
+ if (((bitField0_ & 0x00000400) == 0x00000400)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBoolSize(12, cancelled_);
+ }
+ if (((bitField0_ & 0x00000800) == 0x00000800)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBytesSize(13, getProgressBytes());
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ protected java.lang.Object writeReplace()
+ throws java.io.ObjectStreamException {
+ return super.writeReplace();
+ }
+
+ public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+
+ public static Builder newBuilder() { return Builder.create(); }
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext prototype) {
+ return newBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ /**
+ * Protobuf type {@code hbase.pb.BackupContext}
+ */
+ public static final class Builder extends
+ com.google.protobuf.GeneratedMessage.Builder
+ implements org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContextOrBuilder {
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_BackupContext_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_BackupContext_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext.class, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext.Builder.class);
+ }
+
+ // Construct using org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext.newBuilder()
+ private Builder() {
+ maybeForceBuilderInitialization();
+ }
+
+ private Builder(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ super(parent);
+ maybeForceBuilderInitialization();
+ }
+ private void maybeForceBuilderInitialization() {
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+ getBackupStatusMapFieldBuilder();
+ }
+ }
+ private static Builder create() {
+ return new Builder();
+ }
+
+ public Builder clear() {
+ super.clear();
+ backupId_ = "";
+ bitField0_ = (bitField0_ & ~0x00000001);
+ type_ = "";
+ bitField0_ = (bitField0_ & ~0x00000002);
+ targetRootDir_ = "";
+ bitField0_ = (bitField0_ & ~0x00000004);
+ flag_ = "";
+ bitField0_ = (bitField0_ & ~0x00000008);
+ phase_ = "";
+ bitField0_ = (bitField0_ & ~0x00000010);
+ failedMessage_ = "";
+ bitField0_ = (bitField0_ & ~0x00000020);
+ if (backupStatusMapBuilder_ == null) {
+ backupStatusMap_ = java.util.Collections.emptyList();
+ bitField0_ = (bitField0_ & ~0x00000040);
+ } else {
+ backupStatusMapBuilder_.clear();
+ }
+ startTs_ = 0L;
+ bitField0_ = (bitField0_ & ~0x00000080);
+ endTs_ = 0L;
+ bitField0_ = (bitField0_ & ~0x00000100);
+ totalBytesCopied_ = 0L;
+ bitField0_ = (bitField0_ & ~0x00000200);
+ hLogTargeDir_ = "";
+ bitField0_ = (bitField0_ & ~0x00000400);
+ cancelled_ = false;
+ bitField0_ = (bitField0_ & ~0x00000800);
+ progress_ = "";
+ bitField0_ = (bitField0_ & ~0x00001000);
+ return this;
+ }
+
+ public Builder clone() {
+ return create().mergeFrom(buildPartial());
+ }
+
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_BackupContext_descriptor;
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext getDefaultInstanceForType() {
+ return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext.getDefaultInstance();
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext build() {
+ org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return result;
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext buildPartial() {
+ org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext result = new org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext(this);
+ int from_bitField0_ = bitField0_;
+ int to_bitField0_ = 0;
+ if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+ to_bitField0_ |= 0x00000001;
+ }
+ result.backupId_ = backupId_;
+ if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
+ to_bitField0_ |= 0x00000002;
+ }
+ result.type_ = type_;
+ if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
+ to_bitField0_ |= 0x00000004;
+ }
+ result.targetRootDir_ = targetRootDir_;
+ if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
+ to_bitField0_ |= 0x00000008;
+ }
+ result.flag_ = flag_;
+ if (((from_bitField0_ & 0x00000010) == 0x00000010)) {
+ to_bitField0_ |= 0x00000010;
+ }
+ result.phase_ = phase_;
+ if (((from_bitField0_ & 0x00000020) == 0x00000020)) {
+ to_bitField0_ |= 0x00000020;
+ }
+ result.failedMessage_ = failedMessage_;
+ if (backupStatusMapBuilder_ == null) {
+ if (((bitField0_ & 0x00000040) == 0x00000040)) {
+ backupStatusMap_ = java.util.Collections.unmodifiableList(backupStatusMap_);
+ bitField0_ = (bitField0_ & ~0x00000040);
+ }
+ result.backupStatusMap_ = backupStatusMap_;
+ } else {
+ result.backupStatusMap_ = backupStatusMapBuilder_.build();
+ }
+ if (((from_bitField0_ & 0x00000080) == 0x00000080)) {
+ to_bitField0_ |= 0x00000040;
+ }
+ result.startTs_ = startTs_;
+ if (((from_bitField0_ & 0x00000100) == 0x00000100)) {
+ to_bitField0_ |= 0x00000080;
+ }
+ result.endTs_ = endTs_;
+ if (((from_bitField0_ & 0x00000200) == 0x00000200)) {
+ to_bitField0_ |= 0x00000100;
+ }
+ result.totalBytesCopied_ = totalBytesCopied_;
+ if (((from_bitField0_ & 0x00000400) == 0x00000400)) {
+ to_bitField0_ |= 0x00000200;
+ }
+ result.hLogTargeDir_ = hLogTargeDir_;
+ if (((from_bitField0_ & 0x00000800) == 0x00000800)) {
+ to_bitField0_ |= 0x00000400;
+ }
+ result.cancelled_ = cancelled_;
+ if (((from_bitField0_ & 0x00001000) == 0x00001000)) {
+ to_bitField0_ |= 0x00000800;
+ }
+ result.progress_ = progress_;
+ result.bitField0_ = to_bitField0_;
+ onBuilt();
+ return result;
+ }
+
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext) {
+ return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext other) {
+ if (other == org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext.getDefaultInstance()) return this;
+ if (other.hasBackupId()) {
+ bitField0_ |= 0x00000001;
+ backupId_ = other.backupId_;
+ onChanged();
+ }
+ if (other.hasType()) {
+ bitField0_ |= 0x00000002;
+ type_ = other.type_;
+ onChanged();
+ }
+ if (other.hasTargetRootDir()) {
+ bitField0_ |= 0x00000004;
+ targetRootDir_ = other.targetRootDir_;
+ onChanged();
+ }
+ if (other.hasFlag()) {
+ bitField0_ |= 0x00000008;
+ flag_ = other.flag_;
+ onChanged();
+ }
+ if (other.hasPhase()) {
+ bitField0_ |= 0x00000010;
+ phase_ = other.phase_;
+ onChanged();
+ }
+ if (other.hasFailedMessage()) {
+ bitField0_ |= 0x00000020;
+ failedMessage_ = other.failedMessage_;
+ onChanged();
+ }
+ if (backupStatusMapBuilder_ == null) {
+ if (!other.backupStatusMap_.isEmpty()) {
+ if (backupStatusMap_.isEmpty()) {
+ backupStatusMap_ = other.backupStatusMap_;
+ bitField0_ = (bitField0_ & ~0x00000040);
+ } else {
+ ensureBackupStatusMapIsMutable();
+ backupStatusMap_.addAll(other.backupStatusMap_);
+ }
+ onChanged();
+ }
+ } else {
+ if (!other.backupStatusMap_.isEmpty()) {
+ if (backupStatusMapBuilder_.isEmpty()) {
+ backupStatusMapBuilder_.dispose();
+ backupStatusMapBuilder_ = null;
+ backupStatusMap_ = other.backupStatusMap_;
+ bitField0_ = (bitField0_ & ~0x00000040);
+ backupStatusMapBuilder_ =
+ com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
+ getBackupStatusMapFieldBuilder() : null;
+ } else {
+ backupStatusMapBuilder_.addAllMessages(other.backupStatusMap_);
+ }
+ }
+ }
+ if (other.hasStartTs()) {
+ setStartTs(other.getStartTs());
+ }
+ if (other.hasEndTs()) {
+ setEndTs(other.getEndTs());
+ }
+ if (other.hasTotalBytesCopied()) {
+ setTotalBytesCopied(other.getTotalBytesCopied());
+ }
+ if (other.hasHLogTargeDir()) {
+ bitField0_ |= 0x00000400;
+ hLogTargeDir_ = other.hLogTargeDir_;
+ onChanged();
+ }
+ if (other.hasCancelled()) {
+ setCancelled(other.getCancelled());
+ }
+ if (other.hasProgress()) {
+ bitField0_ |= 0x00001000;
+ progress_ = other.progress_;
+ onChanged();
+ }
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ public final boolean isInitialized() {
+ if (!hasBackupId()) {
+
+ return false;
+ }
+ if (!hasType()) {
+
+ return false;
+ }
+ if (!hasTargetRootDir()) {
+
+ return false;
+ }
+ if (!hasStartTs()) {
+
+ return false;
+ }
+ if (!hasEndTs()) {
+
+ return false;
+ }
+ if (!hasTotalBytesCopied()) {
+
+ return false;
+ }
+ if (!hasCancelled()) {
+
+ return false;
+ }
+ for (int i = 0; i < getBackupStatusMapCount(); i++) {
+ if (!getBackupStatusMap(i).isInitialized()) {
+
+ return false;
+ }
+ }
+ return true;
+ }
+
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext parsedMessage = null;
+ try {
+ parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext) e.getUnfinishedMessage();
+ throw e;
+ } finally {
+ if (parsedMessage != null) {
+ mergeFrom(parsedMessage);
+ }
+ }
+ return this;
+ }
+ private int bitField0_;
+
+ // required string backupId = 1;
+ private java.lang.Object backupId_ = "";
+ /**
+ * required string backupId = 1;
+ */
+ public boolean hasBackupId() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * required string backupId = 1;
+ */
+ public java.lang.String getBackupId() {
+ java.lang.Object ref = backupId_;
+ if (!(ref instanceof java.lang.String)) {
+ java.lang.String s = ((com.google.protobuf.ByteString) ref)
+ .toStringUtf8();
+ backupId_ = s;
+ return s;
+ } else {
+ return (java.lang.String) ref;
+ }
+ }
+ /**
+ * required string backupId = 1;
+ */
+ public com.google.protobuf.ByteString
+ getBackupIdBytes() {
+ java.lang.Object ref = backupId_;
+ if (ref instanceof String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ backupId_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+ /**
+ * required string backupId = 1;
+ */
+ public Builder setBackupId(
+ java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000001;
+ backupId_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * required string backupId = 1;
+ */
+ public Builder clearBackupId() {
+ bitField0_ = (bitField0_ & ~0x00000001);
+ backupId_ = getDefaultInstance().getBackupId();
+ onChanged();
+ return this;
+ }
+ /**
+ * required string backupId = 1;
+ */
+ public Builder setBackupIdBytes(
+ com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000001;
+ backupId_ = value;
+ onChanged();
+ return this;
+ }
+
+ // required string type = 2;
+ private java.lang.Object type_ = "";
+ /**
+ * required string type = 2;
+ */
+ public boolean hasType() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ /**
+ * required string type = 2;
+ */
+ public java.lang.String getType() {
+ java.lang.Object ref = type_;
+ if (!(ref instanceof java.lang.String)) {
+ java.lang.String s = ((com.google.protobuf.ByteString) ref)
+ .toStringUtf8();
+ type_ = s;
+ return s;
+ } else {
+ return (java.lang.String) ref;
+ }
+ }
+ /**
+ * required string type = 2;
+ */
+ public com.google.protobuf.ByteString
+ getTypeBytes() {
+ java.lang.Object ref = type_;
+ if (ref instanceof String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ type_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+ /**
+ * required string type = 2;
+ */
+ public Builder setType(
+ java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000002;
+ type_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * required string type = 2;
+ */
+ public Builder clearType() {
+ bitField0_ = (bitField0_ & ~0x00000002);
+ type_ = getDefaultInstance().getType();
+ onChanged();
+ return this;
+ }
+ /**
+ * required string type = 2;
+ */
+ public Builder setTypeBytes(
+ com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000002;
+ type_ = value;
+ onChanged();
+ return this;
+ }
+
+ // required string targetRootDir = 3;
+ private java.lang.Object targetRootDir_ = "";
+ /**
+ * required string targetRootDir = 3;
+ */
+ public boolean hasTargetRootDir() {
+ return ((bitField0_ & 0x00000004) == 0x00000004);
+ }
+ /**
+ * required string targetRootDir = 3;
+ */
+ public java.lang.String getTargetRootDir() {
+ java.lang.Object ref = targetRootDir_;
+ if (!(ref instanceof java.lang.String)) {
+ java.lang.String s = ((com.google.protobuf.ByteString) ref)
+ .toStringUtf8();
+ targetRootDir_ = s;
+ return s;
+ } else {
+ return (java.lang.String) ref;
+ }
+ }
+ /**
+ * required string targetRootDir = 3;
+ */
+ public com.google.protobuf.ByteString
+ getTargetRootDirBytes() {
+ java.lang.Object ref = targetRootDir_;
+ if (ref instanceof String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ targetRootDir_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+ /**
+ * required string targetRootDir = 3;
+ */
+ public Builder setTargetRootDir(
+ java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000004;
+ targetRootDir_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * required string targetRootDir = 3;
+ */
+ public Builder clearTargetRootDir() {
+ bitField0_ = (bitField0_ & ~0x00000004);
+ targetRootDir_ = getDefaultInstance().getTargetRootDir();
+ onChanged();
+ return this;
+ }
+ /**
+ * required string targetRootDir = 3;
+ */
+ public Builder setTargetRootDirBytes(
+ com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000004;
+ targetRootDir_ = value;
+ onChanged();
+ return this;
+ }
+
+ // optional string flag = 4;
+ private java.lang.Object flag_ = "";
+ /**
+ * optional string flag = 4;
+ */
+ public boolean hasFlag() {
+ return ((bitField0_ & 0x00000008) == 0x00000008);
+ }
+ /**
+ * optional string flag = 4;
+ */
+ public java.lang.String getFlag() {
+ java.lang.Object ref = flag_;
+ if (!(ref instanceof java.lang.String)) {
+ java.lang.String s = ((com.google.protobuf.ByteString) ref)
+ .toStringUtf8();
+ flag_ = s;
+ return s;
+ } else {
+ return (java.lang.String) ref;
+ }
+ }
+ /**
+ * optional string flag = 4;
+ */
+ public com.google.protobuf.ByteString
+ getFlagBytes() {
+ java.lang.Object ref = flag_;
+ if (ref instanceof String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ flag_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+ /**
+ * optional string flag = 4;
+ */
+ public Builder setFlag(
+ java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000008;
+ flag_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * optional string flag = 4;
+ */
+ public Builder clearFlag() {
+ bitField0_ = (bitField0_ & ~0x00000008);
+ flag_ = getDefaultInstance().getFlag();
+ onChanged();
+ return this;
+ }
+ /**
+ * optional string flag = 4;
+ */
+ public Builder setFlagBytes(
+ com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000008;
+ flag_ = value;
+ onChanged();
+ return this;
+ }
+
+ // optional string phase = 5;
+ private java.lang.Object phase_ = "";
+ /**
+ * optional string phase = 5;
+ */
+ public boolean hasPhase() {
+ return ((bitField0_ & 0x00000010) == 0x00000010);
+ }
+ /**
+ * optional string phase = 5;
+ */
+ public java.lang.String getPhase() {
+ java.lang.Object ref = phase_;
+ if (!(ref instanceof java.lang.String)) {
+ java.lang.String s = ((com.google.protobuf.ByteString) ref)
+ .toStringUtf8();
+ phase_ = s;
+ return s;
+ } else {
+ return (java.lang.String) ref;
+ }
+ }
+ /**
+ * optional string phase = 5;
+ */
+ public com.google.protobuf.ByteString
+ getPhaseBytes() {
+ java.lang.Object ref = phase_;
+ if (ref instanceof String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ phase_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+ /**
+ * optional string phase = 5;
+ */
+ public Builder setPhase(
+ java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000010;
+ phase_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * optional string phase = 5;
+ */
+ public Builder clearPhase() {
+ bitField0_ = (bitField0_ & ~0x00000010);
+ phase_ = getDefaultInstance().getPhase();
+ onChanged();
+ return this;
+ }
+ /**
+ * optional string phase = 5;
+ */
+ public Builder setPhaseBytes(
+ com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000010;
+ phase_ = value;
+ onChanged();
+ return this;
+ }
+
+ // optional string failedMessage = 6;
+ private java.lang.Object failedMessage_ = "";
+ /**
+ * optional string failedMessage = 6;
+ */
+ public boolean hasFailedMessage() {
+ return ((bitField0_ & 0x00000020) == 0x00000020);
+ }
+ /**
+ * optional string failedMessage = 6;
+ */
+ public java.lang.String getFailedMessage() {
+ java.lang.Object ref = failedMessage_;
+ if (!(ref instanceof java.lang.String)) {
+ java.lang.String s = ((com.google.protobuf.ByteString) ref)
+ .toStringUtf8();
+ failedMessage_ = s;
+ return s;
+ } else {
+ return (java.lang.String) ref;
+ }
+ }
+ /**
+ * optional string failedMessage = 6;
+ */
+ public com.google.protobuf.ByteString
+ getFailedMessageBytes() {
+ java.lang.Object ref = failedMessage_;
+ if (ref instanceof String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ failedMessage_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+ /**
+ * optional string failedMessage = 6;
+ */
+ public Builder setFailedMessage(
+ java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000020;
+ failedMessage_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * optional string failedMessage = 6;
+ */
+ public Builder clearFailedMessage() {
+ bitField0_ = (bitField0_ & ~0x00000020);
+ failedMessage_ = getDefaultInstance().getFailedMessage();
+ onChanged();
+ return this;
+ }
+ /**
+ * optional string failedMessage = 6;
+ */
+ public Builder setFailedMessageBytes(
+ com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000020;
+ failedMessage_ = value;
+ onChanged();
+ return this;
+ }
+
+ // repeated .hbase.pb.TableBackupStatus backupStatusMap = 7;
+ private java.util.List backupStatusMap_ =
+ java.util.Collections.emptyList();
+ private void ensureBackupStatusMapIsMutable() {
+ if (!((bitField0_ & 0x00000040) == 0x00000040)) {
+ backupStatusMap_ = new java.util.ArrayList(backupStatusMap_);
+ bitField0_ |= 0x00000040;
+ }
+ }
+
+ private com.google.protobuf.RepeatedFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus.Builder, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatusOrBuilder> backupStatusMapBuilder_;
+
+ /**
+ * repeated .hbase.pb.TableBackupStatus backupStatusMap = 7;
+ */
+ public java.util.List getBackupStatusMapList() {
+ if (backupStatusMapBuilder_ == null) {
+ return java.util.Collections.unmodifiableList(backupStatusMap_);
+ } else {
+ return backupStatusMapBuilder_.getMessageList();
+ }
+ }
+ /**
+ * repeated .hbase.pb.TableBackupStatus backupStatusMap = 7;
+ */
+ public int getBackupStatusMapCount() {
+ if (backupStatusMapBuilder_ == null) {
+ return backupStatusMap_.size();
+ } else {
+ return backupStatusMapBuilder_.getCount();
+ }
+ }
+ /**
+ * repeated .hbase.pb.TableBackupStatus backupStatusMap = 7;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus getBackupStatusMap(int index) {
+ if (backupStatusMapBuilder_ == null) {
+ return backupStatusMap_.get(index);
+ } else {
+ return backupStatusMapBuilder_.getMessage(index);
+ }
+ }
+ /**
+ * repeated .hbase.pb.TableBackupStatus backupStatusMap = 7;
+ */
+ public Builder setBackupStatusMap(
+ int index, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus value) {
+ if (backupStatusMapBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureBackupStatusMapIsMutable();
+ backupStatusMap_.set(index, value);
+ onChanged();
+ } else {
+ backupStatusMapBuilder_.setMessage(index, value);
+ }
+ return this;
+ }
+ /**
+ * repeated .hbase.pb.TableBackupStatus backupStatusMap = 7;
+ */
+ public Builder setBackupStatusMap(
+ int index, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus.Builder builderForValue) {
+ if (backupStatusMapBuilder_ == null) {
+ ensureBackupStatusMapIsMutable();
+ backupStatusMap_.set(index, builderForValue.build());
+ onChanged();
+ } else {
+ backupStatusMapBuilder_.setMessage(index, builderForValue.build());
+ }
+ return this;
+ }
+ /**
+ * repeated .hbase.pb.TableBackupStatus backupStatusMap = 7;
+ */
+ public Builder addBackupStatusMap(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus value) {
+ if (backupStatusMapBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureBackupStatusMapIsMutable();
+ backupStatusMap_.add(value);
+ onChanged();
+ } else {
+ backupStatusMapBuilder_.addMessage(value);
+ }
+ return this;
+ }
+ /**
+ * repeated .hbase.pb.TableBackupStatus backupStatusMap = 7;
+ */
+ public Builder addBackupStatusMap(
+ int index, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus value) {
+ if (backupStatusMapBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureBackupStatusMapIsMutable();
+ backupStatusMap_.add(index, value);
+ onChanged();
+ } else {
+ backupStatusMapBuilder_.addMessage(index, value);
+ }
+ return this;
+ }
+ /**
+ * repeated .hbase.pb.TableBackupStatus backupStatusMap = 7;
+ */
+ public Builder addBackupStatusMap(
+ org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus.Builder builderForValue) {
+ if (backupStatusMapBuilder_ == null) {
+ ensureBackupStatusMapIsMutable();
+ backupStatusMap_.add(builderForValue.build());
+ onChanged();
+ } else {
+ backupStatusMapBuilder_.addMessage(builderForValue.build());
+ }
+ return this;
+ }
+ /**
+ * repeated .hbase.pb.TableBackupStatus backupStatusMap = 7;
+ */
+ public Builder addBackupStatusMap(
+ int index, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus.Builder builderForValue) {
+ if (backupStatusMapBuilder_ == null) {
+ ensureBackupStatusMapIsMutable();
+ backupStatusMap_.add(index, builderForValue.build());
+ onChanged();
+ } else {
+ backupStatusMapBuilder_.addMessage(index, builderForValue.build());
+ }
+ return this;
+ }
+ /**
+ * repeated .hbase.pb.TableBackupStatus backupStatusMap = 7;
+ */
+ public Builder addAllBackupStatusMap(
+ java.lang.Iterable extends org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus> values) {
+ if (backupStatusMapBuilder_ == null) {
+ ensureBackupStatusMapIsMutable();
+ super.addAll(values, backupStatusMap_);
+ onChanged();
+ } else {
+ backupStatusMapBuilder_.addAllMessages(values);
+ }
+ return this;
+ }
+ /**
+ * repeated .hbase.pb.TableBackupStatus backupStatusMap = 7;
+ */
+ public Builder clearBackupStatusMap() {
+ if (backupStatusMapBuilder_ == null) {
+ backupStatusMap_ = java.util.Collections.emptyList();
+ bitField0_ = (bitField0_ & ~0x00000040);
+ onChanged();
+ } else {
+ backupStatusMapBuilder_.clear();
+ }
+ return this;
+ }
+ /**
+ * repeated .hbase.pb.TableBackupStatus backupStatusMap = 7;
+ */
+ public Builder removeBackupStatusMap(int index) {
+ if (backupStatusMapBuilder_ == null) {
+ ensureBackupStatusMapIsMutable();
+ backupStatusMap_.remove(index);
+ onChanged();
+ } else {
+ backupStatusMapBuilder_.remove(index);
+ }
+ return this;
+ }
+ /**
+ * repeated .hbase.pb.TableBackupStatus backupStatusMap = 7;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus.Builder getBackupStatusMapBuilder(
+ int index) {
+ return getBackupStatusMapFieldBuilder().getBuilder(index);
+ }
+ /**
+ * repeated .hbase.pb.TableBackupStatus backupStatusMap = 7;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatusOrBuilder getBackupStatusMapOrBuilder(
+ int index) {
+ if (backupStatusMapBuilder_ == null) {
+ return backupStatusMap_.get(index); } else {
+ return backupStatusMapBuilder_.getMessageOrBuilder(index);
+ }
+ }
+ /**
+ * repeated .hbase.pb.TableBackupStatus backupStatusMap = 7;
+ */
+ public java.util.List extends org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatusOrBuilder>
+ getBackupStatusMapOrBuilderList() {
+ if (backupStatusMapBuilder_ != null) {
+ return backupStatusMapBuilder_.getMessageOrBuilderList();
+ } else {
+ return java.util.Collections.unmodifiableList(backupStatusMap_);
+ }
+ }
+ /**
+ * repeated .hbase.pb.TableBackupStatus backupStatusMap = 7;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus.Builder addBackupStatusMapBuilder() {
+ return getBackupStatusMapFieldBuilder().addBuilder(
+ org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus.getDefaultInstance());
+ }
+ /**
+ * repeated .hbase.pb.TableBackupStatus backupStatusMap = 7;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus.Builder addBackupStatusMapBuilder(
+ int index) {
+ return getBackupStatusMapFieldBuilder().addBuilder(
+ index, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus.getDefaultInstance());
+ }
+ /**
+ * repeated .hbase.pb.TableBackupStatus backupStatusMap = 7;
+ */
+ public java.util.List
+ getBackupStatusMapBuilderList() {
+ return getBackupStatusMapFieldBuilder().getBuilderList();
+ }
+ private com.google.protobuf.RepeatedFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus.Builder, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatusOrBuilder>
+ getBackupStatusMapFieldBuilder() {
+ if (backupStatusMapBuilder_ == null) {
+ backupStatusMapBuilder_ = new com.google.protobuf.RepeatedFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus.Builder, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatusOrBuilder>(
+ backupStatusMap_,
+ ((bitField0_ & 0x00000040) == 0x00000040),
+ getParentForChildren(),
+ isClean());
+ backupStatusMap_ = null;
+ }
+ return backupStatusMapBuilder_;
+ }
+
+ // required int64 startTs = 8;
+ private long startTs_ ;
+ /**
+ * required int64 startTs = 8;
+ */
+ public boolean hasStartTs() {
+ return ((bitField0_ & 0x00000080) == 0x00000080);
+ }
+ /**
+ * required int64 startTs = 8;
+ */
+ public long getStartTs() {
+ return startTs_;
+ }
+ /**
+ * required int64 startTs = 8;
+ */
+ public Builder setStartTs(long value) {
+ bitField0_ |= 0x00000080;
+ startTs_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * required int64 startTs = 8;
+ */
+ public Builder clearStartTs() {
+ bitField0_ = (bitField0_ & ~0x00000080);
+ startTs_ = 0L;
+ onChanged();
+ return this;
+ }
+
+ // required int64 endTs = 9;
+ private long endTs_ ;
+ /**
+ * required int64 endTs = 9;
+ */
+ public boolean hasEndTs() {
+ return ((bitField0_ & 0x00000100) == 0x00000100);
+ }
+ /**
+ * required int64 endTs = 9;
+ */
+ public long getEndTs() {
+ return endTs_;
+ }
+ /**
+ * required int64 endTs = 9;
+ */
+ public Builder setEndTs(long value) {
+ bitField0_ |= 0x00000100;
+ endTs_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * required int64 endTs = 9;
+ */
+ public Builder clearEndTs() {
+ bitField0_ = (bitField0_ & ~0x00000100);
+ endTs_ = 0L;
+ onChanged();
+ return this;
+ }
+
+ // required int64 totalBytesCopied = 10;
+ private long totalBytesCopied_ ;
+ /**
+ * required int64 totalBytesCopied = 10;
+ */
+ public boolean hasTotalBytesCopied() {
+ return ((bitField0_ & 0x00000200) == 0x00000200);
+ }
+ /**
+ * required int64 totalBytesCopied = 10;
+ */
+ public long getTotalBytesCopied() {
+ return totalBytesCopied_;
+ }
+ /**
+ * required int64 totalBytesCopied = 10;
+ */
+ public Builder setTotalBytesCopied(long value) {
+ bitField0_ |= 0x00000200;
+ totalBytesCopied_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * required int64 totalBytesCopied = 10;
+ */
+ public Builder clearTotalBytesCopied() {
+ bitField0_ = (bitField0_ & ~0x00000200);
+ totalBytesCopied_ = 0L;
+ onChanged();
+ return this;
+ }
+
+ // optional string hLogTargeDir = 11;
+ private java.lang.Object hLogTargeDir_ = "";
+ /**
+ * optional string hLogTargeDir = 11;
+ */
+ public boolean hasHLogTargeDir() {
+ return ((bitField0_ & 0x00000400) == 0x00000400);
+ }
+ /**
+ * optional string hLogTargeDir = 11;
+ */
+ public java.lang.String getHLogTargeDir() {
+ java.lang.Object ref = hLogTargeDir_;
+ if (!(ref instanceof java.lang.String)) {
+ java.lang.String s = ((com.google.protobuf.ByteString) ref)
+ .toStringUtf8();
+ hLogTargeDir_ = s;
+ return s;
+ } else {
+ return (java.lang.String) ref;
+ }
+ }
+ /**
+ * optional string hLogTargeDir = 11;
+ */
+ public com.google.protobuf.ByteString
+ getHLogTargeDirBytes() {
+ java.lang.Object ref = hLogTargeDir_;
+ if (ref instanceof String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ hLogTargeDir_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+ /**
+ * optional string hLogTargeDir = 11;
+ */
+ public Builder setHLogTargeDir(
+ java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000400;
+ hLogTargeDir_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * optional string hLogTargeDir = 11;
+ */
+ public Builder clearHLogTargeDir() {
+ bitField0_ = (bitField0_ & ~0x00000400);
+ hLogTargeDir_ = getDefaultInstance().getHLogTargeDir();
+ onChanged();
+ return this;
+ }
+ /**
+ * optional string hLogTargeDir = 11;
+ */
+ public Builder setHLogTargeDirBytes(
+ com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000400;
+ hLogTargeDir_ = value;
+ onChanged();
+ return this;
+ }
+
+ // required bool cancelled = 12;
+ private boolean cancelled_ ;
+ /**
+ * required bool cancelled = 12;
+ */
+ public boolean hasCancelled() {
+ return ((bitField0_ & 0x00000800) == 0x00000800);
+ }
+ /**
+ * required bool cancelled = 12;
+ */
+ public boolean getCancelled() {
+ return cancelled_;
+ }
+ /**
+ * required bool cancelled = 12;
+ */
+ public Builder setCancelled(boolean value) {
+ bitField0_ |= 0x00000800;
+ cancelled_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * required bool cancelled = 12;
+ */
+ public Builder clearCancelled() {
+ bitField0_ = (bitField0_ & ~0x00000800);
+ cancelled_ = false;
+ onChanged();
+ return this;
+ }
+
+ // optional string progress = 13;
+ private java.lang.Object progress_ = "";
+ /**
+ * optional string progress = 13;
+ */
+ public boolean hasProgress() {
+ return ((bitField0_ & 0x00001000) == 0x00001000);
+ }
+ /**
+ * optional string progress = 13;
+ */
+ public java.lang.String getProgress() {
+ java.lang.Object ref = progress_;
+ if (!(ref instanceof java.lang.String)) {
+ java.lang.String s = ((com.google.protobuf.ByteString) ref)
+ .toStringUtf8();
+ progress_ = s;
+ return s;
+ } else {
+ return (java.lang.String) ref;
+ }
+ }
+ /**
+ * optional string progress = 13;
+ */
+ public com.google.protobuf.ByteString
+ getProgressBytes() {
+ java.lang.Object ref = progress_;
+ if (ref instanceof String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ progress_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+ /**
+ * optional string progress = 13;
+ */
+ public Builder setProgress(
+ java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00001000;
+ progress_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * optional string progress = 13;
+ */
+ public Builder clearProgress() {
+ bitField0_ = (bitField0_ & ~0x00001000);
+ progress_ = getDefaultInstance().getProgress();
+ onChanged();
+ return this;
+ }
+ /**
+ * optional string progress = 13;
+ */
+ public Builder setProgressBytes(
+ com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00001000;
+ progress_ = value;
+ onChanged();
+ return this;
+ }
+
+ // @@protoc_insertion_point(builder_scope:hbase.pb.BackupContext)
+ }
+
+ static {
+ defaultInstance = new BackupContext(true);
+ defaultInstance.initFields();
+ }
+
+ // @@protoc_insertion_point(class_scope:hbase.pb.BackupContext)
+ }
+
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_hbase_pb_BackupManifest_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_hbase_pb_BackupManifest_fieldAccessorTable;
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_hbase_pb_BackupStatus_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_hbase_pb_BackupStatus_fieldAccessorTable;
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_hbase_pb_TableBackupStatus_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_hbase_pb_TableBackupStatus_fieldAccessorTable;
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_hbase_pb_BackupContext_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_hbase_pb_BackupContext_fieldAccessorTable;
+
+ public static com.google.protobuf.Descriptors.FileDescriptor
+ getDescriptor() {
+ return descriptor;
+ }
+ private static com.google.protobuf.Descriptors.FileDescriptor
+ descriptor;
+ static {
+ java.lang.String[] descriptorData = {
+ "\n\014Backup.proto\022\010hbase.pb\"\355\001\n\016BackupManif" +
+ "est\022\017\n\007version\030\001 \002(\t\022\r\n\005token\030\002 \002(\t\022\014\n\004t" +
+ "ype\030\003 \002(\t\022\020\n\010tableSet\030\004 \002(\t\022\017\n\007startTs\030\005" +
+ " \002(\003\022\022\n\ncompleteTs\030\006 \002(\003\022\022\n\ntableBytes\030\007" +
+ " \002(\003\022\020\n\010logBytes\030\010 \001(\003\022\025\n\rincrTimerange\030" +
+ "\t \002(\t\022\022\n\ndependency\030\n \002(\t\022\022\n\nimageState\030" +
+ "\013 \002(\t\022\021\n\tcompacted\030\014 \002(\010\"B\n\014BackupStatus" +
+ "\022\r\n\005table\030\001 \002(\t\022\021\n\ttargetDir\030\002 \002(\t\022\020\n\010sn" +
+ "apshot\030\003 \001(\t\"T\n\021TableBackupStatus\022\021\n\ttab" +
+ "leName\030\001 \002(\t\022,\n\014backupStatus\030\002 \002(\0132\026.hba",
+ "se.pb.BackupStatus\"\245\002\n\rBackupContext\022\020\n\010" +
+ "backupId\030\001 \002(\t\022\014\n\004type\030\002 \002(\t\022\025\n\rtargetRo" +
+ "otDir\030\003 \002(\t\022\014\n\004flag\030\004 \001(\t\022\r\n\005phase\030\005 \001(\t" +
+ "\022\025\n\rfailedMessage\030\006 \001(\t\0224\n\017backupStatusM" +
+ "ap\030\007 \003(\0132\033.hbase.pb.TableBackupStatus\022\017\n" +
+ "\007startTs\030\010 \002(\003\022\r\n\005endTs\030\t \002(\003\022\030\n\020totalBy" +
+ "tesCopied\030\n \002(\003\022\024\n\014hLogTargeDir\030\013 \001(\t\022\021\n" +
+ "\tcancelled\030\014 \002(\010\022\020\n\010progress\030\r \001(\tB=\n*or" +
+ "g.apache.hadoop.hbase.protobuf.generated" +
+ "B\014BackupProtos\210\001\001"
+ };
+ com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
+ new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
+ public com.google.protobuf.ExtensionRegistry assignDescriptors(
+ com.google.protobuf.Descriptors.FileDescriptor root) {
+ descriptor = root;
+ internal_static_hbase_pb_BackupManifest_descriptor =
+ getDescriptor().getMessageTypes().get(0);
+ internal_static_hbase_pb_BackupManifest_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_hbase_pb_BackupManifest_descriptor,
+ new java.lang.String[] { "Version", "Token", "Type", "TableSet", "StartTs", "CompleteTs", "TableBytes", "LogBytes", "IncrTimerange", "Dependency", "ImageState", "Compacted", });
+ internal_static_hbase_pb_BackupStatus_descriptor =
+ getDescriptor().getMessageTypes().get(1);
+ internal_static_hbase_pb_BackupStatus_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_hbase_pb_BackupStatus_descriptor,
+ new java.lang.String[] { "Table", "TargetDir", "Snapshot", });
+ internal_static_hbase_pb_TableBackupStatus_descriptor =
+ getDescriptor().getMessageTypes().get(2);
+ internal_static_hbase_pb_TableBackupStatus_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_hbase_pb_TableBackupStatus_descriptor,
+ new java.lang.String[] { "TableName", "BackupStatus", });
+ internal_static_hbase_pb_BackupContext_descriptor =
+ getDescriptor().getMessageTypes().get(3);
+ internal_static_hbase_pb_BackupContext_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_hbase_pb_BackupContext_descriptor,
+ new java.lang.String[] { "BackupId", "Type", "TargetRootDir", "Flag", "Phase", "FailedMessage", "BackupStatusMap", "StartTs", "EndTs", "TotalBytesCopied", "HLogTargeDir", "Cancelled", "Progress", });
+ return null;
+ }
+ };
+ com.google.protobuf.Descriptors.FileDescriptor
+ .internalBuildGeneratedFileFrom(descriptorData,
+ new com.google.protobuf.Descriptors.FileDescriptor[] {
+ }, assigner);
+ }
+
+ // @@protoc_insertion_point(outer_class_scope)
+}
diff --git hbase-protocol/src/main/protobuf/Backup.proto hbase-protocol/src/main/protobuf/Backup.proto
new file mode 100644
index 0000000..234b1d1
--- /dev/null
+++ hbase-protocol/src/main/protobuf/Backup.proto
@@ -0,0 +1,68 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// This file contains Backup manifest
+package hbase.pb;
+
+option java_package = "org.apache.hadoop.hbase.protobuf.generated";
+option java_outer_classname = "BackupProtos";
+option java_generic_services = true;
+
+message BackupManifest {
+ required string version = 1;
+ required string token = 2;
+ required string type = 3;
+ required string tableSet = 4;
+ required int64 startTs = 5;
+ required int64 completeTs = 6;
+ required int64 tableBytes = 7;
+ optional int64 logBytes = 8;
+ required string incrTimerange = 9;
+ required string dependency = 10;
+ required string imageState = 11;
+ required bool compacted = 12;
+
+}
+
+message BackupStatus {
+ required string table= 1;
+ required string targetDir = 2;
+ optional string snapshot = 3;
+}
+
+message TableBackupStatus {
+ required string tableName = 1;
+ required BackupStatus backupStatus = 2;
+}
+
+message BackupContext {
+ required string backupId = 1;
+ required string type = 2;
+ required string targetRootDir = 3;
+ optional string flag = 4;
+ optional string phase = 5;
+ optional string failedMessage = 6;
+ repeated TableBackupStatus backupStatusMap = 7;
+ required int64 startTs = 8;
+ required int64 endTs = 9;
+ required int64 totalBytesCopied = 10;
+ optional string hLogTargeDir = 11;
+ required bool cancelled = 12;
+ optional string progress = 13;
+
+}
\ No newline at end of file
diff --git hbase-server/pom.xml hbase-server/pom.xml
index 26aad71..807b021 100644
--- hbase-server/pom.xml
+++ hbase-server/pom.xml
@@ -394,6 +394,11 @@
${project.version}
true
+
+ org.apache.hadoop
+ hadoop-distcp
+ ${hadoop-two.version}
+
commons-httpclient
commons-httpclient
@@ -407,6 +412,11 @@
commons-collections
+ org.apache.hadoop
+ hadoop-distcp
+ ${hadoop-two.version}
+
+
org.apache.hbase
hbase-hadoop-compat
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupClient.java hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupClient.java
new file mode 100644
index 0000000..56cb870
--- /dev/null
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupClient.java
@@ -0,0 +1,316 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.backup;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Set;
+
+import org.apache.commons.cli.CommandLine;
+import org.apache.commons.cli.Options;
+import org.apache.commons.cli.ParseException;
+import org.apache.commons.cli.PosixParser;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.backup.BackupRestoreConstants.BackupCommand;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
+import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+import org.apache.hadoop.hbase.zookeeper.ZKUtils;
+import org.apache.log4j.Level;
+import org.apache.log4j.Logger;
+import org.apache.zookeeper.KeeperException;
+
+/**
+ * Backup HBase tables locally or on a remote cluster Serve as client entry point for the following
+ * features: - Full Backup provide local and remote back/restore for a list of tables - Incremental
+ * backup to build on top of full backup as daily/weekly backup - Convert incremental backup WAL
+ * files into hfiles - Merge several backup images into one(like merge weekly into monthly) - Add
+ * and remove table to and from Backup image - Cancel a backup process - Full backup based on
+ * existing snapshot - Describe information of a backup image
+ */
+
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public final class BackupClient {
+ private static final Log LOG = LogFactory.getLog(BackupClient.class);
+ private static Options opt;
+ private static Configuration conf = null;
+
+ private BackupClient() {
+ throw new AssertionError("Instantiating utility class...");
+ }
+
+ protected static void init() throws IOException {
+ // define supported options
+ opt = new Options();
+
+ opt.addOption("all", false, "All tables");
+ opt.addOption("debug", false, "Enable debug loggings");
+ opt.addOption("t", true, "Table name");
+
+ // create configuration instance
+ conf = getConf();
+
+ // disable irrelevant loggers to avoid it mess up command output
+ ZKUtils.disableUselessLoggers(LOG);
+
+ }
+
+
+ public static void main(String[] args) throws IOException {
+ init();
+ parseAndRun(args);
+ System.exit(0);
+ }
+
+ /**
+ * Set the configuration from a given one.
+ * @param newConf A new given configuration
+ */
+ public synchronized static void setConf(Configuration newConf) {
+ conf = newConf;
+ BackupUtil.setConf(newConf);
+ }
+
+ public static Configuration getConf() {
+ if (conf == null) {
+ conf = BackupUtil.getConf();
+ }
+ return conf;
+ }
+
+ private static void parseAndRun(String[] args) throws IOException {
+
+ String cmd = null;
+ String[] remainArgs = null;
+ if (args == null || args.length == 0) {
+ BackupCommands.createCommand(BackupRestoreConstants.BackupCommand.HELP, null).execute();
+ } else {
+ cmd = args[0];
+ remainArgs = new String[args.length - 1];
+ if (args.length > 1) {
+ System.arraycopy(args, 1, remainArgs, 0, args.length - 1);
+ }
+ }
+ CommandLine cmdline = null;
+ try {
+ cmdline = new PosixParser().parse(opt, remainArgs);
+ } catch (ParseException e) {
+ LOG.error("Could not parse command", e);
+ System.exit(-1);
+ }
+
+ BackupCommand type = BackupCommand.HELP;
+ if (BackupCommand.CREATE.name().equalsIgnoreCase(cmd)) {
+ type = BackupCommand.CREATE;
+ } else if (BackupCommand.HELP.name().equalsIgnoreCase(cmd)) {
+ type = BackupCommand.HELP;
+ } else {
+ System.out.println("Unsupported command for backup: " + cmd);
+ }
+
+ // enable debug logging
+ Logger backupClientLogger = Logger.getLogger("org.apache.hadoop.hbase.backup");
+ if (cmdline.hasOption("debug")) {
+ backupClientLogger.setLevel(Level.DEBUG);
+ } else {
+ backupClientLogger.setLevel(Level.INFO);
+ }
+
+ BackupCommands.createCommand(type, cmdline).execute();
+ }
+
+ /**
+ * Send backup request to server, and monitor the progress if necessary
+ * @param backupType : full or incremental
+ * @param backupRootPath : the root path specified by user
+ * @param tableListStr : the table list specified by user
+ * @param snapshot : using existing snapshot if specified by user (in future jira)
+ * @return backupId backup id
+ * @throws IOException exception
+ * @throws KeeperException exception
+ */
+ public static String create(BackupType backupType, String backupRootPath, String tableListStr,
+ String snapshot) throws IOException {
+
+ String backupId = BackupRestoreConstants.BACKUPID_PREFIX + EnvironmentEdgeManager.currentTime();
+
+ // check target path first, confirm it doesn't exist before backup
+ boolean targetExists = false;
+ try {
+ targetExists = HBackupFileSystem.checkPathExist(backupRootPath, conf);
+ } catch (IOException e) {
+ String expMsg = e.getMessage();
+ String newMsg = null;
+ if (expMsg.contains("No FileSystem for scheme")) {
+ newMsg =
+ "Unsupported filesystem scheme found in the backup target url. Error Message: "
+ + expMsg;
+ LOG.error(newMsg);
+ throw new IOException(newMsg);
+ } else {
+ throw e;
+ }
+ } catch (RuntimeException e) {
+ LOG.error(e.getMessage());
+ throw e;
+ }
+
+ if (targetExists) {
+ LOG.info("Using existing backup root dir: " + backupRootPath);
+ } else {
+ LOG.info("Backup root dir " + backupRootPath + " does not exist. Will be created.");
+ }
+
+ // table list specified for backup, trigger backup on specified tables
+ String tableList = tableListStr;
+ try {
+ requestBackup(backupId, backupType, tableList, backupRootPath, snapshot);
+ } catch (RuntimeException e) {
+ String errMsg = e.getMessage();
+ if (errMsg != null
+ && (errMsg.startsWith("Non-existing tables found") || errMsg
+ .startsWith("Snapshot is not found"))) {
+ LOG.error(errMsg + ", please check your command");
+ throw e;
+ } else {
+ throw e;
+ }
+ }
+ return backupId;
+ }
+
+ /**
+ * Prepare and submit Backup request
+ * @param backupId : backup_timestamp (something like backup_1398729212626)
+ * @param backupType : full or incremental
+ * @param tableList : tables to be backuped
+ * @param targetRootDir : specified by user
+ * @param snapshot : use existing snapshot if specified by user (for future jira)
+ * @throws IOException exception
+ */
+ protected static void requestBackup(String backupId, BackupType backupType, String tableList,
+ String targetRootDir, String snapshot) throws IOException {
+
+ Configuration conf = getConf();
+ BackupManager backupManager = null;
+ BackupContext backupContext = null;
+ if (snapshot != null) {
+ LOG.warn("Snapshot option specified, backup type and table option will be ignored,\n"
+ + "full backup will be taken based on the given snapshot.");
+ throw new IOException("backup using existing Snapshot will be implemented in future jira");
+ }
+
+ HBaseAdmin hbadmin = null;
+ Connection conn = null;
+ try {
+
+ backupManager = new BackupManager(conf);
+ String tables = tableList;
+ if (backupType == BackupType.INCREMENTAL) {
+ Set<String> incrTableSet = backupManager.getIncrementalBackupTableSet();
+ if (incrTableSet.isEmpty()) {
+ LOG.warn("Incremental backup table set contains no table.\n"
+ + "Use 'backup create full' or 'backup stop' to \n "
+ + "change the tables covered by incremental backup.");
+ throw new RuntimeException("No table covered by incremental backup.");
+ }
+ StringBuilder sb = new StringBuilder();
+ for (String tableName : incrTableSet) {
+ sb.append(tableName + " ");
+ }
+ LOG.info("Incremental backup for the following table set: " + sb.toString());
+ tables =
+ sb.toString().trim()
+ .replaceAll(" ", BackupRestoreConstants.TABLENAME_DELIMITER_IN_COMMAND);
+ }
+
+ // check whether table exists first before starting real request
+ if (tables != null) {
+ String[] tableNames = tables.split(BackupRestoreConstants.TABLENAME_DELIMITER_IN_COMMAND);
+ ArrayList<String> noneExistingTableList = null;
+ conn = ConnectionFactory.createConnection(conf);
+ hbadmin = (HBaseAdmin) conn.getAdmin();
+ for (String tableName : tableNames) {
+ if (!hbadmin.tableExists(TableName.valueOf(tableName))) {
+ if (noneExistingTableList == null) {
+ noneExistingTableList = new ArrayList<String>();
+ }
+ noneExistingTableList.add(tableName);
+ }
+ }
+ if (noneExistingTableList != null) {
+ if (backupType == BackupType.INCREMENTAL ) {
+ LOG.warn("Incremental backup table set contains non-existing table: "
+ + noneExistingTableList);
+ } else {
+ // Throw exception only in full mode - we try to backup non-existing table
+ throw new RuntimeException("Non-existing tables found in the table list: "
+ + noneExistingTableList);
+ }
+ }
+ }
+
+ // if any target table backup dir already exist, then no backup action taken
+ String[] tableNames = null;
+ if (tables != null && !tables.equals("")) {
+ tableNames = tables.split(BackupRestoreConstants.TABLENAME_DELIMITER_IN_COMMAND);
+ }
+ if (tableNames != null && tableNames.length > 0) {
+ for (String table : tableNames) {
+ String targetTableBackupDir =
+ HBackupFileSystem.getTableBackupDir(targetRootDir, backupId, table);
+ Path targetTableBackupDirPath = new Path(targetTableBackupDir);
+ FileSystem outputFs = FileSystem.get(targetTableBackupDirPath.toUri(), conf);
+ if (outputFs.exists(targetTableBackupDirPath)) {
+ throw new IOException("Target backup directory " + targetTableBackupDir
+ + " exists already.");
+ }
+ }
+ }
+ backupContext =
+ backupManager.createBackupContext(backupId, backupType, tables, targetRootDir, snapshot);
+ backupManager.initialize();
+ backupManager.dispatchRequest(backupContext);
+ } catch (BackupException e) {
+ // suppress the backup exception wrapped within #initialize or #dispatchRequest, backup
+ // exception has already been handled normally
+ StackTraceElement[] stes = e.getStackTrace();
+ for (StackTraceElement ste : stes) {
+ LOG.info(ste);
+ }
+ LOG.error("Backup Exception " + e.getMessage());
+ } finally {
+ if (hbadmin != null) {
+ hbadmin.close();
+ }
+ if (conn != null) {
+ conn.close();
+ }
+ }
+ }
+}
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupCommands.java hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupCommands.java
new file mode 100644
index 0000000..56ec215
--- /dev/null
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupCommands.java
@@ -0,0 +1,147 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.backup;
+
+import java.io.IOException;
+
+import org.apache.commons.cli.CommandLine;
+import org.apache.hadoop.hbase.backup.BackupRestoreConstants.BackupCommand;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+
+/**
+ * General backup commands, options and usage messages
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+final class BackupCommands {
+
+ private static final String USAGE = "Usage: hbase backup COMMAND\n"
+ + "where COMMAND is one of:\n" + " create create a new backup image\n"
+ + "Enter \'help COMMAND\' to see help message for each command\n";
+
+ private static final String CREATE_CMD_USAGE =
+ "Usage: hbase backup create [tables] [-s name] [-convert] "
+ + "[-silent]\n" + " type \"full\" to create a full backup image;\n"
+ + " \"incremental\" to create an incremental backup image\n"
+ + " backup_root_path The full root path to store the backup image,\n"
+ + " the prefix can be gpfs, hdfs or webhdfs\n" + " Options:\n"
+ + " tables If no tables (\"\") are specified, all tables are backed up. "
+ + "Otherwise it is a\n" + " comma separated list of tables.\n"
+ + " -s name Use the specified snapshot for full backup\n"
+ + " -convert For an incremental backup, convert WAL files to HFiles\n";
+
+ interface Command {
+ void execute() throws IOException;
+ }
+
+ private BackupCommands() {
+ throw new AssertionError("Instantiating utility class...");
+ }
+
+ static Command createCommand(BackupCommand type, CommandLine cmdline) {
+ Command cmd = null;
+ switch (type) {
+ case CREATE:
+ cmd = new CreateCommand(cmdline);
+ break;
+ case HELP:
+ default:
+ cmd = new HelpCommand(cmdline);
+ break;
+ }
+ return cmd;
+ }
+
+ private static class CreateCommand implements Command {
+ CommandLine cmdline;
+
+ CreateCommand(CommandLine cmdline) {
+ this.cmdline = cmdline;
+ }
+
+ @Override
+ public void execute() throws IOException {
+ if (cmdline == null || cmdline.getArgs() == null) {
+ System.out.println("ERROR: missing arguments");
+ System.out.println(CREATE_CMD_USAGE);
+ System.exit(-1);
+ }
+ String[] args = cmdline.getArgs();
+ if (args.length < 2 || args.length > 3) {
+ System.out.println("ERROR: wrong number of arguments");
+ System.out.println(CREATE_CMD_USAGE);
+ System.exit(-1);
+ }
+
+ if (!BackupType.FULL.toString().equalsIgnoreCase(args[0])
+ && !BackupType.INCREMENTAL.toString().equalsIgnoreCase(args[0])) {
+ System.out.println("ERROR: invalid backup type");
+ System.out.println(CREATE_CMD_USAGE);
+ System.exit(-1);
+ }
+
+ String snapshot = cmdline.hasOption('s') ? cmdline.getOptionValue('s') : null;
+ String tables = (args.length == 3) ? args[2] : null;
+
+ try {
+ BackupClient.create(BackupType.valueOf(args[0].toUpperCase()), args[1], tables, snapshot);
+ } catch (RuntimeException e) {
+ System.out.println("ERROR: " + e.getMessage());
+ System.exit(-1);
+ }
+ }
+ }
+
+ private static class HelpCommand implements Command {
+ CommandLine cmdline;
+
+ HelpCommand(CommandLine cmdline) {
+ this.cmdline = cmdline;
+ }
+
+ @Override
+ public void execute() throws IOException {
+ if (cmdline == null) {
+ System.out.println(USAGE);
+ System.exit(0);
+ }
+
+ String[] args = cmdline.getArgs();
+ if (args == null || args.length == 0) {
+ System.out.println(USAGE);
+ System.exit(0);
+ }
+
+ if (args.length != 1) {
+ System.out.println("Only support check help message of a single command type");
+ System.out.println(USAGE);
+ System.exit(0);
+ }
+
+ String type = args[0];
+
+ if (BackupCommand.CREATE.name().equalsIgnoreCase(type)) {
+ System.out.println(CREATE_CMD_USAGE);
+ } // other commands will be supported in future jira
+ System.exit(0);
+ }
+ }
+
+}
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupContext.java hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupContext.java
new file mode 100644
index 0000000..92dbb7a
--- /dev/null
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupContext.java
@@ -0,0 +1,423 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.backup;
+
+import java.io.IOException;
+import java.io.Serializable;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Set;
+
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+import org.apache.hadoop.hbase.protobuf.generated.BackupProtos;
+import org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext.Builder;
+import org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus;
+
+/**
+ * An object to encapsulate the information for each backup request
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public class BackupContext implements Serializable {
+
+ public Map<String, BackupStatus> getBackupStatusMap() {
+ return backupStatusMap;
+ }
+
+ public void setBackupStatusMap(Map<String, BackupStatus> backupStatusMap) {
+ this.backupStatusMap = backupStatusMap;
+ }
+
+ public HashMap<String, HashMap<String, String>> getTableSetTimestampMap() {
+ return tableSetTimestampMap;
+ }
+
+ public void setTableSetTimestampMap(HashMap<String, HashMap<String, String>> tableSetTimestampMap) {
+ this.tableSetTimestampMap = tableSetTimestampMap;
+ }
+
+ public String getHlogTargetDir() {
+ return hlogTargetDir;
+ }
+
+ public void setType(BackupType type) {
+ this.type = type;
+ }
+
+ public void setTargetRootDir(String targetRootDir) {
+ this.targetRootDir = targetRootDir;
+ }
+
+ public void setTotalBytesCopied(long totalBytesCopied) {
+ this.totalBytesCopied = totalBytesCopied;
+ }
+
+ public void setCancelled(boolean cancelled) {
+ this.cancelled = cancelled;
+ }
+
+ private static final long serialVersionUID = 2401435114454300992L;
+
+ // backup id: a timestamp when we request the backup
+ private String backupId;
+
+ // backup type, full or incremental
+ private BackupType type;
+
+ // target root directory for storing the backup files
+ private String targetRootDir;
+
+ // overall backup status
+ private BackupHandler.BackupState flag;
+
+ // overall backup phase
+ private BackupHandler.BackupPhase phase;
+
+ // overall backup failure message
+ private String failedMsg;
+
+ // backup status map for all tables
+ private Map<String, BackupStatus> backupStatusMap;
+
+ // actual start timestamp of the backup process
+ private long startTs;
+
+ // actual end timestamp of the backup process, could be fail or complete
+ private long endTs;
+
+ // the total bytes of incremental logs copied
+ private long totalBytesCopied;
+
+ // for incremental backup, the location of the backed-up hlogs
+ private String hlogTargetDir = null;
+
+ // incremental backup file list
+ transient private List<String> incrBackupFileList;
+
+ // new region server log timestamps for table set after distributed log roll
+ // key - table name, value - map of RegionServer hostname -> last log rolled timestamp
+ transient private HashMap<String, HashMap<String, String>> tableSetTimestampMap;
+
+ // cancel flag
+ private boolean cancelled = false;
+ // backup progress string
+
+ private String progress;
+
+ public BackupContext() {
+ }
+
+ public BackupContext(String backupId, BackupType type, String[] tables, String targetRootDir,
+ String snapshot) {
+ super();
+
+ if (backupStatusMap == null) {
+ backupStatusMap = new HashMap<String, BackupStatus>();
+ }
+
+ this.backupId = backupId;
+ this.type = type;
+ this.targetRootDir = targetRootDir;
+
+ this.addTables(tables);
+
+ if (type == BackupType.INCREMENTAL) {
+ setHlogTargetDir(HBackupFileSystem.getLogBackupDir(targetRootDir, backupId));
+ }
+
+ this.startTs = 0;
+ this.endTs = 0;
+
+ }
+
+ /**
+ * Set progress string
+ * @param msg progress message
+ */
+
+ public void setProgress(String msg) {
+ this.progress = msg;
+ }
+
+ /**
+ * Get current progress msg
+ */
+ public String getProgress() {
+ return progress;
+ }
+
+ /**
+ * Mark cancel flag.
+ */
+ public void markCancel() {
+ this.cancelled = true;
+ }
+
+ /**
+ * Has been marked as cancelled or not.
+ * @return True if marked as cancelled
+ */
+ public boolean isCancelled() {
+ return this.cancelled;
+ }
+
+ public String getBackupId() {
+ return backupId;
+ }
+
+ public void setBackupId(String backupId) {
+ this.backupId = backupId;
+ }
+
+ public BackupStatus getBackupStatus(String table) {
+ return this.backupStatusMap.get(table);
+ }
+
+ public String getFailedMsg() {
+ return failedMsg;
+ }
+
+ public void setFailedMsg(String failedMsg) {
+ this.failedMsg = failedMsg;
+ }
+
+ public long getStartTs() {
+ return startTs;
+ }
+
+ public void setStartTs(long startTs) {
+ this.startTs = startTs;
+ }
+
+ public long getEndTs() {
+ return endTs;
+ }
+
+ public void setEndTs(long endTs) {
+ this.endTs = endTs;
+ }
+
+ public long getTotalBytesCopied() {
+ return totalBytesCopied;
+ }
+
+ public BackupHandler.BackupState getFlag() {
+ return flag;
+ }
+
+ public void setFlag(BackupHandler.BackupState flag) {
+ this.flag = flag;
+ }
+
+ public BackupHandler.BackupPhase getPhase() {
+ return phase;
+ }
+
+ public void setPhase(BackupHandler.BackupPhase phase) {
+ this.phase = phase;
+ }
+
+ public BackupType getType() {
+ return type;
+ }
+
+ public void setSnapshotName(String table, String snapshotName) {
+ this.backupStatusMap.get(table).setSnapshotName(snapshotName);
+ }
+
+ public String getSnapshotName(String table) {
+ return this.backupStatusMap.get(table).getSnapshotName();
+ }
+
+ public List<String> getSnapshotNames() {
+ List<String> snapshotNames = new ArrayList<String>();
+ for (BackupStatus backupStatus : this.backupStatusMap.values()) {
+ snapshotNames.add(backupStatus.getSnapshotName());
+ }
+ return snapshotNames;
+ }
+
+ public Set<String> getTables() {
+ return this.backupStatusMap.keySet();
+ }
+
+ public String getTableListAsString() {
+ return BackupUtil.concat(backupStatusMap.keySet(), ";");
+ }
+
+ public void addTables(String[] tables) {
+ for (String table : tables) {
+ BackupStatus backupStatus = new BackupStatus(table, this.targetRootDir, this.backupId);
+ this.backupStatusMap.put(table, backupStatus);
+ }
+ }
+
+ public String getTargetRootDir() {
+ return targetRootDir;
+ }
+
+ public void setHlogTargetDir(String hlogTagetDir) {
+ this.hlogTargetDir = hlogTagetDir;
+ }
+
+ public String getHLogTargetDir() {
+ return hlogTargetDir;
+ }
+
+ public List<String> getIncrBackupFileList() {
+ return incrBackupFileList;
+ }
+
+ public List<String> setIncrBackupFileList(List<String> incrBackupFileList) {
+ this.incrBackupFileList = incrBackupFileList;
+ return this.incrBackupFileList;
+ }
+
+ /**
+ * Set the new region server log timestamps after distributed log roll
+ * @param newTableSetTimestampMap table timestamp map
+ */
+ public void setIncrTimestampMap(HashMap<String, HashMap<String, String>> newTableSetTimestampMap) {
+ this.tableSetTimestampMap = newTableSetTimestampMap;
+ }
+
+ /**
+ * Get new region server log timestamps after distributed log roll
+ * @return new region server log timestamps
+ */
+ public HashMap<String, HashMap<String, String>> getIncrTimestampMap() {
+ return this.tableSetTimestampMap;
+ }
+
+ /**
+ * Get existing snapshot if backing up from existing snapshot.
+ * @return The existing snapshot, null if not backing up from existing snapshot
+ */
+ public String getExistingSnapshot() {
+ // this feature will be supported in another Jira
+ return null;
+ }
+
+ /**
+ * Check whether this backup context are for backing up from existing snapshot or not.
+ * @return true if it is for backing up from existing snapshot, otherwise false
+ */
+ public boolean fromExistingSnapshot() {
+ // this feature will be supported in later jiras
+ return false;
+ }
+
+ public String getTableBySnapshot(String snapshotName) {
+ for (Entry<String, BackupStatus> entry : this.backupStatusMap.entrySet()) {
+ if (snapshotName.equals(entry.getValue().getSnapshotName())) {
+ return entry.getKey();
+ }
+ }
+ return null;
+ }
+
+ public byte[] toByteArray() throws IOException {
+ BackupProtos.BackupContext.Builder builder =
+ BackupProtos.BackupContext.newBuilder();
+ builder.setBackupId(getBackupId());
+ setBackupStatusMap(builder);
+ builder.setCancelled(isCancelled());
+ builder.setEndTs(getEndTs());
+ if(getFailedMsg() != null){
+ builder.setFailedMessage(getFailedMsg());
+ }
+ if(getFlag() != null){
+ builder.setFlag(getFlag().toString());
+ }
+ if(getPhase() != null){
+ builder.setPhase(getPhase().toString());
+ }
+ if(getHLogTargetDir() != null){
+ builder.setHLogTargeDir(getHLogTargetDir());
+ }
+
+ if(getProgress() != null){
+ builder.setProgress(getProgress());
+ }
+ builder.setStartTs(getStartTs());
+ builder.setTargetRootDir(getTargetRootDir());
+ builder.setTotalBytesCopied(getTotalBytesCopied());
+ builder.setType(getType().toString());
+ byte[] data = builder.build().toByteArray();
+ return data;
+
+ }
+
+ private void setBackupStatusMap(Builder builder) {
+ int index = 0;
+ for(String key: backupStatusMap.keySet()) {
+ BackupProtos.TableBackupStatus.Builder tbsBuilder =
+ BackupProtos.TableBackupStatus.newBuilder();
+ BackupStatus bs = backupStatusMap.get(key);
+ tbsBuilder.setBackupStatus(bs.toProto());
+ tbsBuilder.setTableName(key);
+ builder.addBackupStatusMap(index++, tbsBuilder.build());
+ }
+ }
+
+ public static BackupContext fromByteArray(byte[] data)
+ throws IOException, ClassNotFoundException {
+
+ BackupContext context = new BackupContext();
+ BackupProtos.BackupContext proto = BackupProtos.BackupContext.parseFrom(data);
+ context.setBackupId(proto.getBackupId());
+ context.setBackupStatusMap(toMap(proto.getBackupStatusMapList()));
+ context.setCancelled(proto.getCancelled());
+ context.setEndTs(proto.getEndTs());
+ if(proto.hasFailedMessage()) {
+ context.setFailedMsg(proto.getFailedMessage());
+ }
+ if(proto.hasFlag()) {
+ context.setFlag(BackupHandler.BackupState.valueOf(proto.getFlag()));
+ }
+ if(proto.hasHLogTargeDir()) {
+ context.setHlogTargetDir(proto.getHLogTargeDir());
+ }
+ if(proto.hasPhase()) {
+ context.setPhase(BackupHandler.BackupPhase.valueOf(proto.getPhase()));
+ }
+ if(proto.hasProgress()) {
+ context.setProgress(proto.getProgress());
+ }
+ context.setStartTs(proto.getStartTs());
+ context.setTargetRootDir(proto.getTargetRootDir());
+ context.setTotalBytesCopied(proto.getTotalBytesCopied());
+ context.setType(BackupType.valueOf(proto.getType()));
+ return context;
+ }
+
+ private static Map<String, BackupStatus> toMap(List<TableBackupStatus> list) {
+ HashMap<String, BackupStatus> map = new HashMap<String, BackupStatus>();
+ for(TableBackupStatus tbs : list){
+ map.put(tbs.getTableName(), BackupStatus.convert(tbs.getBackupStatus()));
+ }
+ return map;
+ }
+
+}
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupCopyService.java hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupCopyService.java
new file mode 100644
index 0000000..223746d
--- /dev/null
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupCopyService.java
@@ -0,0 +1,37 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.backup;
+
+import java.io.IOException;
+
+import org.apache.hadoop.conf.Configurable;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public interface BackupCopyService extends Configurable {
+ static enum Type {
+ FULL, INCREMENTAL
+ }
+
+ public int copy(BackupHandler backupHandler, Configuration conf, BackupCopyService.Type copyType,
+ String[] options) throws IOException;
+}
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupException.java hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupException.java
new file mode 100644
index 0000000..fe0d0e2
--- /dev/null
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupException.java
@@ -0,0 +1,85 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.backup;
+
+import org.apache.hadoop.hbase.HBaseIOException;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+
+/**
+ * Exception thrown when a backup operation fails; may carry the context of the failed backup.
+ */
+@SuppressWarnings("serial")
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public class BackupException extends HBaseIOException {
+  private BackupContext description; // context of the failed backup; may be null
+
+  /**
+   * Constructs an exception for a backup failure where the affected backup is not known.
+   * @param msg full description of the failure
+   */
+  public BackupException(String msg) {
+    super(msg);
+  }
+
+  /**
+   * Constructs an exception for a backup failure with only a root cause.
+   * @param cause the root cause of the failure
+   */
+  public BackupException(Throwable cause) {
+    super(cause);
+  }
+
+  /**
+   * Constructs an exception for the given backup with no underlying root cause.
+   * @param msg reason why the backup failed
+   * @param desc description of the backup that failed
+   */
+  public BackupException(String msg, BackupContext desc) {
+    super(msg);
+    this.description = desc;
+  }
+
+  /**
+   * Constructs an exception for the given backup due to another exception.
+   * @param msg reason why the backup failed
+   * @param cause root cause of the failure
+   * @param desc description of the backup that failed
+   */
+  public BackupException(String msg, Throwable cause, BackupContext desc) {
+    super(msg, cause);
+    this.description = desc;
+  }
+
+  /**
+   * Constructs an exception when the description of the backup cannot be determined, due to some
+   * other root cause
+   * @param message description of what caused the failure
+   * @param e root cause
+   */
+  public BackupException(String message, Exception e) {
+    super(message, e);
+  }
+
+  public BackupContext getBackupContext() {
+    return this.description;
+  }
+
+}
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupHandler.java hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupHandler.java
new file mode 100644
index 0000000..70e5134
--- /dev/null
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupHandler.java
@@ -0,0 +1,755 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.backup;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.concurrent.Callable;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.backup.BackupManifest.BackupImage;
+import org.apache.hadoop.hbase.backup.master.LogRollMasterProcedureManager;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
+import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+import org.apache.hadoop.hbase.util.FSUtils;
+import org.apache.zookeeper.KeeperException.NoNodeException;
+
+/**
+ * A Handler to carry the operations of backup progress
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public class BackupHandler implements Callable