diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/BackupProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/BackupProtos.java
index 1a7a1ba..acd8889 100644
--- a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/BackupProtos.java
+++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/BackupProtos.java
@@ -9,6 +9,270 @@ public final class BackupProtos {
com.google.protobuf.ExtensionRegistry registry) {
}
/**
+ * Protobuf enum {@code hbase.pb.FullTableBackupState}
+ */
+ public enum FullTableBackupState
+ implements com.google.protobuf.ProtocolMessageEnum {
+ /**
+ * PRE_SNAPSHOT_TABLE = 1;
+ */
+ PRE_SNAPSHOT_TABLE(0, 1),
+ /**
+ * SNAPSHOT_TABLES = 2;
+ */
+ SNAPSHOT_TABLES(1, 2),
+ /**
+ * SNAPSHOT_COPY = 3;
+ */
+ SNAPSHOT_COPY(2, 3),
+ /**
+ * BACKUP_COMPLETE = 4;
+ */
+ BACKUP_COMPLETE(3, 4),
+ ;
+
+ /**
+ * PRE_SNAPSHOT_TABLE = 1;
+ */
+ public static final int PRE_SNAPSHOT_TABLE_VALUE = 1;
+ /**
+ * SNAPSHOT_TABLES = 2;
+ */
+ public static final int SNAPSHOT_TABLES_VALUE = 2;
+ /**
+ * SNAPSHOT_COPY = 3;
+ */
+ public static final int SNAPSHOT_COPY_VALUE = 3;
+ /**
+ * BACKUP_COMPLETE = 4;
+ */
+ public static final int BACKUP_COMPLETE_VALUE = 4;
+
+
+ public final int getNumber() { return value; }
+
+ public static FullTableBackupState valueOf(int value) {
+ switch (value) {
+ case 1: return PRE_SNAPSHOT_TABLE;
+ case 2: return SNAPSHOT_TABLES;
+ case 3: return SNAPSHOT_COPY;
+ case 4: return BACKUP_COMPLETE;
+ default: return null;
+ }
+ }
+
+    public static com.google.protobuf.Internal.EnumLiteMap<FullTableBackupState>
+        internalGetValueMap() {
+      return internalValueMap;
+    }
+    private static com.google.protobuf.Internal.EnumLiteMap<FullTableBackupState>
+        internalValueMap =
+          new com.google.protobuf.Internal.EnumLiteMap<FullTableBackupState>() {
+ public FullTableBackupState findValueByNumber(int number) {
+ return FullTableBackupState.valueOf(number);
+ }
+ };
+
+ public final com.google.protobuf.Descriptors.EnumValueDescriptor
+ getValueDescriptor() {
+ return getDescriptor().getValues().get(index);
+ }
+ public final com.google.protobuf.Descriptors.EnumDescriptor
+ getDescriptorForType() {
+ return getDescriptor();
+ }
+ public static final com.google.protobuf.Descriptors.EnumDescriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.getDescriptor().getEnumTypes().get(0);
+ }
+
+ private static final FullTableBackupState[] VALUES = values();
+
+ public static FullTableBackupState valueOf(
+ com.google.protobuf.Descriptors.EnumValueDescriptor desc) {
+ if (desc.getType() != getDescriptor()) {
+ throw new java.lang.IllegalArgumentException(
+ "EnumValueDescriptor is not for this type.");
+ }
+ return VALUES[desc.getIndex()];
+ }
+
+ private final int index;
+ private final int value;
+
+ private FullTableBackupState(int index, int value) {
+ this.index = index;
+ this.value = value;
+ }
+
+ // @@protoc_insertion_point(enum_scope:hbase.pb.FullTableBackupState)
+ }
+
+ /**
+ * Protobuf enum {@code hbase.pb.IncrementalTableBackupState}
+ */
+ public enum IncrementalTableBackupState
+ implements com.google.protobuf.ProtocolMessageEnum {
+ /**
+ * PREPARE_INCREMENTAL = 1;
+ */
+ PREPARE_INCREMENTAL(0, 1),
+ /**
+ * INCREMENTAL_COPY = 2;
+ */
+ INCREMENTAL_COPY(1, 2),
+ /**
+ * INCR_BACKUP_COMPLETE = 3;
+ */
+ INCR_BACKUP_COMPLETE(2, 3),
+ ;
+
+ /**
+ * PREPARE_INCREMENTAL = 1;
+ */
+ public static final int PREPARE_INCREMENTAL_VALUE = 1;
+ /**
+ * INCREMENTAL_COPY = 2;
+ */
+ public static final int INCREMENTAL_COPY_VALUE = 2;
+ /**
+ * INCR_BACKUP_COMPLETE = 3;
+ */
+ public static final int INCR_BACKUP_COMPLETE_VALUE = 3;
+
+
+ public final int getNumber() { return value; }
+
+ public static IncrementalTableBackupState valueOf(int value) {
+ switch (value) {
+ case 1: return PREPARE_INCREMENTAL;
+ case 2: return INCREMENTAL_COPY;
+ case 3: return INCR_BACKUP_COMPLETE;
+ default: return null;
+ }
+ }
+
+    public static com.google.protobuf.Internal.EnumLiteMap<IncrementalTableBackupState>
+        internalGetValueMap() {
+      return internalValueMap;
+    }
+    private static com.google.protobuf.Internal.EnumLiteMap<IncrementalTableBackupState>
+        internalValueMap =
+          new com.google.protobuf.Internal.EnumLiteMap<IncrementalTableBackupState>() {
+ public IncrementalTableBackupState findValueByNumber(int number) {
+ return IncrementalTableBackupState.valueOf(number);
+ }
+ };
+
+ public final com.google.protobuf.Descriptors.EnumValueDescriptor
+ getValueDescriptor() {
+ return getDescriptor().getValues().get(index);
+ }
+ public final com.google.protobuf.Descriptors.EnumDescriptor
+ getDescriptorForType() {
+ return getDescriptor();
+ }
+ public static final com.google.protobuf.Descriptors.EnumDescriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.getDescriptor().getEnumTypes().get(1);
+ }
+
+ private static final IncrementalTableBackupState[] VALUES = values();
+
+ public static IncrementalTableBackupState valueOf(
+ com.google.protobuf.Descriptors.EnumValueDescriptor desc) {
+ if (desc.getType() != getDescriptor()) {
+ throw new java.lang.IllegalArgumentException(
+ "EnumValueDescriptor is not for this type.");
+ }
+ return VALUES[desc.getIndex()];
+ }
+
+ private final int index;
+ private final int value;
+
+ private IncrementalTableBackupState(int index, int value) {
+ this.index = index;
+ this.value = value;
+ }
+
+ // @@protoc_insertion_point(enum_scope:hbase.pb.IncrementalTableBackupState)
+ }
+
+ /**
+ * Protobuf enum {@code hbase.pb.SnapshotTableState}
+ */
+ public enum SnapshotTableState
+ implements com.google.protobuf.ProtocolMessageEnum {
+ /**
+ * SNAPSHOT_TABLE = 1;
+ */
+ SNAPSHOT_TABLE(0, 1),
+ ;
+
+ /**
+ * SNAPSHOT_TABLE = 1;
+ */
+ public static final int SNAPSHOT_TABLE_VALUE = 1;
+
+
+ public final int getNumber() { return value; }
+
+ public static SnapshotTableState valueOf(int value) {
+ switch (value) {
+ case 1: return SNAPSHOT_TABLE;
+ default: return null;
+ }
+ }
+
+    public static com.google.protobuf.Internal.EnumLiteMap<SnapshotTableState>
+        internalGetValueMap() {
+      return internalValueMap;
+    }
+    private static com.google.protobuf.Internal.EnumLiteMap<SnapshotTableState>
+        internalValueMap =
+          new com.google.protobuf.Internal.EnumLiteMap<SnapshotTableState>() {
+ public SnapshotTableState findValueByNumber(int number) {
+ return SnapshotTableState.valueOf(number);
+ }
+ };
+
+ public final com.google.protobuf.Descriptors.EnumValueDescriptor
+ getValueDescriptor() {
+ return getDescriptor().getValues().get(index);
+ }
+ public final com.google.protobuf.Descriptors.EnumDescriptor
+ getDescriptorForType() {
+ return getDescriptor();
+ }
+ public static final com.google.protobuf.Descriptors.EnumDescriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.getDescriptor().getEnumTypes().get(2);
+ }
+
+ private static final SnapshotTableState[] VALUES = values();
+
+ public static SnapshotTableState valueOf(
+ com.google.protobuf.Descriptors.EnumValueDescriptor desc) {
+ if (desc.getType() != getDescriptor()) {
+ throw new java.lang.IllegalArgumentException(
+ "EnumValueDescriptor is not for this type.");
+ }
+ return VALUES[desc.getIndex()];
+ }
+
+ private final int index;
+ private final int value;
+
+ private SnapshotTableState(int index, int value) {
+ this.index = index;
+ this.value = value;
+ }
+
+ // @@protoc_insertion_point(enum_scope:hbase.pb.SnapshotTableState)
+ }
+
+ /**
* Protobuf enum {@code hbase.pb.BackupType}
*/
public enum BackupType
@@ -41,53 +305,787 @@ public final class BackupProtos {
case 1: return INCREMENTAL;
default: return null;
}
- }
+ }
+
+    public static com.google.protobuf.Internal.EnumLiteMap<BackupType>
+        internalGetValueMap() {
+      return internalValueMap;
+    }
+    private static com.google.protobuf.Internal.EnumLiteMap<BackupType>
+        internalValueMap =
+          new com.google.protobuf.Internal.EnumLiteMap<BackupType>() {
+ public BackupType findValueByNumber(int number) {
+ return BackupType.valueOf(number);
+ }
+ };
+
+ public final com.google.protobuf.Descriptors.EnumValueDescriptor
+ getValueDescriptor() {
+ return getDescriptor().getValues().get(index);
+ }
+ public final com.google.protobuf.Descriptors.EnumDescriptor
+ getDescriptorForType() {
+ return getDescriptor();
+ }
+ public static final com.google.protobuf.Descriptors.EnumDescriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.getDescriptor().getEnumTypes().get(3);
+ }
+
+ private static final BackupType[] VALUES = values();
+
+ public static BackupType valueOf(
+ com.google.protobuf.Descriptors.EnumValueDescriptor desc) {
+ if (desc.getType() != getDescriptor()) {
+ throw new java.lang.IllegalArgumentException(
+ "EnumValueDescriptor is not for this type.");
+ }
+ return VALUES[desc.getIndex()];
+ }
+
+ private final int index;
+ private final int value;
+
+ private BackupType(int index, int value) {
+ this.index = index;
+ this.value = value;
+ }
+
+ // @@protoc_insertion_point(enum_scope:hbase.pb.BackupType)
+ }
+
+ public interface SnapshotTableStateDataOrBuilder
+ extends com.google.protobuf.MessageOrBuilder {
+
+ // required .hbase.pb.TableName table = 1;
+ /**
+ * required .hbase.pb.TableName table = 1;
+ */
+ boolean hasTable();
+ /**
+ * required .hbase.pb.TableName table = 1;
+ */
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTable();
+ /**
+ * required .hbase.pb.TableName table = 1;
+ */
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableOrBuilder();
+
+ // required string snapshotName = 2;
+ /**
+ * required string snapshotName = 2;
+ */
+ boolean hasSnapshotName();
+ /**
+ * required string snapshotName = 2;
+ */
+ java.lang.String getSnapshotName();
+ /**
+ * required string snapshotName = 2;
+ */
+ com.google.protobuf.ByteString
+ getSnapshotNameBytes();
+ }
+ /**
+ * Protobuf type {@code hbase.pb.SnapshotTableStateData}
+ */
+ public static final class SnapshotTableStateData extends
+ com.google.protobuf.GeneratedMessage
+ implements SnapshotTableStateDataOrBuilder {
+ // Use SnapshotTableStateData.newBuilder() to construct.
+    private SnapshotTableStateData(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+ super(builder);
+ this.unknownFields = builder.getUnknownFields();
+ }
+ private SnapshotTableStateData(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+ private static final SnapshotTableStateData defaultInstance;
+ public static SnapshotTableStateData getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public SnapshotTableStateData getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ private final com.google.protobuf.UnknownFieldSet unknownFields;
+ @java.lang.Override
+ public final com.google.protobuf.UnknownFieldSet
+ getUnknownFields() {
+ return this.unknownFields;
+ }
+ private SnapshotTableStateData(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ initFields();
+ int mutable_bitField0_ = 0;
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder();
+ try {
+ boolean done = false;
+ while (!done) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ done = true;
+ break;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ done = true;
+ }
+ break;
+ }
+ case 10: {
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder subBuilder = null;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ subBuilder = table_.toBuilder();
+ }
+ table_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.PARSER, extensionRegistry);
+ if (subBuilder != null) {
+ subBuilder.mergeFrom(table_);
+ table_ = subBuilder.buildPartial();
+ }
+ bitField0_ |= 0x00000001;
+ break;
+ }
+ case 18: {
+ bitField0_ |= 0x00000002;
+ snapshotName_ = input.readBytes();
+ break;
+ }
+ }
+ }
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ throw e.setUnfinishedMessage(this);
+ } catch (java.io.IOException e) {
+ throw new com.google.protobuf.InvalidProtocolBufferException(
+ e.getMessage()).setUnfinishedMessage(this);
+ } finally {
+ this.unknownFields = unknownFields.build();
+ makeExtensionsImmutable();
+ }
+ }
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_SnapshotTableStateData_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_SnapshotTableStateData_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hbase.protobuf.generated.BackupProtos.SnapshotTableStateData.class, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.SnapshotTableStateData.Builder.class);
+ }
+
+    public static com.google.protobuf.Parser<SnapshotTableStateData> PARSER =
+        new com.google.protobuf.AbstractParser<SnapshotTableStateData>() {
+ public SnapshotTableStateData parsePartialFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return new SnapshotTableStateData(input, extensionRegistry);
+ }
+ };
+
+ @java.lang.Override
+    public com.google.protobuf.Parser<SnapshotTableStateData> getParserForType() {
+ return PARSER;
+ }
+
+ private int bitField0_;
+ // required .hbase.pb.TableName table = 1;
+ public static final int TABLE_FIELD_NUMBER = 1;
+ private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName table_;
+ /**
+ * required .hbase.pb.TableName table = 1;
+ */
+ public boolean hasTable() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * required .hbase.pb.TableName table = 1;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTable() {
+ return table_;
+ }
+ /**
+ * required .hbase.pb.TableName table = 1;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableOrBuilder() {
+ return table_;
+ }
+
+ // required string snapshotName = 2;
+ public static final int SNAPSHOTNAME_FIELD_NUMBER = 2;
+ private java.lang.Object snapshotName_;
+ /**
+ * required string snapshotName = 2;
+ */
+ public boolean hasSnapshotName() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ /**
+ * required string snapshotName = 2;
+ */
+ public java.lang.String getSnapshotName() {
+ java.lang.Object ref = snapshotName_;
+ if (ref instanceof java.lang.String) {
+ return (java.lang.String) ref;
+ } else {
+ com.google.protobuf.ByteString bs =
+ (com.google.protobuf.ByteString) ref;
+ java.lang.String s = bs.toStringUtf8();
+ if (bs.isValidUtf8()) {
+ snapshotName_ = s;
+ }
+ return s;
+ }
+ }
+ /**
+ * required string snapshotName = 2;
+ */
+ public com.google.protobuf.ByteString
+ getSnapshotNameBytes() {
+ java.lang.Object ref = snapshotName_;
+ if (ref instanceof java.lang.String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ snapshotName_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+
+ private void initFields() {
+ table_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance();
+ snapshotName_ = "";
+ }
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized != -1) return isInitialized == 1;
+
+ if (!hasTable()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!hasSnapshotName()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!getTable().isInitialized()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ output.writeMessage(1, table_);
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ output.writeBytes(2, getSnapshotNameBytes());
+ }
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeMessageSize(1, table_);
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBytesSize(2, getSnapshotNameBytes());
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ protected java.lang.Object writeReplace()
+ throws java.io.ObjectStreamException {
+ return super.writeReplace();
+ }
+
+ @java.lang.Override
+ public boolean equals(final java.lang.Object obj) {
+ if (obj == this) {
+ return true;
+ }
+ if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.BackupProtos.SnapshotTableStateData)) {
+ return super.equals(obj);
+ }
+ org.apache.hadoop.hbase.protobuf.generated.BackupProtos.SnapshotTableStateData other = (org.apache.hadoop.hbase.protobuf.generated.BackupProtos.SnapshotTableStateData) obj;
+
+ boolean result = true;
+ result = result && (hasTable() == other.hasTable());
+ if (hasTable()) {
+ result = result && getTable()
+ .equals(other.getTable());
+ }
+ result = result && (hasSnapshotName() == other.hasSnapshotName());
+ if (hasSnapshotName()) {
+ result = result && getSnapshotName()
+ .equals(other.getSnapshotName());
+ }
+ result = result &&
+ getUnknownFields().equals(other.getUnknownFields());
+ return result;
+ }
+
+ private int memoizedHashCode = 0;
+ @java.lang.Override
+ public int hashCode() {
+ if (memoizedHashCode != 0) {
+ return memoizedHashCode;
+ }
+ int hash = 41;
+ hash = (19 * hash) + getDescriptorForType().hashCode();
+ if (hasTable()) {
+ hash = (37 * hash) + TABLE_FIELD_NUMBER;
+ hash = (53 * hash) + getTable().hashCode();
+ }
+ if (hasSnapshotName()) {
+ hash = (37 * hash) + SNAPSHOTNAME_FIELD_NUMBER;
+ hash = (53 * hash) + getSnapshotName().hashCode();
+ }
+ hash = (29 * hash) + getUnknownFields().hashCode();
+ memoizedHashCode = hash;
+ return hash;
+ }
+
+ public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.SnapshotTableStateData parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.SnapshotTableStateData parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.SnapshotTableStateData parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.SnapshotTableStateData parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.SnapshotTableStateData parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.SnapshotTableStateData parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.SnapshotTableStateData parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.SnapshotTableStateData parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.SnapshotTableStateData parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.SnapshotTableStateData parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+
+ public static Builder newBuilder() { return Builder.create(); }
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.SnapshotTableStateData prototype) {
+ return newBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ /**
+ * Protobuf type {@code hbase.pb.SnapshotTableStateData}
+ */
+ public static final class Builder extends
+        com.google.protobuf.GeneratedMessage.Builder<Builder>
+ implements org.apache.hadoop.hbase.protobuf.generated.BackupProtos.SnapshotTableStateDataOrBuilder {
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_SnapshotTableStateData_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_SnapshotTableStateData_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hbase.protobuf.generated.BackupProtos.SnapshotTableStateData.class, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.SnapshotTableStateData.Builder.class);
+ }
+
+ // Construct using org.apache.hadoop.hbase.protobuf.generated.BackupProtos.SnapshotTableStateData.newBuilder()
+ private Builder() {
+ maybeForceBuilderInitialization();
+ }
+
+ private Builder(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ super(parent);
+ maybeForceBuilderInitialization();
+ }
+ private void maybeForceBuilderInitialization() {
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+ getTableFieldBuilder();
+ }
+ }
+ private static Builder create() {
+ return new Builder();
+ }
+
+ public Builder clear() {
+ super.clear();
+ if (tableBuilder_ == null) {
+ table_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance();
+ } else {
+ tableBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000001);
+ snapshotName_ = "";
+ bitField0_ = (bitField0_ & ~0x00000002);
+ return this;
+ }
+
+ public Builder clone() {
+ return create().mergeFrom(buildPartial());
+ }
+
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_SnapshotTableStateData_descriptor;
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.SnapshotTableStateData getDefaultInstanceForType() {
+ return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.SnapshotTableStateData.getDefaultInstance();
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.SnapshotTableStateData build() {
+ org.apache.hadoop.hbase.protobuf.generated.BackupProtos.SnapshotTableStateData result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return result;
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.SnapshotTableStateData buildPartial() {
+ org.apache.hadoop.hbase.protobuf.generated.BackupProtos.SnapshotTableStateData result = new org.apache.hadoop.hbase.protobuf.generated.BackupProtos.SnapshotTableStateData(this);
+ int from_bitField0_ = bitField0_;
+ int to_bitField0_ = 0;
+ if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+ to_bitField0_ |= 0x00000001;
+ }
+ if (tableBuilder_ == null) {
+ result.table_ = table_;
+ } else {
+ result.table_ = tableBuilder_.build();
+ }
+ if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
+ to_bitField0_ |= 0x00000002;
+ }
+ result.snapshotName_ = snapshotName_;
+ result.bitField0_ = to_bitField0_;
+ onBuilt();
+ return result;
+ }
+
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof org.apache.hadoop.hbase.protobuf.generated.BackupProtos.SnapshotTableStateData) {
+ return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.BackupProtos.SnapshotTableStateData)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.SnapshotTableStateData other) {
+ if (other == org.apache.hadoop.hbase.protobuf.generated.BackupProtos.SnapshotTableStateData.getDefaultInstance()) return this;
+ if (other.hasTable()) {
+ mergeTable(other.getTable());
+ }
+ if (other.hasSnapshotName()) {
+ bitField0_ |= 0x00000002;
+ snapshotName_ = other.snapshotName_;
+ onChanged();
+ }
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
-    public static com.google.protobuf.Internal.EnumLiteMap<BackupType>
-        internalGetValueMap() {
-      return internalValueMap;
-    }
-    private static com.google.protobuf.Internal.EnumLiteMap<BackupType>
-        internalValueMap =
-          new com.google.protobuf.Internal.EnumLiteMap<BackupType>() {
- public BackupType findValueByNumber(int number) {
- return BackupType.valueOf(number);
- }
- };
+ public final boolean isInitialized() {
+ if (!hasTable()) {
+
+ return false;
+ }
+ if (!hasSnapshotName()) {
+
+ return false;
+ }
+ if (!getTable().isInitialized()) {
+
+ return false;
+ }
+ return true;
+ }
- public final com.google.protobuf.Descriptors.EnumValueDescriptor
- getValueDescriptor() {
- return getDescriptor().getValues().get(index);
- }
- public final com.google.protobuf.Descriptors.EnumDescriptor
- getDescriptorForType() {
- return getDescriptor();
- }
- public static final com.google.protobuf.Descriptors.EnumDescriptor
- getDescriptor() {
- return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.getDescriptor().getEnumTypes().get(0);
- }
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ org.apache.hadoop.hbase.protobuf.generated.BackupProtos.SnapshotTableStateData parsedMessage = null;
+ try {
+ parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.BackupProtos.SnapshotTableStateData) e.getUnfinishedMessage();
+ throw e;
+ } finally {
+ if (parsedMessage != null) {
+ mergeFrom(parsedMessage);
+ }
+ }
+ return this;
+ }
+ private int bitField0_;
- private static final BackupType[] VALUES = values();
+ // required .hbase.pb.TableName table = 1;
+ private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName table_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance();
+ private com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> tableBuilder_;
+ /**
+ * required .hbase.pb.TableName table = 1;
+ */
+ public boolean hasTable() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * required .hbase.pb.TableName table = 1;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTable() {
+ if (tableBuilder_ == null) {
+ return table_;
+ } else {
+ return tableBuilder_.getMessage();
+ }
+ }
+ /**
+ * required .hbase.pb.TableName table = 1;
+ */
+ public Builder setTable(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) {
+ if (tableBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ table_ = value;
+ onChanged();
+ } else {
+ tableBuilder_.setMessage(value);
+ }
+ bitField0_ |= 0x00000001;
+ return this;
+ }
+ /**
+ * required .hbase.pb.TableName table = 1;
+ */
+ public Builder setTable(
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) {
+ if (tableBuilder_ == null) {
+ table_ = builderForValue.build();
+ onChanged();
+ } else {
+ tableBuilder_.setMessage(builderForValue.build());
+ }
+ bitField0_ |= 0x00000001;
+ return this;
+ }
+ /**
+ * required .hbase.pb.TableName table = 1;
+ */
+ public Builder mergeTable(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) {
+ if (tableBuilder_ == null) {
+ if (((bitField0_ & 0x00000001) == 0x00000001) &&
+ table_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance()) {
+ table_ =
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.newBuilder(table_).mergeFrom(value).buildPartial();
+ } else {
+ table_ = value;
+ }
+ onChanged();
+ } else {
+ tableBuilder_.mergeFrom(value);
+ }
+ bitField0_ |= 0x00000001;
+ return this;
+ }
+ /**
+ * required .hbase.pb.TableName table = 1;
+ */
+ public Builder clearTable() {
+ if (tableBuilder_ == null) {
+ table_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance();
+ onChanged();
+ } else {
+ tableBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000001);
+ return this;
+ }
+ /**
+ * required .hbase.pb.TableName table = 1;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder getTableBuilder() {
+ bitField0_ |= 0x00000001;
+ onChanged();
+ return getTableFieldBuilder().getBuilder();
+ }
+ /**
+ * required .hbase.pb.TableName table = 1;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableOrBuilder() {
+ if (tableBuilder_ != null) {
+ return tableBuilder_.getMessageOrBuilder();
+ } else {
+ return table_;
+ }
+ }
+ /**
+ * required .hbase.pb.TableName table = 1;
+ */
+ private com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder>
+ getTableFieldBuilder() {
+ if (tableBuilder_ == null) {
+ tableBuilder_ = new com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder>(
+ table_,
+ getParentForChildren(),
+ isClean());
+ table_ = null;
+ }
+ return tableBuilder_;
+ }
- public static BackupType valueOf(
- com.google.protobuf.Descriptors.EnumValueDescriptor desc) {
- if (desc.getType() != getDescriptor()) {
- throw new java.lang.IllegalArgumentException(
- "EnumValueDescriptor is not for this type.");
+ // required string snapshotName = 2;
+ private java.lang.Object snapshotName_ = "";
+ /**
+ * required string snapshotName = 2;
+ */
+ public boolean hasSnapshotName() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ /**
+ * required string snapshotName = 2;
+ */
+ public java.lang.String getSnapshotName() {
+ java.lang.Object ref = snapshotName_;
+ if (!(ref instanceof java.lang.String)) {
+ java.lang.String s = ((com.google.protobuf.ByteString) ref)
+ .toStringUtf8();
+ snapshotName_ = s;
+ return s;
+ } else {
+ return (java.lang.String) ref;
+ }
+ }
+ /**
+ * required string snapshotName = 2;
+ */
+ public com.google.protobuf.ByteString
+ getSnapshotNameBytes() {
+ java.lang.Object ref = snapshotName_;
+ if (ref instanceof String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ snapshotName_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+ /**
+ * required string snapshotName = 2;
+ */
+ public Builder setSnapshotName(
+ java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000002;
+ snapshotName_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * required string snapshotName = 2;
+ */
+ public Builder clearSnapshotName() {
+ bitField0_ = (bitField0_ & ~0x00000002);
+ snapshotName_ = getDefaultInstance().getSnapshotName();
+ onChanged();
+ return this;
+ }
+ /**
+ * required string snapshotName = 2;
+ */
+ public Builder setSnapshotNameBytes(
+ com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000002;
+ snapshotName_ = value;
+ onChanged();
+ return this;
}
- return VALUES[desc.getIndex()];
- }
- private final int index;
- private final int value;
+ // @@protoc_insertion_point(builder_scope:hbase.pb.SnapshotTableStateData)
+ }
- private BackupType(int index, int value) {
- this.index = index;
- this.value = value;
+ static {
+ defaultInstance = new SnapshotTableStateData(true);
+ defaultInstance.initFields();
}
- // @@protoc_insertion_point(enum_scope:hbase.pb.BackupType)
+ // @@protoc_insertion_point(class_scope:hbase.pb.SnapshotTableStateData)
}
public interface BackupImageOrBuilder
@@ -9010,6 +10008,11 @@ public final class BackupProtos {
}
private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_hbase_pb_SnapshotTableStateData_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_hbase_pb_SnapshotTableStateData_fieldAccessorTable;
+ private static com.google.protobuf.Descriptors.Descriptor
internal_static_hbase_pb_BackupImage_descriptor;
private static
com.google.protobuf.GeneratedMessage.FieldAccessorTable
@@ -9048,83 +10051,98 @@ public final class BackupProtos {
descriptor;
static {
java.lang.String[] descriptorData = {
- "\n\014Backup.proto\022\010hbase.pb\032\013HBase.proto\"\327\001" +
- "\n\013BackupImage\022\021\n\tbackup_id\030\001 \002(\t\022)\n\013back" +
- "up_type\030\002 \002(\0162\024.hbase.pb.BackupType\022\020\n\010r" +
- "oot_dir\030\003 \002(\t\022\'\n\ntable_list\030\004 \003(\0132\023.hbas" +
- "e.pb.TableName\022\020\n\010start_ts\030\005 \002(\004\022\023\n\013comp" +
- "lete_ts\030\006 \002(\004\022(\n\tancestors\030\007 \003(\0132\025.hbase" +
- ".pb.BackupImage\"4\n\017ServerTimestamp\022\016\n\006se" +
- "rver\030\001 \002(\t\022\021\n\ttimestamp\030\002 \002(\004\"o\n\024TableSe" +
- "rverTimestamp\022\"\n\005table\030\001 \002(\0132\023.hbase.pb." +
- "TableName\0223\n\020server_timestamp\030\002 \003(\0132\031.hb",
- "ase.pb.ServerTimestamp\"\313\002\n\016BackupManifes" +
- "t\022\017\n\007version\030\001 \002(\t\022\021\n\tbackup_id\030\002 \002(\t\022\"\n" +
- "\004type\030\003 \002(\0162\024.hbase.pb.BackupType\022\'\n\ntab" +
- "le_list\030\004 \003(\0132\023.hbase.pb.TableName\022\020\n\010st" +
- "art_ts\030\005 \002(\004\022\023\n\013complete_ts\030\006 \002(\004\022\023\n\013tot" +
- "al_bytes\030\007 \002(\003\022\021\n\tlog_bytes\030\010 \001(\003\022/\n\007tst" +
- "_map\030\t \003(\0132\036.hbase.pb.TableServerTimesta" +
- "mp\0225\n\026dependent_backup_image\030\n \003(\0132\025.hba" +
- "se.pb.BackupImage\022\021\n\tcompacted\030\013 \002(\010\"]\n\021" +
- "TableBackupStatus\022\"\n\005table\030\001 \002(\0132\023.hbase",
- ".pb.TableName\022\022\n\ntarget_dir\030\002 \002(\t\022\020\n\010sna" +
- "pshot\030\003 \001(\t\"\323\004\n\rBackupContext\022\021\n\tbackup_" +
- "id\030\001 \002(\t\022\"\n\004type\030\002 \002(\0162\024.hbase.pb.Backup" +
- "Type\022\027\n\017target_root_dir\030\003 \002(\t\0222\n\005state\030\004" +
- " \001(\0162#.hbase.pb.BackupContext.BackupStat" +
- "e\0222\n\005phase\030\005 \001(\0162#.hbase.pb.BackupContex" +
- "t.BackupPhase\022\026\n\016failed_message\030\006 \001(\t\0228\n" +
- "\023table_backup_status\030\007 \003(\0132\033.hbase.pb.Ta" +
- "bleBackupStatus\022\020\n\010start_ts\030\010 \001(\004\022\016\n\006end" +
- "_ts\030\t \001(\004\022\032\n\022total_bytes_copied\030\n \001(\003\022\027\n",
- "\017hlog_target_dir\030\013 \001(\t\022\020\n\010progress\030\014 \001(\r" +
- "\"P\n\013BackupState\022\013\n\007WAITING\020\000\022\013\n\007RUNNING\020" +
- "\001\022\014\n\010COMPLETE\020\002\022\n\n\006FAILED\020\003\022\r\n\tCANCELLED" +
- "\020\004\"}\n\013BackupPhase\022\013\n\007REQUEST\020\000\022\014\n\010SNAPSH" +
- "OT\020\001\022\027\n\023PREPARE_INCREMENTAL\020\002\022\020\n\014SNAPSHO" +
- "TCOPY\020\003\022\024\n\020INCREMENTAL_COPY\020\004\022\022\n\016STORE_M" +
- "ANIFEST\020\005*\'\n\nBackupType\022\010\n\004FULL\020\000\022\017\n\013INC" +
- "REMENTAL\020\001BB\n*org.apache.hadoop.hbase.pr" +
- "otobuf.generatedB\014BackupProtosH\001\210\001\001\240\001\001"
+ "\n\014Backup.proto\022\010hbase.pb\032\013HBase.proto\"R\n" +
+ "\026SnapshotTableStateData\022\"\n\005table\030\001 \002(\0132\023" +
+ ".hbase.pb.TableName\022\024\n\014snapshotName\030\002 \002(" +
+ "\t\"\327\001\n\013BackupImage\022\021\n\tbackup_id\030\001 \002(\t\022)\n\013" +
+ "backup_type\030\002 \002(\0162\024.hbase.pb.BackupType\022" +
+ "\020\n\010root_dir\030\003 \002(\t\022\'\n\ntable_list\030\004 \003(\0132\023." +
+ "hbase.pb.TableName\022\020\n\010start_ts\030\005 \002(\004\022\023\n\013" +
+ "complete_ts\030\006 \002(\004\022(\n\tancestors\030\007 \003(\0132\025.h" +
+ "base.pb.BackupImage\"4\n\017ServerTimestamp\022\016" +
+ "\n\006server\030\001 \002(\t\022\021\n\ttimestamp\030\002 \002(\004\"o\n\024Tab",
+ "leServerTimestamp\022\"\n\005table\030\001 \002(\0132\023.hbase" +
+ ".pb.TableName\0223\n\020server_timestamp\030\002 \003(\0132" +
+ "\031.hbase.pb.ServerTimestamp\"\313\002\n\016BackupMan" +
+ "ifest\022\017\n\007version\030\001 \002(\t\022\021\n\tbackup_id\030\002 \002(" +
+ "\t\022\"\n\004type\030\003 \002(\0162\024.hbase.pb.BackupType\022\'\n" +
+ "\ntable_list\030\004 \003(\0132\023.hbase.pb.TableName\022\020" +
+ "\n\010start_ts\030\005 \002(\004\022\023\n\013complete_ts\030\006 \002(\004\022\023\n" +
+ "\013total_bytes\030\007 \002(\003\022\021\n\tlog_bytes\030\010 \001(\003\022/\n" +
+ "\007tst_map\030\t \003(\0132\036.hbase.pb.TableServerTim" +
+ "estamp\0225\n\026dependent_backup_image\030\n \003(\0132\025",
+ ".hbase.pb.BackupImage\022\021\n\tcompacted\030\013 \002(\010" +
+ "\"]\n\021TableBackupStatus\022\"\n\005table\030\001 \002(\0132\023.h" +
+ "base.pb.TableName\022\022\n\ntarget_dir\030\002 \002(\t\022\020\n" +
+ "\010snapshot\030\003 \001(\t\"\323\004\n\rBackupContext\022\021\n\tbac" +
+ "kup_id\030\001 \002(\t\022\"\n\004type\030\002 \002(\0162\024.hbase.pb.Ba" +
+ "ckupType\022\027\n\017target_root_dir\030\003 \002(\t\0222\n\005sta" +
+ "te\030\004 \001(\0162#.hbase.pb.BackupContext.Backup" +
+ "State\0222\n\005phase\030\005 \001(\0162#.hbase.pb.BackupCo" +
+ "ntext.BackupPhase\022\026\n\016failed_message\030\006 \001(" +
+ "\t\0228\n\023table_backup_status\030\007 \003(\0132\033.hbase.p",
+ "b.TableBackupStatus\022\020\n\010start_ts\030\010 \001(\004\022\016\n" +
+ "\006end_ts\030\t \001(\004\022\032\n\022total_bytes_copied\030\n \001(" +
+ "\003\022\027\n\017hlog_target_dir\030\013 \001(\t\022\020\n\010progress\030\014" +
+ " \001(\r\"P\n\013BackupState\022\013\n\007WAITING\020\000\022\013\n\007RUNN" +
+ "ING\020\001\022\014\n\010COMPLETE\020\002\022\n\n\006FAILED\020\003\022\r\n\tCANCE" +
+ "LLED\020\004\"}\n\013BackupPhase\022\013\n\007REQUEST\020\000\022\014\n\010SN" +
+ "APSHOT\020\001\022\027\n\023PREPARE_INCREMENTAL\020\002\022\020\n\014SNA" +
+ "PSHOTCOPY\020\003\022\024\n\020INCREMENTAL_COPY\020\004\022\022\n\016STO" +
+ "RE_MANIFEST\020\005*k\n\024FullTableBackupState\022\026\n" +
+ "\022PRE_SNAPSHOT_TABLE\020\001\022\023\n\017SNAPSHOT_TABLES",
+ "\020\002\022\021\n\rSNAPSHOT_COPY\020\003\022\023\n\017BACKUP_COMPLETE" +
+ "\020\004*f\n\033IncrementalTableBackupState\022\027\n\023PRE" +
+ "PARE_INCREMENTAL\020\001\022\024\n\020INCREMENTAL_COPY\020\002" +
+ "\022\030\n\024INCR_BACKUP_COMPLETE\020\003*(\n\022SnapshotTa" +
+ "bleState\022\022\n\016SNAPSHOT_TABLE\020\001*\'\n\nBackupTy" +
+ "pe\022\010\n\004FULL\020\000\022\017\n\013INCREMENTAL\020\001BB\n*org.apa" +
+ "che.hadoop.hbase.protobuf.generatedB\014Bac" +
+ "kupProtosH\001\210\001\001\240\001\001"
};
com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
public com.google.protobuf.ExtensionRegistry assignDescriptors(
com.google.protobuf.Descriptors.FileDescriptor root) {
descriptor = root;
- internal_static_hbase_pb_BackupImage_descriptor =
+ internal_static_hbase_pb_SnapshotTableStateData_descriptor =
getDescriptor().getMessageTypes().get(0);
+ internal_static_hbase_pb_SnapshotTableStateData_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_hbase_pb_SnapshotTableStateData_descriptor,
+ new java.lang.String[] { "Table", "SnapshotName", });
+ internal_static_hbase_pb_BackupImage_descriptor =
+ getDescriptor().getMessageTypes().get(1);
internal_static_hbase_pb_BackupImage_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hbase_pb_BackupImage_descriptor,
new java.lang.String[] { "BackupId", "BackupType", "RootDir", "TableList", "StartTs", "CompleteTs", "Ancestors", });
internal_static_hbase_pb_ServerTimestamp_descriptor =
- getDescriptor().getMessageTypes().get(1);
+ getDescriptor().getMessageTypes().get(2);
internal_static_hbase_pb_ServerTimestamp_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hbase_pb_ServerTimestamp_descriptor,
new java.lang.String[] { "Server", "Timestamp", });
internal_static_hbase_pb_TableServerTimestamp_descriptor =
- getDescriptor().getMessageTypes().get(2);
+ getDescriptor().getMessageTypes().get(3);
internal_static_hbase_pb_TableServerTimestamp_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hbase_pb_TableServerTimestamp_descriptor,
new java.lang.String[] { "Table", "ServerTimestamp", });
internal_static_hbase_pb_BackupManifest_descriptor =
- getDescriptor().getMessageTypes().get(3);
+ getDescriptor().getMessageTypes().get(4);
internal_static_hbase_pb_BackupManifest_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hbase_pb_BackupManifest_descriptor,
new java.lang.String[] { "Version", "BackupId", "Type", "TableList", "StartTs", "CompleteTs", "TotalBytes", "LogBytes", "TstMap", "DependentBackupImage", "Compacted", });
internal_static_hbase_pb_TableBackupStatus_descriptor =
- getDescriptor().getMessageTypes().get(4);
+ getDescriptor().getMessageTypes().get(5);
internal_static_hbase_pb_TableBackupStatus_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hbase_pb_TableBackupStatus_descriptor,
new java.lang.String[] { "Table", "TargetDir", "Snapshot", });
internal_static_hbase_pb_BackupContext_descriptor =
- getDescriptor().getMessageTypes().get(5);
+ getDescriptor().getMessageTypes().get(6);
internal_static_hbase_pb_BackupContext_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hbase_pb_BackupContext_descriptor,
diff --git a/hbase-protocol/src/main/protobuf/Backup.proto b/hbase-protocol/src/main/protobuf/Backup.proto
index 383b990..a30dcf2 100644
--- a/hbase-protocol/src/main/protobuf/Backup.proto
+++ b/hbase-protocol/src/main/protobuf/Backup.proto
@@ -27,6 +27,27 @@ option optimize_for = SPEED;
import "HBase.proto";
+enum FullTableBackupState {
+ PRE_SNAPSHOT_TABLE = 1;
+ SNAPSHOT_TABLES = 2;
+ SNAPSHOT_COPY = 3;
+ BACKUP_COMPLETE = 4;
+}
+
+enum IncrementalTableBackupState {
+ PREPARE_INCREMENTAL = 1;
+ INCREMENTAL_COPY = 2;
+ INCR_BACKUP_COMPLETE = 3;
+}
+
+enum SnapshotTableState {
+ SNAPSHOT_TABLE = 1;
+}
+message SnapshotTableStateData {
+ required TableName table = 1;
+ required string snapshotName = 2;
+}
+
enum BackupType {
FULL = 0;
INCREMENTAL = 1;
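
As a reviewer aid (not part of the patch), here is a minimal, self-contained sketch of how the new SnapshotTableStateData message can be built and round-tripped through the generated API; the namespace, table, and snapshot names are made up for illustration.

import com.google.protobuf.ByteString;
import org.apache.hadoop.hbase.protobuf.generated.BackupProtos;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;

public class SnapshotTableStateDataExample {
  public static void main(String[] args) throws Exception {
    // Build the required TableName sub-message (namespace and qualifier are bytes fields).
    HBaseProtos.TableName table = HBaseProtos.TableName.newBuilder()
        .setNamespace(ByteString.copyFromUtf8("default"))
        .setQualifier(ByteString.copyFromUtf8("t1"))
        .build();
    // Both fields of SnapshotTableStateData are required, so set them before build().
    BackupProtos.SnapshotTableStateData data =
        BackupProtos.SnapshotTableStateData.newBuilder()
            .setTable(table)
            .setSnapshotName("snapshot_t1")
            .build();
    // Round-trip through the wire format.
    byte[] bytes = data.toByteArray();
    BackupProtos.SnapshotTableStateData parsed =
        BackupProtos.SnapshotTableStateData.parseFrom(bytes);
    System.out.println(parsed.getSnapshotName());
  }
}
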
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupContext.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupContext.java
index 1be0c3b..d436931 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupContext.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupContext.java
@@ -19,6 +19,7 @@
package org.apache.hadoop.hbase.backup.impl;
import java.io.IOException;
+import java.io.InputStream;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
@@ -308,7 +309,7 @@ public class BackupContext {
return null;
}
- public byte[] toByteArray() throws IOException {
+ BackupProtos.BackupContext toBackupContext() {
BackupProtos.BackupContext.Builder builder =
BackupProtos.BackupContext.newBuilder();
builder.setBackupId(getBackupId());
@@ -332,8 +333,11 @@ public class BackupContext {
builder.setTargetRootDir(getTargetRootDir());
builder.setTotalBytesCopied(getTotalBytesCopied());
builder.setType(BackupProtos.BackupType.valueOf(getType().name()));
- byte[] data = builder.build().toByteArray();
- return data;
+ return builder.build();
+ }
+
+ public byte[] toByteArray() throws IOException {
+ return toBackupContext().toByteArray();
}
private void setBackupStatusMap(Builder builder) {
@@ -343,9 +347,15 @@ public class BackupContext {
}
public static BackupContext fromByteArray(byte[] data) throws IOException {
+ return fromProto(BackupProtos.BackupContext.parseFrom(data));
+ }
+
+ public static BackupContext fromStream(final InputStream stream) throws IOException {
+ return fromProto(BackupProtos.BackupContext.parseDelimitedFrom(stream));
+ }
+ static BackupContext fromProto(BackupProtos.BackupContext proto) {
BackupContext context = new BackupContext();
- BackupProtos.BackupContext proto = BackupProtos.BackupContext.parseFrom(data);
context.setBackupId(proto.getBackupId());
context.setBackupStatusMap(toMap(proto.getTableBackupStatusList()));
context.setEndTs(proto.getEndTs());
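
For context (again, not part of the patch), the change above separates the proto conversion (toBackupContext) from the two deserialization entry points. A rough sketch of how the pairs are intended to line up, assuming the caller lives in the same package since toBackupContext() is package-private; the class name is made up:

package org.apache.hadoop.hbase.backup.impl;

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;

public class BackupContextSerdeSketch {

  // Byte-array round trip: toByteArray() pairs with fromByteArray().
  static BackupContext roundTripBytes(BackupContext ctx) throws IOException {
    return BackupContext.fromByteArray(ctx.toByteArray());
  }

  // Stream round trip: writeDelimitedTo() on the proto message is the natural
  // counterpart of parseDelimitedFrom(), which backs the new fromStream().
  static BackupContext roundTripStream(BackupContext ctx) throws IOException {
    ByteArrayOutputStream out = new ByteArrayOutputStream();
    ctx.toBackupContext().writeDelimitedTo(out);
    return BackupContext.fromStream(new ByteArrayInputStream(out.toByteArray()));
  }
}
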
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/FullTableBackupProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/FullTableBackupProcedure.java
new file mode 100644
index 0000000..5ccdd71
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/FullTableBackupProcedure.java
@@ -0,0 +1,630 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.backup.impl;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.backup.BackupRestoreFactory;
+import org.apache.hadoop.hbase.backup.BackupType;
+import org.apache.hadoop.hbase.backup.HBackupFileSystem;
+import org.apache.hadoop.hbase.backup.impl.BackupHandler.BackupPhase;
+import org.apache.hadoop.hbase.backup.impl.BackupHandler.BackupState;
+import org.apache.hadoop.hbase.backup.impl.BackupManifest.BackupImage;
+import org.apache.hadoop.hbase.backup.master.LogRollMasterProcedureManager;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
+import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
+import org.apache.hadoop.hbase.procedure2.StateMachineProcedure;
+import org.apache.hadoop.hbase.protobuf.generated.BackupProtos.FullTableBackupState;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+import org.apache.hadoop.hbase.util.FSUtils;
+
+@InterfaceAudience.Private
+public class FullTableBackupProcedure
+    extends StateMachineProcedure<MasterProcedureEnv, FullTableBackupState> {
+  private static final Log LOG = LogFactory.getLog(FullTableBackupProcedure.class);
+
+ private final AtomicBoolean aborted = new AtomicBoolean(false);
+ private Configuration conf;
+ private String backupId;
+  private List<TableName> tableList;
+ private String targetRootDir;
+
+ private BackupManager backupManager;
+ private BackupContext backupContext;
+
+ public FullTableBackupProcedure() {
+ // Required by the Procedure framework to create the procedure on replay
+ }
+
+ public FullTableBackupProcedure(final MasterProcedureEnv env,
+ final String backupId, BackupType type,
+      List<TableName> tableList, String targetRootDir)
+ throws IOException {
+ backupManager = new BackupManager(env.getMasterConfiguration());
+ this.backupId = backupId;
+ this.tableList = tableList;
+ this.targetRootDir = targetRootDir;
+ backupContext =
+ backupManager.createBackupContext(backupId, type, tableList, targetRootDir);
+ }
+
+ /**
+ * Begin the overall backup.
+ * @param backupContext backup context
+ * @throws IOException exception
+ */
+ static void beginBackup(BackupManager backupManager, BackupContext backupContext)
+ throws IOException {
+ // set the start timestamp of the overall backup
+ long startTs = EnvironmentEdgeManager.currentTime();
+ backupContext.setStartTs(startTs);
+ // set overall backup status: ongoing
+ backupContext.setState(BackupState.RUNNING);
+ LOG.info("Backup " + backupContext.getBackupId() + " started at " + startTs + ".");
+
+ backupManager.updateBackupStatus(backupContext);
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Backup session " + backupContext.getBackupId() + " has been started.");
+ }
+ }
+
+ private static String getMessage(Exception e) {
+ String msg = e.getMessage();
+ if (msg == null || msg.equals("")) {
+ msg = e.getClass().getName();
+ }
+ return msg;
+ }
+
+ /**
+   * Delete the HBase snapshots taken for this backup.
+   * @param backupCtx backup context
+   * @throws IOException exception
+ */
+ private static void deleteSnapshot(BackupContext backupCtx, Configuration conf)
+ throws IOException {
+ LOG.debug("Trying to delete snapshot for full backup.");
+ Connection conn = null;
+ Admin admin = null;
+ try {
+ conn = ConnectionFactory.createConnection(conf);
+ admin = conn.getAdmin();
+ for (String snapshotName : backupCtx.getSnapshotNames()) {
+ if (snapshotName == null) {
+ continue;
+ }
+ LOG.debug("Trying to delete snapshot: " + snapshotName);
+ admin.deleteSnapshot(snapshotName);
+ LOG.debug("Deleting the snapshot " + snapshotName + " for backup "
+ + backupCtx.getBackupId() + " succeeded.");
+ }
+ } finally {
+ if (admin != null) {
+ admin.close();
+ }
+ if (conn != null) {
+ conn.close();
+ }
+ }
+ }
+
+ /**
+ * Clean up directories with prefix "exportSnapshot-", which are generated when exporting
+ * snapshots.
+ * @throws IOException exception
+ */
+ private static void cleanupExportSnapshotLog(Configuration conf) throws IOException {
+ FileSystem fs = FSUtils.getCurrentFileSystem(conf);
+ Path stagingDir =
+ new Path(conf.get(BackupRestoreConstants.CONF_STAGING_ROOT, fs.getWorkingDirectory()
+ .toString()));
+ FileStatus[] files = FSUtils.listStatus(fs, stagingDir);
+ if (files == null) {
+ return;
+ }
+ for (FileStatus file : files) {
+ if (file.getPath().getName().startsWith("exportSnapshot-")) {
+        LOG.debug("Deleting exportSnapshot log dir: " + file.getPath().getName());
+        if (!FSUtils.delete(fs, file.getPath(), true)) {
+          LOG.warn("Cannot delete " + file.getPath());
+ }
+ }
+ }
+ }
+
+ /**
+   * Clean up the uncompleted data at the target directory if the ongoing backup has already
+   * entered the copy phase.
+ */
+ private static void cleanupTargetDir(BackupContext backupContext, Configuration conf) {
+ try {
+      // clean up the uncompleted data at the target directory if the ongoing backup has
+      // already entered the copy phase
+      LOG.debug("Trying to clean up the target dir. Current backup phase: "
+ + backupContext.getPhase());
+ if (backupContext.getPhase().equals(BackupPhase.SNAPSHOTCOPY)
+ || backupContext.getPhase().equals(BackupPhase.INCREMENTAL_COPY)
+ || backupContext.getPhase().equals(BackupPhase.STORE_MANIFEST)) {
+ FileSystem outputFs =
+ FileSystem.get(new Path(backupContext.getTargetRootDir()).toUri(), conf);
+
+ // now treat one backup as a transaction, clean up data that has been partially copied at
+ // table level
+ for (TableName table : backupContext.getTables()) {
+ Path targetDirPath =
+ new Path(HBackupFileSystem.getTableBackupDir(backupContext.getTargetRootDir(),
+ backupContext.getBackupId(), table));
+ if (outputFs.delete(targetDirPath, true)) {
+ LOG.info("Cleaning up uncompleted backup data at " + targetDirPath.toString()
+ + " done.");
+ } else {
+ LOG.info("No data has been copied to " + targetDirPath.toString() + ".");
+ }
+
+ Path tableDir = targetDirPath.getParent();
+ FileStatus[] backups = FSUtils.listStatus(outputFs, tableDir);
+ if (backups == null || backups.length == 0) {
+ outputFs.delete(tableDir, true);
+ LOG.debug(tableDir.toString() + " is empty, remove it.");
+ }
+ }
+ }
+
+ } catch (IOException e1) {
+ LOG.error("Cleaning up uncompleted backup data of " + backupContext.getBackupId() + " at "
+ + backupContext.getTargetRootDir() + " failed due to " + e1.getMessage() + ".");
+ }
+ }
+
+ /**
+ * Fail the overall backup.
+ * @param backupContext backup context
+ * @param e exception
+   * @throws IOException exception
+ */
+ static void failBackup(BackupContext backupContext, BackupManager backupManager, Exception e,
+ String msg, BackupType type, Configuration conf) throws IOException {
+ LOG.error(msg + getMessage(e));
+ // If this is a cancel exception, then we've already cleaned.
+
+ if (backupContext.getState().equals(BackupState.CANCELLED)) {
+ return;
+ }
+
+ // set the failure timestamp of the overall backup
+ backupContext.setEndTs(EnvironmentEdgeManager.currentTime());
+
+ // set failure message
+ backupContext.setFailedMsg(e.getMessage());
+
+ // set overall backup status: failed
+ backupContext.setState(BackupState.FAILED);
+
+ // compose the backup failed data
+ String backupFailedData =
+ "BackupId=" + backupContext.getBackupId() + ",startts=" + backupContext.getStartTs()
+ + ",failedts=" + backupContext.getEndTs() + ",failedphase=" + backupContext.getPhase()
+ + ",failedmessage=" + backupContext.getFailedMsg();
+ LOG.error(backupFailedData);
+
+ backupManager.updateBackupStatus(backupContext);
+
+ // if full backup, then delete HBase snapshots if there already are snapshots taken
+ // and also clean up export snapshot log files if exist
+ if (type == BackupType.FULL) {
+ deleteSnapshot(backupContext, conf);
+ cleanupExportSnapshotLog(conf);
+ }
+
+ // clean up the uncompleted data at target directory if the ongoing backup has already entered
+ // the copy phase
+ // For incremental backup, DistCp logs will be cleaned with the targetDir.
+ cleanupTargetDir(backupContext, conf);
+
+ LOG.info("Backup " + backupContext.getBackupId() + " failed.");
+ }
+
+ /**
+ * Do snapshot copy.
+ * @param backupContext backup context
+ * @throws Exception exception
+ */
+ private void snapshotCopy(BackupContext backupContext) throws Exception {
+ LOG.info("Snapshot copy is starting.");
+
+ // set overall backup phase: snapshot_copy
+ backupContext.setPhase(BackupPhase.SNAPSHOTCOPY);
+
+ // skip the action if the backup has been cancelled
+ if (backupContext.isCancelled()) {
+ return;
+ }
+
+ // call ExportSnapshot to copy the files of each HBase snapshot for the backup;
+ // ExportSnapshot only supports exporting a single snapshot, so loop over the tables
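+ // For reference, the args below mirror the ExportSnapshot tool's command line; a rough,
+ // illustrative invocation would be:
+ //   hbase org.apache.hadoop.hbase.snapshot.ExportSnapshot -snapshot <name> -copy-to <dir>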
+ BackupCopyService copyService = BackupRestoreFactory.getBackupCopyService(conf);
+
+ // number of snapshots matches number of tables
+ float numOfSnapshots = backupContext.getSnapshotNames().size();
+
+ LOG.debug("There are " + (int) numOfSnapshots + " snapshots to be copied.");
+
+ for (TableName table : backupContext.getTables()) {
+ // Currently we simply split the copy sub-tasks by the number of table snapshots; we could
+ // calculate the real file sizes for the progress percentage in the future.
+ // TODO this below
+ // backupCopier.setSubTaskPercntgInWholeTask(1f / numOfSnapshots);
+ int res = 0;
+ String[] args = new String[4];
+ args[0] = "-snapshot";
+ args[1] = backupContext.getSnapshotName(table);
+ args[2] = "-copy-to";
+ args[3] = backupContext.getBackupStatus(table).getTargetDir();
+
+ LOG.debug("Copy snapshot " + args[1] + " to " + args[3]);
+ res = copyService.copy(backupContext, backupManager, conf, BackupCopyService.Type.FULL, args);
+ // if one snapshot export fails, do not continue with the remaining snapshots
+ if (res != 0) {
+ LOG.error("Exporting Snapshot " + args[1] + " failed with return code: " + res + ".");
+
+ throw new IOException("Failed of exporting snapshot " + args[1] + " to " + args[3]
+ + " with reason code " + res);
+ }
+
+ LOG.info("Snapshot copy " + args[1] + " finished.");
+ }
+ }
+
+ /**
+ * Add a manifest for the current backup. The manifest is stored
+ * within the table backup directory.
+ * @param backupContext The current backup context
+ * @param backupManager backup manager
+ * @param type backup type
+ * @param conf configuration
+ * @throws IOException exception
+ * @throws BackupException exception
+ */
+ private static void addManifest(BackupContext backupContext, BackupManager backupManager,
+ BackupType type, Configuration conf) throws IOException, BackupException {
+ // set the overall backup phase: store manifest
+ backupContext.setPhase(BackupPhase.STORE_MANIFEST);
+
+ // skip the action if the backup has been cancelled
+ if (backupContext.isCancelled()) {
+ return;
+ }
+
+ BackupManifest manifest;
+
+ // Since we have each table's backup in its own directory structure,
+ // we'll store its manifest with the table directory.
+ for (TableName table : backupContext.getTables()) {
+ manifest = new BackupManifest(backupContext, table);
+ ArrayList<BackupImage> ancestors = backupManager.getAncestors(backupContext, table);
+ for (BackupImage image : ancestors) {
+ manifest.addDependentImage(image);
+ }
+
+ if (type == BackupType.INCREMENTAL) {
+ // We'll store the log timestamps for this table only in its manifest.
+ HashMap<TableName, HashMap<String, Long>> tableTimestampMap =
+ new HashMap<TableName, HashMap<String, Long>>();
+ tableTimestampMap.put(table, backupContext.getIncrTimestampMap().get(table));
+ manifest.setIncrTimestampMap(tableTimestampMap);
+ }
+ manifest.store(conf);
+ }
+
+ // For incremental backup, we store an overall manifest in
+ // /WALs/
+ // This is used when creating the next incremental backup
+ if (type == BackupType.INCREMENTAL) {
+ manifest = new BackupManifest(backupContext);
+ // set the table region server start and end timestamps for incremental backup
+ manifest.setIncrTimestampMap(backupContext.getIncrTimestampMap());
+ ArrayList<BackupImage> ancestors = backupManager.getAncestors(backupContext);
+ for (BackupImage image : ancestors) {
+ manifest.addDependentImage(image);
+ }
+ manifest.store(conf);
+ }
+ }
+
+ /**
+ * Compose the backup request metadata (type, table list, target root dir) as a string.
+ * @param backupContext backup context
+ * @return metadata string
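+ * Example (illustrative): "type=FULL,tablelist=t1;t2,targetRootDir=hdfs://host:8020/backup"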
+ */
+ private static String obtainBackupMetaDataStr(BackupContext backupContext) {
+ StringBuffer sb = new StringBuffer();
+ sb.append("type=" + backupContext.getType() + ",tablelist=");
+ for (TableName table : backupContext.getTables()) {
+ sb.append(table + ";");
+ }
+ if (sb.lastIndexOf(";") > 0) {
+ sb.delete(sb.lastIndexOf(";"), sb.lastIndexOf(";") + 1);
+ }
+ sb.append(",targetRootDir=" + backupContext.getTargetRootDir());
+
+ return sb.toString();
+ }
+
+ /**
+ * Clean up directories with the prefix "_distcp_logs-", which are generated when DistCp
+ * copies WALs (hlogs).
+ * @throws IOException exception
+ */
+ private static void cleanupDistCpLog(BackupContext backupContext, Configuration conf)
+ throws IOException {
+ Path rootPath = new Path(backupContext.getHLogTargetDir()).getParent();
+ FileSystem fs = FileSystem.get(rootPath.toUri(), conf);
+ FileStatus[] files = FSUtils.listStatus(fs, rootPath);
+ if (files == null) {
+ return;
+ }
+ for (FileStatus file : files) {
+ if (file.getPath().getName().startsWith("_distcp_logs")) {
+ LOG.debug("Delete log files of DistCp: " + file.getPath().getName());
+ FSUtils.delete(fs, file.getPath(), true);
+ }
+ }
+ }
+
+ /**
+ * Complete the overall backup.
+ * @param backupContext backup context
+ * @param backupManager backup manager
+ * @param type backup type
+ * @param conf configuration
+ * @throws IOException exception
+ */
+ static void completeBackup(BackupContext backupContext, BackupManager backupManager,
+ BackupType type, Configuration conf) throws IOException {
+ // set the complete timestamp of the overall backup
+ backupContext.setEndTs(EnvironmentEdgeManager.currentTime());
+ // set overall backup status: complete
+ backupContext.setState(BackupState.COMPLETE);
+ // add and store the manifest for the backup
+ addManifest(backupContext, backupManager, type, conf);
+
+ // after the major steps are done and the manifest is persisted, do the conversion if needed
+ // for incremental backup
+ /* in-flight conversion code goes here, to be provided by a future JIRA */
+ LOG.debug("in-flight conversion code goes here, to be provided by a future JIRA");
+
+ // compose the backup complete data
+ String backupCompleteData =
+ obtainBackupMetaDataStr(backupContext) + ",startts=" + backupContext.getStartTs()
+ + ",completets=" + backupContext.getEndTs() + ",bytescopied="
+ + backupContext.getTotalBytesCopied();
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Backup " + backupContext.getBackupId() + " finished: " + backupCompleteData);
+ }
+ backupManager.updateBackupStatus(backupContext);
+
+ // when full backup is done:
+ // - delete HBase snapshot
+ // - clean up directories with prefix "exportSnapshot-", which are generated when exporting
+ // snapshots
+ if (type == BackupType.FULL) {
+ deleteSnapshot(backupContext, conf);
+ cleanupExportSnapshotLog(conf);
+ } else if (type == BackupType.INCREMENTAL) {
+ cleanupDistCpLog(backupContext, conf);
+ }
+
+ LOG.info("Backup " + backupContext.getBackupId() + " completed.");
+ }
+
+ @Override
+ protected Flow executeFromState(final MasterProcedureEnv env, final FullTableBackupState state)
+ throws InterruptedException {
+ if (conf == null) {
+ conf = env.getMasterConfiguration();
+ }
+ if (backupManager == null) {
+ try {
+ backupManager = new BackupManager(env.getMasterConfiguration());
+ } catch (IOException ioe) {
+ setFailure("full backup", ioe);
+ }
+ }
+ if (LOG.isTraceEnabled()) {
+ LOG.trace(this + " execute state=" + state);
+ }
+ HashMap<String, Long> newTimestamps = null;
+ try {
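+ // State flow: PRE_SNAPSHOT_TABLE -> SNAPSHOT_TABLES -> SNAPSHOT_COPY -> BACKUP_COMPLETE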
+ switch (state) {
+ case PRE_SNAPSHOT_TABLE:
+ beginBackup(backupManager, backupContext);
+ String savedStartCode = null;
+ boolean firstBackup = false;
+ // do snapshot for full table backup
+
+ try {
+ savedStartCode = backupManager.readBackupStartCode();
+ firstBackup = savedStartCode == null;
+ if (firstBackup) {
+ // This is our first backup. Let's put some marker on ZK so that we can hold the logs
+ // while we do the backup.
+ backupManager.writeBackupStartCode(0L);
+ }
+ // We roll log here before we do the snapshot. It is possible there is duplicate data
+ // in the log that is already in the snapshot. But if we do it after the snapshot, we
+ // could have data loss.
+ // A better approach is to do the roll log on each RS in the same global procedure as
+ // the snapshot.
+ LOG.info("Execute roll log procedure for full backup ...");
+ Admin admin = ConnectionFactory.createConnection(env.getMasterConfiguration())
+ .getAdmin();
+ admin.execProcedure(LogRollMasterProcedureManager.ROLLLOG_PROCEDURE_SIGNATURE,
+ LogRollMasterProcedureManager.ROLLLOG_PROCEDURE_NAME, new HashMap<String, String>());
+ newTimestamps = backupManager.readRegionServerLastLogRollResult();
+ if (firstBackup) {
+ // Updates registered log files
+ // We record ALL old WAL files as registered, because
+ // this is the first full backup in the system and these
+ // files are not needed for the next incremental backup
+ List<String> logFiles = BackupUtil.getWALFilesOlderThan(conf, newTimestamps);
+ backupManager.recordWALFiles(logFiles);
+ }
+ } catch (BackupException e) {
+ // fail the overall backup and return
+ failBackup(backupContext, backupManager, e, "Unexpected BackupException : ",
+ BackupType.FULL, conf);
+ return null;
+ }
+ setNextState(FullTableBackupState.SNAPSHOT_TABLES);
+ break;
+ case SNAPSHOT_TABLES:
+ for (TableName tableName : tableList) {
+ addChildProcedure(new SnapshotTableProcedure(env, tableName,
+ "snapshot_" + Long.toString(EnvironmentEdgeManager.currentTime()) + "_"
+ + tableName.getNamespaceAsString() + "_" + tableName.getQualifierAsString()));
+ }
+ setNextState(FullTableBackupState.SNAPSHOT_COPY);
+ break;
+ case SNAPSHOT_COPY:
+ // do snapshot copy
+ try {
+ this.snapshotCopy(backupContext);
+ } catch (Exception e) {
+ // fail the overall backup and return
+ failBackup(backupContext, backupManager, e, "Unexpected BackupException : ",
+ BackupType.FULL, conf);
+ return null;
+ }
+ // Updates incremental backup table set
+ backupManager.addIncrementalBackupTableSet(backupContext.getTables());
+ setNextState(FullTableBackupState.BACKUP_COMPLETE);
+ break;
+
+ case BACKUP_COMPLETE:
+ // set overall backup status: complete. Here we make sure to complete the backup. After this
+ // checkpoint, even if a cancel request comes in, the backup is allowed to finish
+ backupContext.setState(BackupState.COMPLETE);
+ // The table list in backupContext is good for both full backup and incremental backup.
+ // For incremental backup, it contains the incremental backup table set.
+ backupManager.writeRegionServerLogTimestamp(backupContext.getTables(), newTimestamps);
+
+ HashMap<TableName, HashMap<String, Long>> newTableSetTimestampMap =
+ backupManager.readLogTimestampMap();
+
+ Long newStartCode =
+ BackupUtil.getMinValue(BackupUtil.getRSLogTimestampMins(newTableSetTimestampMap));
+ backupManager.writeBackupStartCode(newStartCode);
+
+ // backup complete
+ completeBackup(backupContext, backupManager, BackupType.FULL, conf);
+ return Flow.NO_MORE_STATE;
+
+ default:
+ throw new UnsupportedOperationException("unhandled state=" + state);
+ }
+ } catch (IOException e) {
+ setFailure("snapshot-table", e);
+ }
+ return Flow.HAS_MORE_STATE;
+ }
+
+ @Override
+ protected void rollbackState(final MasterProcedureEnv env, final FullTableBackupState state)
+ throws IOException {
+ }
+
+ @Override
+ protected FullTableBackupState getState(final int stateId) {
+ return FullTableBackupState.valueOf(stateId);
+ }
+
+ @Override
+ protected int getStateId(final FullTableBackupState state) {
+ return state.getNumber();
+ }
+
+ @Override
+ protected FullTableBackupState getInitialState() {
+ return FullTableBackupState.PRE_SNAPSHOT_TABLE;
+ }
+
+ @Override
+ protected void setNextState(final FullTableBackupState state) {
+ if (aborted.get()) {
+ setAbortFailure("snapshot-table", "abort requested");
+ } else {
+ super.setNextState(state);
+ }
+ }
+
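+ // abort() only sets a flag; the next setNextState(...) call turns it into an abort failure.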
+ @Override
+ public boolean abort(final MasterProcedureEnv env) {
+ aborted.set(true);
+ return true;
+ }
+
+ @Override
+ public void toStringClassDetails(StringBuilder sb) {
+ sb.append(getClass().getSimpleName());
+ sb.append(" (targetRootDir=");
+ sb.append(targetRootDir);
+ sb.append(")");
+ }
+
+ @Override
+ public void serializeStateData(final OutputStream stream) throws IOException {
+ super.serializeStateData(stream);
+
+ backupContext.toBackupContext().writeDelimitedTo(stream);
+ }
+
+ @Override
+ public void deserializeStateData(final InputStream stream) throws IOException {
+ super.deserializeStateData(stream);
+
+ backupContext = BackupContext.fromStream(stream);
+ backupId = backupContext.getBackupId();
+ targetRootDir = backupContext.getTargetRootDir();
+ tableList = backupContext.getTableNames();
+ }
+
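+ // Backup procedures serialize on an exclusive lock of the backup system table, so only one
+ // backup procedure can run at a time (assuming all of them take this same lock).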
+ @Override
+ protected boolean acquireLock(final MasterProcedureEnv env) {
+ /*
+ if (env.waitInitialized(this)) {
+ return false;
+ } */
+ return env.getProcedureQueue().tryAcquireTableExclusiveLock(this,
+ BackupSystemTable.getTableName());
+ }
+
+ @Override
+ protected void releaseLock(final MasterProcedureEnv env) {
+ env.getProcedureQueue().releaseTableExclusiveLock(this, BackupSystemTable.getTableName());
+ }
+}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalTableBackupProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalTableBackupProcedure.java
new file mode 100644
index 0000000..26dcd61
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalTableBackupProcedure.java
@@ -0,0 +1,286 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.backup.impl;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.backup.BackupRestoreFactory;
+import org.apache.hadoop.hbase.backup.BackupType;
+import org.apache.hadoop.hbase.backup.impl.BackupHandler.BackupPhase;
+import org.apache.hadoop.hbase.backup.impl.BackupHandler.BackupState;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
+import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
+import org.apache.hadoop.hbase.procedure2.StateMachineProcedure;
+import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.protobuf.generated.BackupProtos;
+import org.apache.hadoop.hbase.protobuf.generated.BackupProtos.IncrementalTableBackupState;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+
+@InterfaceAudience.Private
+public class IncrementalTableBackupProcedure
+ extends StateMachineProcedure<MasterProcedureEnv, IncrementalTableBackupState> {
+ private static final Log LOG = LogFactory.getLog(IncrementalTableBackupProcedure.class);
+
+ private final AtomicBoolean aborted = new AtomicBoolean(false);
+ private Configuration conf;
+ private String backupId;
+ private List<TableName> tableList;
+ private String targetRootDir;
+
+ private BackupManager backupManager;
+ private BackupContext backupContext;
+
+ public IncrementalTableBackupProcedure() {
+ // Required by the Procedure framework to create the procedure on replay
+ }
+
+ public IncrementalTableBackupProcedure(final MasterProcedureEnv env,
+ final String backupId, BackupType type,
+ List<TableName> tableList, String targetRootDir)
+ throws IOException {
+ backupManager = new BackupManager(env.getMasterConfiguration());
+ this.backupId = backupId;
+ this.tableList = tableList;
+ this.targetRootDir = targetRootDir;
+ backupContext =
+ backupManager.createBackupContext(backupId, type, tableList, targetRootDir);
+ }
+
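+ /**
+ * Filter out WAL files that no longer exist on the file system (they have been copied by
+ * previous backups), so DistCp does not fail on missing source paths.
+ */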
+ private List<String> filterMissingFiles(List<String> incrBackupFileList) throws IOException {
+ FileSystem fs = FileSystem.get(conf);
+ List<String> list = new ArrayList<String>();
+ for (String file : incrBackupFileList) {
+ if (fs.exists(new Path(file))) {
+ list.add(file);
+ } else {
+ LOG.warn("Can't find file: " + file);
+ }
+ }
+ return list;
+ }
+
+ /**
+ * Do incremental copy.
+ * @param backupContext backup context
+ */
+ private void incrementalCopy(BackupContext backupContext) throws Exception {
+
+ LOG.info("Incremental copy is starting.");
+
+ // set overall backup phase: incremental_copy
+ backupContext.setPhase(BackupPhase.INCREMENTAL_COPY);
+
+ // skip the action if the backup has been cancelled
+ if (backupContext.isCancelled()) {
+ return;
+ }
+
+ // get incremental backup file list and prepare params for DistCp
+ List<String> incrBackupFileList = backupContext.getIncrBackupFileList();
+ // filter missing files out (they have been copied by previous backups)
+ incrBackupFileList = filterMissingFiles(incrBackupFileList);
+ String[] strArr = incrBackupFileList.toArray(new String[incrBackupFileList.size() + 1]);
+ strArr[strArr.length - 1] = backupContext.getHLogTargetDir();
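+ // (The array above is sized one larger than the file list; the extra last slot carries the
+ // DistCp destination directory.)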
+
+ BackupCopyService copyService = BackupRestoreFactory.getBackupCopyService(conf);
+ int res = copyService.copy(backupContext, backupManager, conf,
+ BackupCopyService.Type.INCREMENTAL, strArr);
+
+ if (res != 0) {
+ LOG.error("Copy incremental log files failed with return code: " + res + ".");
+ throw new IOException("Failed of Hadoop Distributed Copy from " + incrBackupFileList + " to "
+ + backupContext.getHLogTargetDir());
+ }
+ LOG.info("Incremental copy from " + incrBackupFileList + " to "
+ + backupContext.getHLogTargetDir() + " finished.");
+ }
+
+ @Override
+ protected Flow executeFromState(final MasterProcedureEnv env, final IncrementalTableBackupState state)
+ throws InterruptedException {
+ if (conf == null) {
+ conf = env.getMasterConfiguration();
+ }
+ if (backupManager == null) {
+ try {
+ backupManager = new BackupManager(env.getMasterConfiguration());
+ } catch (IOException ioe) {
+ setFailure("incremental backup", ioe);
+ }
+ }
+ if (LOG.isTraceEnabled()) {
+ LOG.trace(this + " execute state=" + state);
+ }
+ HashMap<String, Long> newTimestamps = null;
+ try {
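+ // State flow: PREPARE_INCREMENTAL -> INCREMENTAL_COPY -> INCR_BACKUP_COMPLETE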
+ switch (state) {
+ case PREPARE_INCREMENTAL:
+ FullTableBackupProcedure.beginBackup(backupManager, backupContext);
+ LOG.debug("For incremental backup, current table set is "
+ + backupManager.getIncrementalBackupTableSet());
+ try {
+ IncrementalBackupManager incrBackupManager = new IncrementalBackupManager(backupManager);
+
+ newTimestamps = incrBackupManager.getIncrBackupLogFileList(backupContext);
+ } catch (Exception e) {
+ // fail the overall backup and return
+ FullTableBackupProcedure.failBackup(backupContext, backupManager, e,
+ "Unexpected Exception : ", BackupType.INCREMENTAL, conf);
+ }
+
+ setNextState(IncrementalTableBackupState.INCREMENTAL_COPY);
+ break;
+ case INCREMENTAL_COPY:
+ try {
+ // copy out the table and region info files for each table
+ BackupUtil.copyTableRegionInfo(backupContext, conf);
+ incrementalCopy(backupContext);
+ // Save list of WAL files copied
+ backupManager.recordWALFiles(backupContext.getIncrBackupFileList());
+ } catch (Exception e) {
+ // fail the overall backup and return
+ FullTableBackupProcedure.failBackup(backupContext, backupManager, e,
+ "Unexpected exception doing incremental copy : ", BackupType.INCREMENTAL, conf);
+ }
+ setNextState(IncrementalTableBackupState.INCR_BACKUP_COMPLETE);
+ break;
+ case INCR_BACKUP_COMPLETE:
+ // set overall backup status: complete. Here we make sure to complete the backup. After this
+ // checkpoint, even if a cancel request comes in, the backup is allowed to finish
+ backupContext.setState(BackupState.COMPLETE);
+ // Set the previousTimestampMap, the timestamp map recorded before this log roll, into the manifest.
+ HashMap<TableName, HashMap<String, Long>> previousTimestampMap =
+ backupManager.readLogTimestampMap();
+ backupContext.setIncrTimestampMap(previousTimestampMap);
+
+ // The table list in backupContext is good for both full backup and incremental backup.
+ // For incremental backup, it contains the incremental backup table set.
+ backupManager.writeRegionServerLogTimestamp(backupContext.getTables(), newTimestamps);
+
+ HashMap<TableName, HashMap<String, Long>> newTableSetTimestampMap =
+ backupManager.readLogTimestampMap();
+
+ Long newStartCode =
+ BackupUtil.getMinValue(BackupUtil.getRSLogTimestampMins(newTableSetTimestampMap));
+ backupManager.writeBackupStartCode(newStartCode);
+ // backup complete
+ FullTableBackupProcedure.completeBackup(backupContext, backupManager,
+ BackupType.INCREMENTAL, conf);
+ return Flow.NO_MORE_STATE;
+
+ default:
+ throw new UnsupportedOperationException("unhandled state=" + state);
+ }
+ } catch (IOException e) {
+ setFailure("snapshot-table", e);
+ }
+ return Flow.HAS_MORE_STATE;
+ }
+
+ @Override
+ protected void rollbackState(final MasterProcedureEnv env, final IncrementalTableBackupState state)
+ throws IOException {
+ }
+
+ @Override
+ protected IncrementalTableBackupState getState(final int stateId) {
+ return IncrementalTableBackupState.valueOf(stateId);
+ }
+
+ @Override
+ protected int getStateId(final IncrementalTableBackupState state) {
+ return state.getNumber();
+ }
+
+ @Override
+ protected IncrementalTableBackupState getInitialState() {
+ return IncrementalTableBackupState.PREPARE_INCREMENTAL;
+ }
+
+ @Override
+ protected void setNextState(final IncrementalTableBackupState state) {
+ if (aborted.get()) {
+ setAbortFailure("snapshot-table", "abort requested");
+ } else {
+ super.setNextState(state);
+ }
+ }
+
+ @Override
+ public boolean abort(final MasterProcedureEnv env) {
+ aborted.set(true);
+ return true;
+ }
+
+ @Override
+ public void toStringClassDetails(StringBuilder sb) {
+ sb.append(getClass().getSimpleName());
+ sb.append(" (targetRootDir=");
+ sb.append(targetRootDir);
+ sb.append(")");
+ }
+
+ @Override
+ public void serializeStateData(final OutputStream stream) throws IOException {
+ super.serializeStateData(stream);
+
+ backupContext.toBackupContext().writeDelimitedTo(stream);
+ }
+
+ @Override
+ public void deserializeStateData(final InputStream stream) throws IOException {
+ super.deserializeStateData(stream);
+
+ backupContext = BackupContext.fromStream(stream);
+ backupId = backupContext.getBackupId();
+ targetRootDir = backupContext.getTargetRootDir();
+ tableList = backupContext.getTableNames();
+ }
+
+ @Override
+ protected boolean acquireLock(final MasterProcedureEnv env) {
+ /*
+ if (env.waitInitialized(this)) {
+ return false;
+ } */
+ return env.getProcedureQueue().tryAcquireTableExclusiveLock(this,
+ BackupSystemTable.getTableName());
+ }
+
+ @Override
+ protected void releaseLock(final MasterProcedureEnv env) {
+ env.getProcedureQueue().releaseTableExclusiveLock(this, BackupSystemTable.getTableName());
+ }
+}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/SnapshotTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/SnapshotTableProcedure.java
new file mode 100644
index 0000000..8d8da2c
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/SnapshotTableProcedure.java
@@ -0,0 +1,189 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.backup.impl;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
+import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
+import org.apache.hadoop.hbase.procedure2.StateMachineProcedure;
+import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.protobuf.generated.BackupProtos;
+import org.apache.hadoop.hbase.protobuf.generated.BackupProtos.SnapshotTableState;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
+
+@InterfaceAudience.Private
+public class SnapshotTableProcedure
+ extends StateMachineProcedure<MasterProcedureEnv, SnapshotTableState> {
+ private static final Log LOG = LogFactory.getLog(SnapshotTableProcedure.class);
+
+ private final AtomicBoolean aborted = new AtomicBoolean(false);
+ private TableName tableName;
+ private String snapshotName;
+
+ public SnapshotTableProcedure() {
+ // Required by the Procedure framework to create the procedure on replay
+ }
+
+ public SnapshotTableProcedure(final MasterProcedureEnv env,
+ final TableName tableName, final String snapshotName)
+ throws IOException {
+ this.snapshotName = snapshotName;
+ this.tableName = tableName;
+ }
+
+ @Override
+ protected Flow executeFromState(final MasterProcedureEnv env, final SnapshotTableState state)
+ throws InterruptedException {
+ if (LOG.isTraceEnabled()) {
+ LOG.trace(this + " execute state=" + state);
+ }
+ try {
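+ // Single-state procedure: SNAPSHOT_TABLE removes any stale snapshot with the same name,
+ // takes a fresh snapshot of the table, and completes.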
+ switch (state) {
+ case SNAPSHOT_TABLE:
+ HBaseProtos.SnapshotDescription backupSnapshot;
+
+ // wrap a SnapshotDescription for offline/online snapshot
+ backupSnapshot = this.wrapSnapshotDescription(tableName, snapshotName);
+
+ Admin admin = ConnectionFactory.createConnection(env.getMasterConfiguration()).getAdmin();
+ // try to remove existing snapshot
+ try {
+ admin.deleteSnapshot(snapshotName);
+ } catch (IOException e) {
+ LOG.debug("Unable to delete " + snapshotName, e);
+ }
+ // Kick off snapshot for backup
+ admin.snapshot(backupSnapshot);
+ // set the snapshot name in BackupStatus of this table, only after snapshot success.
+ return Flow.NO_MORE_STATE;
+ default:
+ throw new UnsupportedOperationException("unhandled state=" + state);
+ }
+ } catch (IOException e) {
+ setFailure("snapshot-table", e);
+ }
+ return Flow.HAS_MORE_STATE;
+ }
+
+ /**
+ * Wrap a SnapshotDescription for a target table.
+ * @param tableName target table
+ * @param snapshotName snapshot name
+ * @return a SnapshotDescription especially for backup.
+ */
+ private SnapshotDescription wrapSnapshotDescription(TableName tableName, String snapshotName) {
+ // Build a SnapshotDescription for the snapshot request;
+ // the caller names it in the format "snapshot_<timestamp>_<namespace>_<table>"
+ HBaseProtos.SnapshotDescription.Builder builder = HBaseProtos.SnapshotDescription.newBuilder();
+ builder.setTable(tableName.getNameAsString());
+ builder.setName(snapshotName);
+ HBaseProtos.SnapshotDescription backupSnapshot = builder.build();
+
+ LOG.debug("Wrapped a SnapshotDescription " + backupSnapshot.getName()
+ + " from backupContext to request snapshot for backup.");
+
+ return backupSnapshot;
+ }
+
+ @Override
+ protected void rollbackState(final MasterProcedureEnv env, final SnapshotTableState state)
+ throws IOException {
+ }
+
+ @Override
+ protected SnapshotTableState getState(final int stateId) {
+ return SnapshotTableState.valueOf(stateId);
+ }
+
+ @Override
+ protected int getStateId(final SnapshotTableState state) {
+ return state.getNumber();
+ }
+
+ @Override
+ protected SnapshotTableState getInitialState() {
+ return SnapshotTableState.SNAPSHOT_TABLE;
+ }
+
+ @Override
+ protected void setNextState(final SnapshotTableState state) {
+ if (aborted.get()) {
+ setAbortFailure("snapshot-table", "abort requested");
+ } else {
+ super.setNextState(state);
+ }
+ }
+
+ @Override
+ public boolean abort(final MasterProcedureEnv env) {
+ aborted.set(true);
+ return true;
+ }
+
+ @Override
+ public void toStringClassDetails(StringBuilder sb) {
+ sb.append(getClass().getSimpleName());
+ sb.append(" (table=");
+ sb.append(tableName);
+ sb.append(")");
+ }
+
+ @Override
+ public void serializeStateData(final OutputStream stream) throws IOException {
+ super.serializeStateData(stream);
+
+ BackupProtos.SnapshotTableStateData.Builder state =
+ BackupProtos.SnapshotTableStateData.newBuilder()
+ .setTable(ProtobufUtil.toProtoTableName(tableName))
+ .setSnapshotName(snapshotName);
+ state.build().writeDelimitedTo(stream);
+ }
+
+ @Override
+ public void deserializeStateData(final InputStream stream) throws IOException {
+ super.deserializeStateData(stream);
+
+ BackupProtos.SnapshotTableStateData state =
+ BackupProtos.SnapshotTableStateData.parseDelimitedFrom(stream);
+ tableName = ProtobufUtil.toTableName(state.getTable());
+ snapshotName = state.getSnapshotName();
+ }
+
+ @Override
+ protected boolean acquireLock(final MasterProcedureEnv env) {
+ if (!tableName.isSystemTable() && env.waitInitialized(this)) {
+ return false;
+ }
+ return env.getProcedureQueue().tryAcquireTableExclusiveLock(this, tableName);
+ }
+
+ @Override
+ protected void releaseLock(final MasterProcedureEnv env) {
+ env.getProcedureQueue().releaseTableExclusiveLock(this, tableName);
+ }
+}