diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
index c3b524b..639709a 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
@@ -913,6 +913,20 @@ public interface Admin extends Abortable, Closeable {
     throws IOException;
 
   /**
+   * Backs up the given list of tables fully. Asynchronous operation. This means that
+   * it may be a while before all your tables are backed up.
+   * You can use Future.get(long, TimeUnit) to wait on the operation to complete.
+   *
+   * @param backupId identifier for the full backup
+   * @param tableList list of tables to back up
+   * @param targetRootDir root dir for saving the backup
+   * @return the result of the async backup. You can use Future.get(long, TimeUnit) to wait on the
+   *         operation to complete
+   */
+  public Future<Void> fullTableBackup(final String backupId,
+      final List<TableName> tableList, final String targetRootDir) throws IOException;
+
+  /**
    * Modify an existing table, more IRB friendly version. Asynchronous operation. This means that
    * it may be a while before your schema change is updated across all of the table.
    * You can use Future.get(long, TimeUnit) to wait on the operation to complete.
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
index 64eb9fb..4936207 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
@@ -1401,6 +1401,13 @@ class ConnectionImplementation implements ClusterConnection, Closeable {
     }
 
     @Override
+    public MasterProtos.FullTableBackupResponse fullTableBackup(
+        RpcController controller,
+        MasterProtos.FullTableBackupRequest request) throws ServiceException {
+      return stub.fullTableBackup(controller, request);
+    }
+
+    @Override
     public MasterProtos.AddColumnResponse addColumn(
         RpcController controller,
         MasterProtos.AddColumnRequest request) throws ServiceException {
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
index c2a0bb8..844803e 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
@@ -83,6 +83,8 @@ import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RollWALWriterReque
 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RollWALWriterResponse;
 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.StopServerRequest;
 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateConfigurationRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.FullTableBackupRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.FullTableBackupResponse;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription;
@@ -1568,6 +1570,45 @@ public class HBaseAdmin implements Admin {
   }
 
   @Override
+  public Future<Void> fullTableBackup(final String backupId,
+      final List<TableName> tableList, final String targetRootDir) throws IOException {
+    FullTableBackupResponse response = executeCallable(
+        new MasterCallable<FullTableBackupResponse>(getConnection()) {
+          @Override
+          public FullTableBackupResponse call(int callTimeout) throws ServiceException {
+            FullTableBackupRequest request = RequestConverter.buildFullTableBackupRequest(backupId,
+              tableList, targetRootDir);
+            return master.fullTableBackup(null, request);
+          }
+        });
+    return new FullTableBackupFuture(this, TableName.BACKUP_TABLE_NAME, response);
+  }
+
+  private static class FullTableBackupFuture extends TableFuture<Void> {
+    public FullTableBackupFuture(final HBaseAdmin admin, final TableName tableName,
+        final FullTableBackupResponse response) {
+      super(admin, tableName,
+          (response != null && response.hasProcId()) ? response.getProcId() : null);
+    }
+
+    public FullTableBackupFuture(final HBaseAdmin admin, final TableName tableName,
+        final Long procId) {
+      super(admin, tableName, procId);
+    }
+
+    @Override
+    public String getOperationType() {
+      return "BACKUP";
+    }
+
+    @Override
+    protected Void postOperationResult(final Void result, final long deadlineTs)
+        throws IOException, TimeoutException {
+      return result;
+    }
+  }
+
+  @Override
   public Future<Void> modifyTable(final TableName tableName, final HTableDescriptor htd)
       throws IOException {
     if (!tableName.equals(htd.getTableName())) {
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/RequestConverter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/RequestConverter.java
index 99e993d..c85ebec 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/RequestConverter.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/RequestConverter.java
@@ -62,6 +62,7 @@ import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.StopServerRequest;
 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest;
 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest.RegionUpdateInfo;
 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WarmupRegionRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.FullTableBackupRequest;
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos;
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BulkLoadHFileRequest;
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BulkLoadHFileRequest.FamilyPath;
@@ -1268,6 +1269,19 @@ public final class RequestConverter {
     return builder.build();
   }
 
+  public static FullTableBackupRequest buildFullTableBackupRequest(final String backupId,
+      List<TableName> tableList, String targetRootDir) {
+    FullTableBackupRequest.Builder builder = FullTableBackupRequest.newBuilder();
+    builder.setBackupId(backupId);
+    builder.setTargetRootDir(targetRootDir);
+    if (tableList != null) {
+      for (TableName table : tableList) {
+        builder.addTables(ProtobufUtil.toProtoTableName(table));
+      }
+    }
+    return builder.build();
+  }
+
   /**
    * Creates a protocol buffer GetSchemaAlterStatusRequest
    *
diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/BackupProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/BackupProtos.java
index 1a7a1ba..acd8889 100644
--- a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/BackupProtos.java
+++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/BackupProtos.java
@@ -9,6 +9,270 @@ public final class BackupProtos {
       com.google.protobuf.ExtensionRegistry registry) {
   }
   /**
+   * Protobuf enum 
{@code hbase.pb.FullTableBackupState} + */ + public enum FullTableBackupState + implements com.google.protobuf.ProtocolMessageEnum { + /** + * PRE_SNAPSHOT_TABLE = 1; + */ + PRE_SNAPSHOT_TABLE(0, 1), + /** + * SNAPSHOT_TABLES = 2; + */ + SNAPSHOT_TABLES(1, 2), + /** + * SNAPSHOT_COPY = 3; + */ + SNAPSHOT_COPY(2, 3), + /** + * BACKUP_COMPLETE = 4; + */ + BACKUP_COMPLETE(3, 4), + ; + + /** + * PRE_SNAPSHOT_TABLE = 1; + */ + public static final int PRE_SNAPSHOT_TABLE_VALUE = 1; + /** + * SNAPSHOT_TABLES = 2; + */ + public static final int SNAPSHOT_TABLES_VALUE = 2; + /** + * SNAPSHOT_COPY = 3; + */ + public static final int SNAPSHOT_COPY_VALUE = 3; + /** + * BACKUP_COMPLETE = 4; + */ + public static final int BACKUP_COMPLETE_VALUE = 4; + + + public final int getNumber() { return value; } + + public static FullTableBackupState valueOf(int value) { + switch (value) { + case 1: return PRE_SNAPSHOT_TABLE; + case 2: return SNAPSHOT_TABLES; + case 3: return SNAPSHOT_COPY; + case 4: return BACKUP_COMPLETE; + default: return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap + internalGetValueMap() { + return internalValueMap; + } + private static com.google.protobuf.Internal.EnumLiteMap + internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public FullTableBackupState findValueByNumber(int number) { + return FullTableBackupState.valueOf(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor + getValueDescriptor() { + return getDescriptor().getValues().get(index); + } + public final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptorForType() { + return getDescriptor(); + } + public static final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.getDescriptor().getEnumTypes().get(0); + } + + private static final FullTableBackupState[] VALUES = values(); + + public static FullTableBackupState valueOf( + com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "EnumValueDescriptor is not for this type."); + } + return VALUES[desc.getIndex()]; + } + + private final int index; + private final int value; + + private FullTableBackupState(int index, int value) { + this.index = index; + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:hbase.pb.FullTableBackupState) + } + + /** + * Protobuf enum {@code hbase.pb.IncrementalTableBackupState} + */ + public enum IncrementalTableBackupState + implements com.google.protobuf.ProtocolMessageEnum { + /** + * PREPARE_INCREMENTAL = 1; + */ + PREPARE_INCREMENTAL(0, 1), + /** + * INCREMENTAL_COPY = 2; + */ + INCREMENTAL_COPY(1, 2), + /** + * INCR_BACKUP_COMPLETE = 3; + */ + INCR_BACKUP_COMPLETE(2, 3), + ; + + /** + * PREPARE_INCREMENTAL = 1; + */ + public static final int PREPARE_INCREMENTAL_VALUE = 1; + /** + * INCREMENTAL_COPY = 2; + */ + public static final int INCREMENTAL_COPY_VALUE = 2; + /** + * INCR_BACKUP_COMPLETE = 3; + */ + public static final int INCR_BACKUP_COMPLETE_VALUE = 3; + + + public final int getNumber() { return value; } + + public static IncrementalTableBackupState valueOf(int value) { + switch (value) { + case 1: return PREPARE_INCREMENTAL; + case 2: return INCREMENTAL_COPY; + case 3: return INCR_BACKUP_COMPLETE; + default: return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap + internalGetValueMap() { + return internalValueMap; + } + private static 
com.google.protobuf.Internal.EnumLiteMap + internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public IncrementalTableBackupState findValueByNumber(int number) { + return IncrementalTableBackupState.valueOf(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor + getValueDescriptor() { + return getDescriptor().getValues().get(index); + } + public final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptorForType() { + return getDescriptor(); + } + public static final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.getDescriptor().getEnumTypes().get(1); + } + + private static final IncrementalTableBackupState[] VALUES = values(); + + public static IncrementalTableBackupState valueOf( + com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "EnumValueDescriptor is not for this type."); + } + return VALUES[desc.getIndex()]; + } + + private final int index; + private final int value; + + private IncrementalTableBackupState(int index, int value) { + this.index = index; + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:hbase.pb.IncrementalTableBackupState) + } + + /** + * Protobuf enum {@code hbase.pb.SnapshotTableState} + */ + public enum SnapshotTableState + implements com.google.protobuf.ProtocolMessageEnum { + /** + * SNAPSHOT_TABLE = 1; + */ + SNAPSHOT_TABLE(0, 1), + ; + + /** + * SNAPSHOT_TABLE = 1; + */ + public static final int SNAPSHOT_TABLE_VALUE = 1; + + + public final int getNumber() { return value; } + + public static SnapshotTableState valueOf(int value) { + switch (value) { + case 1: return SNAPSHOT_TABLE; + default: return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap + internalGetValueMap() { + return internalValueMap; + } + private static com.google.protobuf.Internal.EnumLiteMap + internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public SnapshotTableState findValueByNumber(int number) { + return SnapshotTableState.valueOf(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor + getValueDescriptor() { + return getDescriptor().getValues().get(index); + } + public final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptorForType() { + return getDescriptor(); + } + public static final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.getDescriptor().getEnumTypes().get(2); + } + + private static final SnapshotTableState[] VALUES = values(); + + public static SnapshotTableState valueOf( + com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "EnumValueDescriptor is not for this type."); + } + return VALUES[desc.getIndex()]; + } + + private final int index; + private final int value; + + private SnapshotTableState(int index, int value) { + this.index = index; + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:hbase.pb.SnapshotTableState) + } + + /** * Protobuf enum {@code hbase.pb.BackupType} */ public enum BackupType @@ -41,53 +305,787 @@ public final class BackupProtos { case 1: return INCREMENTAL; default: return null; } - } + } + + public static com.google.protobuf.Internal.EnumLiteMap + internalGetValueMap() { + return internalValueMap; + 
} + private static com.google.protobuf.Internal.EnumLiteMap + internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public BackupType findValueByNumber(int number) { + return BackupType.valueOf(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor + getValueDescriptor() { + return getDescriptor().getValues().get(index); + } + public final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptorForType() { + return getDescriptor(); + } + public static final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.getDescriptor().getEnumTypes().get(3); + } + + private static final BackupType[] VALUES = values(); + + public static BackupType valueOf( + com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "EnumValueDescriptor is not for this type."); + } + return VALUES[desc.getIndex()]; + } + + private final int index; + private final int value; + + private BackupType(int index, int value) { + this.index = index; + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:hbase.pb.BackupType) + } + + public interface SnapshotTableStateDataOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required .hbase.pb.TableName table = 1; + /** + * required .hbase.pb.TableName table = 1; + */ + boolean hasTable(); + /** + * required .hbase.pb.TableName table = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTable(); + /** + * required .hbase.pb.TableName table = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableOrBuilder(); + + // required string snapshotName = 2; + /** + * required string snapshotName = 2; + */ + boolean hasSnapshotName(); + /** + * required string snapshotName = 2; + */ + java.lang.String getSnapshotName(); + /** + * required string snapshotName = 2; + */ + com.google.protobuf.ByteString + getSnapshotNameBytes(); + } + /** + * Protobuf type {@code hbase.pb.SnapshotTableStateData} + */ + public static final class SnapshotTableStateData extends + com.google.protobuf.GeneratedMessage + implements SnapshotTableStateDataOrBuilder { + // Use SnapshotTableStateData.newBuilder() to construct. 
+ private SnapshotTableStateData(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private SnapshotTableStateData(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final SnapshotTableStateData defaultInstance; + public static SnapshotTableStateData getDefaultInstance() { + return defaultInstance; + } + + public SnapshotTableStateData getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private SnapshotTableStateData( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder subBuilder = null; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + subBuilder = table_.toBuilder(); + } + table_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(table_); + table_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000001; + break; + } + case 18: { + bitField0_ |= 0x00000002; + snapshotName_ = input.readBytes(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_SnapshotTableStateData_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_SnapshotTableStateData_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.SnapshotTableStateData.class, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.SnapshotTableStateData.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public SnapshotTableStateData parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new SnapshotTableStateData(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required .hbase.pb.TableName table = 1; + public static final int TABLE_FIELD_NUMBER 
= 1; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName table_; + /** + * required .hbase.pb.TableName table = 1; + */ + public boolean hasTable() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .hbase.pb.TableName table = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTable() { + return table_; + } + /** + * required .hbase.pb.TableName table = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableOrBuilder() { + return table_; + } + + // required string snapshotName = 2; + public static final int SNAPSHOTNAME_FIELD_NUMBER = 2; + private java.lang.Object snapshotName_; + /** + * required string snapshotName = 2; + */ + public boolean hasSnapshotName() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required string snapshotName = 2; + */ + public java.lang.String getSnapshotName() { + java.lang.Object ref = snapshotName_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + snapshotName_ = s; + } + return s; + } + } + /** + * required string snapshotName = 2; + */ + public com.google.protobuf.ByteString + getSnapshotNameBytes() { + java.lang.Object ref = snapshotName_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + snapshotName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private void initFields() { + table_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); + snapshotName_ = ""; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasTable()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasSnapshotName()) { + memoizedIsInitialized = 0; + return false; + } + if (!getTable().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeMessage(1, table_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeBytes(2, getSnapshotNameBytes()); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(1, table_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(2, getSnapshotNameBytes()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof 
org.apache.hadoop.hbase.protobuf.generated.BackupProtos.SnapshotTableStateData)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.SnapshotTableStateData other = (org.apache.hadoop.hbase.protobuf.generated.BackupProtos.SnapshotTableStateData) obj; + + boolean result = true; + result = result && (hasTable() == other.hasTable()); + if (hasTable()) { + result = result && getTable() + .equals(other.getTable()); + } + result = result && (hasSnapshotName() == other.hasSnapshotName()); + if (hasSnapshotName()) { + result = result && getSnapshotName() + .equals(other.getSnapshotName()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasTable()) { + hash = (37 * hash) + TABLE_FIELD_NUMBER; + hash = (53 * hash) + getTable().hashCode(); + } + if (hasSnapshotName()) { + hash = (37 * hash) + SNAPSHOTNAME_FIELD_NUMBER; + hash = (53 * hash) + getSnapshotName().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.SnapshotTableStateData parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.SnapshotTableStateData parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.SnapshotTableStateData parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.SnapshotTableStateData parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.SnapshotTableStateData parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.SnapshotTableStateData parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.SnapshotTableStateData parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.SnapshotTableStateData parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.SnapshotTableStateData parseFrom( + com.google.protobuf.CodedInputStream input) + 
throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.SnapshotTableStateData parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.SnapshotTableStateData prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code hbase.pb.SnapshotTableStateData} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.BackupProtos.SnapshotTableStateDataOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_SnapshotTableStateData_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_SnapshotTableStateData_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.SnapshotTableStateData.class, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.SnapshotTableStateData.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.BackupProtos.SnapshotTableStateData.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getTableFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + if (tableBuilder_ == null) { + table_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); + } else { + tableBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + snapshotName_ = ""; + bitField0_ = (bitField0_ & ~0x00000002); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_SnapshotTableStateData_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.SnapshotTableStateData getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.SnapshotTableStateData.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.SnapshotTableStateData build() { + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.SnapshotTableStateData result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } 
+ return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.SnapshotTableStateData buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.SnapshotTableStateData result = new org.apache.hadoop.hbase.protobuf.generated.BackupProtos.SnapshotTableStateData(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + if (tableBuilder_ == null) { + result.table_ = table_; + } else { + result.table_ = tableBuilder_.build(); + } + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.snapshotName_ = snapshotName_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.BackupProtos.SnapshotTableStateData) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.BackupProtos.SnapshotTableStateData)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.SnapshotTableStateData other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.BackupProtos.SnapshotTableStateData.getDefaultInstance()) return this; + if (other.hasTable()) { + mergeTable(other.getTable()); + } + if (other.hasSnapshotName()) { + bitField0_ |= 0x00000002; + snapshotName_ = other.snapshotName_; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } - public static com.google.protobuf.Internal.EnumLiteMap - internalGetValueMap() { - return internalValueMap; - } - private static com.google.protobuf.Internal.EnumLiteMap - internalValueMap = - new com.google.protobuf.Internal.EnumLiteMap() { - public BackupType findValueByNumber(int number) { - return BackupType.valueOf(number); - } - }; + public final boolean isInitialized() { + if (!hasTable()) { + + return false; + } + if (!hasSnapshotName()) { + + return false; + } + if (!getTable().isInitialized()) { + + return false; + } + return true; + } - public final com.google.protobuf.Descriptors.EnumValueDescriptor - getValueDescriptor() { - return getDescriptor().getValues().get(index); - } - public final com.google.protobuf.Descriptors.EnumDescriptor - getDescriptorForType() { - return getDescriptor(); - } - public static final com.google.protobuf.Descriptors.EnumDescriptor - getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.getDescriptor().getEnumTypes().get(0); - } + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.SnapshotTableStateData parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.BackupProtos.SnapshotTableStateData) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; - private static final BackupType[] VALUES = values(); + // required .hbase.pb.TableName table = 1; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName table_ = 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> tableBuilder_; + /** + * required .hbase.pb.TableName table = 1; + */ + public boolean hasTable() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .hbase.pb.TableName table = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTable() { + if (tableBuilder_ == null) { + return table_; + } else { + return tableBuilder_.getMessage(); + } + } + /** + * required .hbase.pb.TableName table = 1; + */ + public Builder setTable(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { + if (tableBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + table_ = value; + onChanged(); + } else { + tableBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .hbase.pb.TableName table = 1; + */ + public Builder setTable( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) { + if (tableBuilder_ == null) { + table_ = builderForValue.build(); + onChanged(); + } else { + tableBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .hbase.pb.TableName table = 1; + */ + public Builder mergeTable(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { + if (tableBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001) && + table_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance()) { + table_ = + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.newBuilder(table_).mergeFrom(value).buildPartial(); + } else { + table_ = value; + } + onChanged(); + } else { + tableBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .hbase.pb.TableName table = 1; + */ + public Builder clearTable() { + if (tableBuilder_ == null) { + table_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); + onChanged(); + } else { + tableBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + /** + * required .hbase.pb.TableName table = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder getTableBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return getTableFieldBuilder().getBuilder(); + } + /** + * required .hbase.pb.TableName table = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableOrBuilder() { + if (tableBuilder_ != null) { + return tableBuilder_.getMessageOrBuilder(); + } else { + return table_; + } + } + /** + * required .hbase.pb.TableName table = 1; + */ + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> + getTableFieldBuilder() { + if (tableBuilder_ == null) { + tableBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder>( + table_, + getParentForChildren(), + isClean()); + table_ = null; + } + return tableBuilder_; + } - public static BackupType valueOf( - com.google.protobuf.Descriptors.EnumValueDescriptor desc) { - if (desc.getType() != getDescriptor()) { - throw new java.lang.IllegalArgumentException( - "EnumValueDescriptor is not for this type."); + // required string snapshotName = 2; + private java.lang.Object snapshotName_ = ""; + /** + * required string snapshotName = 2; + */ + public boolean hasSnapshotName() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required string snapshotName = 2; + */ + public java.lang.String getSnapshotName() { + java.lang.Object ref = snapshotName_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + snapshotName_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * required string snapshotName = 2; + */ + public com.google.protobuf.ByteString + getSnapshotNameBytes() { + java.lang.Object ref = snapshotName_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + snapshotName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * required string snapshotName = 2; + */ + public Builder setSnapshotName( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000002; + snapshotName_ = value; + onChanged(); + return this; + } + /** + * required string snapshotName = 2; + */ + public Builder clearSnapshotName() { + bitField0_ = (bitField0_ & ~0x00000002); + snapshotName_ = getDefaultInstance().getSnapshotName(); + onChanged(); + return this; + } + /** + * required string snapshotName = 2; + */ + public Builder setSnapshotNameBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000002; + snapshotName_ = value; + onChanged(); + return this; } - return VALUES[desc.getIndex()]; - } - private final int index; - private final int value; + // @@protoc_insertion_point(builder_scope:hbase.pb.SnapshotTableStateData) + } - private BackupType(int index, int value) { - this.index = index; - this.value = value; + static { + defaultInstance = new SnapshotTableStateData(true); + defaultInstance.initFields(); } - // @@protoc_insertion_point(enum_scope:hbase.pb.BackupType) + // @@protoc_insertion_point(class_scope:hbase.pb.SnapshotTableStateData) } public interface BackupImageOrBuilder @@ -9010,6 +10008,11 @@ public final class BackupProtos { } private static com.google.protobuf.Descriptors.Descriptor + internal_static_hbase_pb_SnapshotTableStateData_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_hbase_pb_SnapshotTableStateData_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor internal_static_hbase_pb_BackupImage_descriptor; private static com.google.protobuf.GeneratedMessage.FieldAccessorTable @@ -9048,83 +10051,98 @@ public final class BackupProtos { descriptor; static { java.lang.String[] descriptorData = { - "\n\014Backup.proto\022\010hbase.pb\032\013HBase.proto\"\327\001" + - "\n\013BackupImage\022\021\n\tbackup_id\030\001 \002(\t\022)\n\013back" + - "up_type\030\002 
\002(\0162\024.hbase.pb.BackupType\022\020\n\010r" + - "oot_dir\030\003 \002(\t\022\'\n\ntable_list\030\004 \003(\0132\023.hbas" + - "e.pb.TableName\022\020\n\010start_ts\030\005 \002(\004\022\023\n\013comp" + - "lete_ts\030\006 \002(\004\022(\n\tancestors\030\007 \003(\0132\025.hbase" + - ".pb.BackupImage\"4\n\017ServerTimestamp\022\016\n\006se" + - "rver\030\001 \002(\t\022\021\n\ttimestamp\030\002 \002(\004\"o\n\024TableSe" + - "rverTimestamp\022\"\n\005table\030\001 \002(\0132\023.hbase.pb." + - "TableName\0223\n\020server_timestamp\030\002 \003(\0132\031.hb", - "ase.pb.ServerTimestamp\"\313\002\n\016BackupManifes" + - "t\022\017\n\007version\030\001 \002(\t\022\021\n\tbackup_id\030\002 \002(\t\022\"\n" + - "\004type\030\003 \002(\0162\024.hbase.pb.BackupType\022\'\n\ntab" + - "le_list\030\004 \003(\0132\023.hbase.pb.TableName\022\020\n\010st" + - "art_ts\030\005 \002(\004\022\023\n\013complete_ts\030\006 \002(\004\022\023\n\013tot" + - "al_bytes\030\007 \002(\003\022\021\n\tlog_bytes\030\010 \001(\003\022/\n\007tst" + - "_map\030\t \003(\0132\036.hbase.pb.TableServerTimesta" + - "mp\0225\n\026dependent_backup_image\030\n \003(\0132\025.hba" + - "se.pb.BackupImage\022\021\n\tcompacted\030\013 \002(\010\"]\n\021" + - "TableBackupStatus\022\"\n\005table\030\001 \002(\0132\023.hbase", - ".pb.TableName\022\022\n\ntarget_dir\030\002 \002(\t\022\020\n\010sna" + - "pshot\030\003 \001(\t\"\323\004\n\rBackupContext\022\021\n\tbackup_" + - "id\030\001 \002(\t\022\"\n\004type\030\002 \002(\0162\024.hbase.pb.Backup" + - "Type\022\027\n\017target_root_dir\030\003 \002(\t\0222\n\005state\030\004" + - " \001(\0162#.hbase.pb.BackupContext.BackupStat" + - "e\0222\n\005phase\030\005 \001(\0162#.hbase.pb.BackupContex" + - "t.BackupPhase\022\026\n\016failed_message\030\006 \001(\t\0228\n" + - "\023table_backup_status\030\007 \003(\0132\033.hbase.pb.Ta" + - "bleBackupStatus\022\020\n\010start_ts\030\010 \001(\004\022\016\n\006end" + - "_ts\030\t \001(\004\022\032\n\022total_bytes_copied\030\n \001(\003\022\027\n", - "\017hlog_target_dir\030\013 \001(\t\022\020\n\010progress\030\014 \001(\r" + - "\"P\n\013BackupState\022\013\n\007WAITING\020\000\022\013\n\007RUNNING\020" + - "\001\022\014\n\010COMPLETE\020\002\022\n\n\006FAILED\020\003\022\r\n\tCANCELLED" + - "\020\004\"}\n\013BackupPhase\022\013\n\007REQUEST\020\000\022\014\n\010SNAPSH" + - "OT\020\001\022\027\n\023PREPARE_INCREMENTAL\020\002\022\020\n\014SNAPSHO" + - "TCOPY\020\003\022\024\n\020INCREMENTAL_COPY\020\004\022\022\n\016STORE_M" + - "ANIFEST\020\005*\'\n\nBackupType\022\010\n\004FULL\020\000\022\017\n\013INC" + - "REMENTAL\020\001BB\n*org.apache.hadoop.hbase.pr" + - "otobuf.generatedB\014BackupProtosH\001\210\001\001\240\001\001" + "\n\014Backup.proto\022\010hbase.pb\032\013HBase.proto\"R\n" + + "\026SnapshotTableStateData\022\"\n\005table\030\001 \002(\0132\023" + + ".hbase.pb.TableName\022\024\n\014snapshotName\030\002 \002(" + + "\t\"\327\001\n\013BackupImage\022\021\n\tbackup_id\030\001 \002(\t\022)\n\013" + + "backup_type\030\002 \002(\0162\024.hbase.pb.BackupType\022" + + "\020\n\010root_dir\030\003 \002(\t\022\'\n\ntable_list\030\004 \003(\0132\023." 
+ + "hbase.pb.TableName\022\020\n\010start_ts\030\005 \002(\004\022\023\n\013" + + "complete_ts\030\006 \002(\004\022(\n\tancestors\030\007 \003(\0132\025.h" + + "base.pb.BackupImage\"4\n\017ServerTimestamp\022\016" + + "\n\006server\030\001 \002(\t\022\021\n\ttimestamp\030\002 \002(\004\"o\n\024Tab", + "leServerTimestamp\022\"\n\005table\030\001 \002(\0132\023.hbase" + + ".pb.TableName\0223\n\020server_timestamp\030\002 \003(\0132" + + "\031.hbase.pb.ServerTimestamp\"\313\002\n\016BackupMan" + + "ifest\022\017\n\007version\030\001 \002(\t\022\021\n\tbackup_id\030\002 \002(" + + "\t\022\"\n\004type\030\003 \002(\0162\024.hbase.pb.BackupType\022\'\n" + + "\ntable_list\030\004 \003(\0132\023.hbase.pb.TableName\022\020" + + "\n\010start_ts\030\005 \002(\004\022\023\n\013complete_ts\030\006 \002(\004\022\023\n" + + "\013total_bytes\030\007 \002(\003\022\021\n\tlog_bytes\030\010 \001(\003\022/\n" + + "\007tst_map\030\t \003(\0132\036.hbase.pb.TableServerTim" + + "estamp\0225\n\026dependent_backup_image\030\n \003(\0132\025", + ".hbase.pb.BackupImage\022\021\n\tcompacted\030\013 \002(\010" + + "\"]\n\021TableBackupStatus\022\"\n\005table\030\001 \002(\0132\023.h" + + "base.pb.TableName\022\022\n\ntarget_dir\030\002 \002(\t\022\020\n" + + "\010snapshot\030\003 \001(\t\"\323\004\n\rBackupContext\022\021\n\tbac" + + "kup_id\030\001 \002(\t\022\"\n\004type\030\002 \002(\0162\024.hbase.pb.Ba" + + "ckupType\022\027\n\017target_root_dir\030\003 \002(\t\0222\n\005sta" + + "te\030\004 \001(\0162#.hbase.pb.BackupContext.Backup" + + "State\0222\n\005phase\030\005 \001(\0162#.hbase.pb.BackupCo" + + "ntext.BackupPhase\022\026\n\016failed_message\030\006 \001(" + + "\t\0228\n\023table_backup_status\030\007 \003(\0132\033.hbase.p", + "b.TableBackupStatus\022\020\n\010start_ts\030\010 \001(\004\022\016\n" + + "\006end_ts\030\t \001(\004\022\032\n\022total_bytes_copied\030\n \001(" + + "\003\022\027\n\017hlog_target_dir\030\013 \001(\t\022\020\n\010progress\030\014" + + " \001(\r\"P\n\013BackupState\022\013\n\007WAITING\020\000\022\013\n\007RUNN" + + "ING\020\001\022\014\n\010COMPLETE\020\002\022\n\n\006FAILED\020\003\022\r\n\tCANCE" + + "LLED\020\004\"}\n\013BackupPhase\022\013\n\007REQUEST\020\000\022\014\n\010SN" + + "APSHOT\020\001\022\027\n\023PREPARE_INCREMENTAL\020\002\022\020\n\014SNA" + + "PSHOTCOPY\020\003\022\024\n\020INCREMENTAL_COPY\020\004\022\022\n\016STO" + + "RE_MANIFEST\020\005*k\n\024FullTableBackupState\022\026\n" + + "\022PRE_SNAPSHOT_TABLE\020\001\022\023\n\017SNAPSHOT_TABLES", + "\020\002\022\021\n\rSNAPSHOT_COPY\020\003\022\023\n\017BACKUP_COMPLETE" + + "\020\004*f\n\033IncrementalTableBackupState\022\027\n\023PRE" + + "PARE_INCREMENTAL\020\001\022\024\n\020INCREMENTAL_COPY\020\002" + + "\022\030\n\024INCR_BACKUP_COMPLETE\020\003*(\n\022SnapshotTa" + + "bleState\022\022\n\016SNAPSHOT_TABLE\020\001*\'\n\nBackupTy" + + "pe\022\010\n\004FULL\020\000\022\017\n\013INCREMENTAL\020\001BB\n*org.apa" + + "che.hadoop.hbase.protobuf.generatedB\014Bac" + + "kupProtosH\001\210\001\001\240\001\001" }; com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { public com.google.protobuf.ExtensionRegistry assignDescriptors( com.google.protobuf.Descriptors.FileDescriptor root) { descriptor = root; - internal_static_hbase_pb_BackupImage_descriptor = + internal_static_hbase_pb_SnapshotTableStateData_descriptor = getDescriptor().getMessageTypes().get(0); + 
internal_static_hbase_pb_SnapshotTableStateData_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_hbase_pb_SnapshotTableStateData_descriptor, + new java.lang.String[] { "Table", "SnapshotName", }); + internal_static_hbase_pb_BackupImage_descriptor = + getDescriptor().getMessageTypes().get(1); internal_static_hbase_pb_BackupImage_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_BackupImage_descriptor, new java.lang.String[] { "BackupId", "BackupType", "RootDir", "TableList", "StartTs", "CompleteTs", "Ancestors", }); internal_static_hbase_pb_ServerTimestamp_descriptor = - getDescriptor().getMessageTypes().get(1); + getDescriptor().getMessageTypes().get(2); internal_static_hbase_pb_ServerTimestamp_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_ServerTimestamp_descriptor, new java.lang.String[] { "Server", "Timestamp", }); internal_static_hbase_pb_TableServerTimestamp_descriptor = - getDescriptor().getMessageTypes().get(2); + getDescriptor().getMessageTypes().get(3); internal_static_hbase_pb_TableServerTimestamp_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_TableServerTimestamp_descriptor, new java.lang.String[] { "Table", "ServerTimestamp", }); internal_static_hbase_pb_BackupManifest_descriptor = - getDescriptor().getMessageTypes().get(3); + getDescriptor().getMessageTypes().get(4); internal_static_hbase_pb_BackupManifest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_BackupManifest_descriptor, new java.lang.String[] { "Version", "BackupId", "Type", "TableList", "StartTs", "CompleteTs", "TotalBytes", "LogBytes", "TstMap", "DependentBackupImage", "Compacted", }); internal_static_hbase_pb_TableBackupStatus_descriptor = - getDescriptor().getMessageTypes().get(4); + getDescriptor().getMessageTypes().get(5); internal_static_hbase_pb_TableBackupStatus_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_TableBackupStatus_descriptor, new java.lang.String[] { "Table", "TargetDir", "Snapshot", }); internal_static_hbase_pb_BackupContext_descriptor = - getDescriptor().getMessageTypes().get(5); + getDescriptor().getMessageTypes().get(6); internal_static_hbase_pb_BackupContext_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_BackupContext_descriptor, diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java index 073eba9..2064db6 100644 --- a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java +++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java @@ -59215,6 +59215,1510 @@ public final class MasterProtos { // @@protoc_insertion_point(class_scope:hbase.pb.SecurityCapabilitiesResponse) } + public interface FullTableBackupRequestOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required string backup_id = 1; + /** + * required string backup_id = 1; + */ + boolean hasBackupId(); + /** + * required string backup_id = 1; + */ + java.lang.String getBackupId(); + /** + * required string backup_id = 1; + */ + com.google.protobuf.ByteString + getBackupIdBytes(); + + // repeated 
.hbase.pb.TableName tables = 2; + /** + * repeated .hbase.pb.TableName tables = 2; + */ + java.util.List + getTablesList(); + /** + * repeated .hbase.pb.TableName tables = 2; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTables(int index); + /** + * repeated .hbase.pb.TableName tables = 2; + */ + int getTablesCount(); + /** + * repeated .hbase.pb.TableName tables = 2; + */ + java.util.List + getTablesOrBuilderList(); + /** + * repeated .hbase.pb.TableName tables = 2; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTablesOrBuilder( + int index); + + // required string target_root_dir = 3; + /** + * required string target_root_dir = 3; + */ + boolean hasTargetRootDir(); + /** + * required string target_root_dir = 3; + */ + java.lang.String getTargetRootDir(); + /** + * required string target_root_dir = 3; + */ + com.google.protobuf.ByteString + getTargetRootDirBytes(); + } + /** + * Protobuf type {@code hbase.pb.FullTableBackupRequest} + */ + public static final class FullTableBackupRequest extends + com.google.protobuf.GeneratedMessage + implements FullTableBackupRequestOrBuilder { + // Use FullTableBackupRequest.newBuilder() to construct. + private FullTableBackupRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private FullTableBackupRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final FullTableBackupRequest defaultInstance; + public static FullTableBackupRequest getDefaultInstance() { + return defaultInstance; + } + + public FullTableBackupRequest getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private FullTableBackupRequest( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + bitField0_ |= 0x00000001; + backupId_ = input.readBytes(); + break; + } + case 18: { + if (!((mutable_bitField0_ & 0x00000002) == 0x00000002)) { + tables_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000002; + } + tables_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.PARSER, extensionRegistry)); + break; + } + case 26: { + bitField0_ |= 0x00000002; + targetRootDir_ = input.readBytes(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000002) == 0x00000002)) { + tables_ = java.util.Collections.unmodifiableList(tables_); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final 
com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_FullTableBackupRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_FullTableBackupRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.FullTableBackupRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.FullTableBackupRequest.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public FullTableBackupRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new FullTableBackupRequest(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required string backup_id = 1; + public static final int BACKUP_ID_FIELD_NUMBER = 1; + private java.lang.Object backupId_; + /** + * required string backup_id = 1; + */ + public boolean hasBackupId() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string backup_id = 1; + */ + public java.lang.String getBackupId() { + java.lang.Object ref = backupId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + backupId_ = s; + } + return s; + } + } + /** + * required string backup_id = 1; + */ + public com.google.protobuf.ByteString + getBackupIdBytes() { + java.lang.Object ref = backupId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + backupId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + // repeated .hbase.pb.TableName tables = 2; + public static final int TABLES_FIELD_NUMBER = 2; + private java.util.List tables_; + /** + * repeated .hbase.pb.TableName tables = 2; + */ + public java.util.List getTablesList() { + return tables_; + } + /** + * repeated .hbase.pb.TableName tables = 2; + */ + public java.util.List + getTablesOrBuilderList() { + return tables_; + } + /** + * repeated .hbase.pb.TableName tables = 2; + */ + public int getTablesCount() { + return tables_.size(); + } + /** + * repeated .hbase.pb.TableName tables = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTables(int index) { + return tables_.get(index); + } + /** + * repeated .hbase.pb.TableName tables = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTablesOrBuilder( + int index) { + return tables_.get(index); + } + + // required string target_root_dir = 3; + public static final int TARGET_ROOT_DIR_FIELD_NUMBER = 3; + private java.lang.Object targetRootDir_; + /** + * required string target_root_dir = 3; + */ + public boolean hasTargetRootDir() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required string target_root_dir = 3; + */ + public java.lang.String getTargetRootDir() { + java.lang.Object ref = targetRootDir_; + 
if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + targetRootDir_ = s; + } + return s; + } + } + /** + * required string target_root_dir = 3; + */ + public com.google.protobuf.ByteString + getTargetRootDirBytes() { + java.lang.Object ref = targetRootDir_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + targetRootDir_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private void initFields() { + backupId_ = ""; + tables_ = java.util.Collections.emptyList(); + targetRootDir_ = ""; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasBackupId()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasTargetRootDir()) { + memoizedIsInitialized = 0; + return false; + } + for (int i = 0; i < getTablesCount(); i++) { + if (!getTables(i).isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(1, getBackupIdBytes()); + } + for (int i = 0; i < tables_.size(); i++) { + output.writeMessage(2, tables_.get(i)); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeBytes(3, getTargetRootDirBytes()); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(1, getBackupIdBytes()); + } + for (int i = 0; i < tables_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(2, tables_.get(i)); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(3, getTargetRootDirBytes()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.FullTableBackupRequest)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.FullTableBackupRequest other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.FullTableBackupRequest) obj; + + boolean result = true; + result = result && (hasBackupId() == other.hasBackupId()); + if (hasBackupId()) { + result = result && getBackupId() + .equals(other.getBackupId()); + } + result = result && getTablesList() + .equals(other.getTablesList()); + result = result && (hasTargetRootDir() == other.hasTargetRootDir()); + if (hasTargetRootDir()) { + result = result && getTargetRootDir() + .equals(other.getTargetRootDir()); + 
} + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasBackupId()) { + hash = (37 * hash) + BACKUP_ID_FIELD_NUMBER; + hash = (53 * hash) + getBackupId().hashCode(); + } + if (getTablesCount() > 0) { + hash = (37 * hash) + TABLES_FIELD_NUMBER; + hash = (53 * hash) + getTablesList().hashCode(); + } + if (hasTargetRootDir()) { + hash = (37 * hash) + TARGET_ROOT_DIR_FIELD_NUMBER; + hash = (53 * hash) + getTargetRootDir().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.FullTableBackupRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.FullTableBackupRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.FullTableBackupRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.FullTableBackupRequest parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.FullTableBackupRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.FullTableBackupRequest parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.FullTableBackupRequest parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.FullTableBackupRequest parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.FullTableBackupRequest parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.FullTableBackupRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return 
newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.FullTableBackupRequest prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code hbase.pb.FullTableBackupRequest} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.FullTableBackupRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_FullTableBackupRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_FullTableBackupRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.FullTableBackupRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.FullTableBackupRequest.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.FullTableBackupRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getTablesFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + backupId_ = ""; + bitField0_ = (bitField0_ & ~0x00000001); + if (tablesBuilder_ == null) { + tables_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000002); + } else { + tablesBuilder_.clear(); + } + targetRootDir_ = ""; + bitField0_ = (bitField0_ & ~0x00000004); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_FullTableBackupRequest_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.FullTableBackupRequest getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.FullTableBackupRequest.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.FullTableBackupRequest build() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.FullTableBackupRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.FullTableBackupRequest buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.FullTableBackupRequest result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.FullTableBackupRequest(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + 
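+        // Note: the builder's has-bits are remapped onto the message's bitField0_ here:
+        // builder bit 0x01 (backup_id) stays 0x01 and builder bit 0x04 (target_root_dir)
+        // becomes 0x02; the repeated 'tables' field carries no has-bit and its list is
+        // made unmodifiable below before being handed to the message.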
result.backupId_ = backupId_; + if (tablesBuilder_ == null) { + if (((bitField0_ & 0x00000002) == 0x00000002)) { + tables_ = java.util.Collections.unmodifiableList(tables_); + bitField0_ = (bitField0_ & ~0x00000002); + } + result.tables_ = tables_; + } else { + result.tables_ = tablesBuilder_.build(); + } + if (((from_bitField0_ & 0x00000004) == 0x00000004)) { + to_bitField0_ |= 0x00000002; + } + result.targetRootDir_ = targetRootDir_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.FullTableBackupRequest) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.FullTableBackupRequest)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.FullTableBackupRequest other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.FullTableBackupRequest.getDefaultInstance()) return this; + if (other.hasBackupId()) { + bitField0_ |= 0x00000001; + backupId_ = other.backupId_; + onChanged(); + } + if (tablesBuilder_ == null) { + if (!other.tables_.isEmpty()) { + if (tables_.isEmpty()) { + tables_ = other.tables_; + bitField0_ = (bitField0_ & ~0x00000002); + } else { + ensureTablesIsMutable(); + tables_.addAll(other.tables_); + } + onChanged(); + } + } else { + if (!other.tables_.isEmpty()) { + if (tablesBuilder_.isEmpty()) { + tablesBuilder_.dispose(); + tablesBuilder_ = null; + tables_ = other.tables_; + bitField0_ = (bitField0_ & ~0x00000002); + tablesBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? + getTablesFieldBuilder() : null; + } else { + tablesBuilder_.addAllMessages(other.tables_); + } + } + } + if (other.hasTargetRootDir()) { + bitField0_ |= 0x00000004; + targetRootDir_ = other.targetRootDir_; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasBackupId()) { + + return false; + } + if (!hasTargetRootDir()) { + + return false; + } + for (int i = 0; i < getTablesCount(); i++) { + if (!getTables(i).isInitialized()) { + + return false; + } + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.FullTableBackupRequest parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.FullTableBackupRequest) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required string backup_id = 1; + private java.lang.Object backupId_ = ""; + /** + * required string backup_id = 1; + */ + public boolean hasBackupId() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string backup_id = 1; + */ + public java.lang.String getBackupId() { + java.lang.Object ref = backupId_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + backupId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * 
required string backup_id = 1; + */ + public com.google.protobuf.ByteString + getBackupIdBytes() { + java.lang.Object ref = backupId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + backupId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * required string backup_id = 1; + */ + public Builder setBackupId( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + backupId_ = value; + onChanged(); + return this; + } + /** + * required string backup_id = 1; + */ + public Builder clearBackupId() { + bitField0_ = (bitField0_ & ~0x00000001); + backupId_ = getDefaultInstance().getBackupId(); + onChanged(); + return this; + } + /** + * required string backup_id = 1; + */ + public Builder setBackupIdBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + backupId_ = value; + onChanged(); + return this; + } + + // repeated .hbase.pb.TableName tables = 2; + private java.util.List tables_ = + java.util.Collections.emptyList(); + private void ensureTablesIsMutable() { + if (!((bitField0_ & 0x00000002) == 0x00000002)) { + tables_ = new java.util.ArrayList(tables_); + bitField0_ |= 0x00000002; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> tablesBuilder_; + + /** + * repeated .hbase.pb.TableName tables = 2; + */ + public java.util.List getTablesList() { + if (tablesBuilder_ == null) { + return java.util.Collections.unmodifiableList(tables_); + } else { + return tablesBuilder_.getMessageList(); + } + } + /** + * repeated .hbase.pb.TableName tables = 2; + */ + public int getTablesCount() { + if (tablesBuilder_ == null) { + return tables_.size(); + } else { + return tablesBuilder_.getCount(); + } + } + /** + * repeated .hbase.pb.TableName tables = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTables(int index) { + if (tablesBuilder_ == null) { + return tables_.get(index); + } else { + return tablesBuilder_.getMessage(index); + } + } + /** + * repeated .hbase.pb.TableName tables = 2; + */ + public Builder setTables( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { + if (tablesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureTablesIsMutable(); + tables_.set(index, value); + onChanged(); + } else { + tablesBuilder_.setMessage(index, value); + } + return this; + } + /** + * repeated .hbase.pb.TableName tables = 2; + */ + public Builder setTables( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) { + if (tablesBuilder_ == null) { + ensureTablesIsMutable(); + tables_.set(index, builderForValue.build()); + onChanged(); + } else { + tablesBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.TableName tables = 2; + */ + public Builder addTables(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { + if (tablesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureTablesIsMutable(); + tables_.add(value); + onChanged(); + } else { 
+ tablesBuilder_.addMessage(value); + } + return this; + } + /** + * repeated .hbase.pb.TableName tables = 2; + */ + public Builder addTables( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { + if (tablesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureTablesIsMutable(); + tables_.add(index, value); + onChanged(); + } else { + tablesBuilder_.addMessage(index, value); + } + return this; + } + /** + * repeated .hbase.pb.TableName tables = 2; + */ + public Builder addTables( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) { + if (tablesBuilder_ == null) { + ensureTablesIsMutable(); + tables_.add(builderForValue.build()); + onChanged(); + } else { + tablesBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.TableName tables = 2; + */ + public Builder addTables( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) { + if (tablesBuilder_ == null) { + ensureTablesIsMutable(); + tables_.add(index, builderForValue.build()); + onChanged(); + } else { + tablesBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.TableName tables = 2; + */ + public Builder addAllTables( + java.lang.Iterable values) { + if (tablesBuilder_ == null) { + ensureTablesIsMutable(); + super.addAll(values, tables_); + onChanged(); + } else { + tablesBuilder_.addAllMessages(values); + } + return this; + } + /** + * repeated .hbase.pb.TableName tables = 2; + */ + public Builder clearTables() { + if (tablesBuilder_ == null) { + tables_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + } else { + tablesBuilder_.clear(); + } + return this; + } + /** + * repeated .hbase.pb.TableName tables = 2; + */ + public Builder removeTables(int index) { + if (tablesBuilder_ == null) { + ensureTablesIsMutable(); + tables_.remove(index); + onChanged(); + } else { + tablesBuilder_.remove(index); + } + return this; + } + /** + * repeated .hbase.pb.TableName tables = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder getTablesBuilder( + int index) { + return getTablesFieldBuilder().getBuilder(index); + } + /** + * repeated .hbase.pb.TableName tables = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTablesOrBuilder( + int index) { + if (tablesBuilder_ == null) { + return tables_.get(index); } else { + return tablesBuilder_.getMessageOrBuilder(index); + } + } + /** + * repeated .hbase.pb.TableName tables = 2; + */ + public java.util.List + getTablesOrBuilderList() { + if (tablesBuilder_ != null) { + return tablesBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(tables_); + } + } + /** + * repeated .hbase.pb.TableName tables = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder addTablesBuilder() { + return getTablesFieldBuilder().addBuilder( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance()); + } + /** + * repeated .hbase.pb.TableName tables = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder addTablesBuilder( + int index) { + return getTablesFieldBuilder().addBuilder( + index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance()); + } + /** + * repeated 
.hbase.pb.TableName tables = 2; + */ + public java.util.List + getTablesBuilderList() { + return getTablesFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> + getTablesFieldBuilder() { + if (tablesBuilder_ == null) { + tablesBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder>( + tables_, + ((bitField0_ & 0x00000002) == 0x00000002), + getParentForChildren(), + isClean()); + tables_ = null; + } + return tablesBuilder_; + } + + // required string target_root_dir = 3; + private java.lang.Object targetRootDir_ = ""; + /** + * required string target_root_dir = 3; + */ + public boolean hasTargetRootDir() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * required string target_root_dir = 3; + */ + public java.lang.String getTargetRootDir() { + java.lang.Object ref = targetRootDir_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + targetRootDir_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * required string target_root_dir = 3; + */ + public com.google.protobuf.ByteString + getTargetRootDirBytes() { + java.lang.Object ref = targetRootDir_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + targetRootDir_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * required string target_root_dir = 3; + */ + public Builder setTargetRootDir( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000004; + targetRootDir_ = value; + onChanged(); + return this; + } + /** + * required string target_root_dir = 3; + */ + public Builder clearTargetRootDir() { + bitField0_ = (bitField0_ & ~0x00000004); + targetRootDir_ = getDefaultInstance().getTargetRootDir(); + onChanged(); + return this; + } + /** + * required string target_root_dir = 3; + */ + public Builder setTargetRootDirBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000004; + targetRootDir_ = value; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:hbase.pb.FullTableBackupRequest) + } + + static { + defaultInstance = new FullTableBackupRequest(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:hbase.pb.FullTableBackupRequest) + } + + public interface FullTableBackupResponseOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // optional uint64 proc_id = 1; + /** + * optional uint64 proc_id = 1; + */ + boolean hasProcId(); + /** + * optional uint64 proc_id = 1; + */ + long getProcId(); + } + /** + * Protobuf type {@code hbase.pb.FullTableBackupResponse} + */ + public static final class FullTableBackupResponse extends + com.google.protobuf.GeneratedMessage + implements FullTableBackupResponseOrBuilder { + // Use FullTableBackupResponse.newBuilder() to construct. 
+ private FullTableBackupResponse(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private FullTableBackupResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final FullTableBackupResponse defaultInstance; + public static FullTableBackupResponse getDefaultInstance() { + return defaultInstance; + } + + public FullTableBackupResponse getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private FullTableBackupResponse( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 8: { + bitField0_ |= 0x00000001; + procId_ = input.readUInt64(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_FullTableBackupResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_FullTableBackupResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.FullTableBackupResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.FullTableBackupResponse.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public FullTableBackupResponse parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new FullTableBackupResponse(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // optional uint64 proc_id = 1; + public static final int PROC_ID_FIELD_NUMBER = 1; + private long procId_; + /** + * optional uint64 proc_id = 1; + */ + public boolean hasProcId() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional uint64 proc_id = 1; + */ + public long getProcId() { + return procId_; + } + + private void initFields() { + procId_ = 0L; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) 
return isInitialized == 1; + + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeUInt64(1, procId_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt64Size(1, procId_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.FullTableBackupResponse)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.FullTableBackupResponse other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.FullTableBackupResponse) obj; + + boolean result = true; + result = result && (hasProcId() == other.hasProcId()); + if (hasProcId()) { + result = result && (getProcId() + == other.getProcId()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasProcId()) { + hash = (37 * hash) + PROC_ID_FIELD_NUMBER; + hash = (53 * hash) + hashLong(getProcId()); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.FullTableBackupResponse parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.FullTableBackupResponse parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.FullTableBackupResponse parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.FullTableBackupResponse parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.FullTableBackupResponse parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.FullTableBackupResponse parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite 
extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.FullTableBackupResponse parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.FullTableBackupResponse parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.FullTableBackupResponse parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.FullTableBackupResponse parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.FullTableBackupResponse prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code hbase.pb.FullTableBackupResponse} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.FullTableBackupResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_FullTableBackupResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_FullTableBackupResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.FullTableBackupResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.FullTableBackupResponse.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.FullTableBackupResponse.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + procId_ = 0L; + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_FullTableBackupResponse_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.FullTableBackupResponse getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.FullTableBackupResponse.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.FullTableBackupResponse build() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.FullTableBackupResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.FullTableBackupResponse buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.FullTableBackupResponse result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.FullTableBackupResponse(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.procId_ = procId_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.FullTableBackupResponse) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.FullTableBackupResponse)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.FullTableBackupResponse other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.FullTableBackupResponse.getDefaultInstance()) return this; + if (other.hasProcId()) { + setProcId(other.getProcId()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.FullTableBackupResponse parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.FullTableBackupResponse) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // optional uint64 proc_id = 1; + private long procId_ ; + /** + * optional uint64 proc_id = 1; + */ + public boolean hasProcId() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional uint64 proc_id = 1; + */ + public long getProcId() { + return procId_; + } + /** + * optional uint64 proc_id = 1; + */ + public Builder setProcId(long value) { + bitField0_ |= 0x00000001; + procId_ = value; + onChanged(); + return this; + } + /** + * optional uint64 proc_id = 1; + */ + public Builder clearProcId() { + bitField0_ = (bitField0_ & ~0x00000001); + procId_ = 0L; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:hbase.pb.FullTableBackupResponse) + } + + static { + defaultInstance = new FullTableBackupResponse(true); + defaultInstance.initFields(); + } + + // 
@@protoc_insertion_point(class_scope:hbase.pb.FullTableBackupResponse) + } + /** * Protobuf service {@code hbase.pb.MasterService} */ @@ -59936,6 +61440,18 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest request, com.google.protobuf.RpcCallback done); + /** + * rpc fullTableBackup(.hbase.pb.FullTableBackupRequest) returns (.hbase.pb.FullTableBackupResponse); + * + *
+       ** Performs a full backup of the given set of tables. 
+       * 
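+       * The FullTableBackupResponse carries an optional proc_id that clients
+       * may use to track the backup procedure on the master.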
+ */ + public abstract void fullTableBackup( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.FullTableBackupRequest request, + com.google.protobuf.RpcCallback done); + } public static com.google.protobuf.Service newReflectiveService( @@ -60397,6 +61913,14 @@ public final class MasterProtos { impl.listProcedures(controller, request, done); } + @java.lang.Override + public void fullTableBackup( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.FullTableBackupRequest request, + com.google.protobuf.RpcCallback done) { + impl.fullTableBackup(controller, request, done); + } + }; } @@ -60533,6 +62057,8 @@ public final class MasterProtos { return impl.abortProcedure(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest)request); case 56: return impl.listProcedures(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest)request); + case 57: + return impl.fullTableBackup(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.FullTableBackupRequest)request); default: throw new java.lang.AssertionError("Can't get here."); } @@ -60661,6 +62187,8 @@ public final class MasterProtos { return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest.getDefaultInstance(); case 56: return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest.getDefaultInstance(); + case 57: + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.FullTableBackupRequest.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); } @@ -60789,6 +62317,8 @@ public final class MasterProtos { return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse.getDefaultInstance(); case 56: return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse.getDefaultInstance(); + case 57: + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.FullTableBackupResponse.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); } @@ -61510,6 +63040,18 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest request, com.google.protobuf.RpcCallback done); + /** + * rpc fullTableBackup(.hbase.pb.FullTableBackupRequest) returns (.hbase.pb.FullTableBackupResponse); + * + *
+     ** Performs a full backup of the given set of tables. 
+     * 
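+     * The FullTableBackupResponse carries an optional proc_id that clients
+     * may use to track the backup procedure on the master.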
+ */ + public abstract void fullTableBackup( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.FullTableBackupRequest request, + com.google.protobuf.RpcCallback done); + public static final com.google.protobuf.Descriptors.ServiceDescriptor getDescriptor() { @@ -61817,6 +63359,11 @@ public final class MasterProtos { com.google.protobuf.RpcUtil.specializeCallback( done)); return; + case 57: + this.fullTableBackup(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.FullTableBackupRequest)request, + com.google.protobuf.RpcUtil.specializeCallback( + done)); + return; default: throw new java.lang.AssertionError("Can't get here."); } @@ -61945,6 +63492,8 @@ public final class MasterProtos { return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest.getDefaultInstance(); case 56: return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest.getDefaultInstance(); + case 57: + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.FullTableBackupRequest.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); } @@ -62073,6 +63622,8 @@ public final class MasterProtos { return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse.getDefaultInstance(); case 56: return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse.getDefaultInstance(); + case 57: + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.FullTableBackupResponse.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); } @@ -62948,6 +64499,21 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse.getDefaultInstance())); } + + public void fullTableBackup( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.FullTableBackupRequest request, + com.google.protobuf.RpcCallback done) { + channel.callMethod( + getDescriptor().getMethods().get(57), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.FullTableBackupResponse.getDefaultInstance(), + com.google.protobuf.RpcUtil.generalizeCallback( + done, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.FullTableBackupResponse.class, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.FullTableBackupResponse.getDefaultInstance())); + } } public static BlockingInterface newBlockingStub( @@ -63240,6 +64806,11 @@ public final class MasterProtos { com.google.protobuf.RpcController controller, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest request) throws com.google.protobuf.ServiceException; + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.FullTableBackupResponse fullTableBackup( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.FullTableBackupRequest request) + throws com.google.protobuf.ServiceException; } private static final class BlockingStub implements BlockingInterface { @@ -63932,6 +65503,18 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse.getDefaultInstance()); } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.FullTableBackupResponse fullTableBackup( + com.google.protobuf.RpcController controller, + 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.FullTableBackupRequest request) + throws com.google.protobuf.ServiceException { + return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.FullTableBackupResponse) channel.callBlockingMethod( + getDescriptor().getMethods().get(57), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.FullTableBackupResponse.getDefaultInstance()); + } + } // @@protoc_insertion_point(class_scope:hbase.pb.MasterService) @@ -64482,6 +66065,16 @@ public final class MasterProtos { private static com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hbase_pb_SecurityCapabilitiesResponse_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_hbase_pb_FullTableBackupRequest_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_hbase_pb_FullTableBackupRequest_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_hbase_pb_FullTableBackupResponse_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_hbase_pb_FullTableBackupResponse_fieldAccessorTable; public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { @@ -64682,140 +66275,146 @@ public final class MasterProtos { "\"\202\001\n\nCapability\022\031\n\025SIMPLE_AUTHENTICATION" + "\020\000\022\031\n\025SECURE_AUTHENTICATION\020\001\022\021\n\rAUTHORI", "ZATION\020\002\022\026\n\022CELL_AUTHORIZATION\020\003\022\023\n\017CELL" + - "_VISIBILITY\020\004*(\n\020MasterSwitchType\022\t\n\005SPL" + - "IT\020\000\022\t\n\005MERGE\020\0012\323(\n\rMasterService\022e\n\024Get" + - "SchemaAlterStatus\022%.hbase.pb.GetSchemaAl" + - "terStatusRequest\032&.hbase.pb.GetSchemaAlt" + - "erStatusResponse\022b\n\023GetTableDescriptors\022" + - "$.hbase.pb.GetTableDescriptorsRequest\032%." + - "hbase.pb.GetTableDescriptorsResponse\022P\n\r" + - "GetTableNames\022\036.hbase.pb.GetTableNamesRe" + - "quest\032\037.hbase.pb.GetTableNamesResponse\022Y", - "\n\020GetClusterStatus\022!.hbase.pb.GetCluster" + - "StatusRequest\032\".hbase.pb.GetClusterStatu" + - "sResponse\022V\n\017IsMasterRunning\022 .hbase.pb." + - "IsMasterRunningRequest\032!.hbase.pb.IsMast" + - "erRunningResponse\022D\n\tAddColumn\022\032.hbase.p" + - "b.AddColumnRequest\032\033.hbase.pb.AddColumnR" + - "esponse\022M\n\014DeleteColumn\022\035.hbase.pb.Delet" + - "eColumnRequest\032\036.hbase.pb.DeleteColumnRe" + - "sponse\022M\n\014ModifyColumn\022\035.hbase.pb.Modify" + - "ColumnRequest\032\036.hbase.pb.ModifyColumnRes", - "ponse\022G\n\nMoveRegion\022\033.hbase.pb.MoveRegio" + - "nRequest\032\034.hbase.pb.MoveRegionResponse\022k" + - "\n\026DispatchMergingRegions\022\'.hbase.pb.Disp" + - "atchMergingRegionsRequest\032(.hbase.pb.Dis" + - "patchMergingRegionsResponse\022M\n\014AssignReg" + - "ion\022\035.hbase.pb.AssignRegionRequest\032\036.hba" + - "se.pb.AssignRegionResponse\022S\n\016UnassignRe" + - "gion\022\037.hbase.pb.UnassignRegionRequest\032 ." 
+ - "hbase.pb.UnassignRegionResponse\022P\n\rOffli" + - "neRegion\022\036.hbase.pb.OfflineRegionRequest", - "\032\037.hbase.pb.OfflineRegionResponse\022J\n\013Del" + - "eteTable\022\034.hbase.pb.DeleteTableRequest\032\035" + - ".hbase.pb.DeleteTableResponse\022P\n\rtruncat" + - "eTable\022\036.hbase.pb.TruncateTableRequest\032\037" + - ".hbase.pb.TruncateTableResponse\022J\n\013Enabl" + - "eTable\022\034.hbase.pb.EnableTableRequest\032\035.h" + - "base.pb.EnableTableResponse\022M\n\014DisableTa" + - "ble\022\035.hbase.pb.DisableTableRequest\032\036.hba" + - "se.pb.DisableTableResponse\022J\n\013ModifyTabl" + - "e\022\034.hbase.pb.ModifyTableRequest\032\035.hbase.", - "pb.ModifyTableResponse\022J\n\013CreateTable\022\034." + - "hbase.pb.CreateTableRequest\032\035.hbase.pb.C" + - "reateTableResponse\022A\n\010Shutdown\022\031.hbase.p" + - "b.ShutdownRequest\032\032.hbase.pb.ShutdownRes" + - "ponse\022G\n\nStopMaster\022\033.hbase.pb.StopMaste" + - "rRequest\032\034.hbase.pb.StopMasterResponse\022>" + - "\n\007Balance\022\030.hbase.pb.BalanceRequest\032\031.hb" + - "ase.pb.BalanceResponse\022_\n\022SetBalancerRun" + - "ning\022#.hbase.pb.SetBalancerRunningReques" + - "t\032$.hbase.pb.SetBalancerRunningResponse\022", - "\\\n\021IsBalancerEnabled\022\".hbase.pb.IsBalanc" + - "erEnabledRequest\032#.hbase.pb.IsBalancerEn" + - "abledResponse\022k\n\026SetSplitOrMergeEnabled\022" + - "\'.hbase.pb.SetSplitOrMergeEnabledRequest" + - "\032(.hbase.pb.SetSplitOrMergeEnabledRespon" + - "se\022h\n\025IsSplitOrMergeEnabled\022&.hbase.pb.I" + - "sSplitOrMergeEnabledRequest\032\'.hbase.pb.I" + - "sSplitOrMergeEnabledResponse\022D\n\tNormaliz" + - "e\022\032.hbase.pb.NormalizeRequest\032\033.hbase.pb" + - ".NormalizeResponse\022e\n\024SetNormalizerRunni", - "ng\022%.hbase.pb.SetNormalizerRunningReques" + - "t\032&.hbase.pb.SetNormalizerRunningRespons" + - "e\022b\n\023IsNormalizerEnabled\022$.hbase.pb.IsNo" + - "rmalizerEnabledRequest\032%.hbase.pb.IsNorm" + - "alizerEnabledResponse\022S\n\016RunCatalogScan\022" + - "\037.hbase.pb.RunCatalogScanRequest\032 .hbase" + - ".pb.RunCatalogScanResponse\022e\n\024EnableCata" + - "logJanitor\022%.hbase.pb.EnableCatalogJanit" + - "orRequest\032&.hbase.pb.EnableCatalogJanito" + - "rResponse\022n\n\027IsCatalogJanitorEnabled\022(.h", - "base.pb.IsCatalogJanitorEnabledRequest\032)" + - ".hbase.pb.IsCatalogJanitorEnabledRespons" + - "e\022^\n\021ExecMasterService\022#.hbase.pb.Coproc" + - "essorServiceRequest\032$.hbase.pb.Coprocess" + - "orServiceResponse\022A\n\010Snapshot\022\031.hbase.pb" + - ".SnapshotRequest\032\032.hbase.pb.SnapshotResp" + - "onse\022h\n\025GetCompletedSnapshots\022&.hbase.pb" + - ".GetCompletedSnapshotsRequest\032\'.hbase.pb" + - ".GetCompletedSnapshotsResponse\022S\n\016Delete" + - "Snapshot\022\037.hbase.pb.DeleteSnapshotReques", - "t\032 .hbase.pb.DeleteSnapshotResponse\022S\n\016I" + - "sSnapshotDone\022\037.hbase.pb.IsSnapshotDoneR" + - "equest\032 .hbase.pb.IsSnapshotDoneResponse" + - "\022V\n\017RestoreSnapshot\022 .hbase.pb.RestoreSn" + - "apshotRequest\032!.hbase.pb.RestoreSnapshot" + - "Response\022h\n\025IsRestoreSnapshotDone\022&.hbas" + - "e.pb.IsRestoreSnapshotDoneRequest\032\'.hbas" + - "e.pb.IsRestoreSnapshotDoneResponse\022P\n\rEx" + - "ecProcedure\022\036.hbase.pb.ExecProcedureRequ" + - "est\032\037.hbase.pb.ExecProcedureResponse\022W\n\024", - "ExecProcedureWithRet\022\036.hbase.pb.ExecProc" + - "edureRequest\032\037.hbase.pb.ExecProcedureRes" + - "ponse\022V\n\017IsProcedureDone\022 .hbase.pb.IsPr" + - 
"ocedureDoneRequest\032!.hbase.pb.IsProcedur" + - "eDoneResponse\022V\n\017ModifyNamespace\022 .hbase" + - ".pb.ModifyNamespaceRequest\032!.hbase.pb.Mo" + - "difyNamespaceResponse\022V\n\017CreateNamespace" + - "\022 .hbase.pb.CreateNamespaceRequest\032!.hba" + - "se.pb.CreateNamespaceResponse\022V\n\017DeleteN" + - "amespace\022 .hbase.pb.DeleteNamespaceReque", - "st\032!.hbase.pb.DeleteNamespaceResponse\022k\n" + - "\026GetNamespaceDescriptor\022\'.hbase.pb.GetNa" + - "mespaceDescriptorRequest\032(.hbase.pb.GetN" + - "amespaceDescriptorResponse\022q\n\030ListNamesp" + - "aceDescriptors\022).hbase.pb.ListNamespaceD" + - "escriptorsRequest\032*.hbase.pb.ListNamespa" + - "ceDescriptorsResponse\022\206\001\n\037ListTableDescr" + - "iptorsByNamespace\0220.hbase.pb.ListTableDe" + - "scriptorsByNamespaceRequest\0321.hbase.pb.L" + - "istTableDescriptorsByNamespaceResponse\022t", - "\n\031ListTableNamesByNamespace\022*.hbase.pb.L" + - "istTableNamesByNamespaceRequest\032+.hbase." + - "pb.ListTableNamesByNamespaceResponse\022P\n\r" + - "GetTableState\022\036.hbase.pb.GetTableStateRe" + - "quest\032\037.hbase.pb.GetTableStateResponse\022A" + - "\n\010SetQuota\022\031.hbase.pb.SetQuotaRequest\032\032." + - "hbase.pb.SetQuotaResponse\022x\n\037getLastMajo" + - "rCompactionTimestamp\022).hbase.pb.MajorCom" + - "pactionTimestampRequest\032*.hbase.pb.Major" + - "CompactionTimestampResponse\022\212\001\n(getLastM", - "ajorCompactionTimestampForRegion\0222.hbase" + - ".pb.MajorCompactionTimestampForRegionReq" + - "uest\032*.hbase.pb.MajorCompactionTimestamp" + - "Response\022_\n\022getProcedureResult\022#.hbase.p" + - "b.GetProcedureResultRequest\032$.hbase.pb.G" + - "etProcedureResultResponse\022h\n\027getSecurity" + - "Capabilities\022%.hbase.pb.SecurityCapabili" + - "tiesRequest\032&.hbase.pb.SecurityCapabilit" + - "iesResponse\022S\n\016AbortProcedure\022\037.hbase.pb" + - ".AbortProcedureRequest\032 .hbase.pb.AbortP", - "rocedureResponse\022S\n\016ListProcedures\022\037.hba" + - "se.pb.ListProceduresRequest\032 .hbase.pb.L" + - "istProceduresResponseBB\n*org.apache.hado" + - "op.hbase.protobuf.generatedB\014MasterProto" + - "sH\001\210\001\001\240\001\001" + "_VISIBILITY\020\004\"i\n\026FullTableBackupRequest\022" + + "\021\n\tbackup_id\030\001 \002(\t\022#\n\006tables\030\002 \003(\0132\023.hba" + + "se.pb.TableName\022\027\n\017target_root_dir\030\003 \002(\t" + + "\"*\n\027FullTableBackupResponse\022\017\n\007proc_id\030\001" + + " \001(\004*(\n\020MasterSwitchType\022\t\n\005SPLIT\020\000\022\t\n\005M" + + "ERGE\020\0012\253)\n\rMasterService\022e\n\024GetSchemaAlt" + + "erStatus\022%.hbase.pb.GetSchemaAlterStatus" + + "Request\032&.hbase.pb.GetSchemaAlterStatusR" + + "esponse\022b\n\023GetTableDescriptors\022$.hbase.p", + "b.GetTableDescriptorsRequest\032%.hbase.pb." 
+ + "GetTableDescriptorsResponse\022P\n\rGetTableN" + + "ames\022\036.hbase.pb.GetTableNamesRequest\032\037.h" + + "base.pb.GetTableNamesResponse\022Y\n\020GetClus" + + "terStatus\022!.hbase.pb.GetClusterStatusReq" + + "uest\032\".hbase.pb.GetClusterStatusResponse" + + "\022V\n\017IsMasterRunning\022 .hbase.pb.IsMasterR" + + "unningRequest\032!.hbase.pb.IsMasterRunning" + + "Response\022D\n\tAddColumn\022\032.hbase.pb.AddColu" + + "mnRequest\032\033.hbase.pb.AddColumnResponse\022M", + "\n\014DeleteColumn\022\035.hbase.pb.DeleteColumnRe" + + "quest\032\036.hbase.pb.DeleteColumnResponse\022M\n" + + "\014ModifyColumn\022\035.hbase.pb.ModifyColumnReq" + + "uest\032\036.hbase.pb.ModifyColumnResponse\022G\n\n" + + "MoveRegion\022\033.hbase.pb.MoveRegionRequest\032" + + "\034.hbase.pb.MoveRegionResponse\022k\n\026Dispatc" + + "hMergingRegions\022\'.hbase.pb.DispatchMergi" + + "ngRegionsRequest\032(.hbase.pb.DispatchMerg" + + "ingRegionsResponse\022M\n\014AssignRegion\022\035.hba" + + "se.pb.AssignRegionRequest\032\036.hbase.pb.Ass", + "ignRegionResponse\022S\n\016UnassignRegion\022\037.hb" + + "ase.pb.UnassignRegionRequest\032 .hbase.pb." + + "UnassignRegionResponse\022P\n\rOfflineRegion\022" + + "\036.hbase.pb.OfflineRegionRequest\032\037.hbase." + + "pb.OfflineRegionResponse\022J\n\013DeleteTable\022" + + "\034.hbase.pb.DeleteTableRequest\032\035.hbase.pb" + + ".DeleteTableResponse\022P\n\rtruncateTable\022\036." + + "hbase.pb.TruncateTableRequest\032\037.hbase.pb" + + ".TruncateTableResponse\022J\n\013EnableTable\022\034." + + "hbase.pb.EnableTableRequest\032\035.hbase.pb.E", + "nableTableResponse\022M\n\014DisableTable\022\035.hba" + + "se.pb.DisableTableRequest\032\036.hbase.pb.Dis" + + "ableTableResponse\022J\n\013ModifyTable\022\034.hbase" + + ".pb.ModifyTableRequest\032\035.hbase.pb.Modify" + + "TableResponse\022J\n\013CreateTable\022\034.hbase.pb." + + "CreateTableRequest\032\035.hbase.pb.CreateTabl" + + "eResponse\022A\n\010Shutdown\022\031.hbase.pb.Shutdow" + + "nRequest\032\032.hbase.pb.ShutdownResponse\022G\n\n" + + "StopMaster\022\033.hbase.pb.StopMasterRequest\032" + + "\034.hbase.pb.StopMasterResponse\022>\n\007Balance", + "\022\030.hbase.pb.BalanceRequest\032\031.hbase.pb.Ba" + + "lanceResponse\022_\n\022SetBalancerRunning\022#.hb" + + "ase.pb.SetBalancerRunningRequest\032$.hbase" + + ".pb.SetBalancerRunningResponse\022\\\n\021IsBala" + + "ncerEnabled\022\".hbase.pb.IsBalancerEnabled" + + "Request\032#.hbase.pb.IsBalancerEnabledResp" + + "onse\022k\n\026SetSplitOrMergeEnabled\022\'.hbase.p" + + "b.SetSplitOrMergeEnabledRequest\032(.hbase." 
+ + "pb.SetSplitOrMergeEnabledResponse\022h\n\025IsS" + + "plitOrMergeEnabled\022&.hbase.pb.IsSplitOrM", + "ergeEnabledRequest\032\'.hbase.pb.IsSplitOrM" + + "ergeEnabledResponse\022D\n\tNormalize\022\032.hbase" + + ".pb.NormalizeRequest\032\033.hbase.pb.Normaliz" + + "eResponse\022e\n\024SetNormalizerRunning\022%.hbas" + + "e.pb.SetNormalizerRunningRequest\032&.hbase" + + ".pb.SetNormalizerRunningResponse\022b\n\023IsNo" + + "rmalizerEnabled\022$.hbase.pb.IsNormalizerE" + + "nabledRequest\032%.hbase.pb.IsNormalizerEna" + + "bledResponse\022S\n\016RunCatalogScan\022\037.hbase.p" + + "b.RunCatalogScanRequest\032 .hbase.pb.RunCa", + "talogScanResponse\022e\n\024EnableCatalogJanito" + + "r\022%.hbase.pb.EnableCatalogJanitorRequest" + + "\032&.hbase.pb.EnableCatalogJanitorResponse" + + "\022n\n\027IsCatalogJanitorEnabled\022(.hbase.pb.I" + + "sCatalogJanitorEnabledRequest\032).hbase.pb" + + ".IsCatalogJanitorEnabledResponse\022^\n\021Exec" + + "MasterService\022#.hbase.pb.CoprocessorServ" + + "iceRequest\032$.hbase.pb.CoprocessorService" + + "Response\022A\n\010Snapshot\022\031.hbase.pb.Snapshot" + + "Request\032\032.hbase.pb.SnapshotResponse\022h\n\025G", + "etCompletedSnapshots\022&.hbase.pb.GetCompl" + + "etedSnapshotsRequest\032\'.hbase.pb.GetCompl" + + "etedSnapshotsResponse\022S\n\016DeleteSnapshot\022" + + "\037.hbase.pb.DeleteSnapshotRequest\032 .hbase" + + ".pb.DeleteSnapshotResponse\022S\n\016IsSnapshot" + + "Done\022\037.hbase.pb.IsSnapshotDoneRequest\032 ." + + "hbase.pb.IsSnapshotDoneResponse\022V\n\017Resto" + + "reSnapshot\022 .hbase.pb.RestoreSnapshotReq" + + "uest\032!.hbase.pb.RestoreSnapshotResponse\022" + + "h\n\025IsRestoreSnapshotDone\022&.hbase.pb.IsRe", + "storeSnapshotDoneRequest\032\'.hbase.pb.IsRe" + + "storeSnapshotDoneResponse\022P\n\rExecProcedu" + + "re\022\036.hbase.pb.ExecProcedureRequest\032\037.hba" + + "se.pb.ExecProcedureResponse\022W\n\024ExecProce" + + "dureWithRet\022\036.hbase.pb.ExecProcedureRequ" + + "est\032\037.hbase.pb.ExecProcedureResponse\022V\n\017" + + "IsProcedureDone\022 .hbase.pb.IsProcedureDo" + + "neRequest\032!.hbase.pb.IsProcedureDoneResp" + + "onse\022V\n\017ModifyNamespace\022 .hbase.pb.Modif" + + "yNamespaceRequest\032!.hbase.pb.ModifyNames", + "paceResponse\022V\n\017CreateNamespace\022 .hbase." 
+ + "pb.CreateNamespaceRequest\032!.hbase.pb.Cre" + + "ateNamespaceResponse\022V\n\017DeleteNamespace\022" + + " .hbase.pb.DeleteNamespaceRequest\032!.hbas" + + "e.pb.DeleteNamespaceResponse\022k\n\026GetNames" + + "paceDescriptor\022\'.hbase.pb.GetNamespaceDe" + + "scriptorRequest\032(.hbase.pb.GetNamespaceD" + + "escriptorResponse\022q\n\030ListNamespaceDescri" + + "ptors\022).hbase.pb.ListNamespaceDescriptor" + + "sRequest\032*.hbase.pb.ListNamespaceDescrip", + "torsResponse\022\206\001\n\037ListTableDescriptorsByN" + + "amespace\0220.hbase.pb.ListTableDescriptors" + + "ByNamespaceRequest\0321.hbase.pb.ListTableD" + + "escriptorsByNamespaceResponse\022t\n\031ListTab" + + "leNamesByNamespace\022*.hbase.pb.ListTableN" + + "amesByNamespaceRequest\032+.hbase.pb.ListTa" + + "bleNamesByNamespaceResponse\022P\n\rGetTableS" + + "tate\022\036.hbase.pb.GetTableStateRequest\032\037.h" + + "base.pb.GetTableStateResponse\022A\n\010SetQuot" + + "a\022\031.hbase.pb.SetQuotaRequest\032\032.hbase.pb.", + "SetQuotaResponse\022x\n\037getLastMajorCompacti" + + "onTimestamp\022).hbase.pb.MajorCompactionTi" + + "mestampRequest\032*.hbase.pb.MajorCompactio" + + "nTimestampResponse\022\212\001\n(getLastMajorCompa" + + "ctionTimestampForRegion\0222.hbase.pb.Major" + + "CompactionTimestampForRegionRequest\032*.hb" + + "ase.pb.MajorCompactionTimestampResponse\022" + + "_\n\022getProcedureResult\022#.hbase.pb.GetProc" + + "edureResultRequest\032$.hbase.pb.GetProcedu" + + "reResultResponse\022h\n\027getSecurityCapabilit", + "ies\022%.hbase.pb.SecurityCapabilitiesReque" + + "st\032&.hbase.pb.SecurityCapabilitiesRespon" + + "se\022S\n\016AbortProcedure\022\037.hbase.pb.AbortPro" + + "cedureRequest\032 .hbase.pb.AbortProcedureR" + + "esponse\022S\n\016ListProcedures\022\037.hbase.pb.Lis" + + "tProceduresRequest\032 .hbase.pb.ListProced" + + "uresResponse\022V\n\017fullTableBackup\022 .hbase." 
+ + "pb.FullTableBackupRequest\032!.hbase.pb.Ful" + + "lTableBackupResponseBB\n*org.apache.hadoo" + + "p.hbase.protobuf.generatedB\014MasterProtos", + "H\001\210\001\001\240\001\001" }; com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { @@ -65476,6 +67075,18 @@ public final class MasterProtos { com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_SecurityCapabilitiesResponse_descriptor, new java.lang.String[] { "Capabilities", }); + internal_static_hbase_pb_FullTableBackupRequest_descriptor = + getDescriptor().getMessageTypes().get(109); + internal_static_hbase_pb_FullTableBackupRequest_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_hbase_pb_FullTableBackupRequest_descriptor, + new java.lang.String[] { "BackupId", "Tables", "TargetRootDir", }); + internal_static_hbase_pb_FullTableBackupResponse_descriptor = + getDescriptor().getMessageTypes().get(110); + internal_static_hbase_pb_FullTableBackupResponse_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_hbase_pb_FullTableBackupResponse_descriptor, + new java.lang.String[] { "ProcId", }); return null; } }; diff --git a/hbase-protocol/src/main/protobuf/Backup.proto b/hbase-protocol/src/main/protobuf/Backup.proto index 383b990..a30dcf2 100644 --- a/hbase-protocol/src/main/protobuf/Backup.proto +++ b/hbase-protocol/src/main/protobuf/Backup.proto @@ -27,6 +27,27 @@ option optimize_for = SPEED; import "HBase.proto"; +enum FullTableBackupState { + PRE_SNAPSHOT_TABLE = 1; + SNAPSHOT_TABLES = 2; + SNAPSHOT_COPY = 3; + BACKUP_COMPLETE = 4; +} + +enum IncrementalTableBackupState { + PREPARE_INCREMENTAL = 1; + INCREMENTAL_COPY = 2; + INCR_BACKUP_COMPLETE = 3; +} + +enum SnapshotTableState { + SNAPSHOT_TABLE = 1; +} +message SnapshotTableStateData { + required TableName table = 1; + required string snapshotName = 2; +} + enum BackupType { FULL = 0; INCREMENTAL = 1; diff --git a/hbase-protocol/src/main/protobuf/Master.proto b/hbase-protocol/src/main/protobuf/Master.proto index 79bb862..c1adf05 100644 --- a/hbase-protocol/src/main/protobuf/Master.proto +++ b/hbase-protocol/src/main/protobuf/Master.proto @@ -540,6 +540,16 @@ message SecurityCapabilitiesResponse { repeated Capability capabilities = 1; } +message FullTableBackupRequest { + required string backup_id = 1; + repeated TableName tables = 2; + required string target_root_dir = 3; +} + +message FullTableBackupResponse { + optional uint64 proc_id = 1; +} + service MasterService { /** Used by the client to get the number of regions that have received the updated schema */ rpc GetSchemaAlterStatus(GetSchemaAlterStatusRequest) @@ -814,4 +824,8 @@ service MasterService { /** returns a list of procedures */ rpc ListProcedures(ListProceduresRequest) returns(ListProceduresResponse); + + /** full backup table set */ + rpc fullTableBackup(FullTableBackupRequest) + returns(FullTableBackupResponse); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupContext.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupContext.java index 1be0c3b..d436931 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupContext.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupContext.java @@ -19,6 +19,7 @@ package org.apache.hadoop.hbase.backup.impl; import 
java.io.IOException; +import java.io.InputStream; import java.util.ArrayList; import java.util.HashMap; import java.util.List; @@ -308,7 +309,7 @@ public class BackupContext { return null; } - public byte[] toByteArray() throws IOException { + BackupProtos.BackupContext toBackupContext() { BackupProtos.BackupContext.Builder builder = BackupProtos.BackupContext.newBuilder(); builder.setBackupId(getBackupId()); @@ -332,8 +333,11 @@ public class BackupContext { builder.setTargetRootDir(getTargetRootDir()); builder.setTotalBytesCopied(getTotalBytesCopied()); builder.setType(BackupProtos.BackupType.valueOf(getType().name())); - byte[] data = builder.build().toByteArray(); - return data; + return builder.build(); + } + + public byte[] toByteArray() throws IOException { + return toBackupContext().toByteArray(); } private void setBackupStatusMap(Builder builder) { @@ -343,9 +347,15 @@ public class BackupContext { } public static BackupContext fromByteArray(byte[] data) throws IOException { + return fromProto(BackupProtos.BackupContext.parseFrom(data)); + } + + public static BackupContext fromStream(final InputStream stream) throws IOException { + return fromProto(BackupProtos.BackupContext.parseDelimitedFrom(stream)); + } + static BackupContext fromProto(BackupProtos.BackupContext proto) { BackupContext context = new BackupContext(); - BackupProtos.BackupContext proto = BackupProtos.BackupContext.parseFrom(data); context.setBackupId(proto.getBackupId()); context.setBackupStatusMap(toMap(proto.getTableBackupStatusList())); context.setEndTs(proto.getEndTs()); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java index 89cfd18..f039dc1 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java @@ -76,6 +76,7 @@ import org.apache.hadoop.hbase.TableNotDisabledException; import org.apache.hadoop.hbase.TableNotFoundException; import org.apache.hadoop.hbase.UnknownRegionException; import org.apache.hadoop.hbase.backup.impl.BackupManager; +import org.apache.hadoop.hbase.backup.impl.FullTableBackupProcedure; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.RegionReplicaUtil; @@ -2594,6 +2595,16 @@ public class HMaster extends HRegionServer implements MasterServices { return procInfoList; } + + @Override + public long fullTableBackup(final String backupId, + final List tableList, final String targetRootDir) throws IOException { + long procId = this.procedureExecutor.submitProcedure( + new FullTableBackupProcedure(procedureExecutor.getEnvironment(), backupId, + tableList, targetRootDir)); + ProcedureSyncWait.waitForProcedureToComplete(procedureExecutor, procId); + return procId; + } /** * Returns the list of table descriptors that match the specified request diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java index d6a53cd..89bc568 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java @@ -1052,6 +1052,23 @@ public class MasterRpcServices extends RSRpcServices } @Override + public MasterProtos.FullTableBackupResponse fullTableBackup( + RpcController controller, + 
MasterProtos.FullTableBackupRequest request) throws ServiceException { + try { + FullTableBackupResponse.Builder response = FullTableBackupResponse.newBuilder(); + List tablesList = new ArrayList<>(request.getTablesList().size()); + for (HBaseProtos.TableName table : request.getTablesList()) { + tablesList.add(ProtobufUtil.toTableName(table)); + } + master.fullTableBackup(request.getBackupId(), tablesList, request.getTargetRootDir()); + return response.build(); + } catch (IOException e) { + throw new ServiceException(e); + } + } + + @Override public ListTableDescriptorsByNamespaceResponse listTableDescriptorsByNamespace(RpcController c, ListTableDescriptorsByNamespaceRequest request) throws ServiceException { try { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java index 59c7a88..13adaa0 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java @@ -20,6 +20,7 @@ package org.apache.hadoop.hbase.master; import java.io.IOException; import java.util.List; +import java.util.concurrent.Future; import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HRegionInfo; @@ -167,6 +168,18 @@ public interface MasterServices extends Server { throws IOException; /** + * Full backup given list of tables + * @param backupId identifier for the full backup + * @param tableList list of tables to backup + * @param targetRootDir root dir for saving the backup + * @throws IOException + */ + public long fullTableBackup( + final String backupId, + final List tableList, + final String targetRootDir) throws IOException; + + /** * Enable an existing table * @param tableName The table name * @param nonceGroup diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java index 123e8b5..171c782 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java @@ -418,6 +418,15 @@ public class TestCatalogJanitor { } @Override + public long fullTableBackup( + final String backupId, + final List tableList, + final String targetRootDir) throws IOException { + return -1; + } + + + @Override public List listTableDescriptorsByNamespace(String name) throws IOException { return null; //To change body of implemented methods use File | Settings | File Templates. } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/FullTableBackupProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/FullTableBackupProcedure.java new file mode 100644 index 0000000..4eaeb43 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/FullTableBackupProcedure.java @@ -0,0 +1,626 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.backup.impl; + +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.concurrent.atomic.AtomicBoolean; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.backup.BackupRestoreFactory; +import org.apache.hadoop.hbase.backup.BackupType; +import org.apache.hadoop.hbase.backup.HBackupFileSystem; +import org.apache.hadoop.hbase.backup.impl.BackupHandler.BackupPhase; +import org.apache.hadoop.hbase.backup.impl.BackupHandler.BackupState; +import org.apache.hadoop.hbase.backup.impl.BackupManifest.BackupImage; +import org.apache.hadoop.hbase.backup.master.LogRollMasterProcedureManager; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.client.Admin; +import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.client.ConnectionFactory; +import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv; +import org.apache.hadoop.hbase.procedure2.StateMachineProcedure; +import org.apache.hadoop.hbase.protobuf.generated.BackupProtos.FullTableBackupState; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; +import org.apache.hadoop.hbase.util.FSUtils; + +@InterfaceAudience.Private +public class FullTableBackupProcedure + extends StateMachineProcedure { + private static final Log LOG = LogFactory.getLog(SnapshotTableProcedure.class); + + private final AtomicBoolean aborted = new AtomicBoolean(false); + private Configuration conf; + private String backupId; + private List tableList; + private String targetRootDir; + + private BackupManager backupManager; + private BackupContext backupContext; + + public FullTableBackupProcedure() { + // Required by the Procedure framework to create the procedure on replay + } + + public FullTableBackupProcedure(final MasterProcedureEnv env, + final String backupId, List tableList, String targetRootDir) throws IOException { + backupManager = new BackupManager(env.getMasterConfiguration()); + this.backupId = backupId; + this.tableList = tableList; + this.targetRootDir = targetRootDir; + backupContext = + backupManager.createBackupContext(backupId, BackupType.FULL, tableList, targetRootDir); + } + + /** + * Begin the overall backup. 
+ * @param backupContext backup context + * @throws IOException exception + */ + static void beginBackup(BackupManager backupManager, BackupContext backupContext) + throws IOException { + // set the start timestamp of the overall backup + long startTs = EnvironmentEdgeManager.currentTime(); + backupContext.setStartTs(startTs); + // set overall backup status: ongoing + backupContext.setState(BackupState.RUNNING); + LOG.info("Backup " + backupContext.getBackupId() + " started at " + startTs + "."); + + backupManager.updateBackupStatus(backupContext); + if (LOG.isDebugEnabled()) { + LOG.debug("Backup session " + backupContext.getBackupId() + " has been started."); + } + } + + private static String getMessage(Exception e) { + String msg = e.getMessage(); + if (msg == null || msg.equals("")) { + msg = e.getClass().getName(); + } + return msg; + } + + /** + * Delete HBase snapshot for backup. + * @param backupCtx backup context + * @throws Exception exception + */ + private static void deleteSnapshot(BackupContext backupCtx, Configuration conf) + throws IOException { + LOG.debug("Trying to delete snapshot for full backup."); + Connection conn = null; + Admin admin = null; + try { + conn = ConnectionFactory.createConnection(conf); + admin = conn.getAdmin(); + for (String snapshotName : backupCtx.getSnapshotNames()) { + if (snapshotName == null) { + continue; + } + LOG.debug("Trying to delete snapshot: " + snapshotName); + admin.deleteSnapshot(snapshotName); + LOG.debug("Deleting the snapshot " + snapshotName + " for backup " + + backupCtx.getBackupId() + " succeeded."); + } + } finally { + if (admin != null) { + admin.close(); + } + if (conn != null) { + conn.close(); + } + } + } + + /** + * Clean up directories with prefix "exportSnapshot-", which are generated when exporting + * snapshots. + * @throws IOException exception + */ + private static void cleanupExportSnapshotLog(Configuration conf) throws IOException { + FileSystem fs = FSUtils.getCurrentFileSystem(conf); + Path stagingDir = + new Path(conf.get(BackupRestoreConstants.CONF_STAGING_ROOT, fs.getWorkingDirectory() + .toString())); + FileStatus[] files = FSUtils.listStatus(fs, stagingDir); + if (files == null) { + return; + } + for (FileStatus file : files) { + if (file.getPath().getName().startsWith("exportSnapshot-")) { + LOG.debug("Delete log files of exporting snapshot: " + file.getPath().getName()); + if (FSUtils.delete(fs, file.getPath(), true) == false) { + LOG.warn("Can not delete " + file.getPath()); + } + } + } + } + + /** + * Clean up the uncompleted data at target directory if the ongoing backup has already entered the + * copy phase. + */ + private static void cleanupTargetDir(BackupContext backupContext, Configuration conf) { + try { + // clean up the uncompleted data at target directory if the ongoing backup has already entered + // the copy phase + LOG.debug("Trying to cleanup up target dir. 
Current backup phase: " + + backupContext.getPhase()); + if (backupContext.getPhase().equals(BackupPhase.SNAPSHOTCOPY) + || backupContext.getPhase().equals(BackupPhase.INCREMENTAL_COPY) + || backupContext.getPhase().equals(BackupPhase.STORE_MANIFEST)) { + FileSystem outputFs = + FileSystem.get(new Path(backupContext.getTargetRootDir()).toUri(), conf); + + // now treat one backup as a transaction, clean up data that has been partially copied at + // table level + for (TableName table : backupContext.getTables()) { + Path targetDirPath = + new Path(HBackupFileSystem.getTableBackupDir(backupContext.getTargetRootDir(), + backupContext.getBackupId(), table)); + if (outputFs.delete(targetDirPath, true)) { + LOG.info("Cleaning up uncompleted backup data at " + targetDirPath.toString() + + " done."); + } else { + LOG.info("No data has been copied to " + targetDirPath.toString() + "."); + } + + Path tableDir = targetDirPath.getParent(); + FileStatus[] backups = FSUtils.listStatus(outputFs, tableDir); + if (backups == null || backups.length == 0) { + outputFs.delete(tableDir, true); + LOG.debug(tableDir.toString() + " is empty, remove it."); + } + } + } + + } catch (IOException e1) { + LOG.error("Cleaning up uncompleted backup data of " + backupContext.getBackupId() + " at " + + backupContext.getTargetRootDir() + " failed due to " + e1.getMessage() + "."); + } + } + + /** + * Fail the overall backup. + * @param backupContext backup context + * @param e exception + * @throws Exception exception + */ + static void failBackup(BackupContext backupContext, BackupManager backupManager, Exception e, + String msg, BackupType type, Configuration conf) throws IOException { + LOG.error(msg + getMessage(e)); + // If this is a cancel exception, then we've already cleaned. + + if (backupContext.getState().equals(BackupState.CANCELLED)) { + return; + } + + // set the failure timestamp of the overall backup + backupContext.setEndTs(EnvironmentEdgeManager.currentTime()); + + // set failure message + backupContext.setFailedMsg(e.getMessage()); + + // set overall backup status: failed + backupContext.setState(BackupState.FAILED); + + // compose the backup failed data + String backupFailedData = + "BackupId=" + backupContext.getBackupId() + ",startts=" + backupContext.getStartTs() + + ",failedts=" + backupContext.getEndTs() + ",failedphase=" + backupContext.getPhase() + + ",failedmessage=" + backupContext.getFailedMsg(); + LOG.error(backupFailedData); + + backupManager.updateBackupStatus(backupContext); + + // if full backup, then delete HBase snapshots if there already are snapshots taken + // and also clean up export snapshot log files if exist + if (type == BackupType.FULL) { + deleteSnapshot(backupContext, conf); + cleanupExportSnapshotLog(conf); + } + + // clean up the uncompleted data at target directory if the ongoing backup has already entered + // the copy phase + // For incremental backup, DistCp logs will be cleaned with the targetDir. + cleanupTargetDir(backupContext, conf); + + LOG.info("Backup " + backupContext.getBackupId() + " failed."); + } + + /** + * Do snapshot copy. 
+ * @param backupContext backup context + * @throws Exception exception + */ + private void snapshotCopy(BackupContext backupContext) throws Exception { + LOG.info("Snapshot copy is starting."); + + // set overall backup phase: snapshot_copy + backupContext.setPhase(BackupPhase.SNAPSHOTCOPY); + + // avoid action if has been cancelled + if (backupContext.isCancelled()) { + return; + } + + // call ExportSnapshot to copy files based on hbase snapshot for backup + // ExportSnapshot only support single snapshot export, need loop for multiple tables case + BackupCopyService copyService = BackupRestoreFactory.getBackupCopyService(conf); + + // number of snapshots matches number of tables + float numOfSnapshots = backupContext.getSnapshotNames().size(); + + LOG.debug("There are " + (int) numOfSnapshots + " snapshots to be copied."); + + for (TableName table : backupContext.getTables()) { + // Currently we simply set the sub copy tasks by counting the table snapshot number, we can + // calculate the real files' size for the percentage in the future. + // TODO this below + // backupCopier.setSubTaskPercntgInWholeTask(1f / numOfSnapshots); + int res = 0; + String[] args = new String[4]; + args[0] = "-snapshot"; + args[1] = backupContext.getSnapshotName(table); + args[2] = "-copy-to"; + args[3] = backupContext.getBackupStatus(table).getTargetDir(); + + LOG.debug("Copy snapshot " + args[1] + " to " + args[3]); + res = copyService.copy(backupContext, backupManager, conf, BackupCopyService.Type.FULL, args); + // if one snapshot export failed, do not continue for remained snapshots + if (res != 0) { + LOG.error("Exporting Snapshot " + args[1] + " failed with return code: " + res + "."); + + throw new IOException("Failed of exporting snapshot " + args[1] + " to " + args[3] + + " with reason code " + res); + } + + LOG.info("Snapshot copy " + args[1] + " finished."); + } + } + + /** + * Add manifest for the current backup. The manifest is stored + * within the table backup directory. + * @param backupContext The current backup context + * @throws IOException exception + * @throws BackupException exception + */ + private static void addManifest(BackupContext backupContext, BackupManager backupManager, + BackupType type, Configuration conf) throws IOException, BackupException { + // set the overall backup phase : store manifest + backupContext.setPhase(BackupPhase.STORE_MANIFEST); + + // avoid action if has been cancelled + if (backupContext.isCancelled()) { + return; + } + + BackupManifest manifest; + + // Since we have each table's backup in its own directory structure, + // we'll store its manifest with the table directory. + for (TableName table : backupContext.getTables()) { + manifest = new BackupManifest(backupContext, table); + ArrayList ancestors = backupManager.getAncestors(backupContext, table); + for (BackupImage image : ancestors) { + manifest.addDependentImage(image); + } + + if (type == BackupType.INCREMENTAL) { + // We'll store the log timestamps for this table only in its manifest. 
+ HashMap> tableTimestampMap = + new HashMap>(); + tableTimestampMap.put(table, backupContext.getIncrTimestampMap().get(table)); + manifest.setIncrTimestampMap(tableTimestampMap); + } + manifest.store(conf); + } + + // For incremental backup, we store a overall manifest in + // /WALs/ + // This is used when created the next incremental backup + if (type == BackupType.INCREMENTAL) { + manifest = new BackupManifest(backupContext); + // set the table region server start and end timestamps for incremental backup + manifest.setIncrTimestampMap(backupContext.getIncrTimestampMap()); + ArrayList ancestors = backupManager.getAncestors(backupContext); + for (BackupImage image : ancestors) { + manifest.addDependentImage(image); + } + manifest.store(conf); + } + } + + /** + * Get backup request meta data dir as string. + * @param backupContext backup context + * @return meta data dir + */ + private static String obtainBackupMetaDataStr(BackupContext backupContext) { + StringBuffer sb = new StringBuffer(); + sb.append("type=" + backupContext.getType() + ",tablelist="); + for (TableName table : backupContext.getTables()) { + sb.append(table + ";"); + } + if (sb.lastIndexOf(";") > 0) { + sb.delete(sb.lastIndexOf(";"), sb.lastIndexOf(";") + 1); + } + sb.append(",targetRootDir=" + backupContext.getTargetRootDir()); + + return sb.toString(); + } + + /** + * Clean up directories with prefix "_distcp_logs-", which are generated when DistCp copying + * hlogs. + * @throws IOException exception + */ + private static void cleanupDistCpLog(BackupContext backupContext, Configuration conf) + throws IOException { + Path rootPath = new Path(backupContext.getHLogTargetDir()).getParent(); + FileSystem fs = FileSystem.get(rootPath.toUri(), conf); + FileStatus[] files = FSUtils.listStatus(fs, rootPath); + if (files == null) { + return; + } + for (FileStatus file : files) { + if (file.getPath().getName().startsWith("_distcp_logs")) { + LOG.debug("Delete log files of DistCp: " + file.getPath().getName()); + FSUtils.delete(fs, file.getPath(), true); + } + } + } + + /** + * Complete the overall backup. 
+ * @param backupContext backup context + * @throws Exception exception + */ + static void completeBackup(BackupContext backupContext, BackupManager backupManager, + BackupType type, Configuration conf) throws IOException { + // set the complete timestamp of the overall backup + backupContext.setEndTs(EnvironmentEdgeManager.currentTime()); + // set overall backup status: complete + backupContext.setState(BackupState.COMPLETE); + // add and store the manifest for the backup + addManifest(backupContext, backupManager, type, conf); + + // after major steps done and manifest persisted, do convert if needed for incremental backup + /* in-fly convert code here, provided by future jira */ + LOG.debug("in-fly convert code here, provided by future jira"); + + // compose the backup complete data + String backupCompleteData = + obtainBackupMetaDataStr(backupContext) + ",startts=" + backupContext.getStartTs() + + ",completets=" + backupContext.getEndTs() + ",bytescopied=" + + backupContext.getTotalBytesCopied(); + if (LOG.isDebugEnabled()) { + LOG.debug("Backup " + backupContext.getBackupId() + " finished: " + backupCompleteData); + } + backupManager.updateBackupStatus(backupContext); + + // when full backup is done: + // - delete HBase snapshot + // - clean up directories with prefix "exportSnapshot-", which are generated when exporting + // snapshots + if (type == BackupType.FULL) { + deleteSnapshot(backupContext, conf); + cleanupExportSnapshotLog(conf); + } else if (type == BackupType.INCREMENTAL) { + cleanupDistCpLog(backupContext, conf); + } + + LOG.info("Backup " + backupContext.getBackupId() + " completed."); + } + + @Override + protected Flow executeFromState(final MasterProcedureEnv env, final FullTableBackupState state) + throws InterruptedException { + if (conf == null) { + conf = env.getMasterConfiguration(); + } + if (backupManager == null) { + try { + backupManager = new BackupManager(env.getMasterConfiguration()); + } catch (IOException ioe) { + setFailure("full backup", ioe); + } + } + if (LOG.isTraceEnabled()) { + LOG.trace(this + " execute state=" + state); + } + HashMap newTimestamps = null; + try { + switch (state) { + case PRE_SNAPSHOT_TABLE: + beginBackup(backupManager, backupContext); + String savedStartCode = null; + boolean firstBackup = false; + // do snapshot for full table backup + + try { + savedStartCode = backupManager.readBackupStartCode(); + firstBackup = savedStartCode == null; + if (firstBackup) { + // This is our first backup. Let's put some marker on ZK so that we can hold the logs + // while we do the backup. + backupManager.writeBackupStartCode(0L); + } + // We roll log here before we do the snapshot. It is possible there is duplicate data + // in the log that is already in the snapshot. But if we do it after the snapshot, we + // could have data loss. + // A better approach is to do the roll log on each RS in the same global procedure as + // the snapshot. 
+ LOG.info("Execute roll log procedure for full backup ..."); + Admin admin = ConnectionFactory.createConnection(env.getMasterConfiguration()) + .getAdmin(); + admin.execProcedure(LogRollMasterProcedureManager.ROLLLOG_PROCEDURE_SIGNATURE, + LogRollMasterProcedureManager.ROLLLOG_PROCEDURE_NAME, new HashMap()); + newTimestamps = backupManager.readRegionServerLastLogRollResult(); + if (firstBackup) { + // Updates registered log files + // We record ALL old WAL files as registered, because + // this is a first full backup in the system and these + // files are not needed for next incremental backup + List logFiles = BackupUtil.getWALFilesOlderThan(conf, newTimestamps); + backupManager.recordWALFiles(logFiles); + } + } catch (BackupException e) { + // fail the overall backup and return + failBackup(backupContext, backupManager, e, "Unexpected BackupException : ", + BackupType.FULL, conf); + return null; + } + setNextState(FullTableBackupState.SNAPSHOT_TABLES); + break; + case SNAPSHOT_TABLES: + for (TableName tableName : tableList) { + addChildProcedure(new SnapshotTableProcedure(env, tableName, + "snapshot_" + Long.toString(EnvironmentEdgeManager.currentTime()) + "_" + + tableName.getNamespaceAsString() + "_" + tableName.getQualifierAsString())); + } + setNextState(FullTableBackupState.SNAPSHOT_COPY); + break; + case SNAPSHOT_COPY: + // do snapshot copy + try { + this.snapshotCopy(backupContext); + } catch (Exception e) { + // fail the overall backup and return + failBackup(backupContext, backupManager, e, "Unexpected BackupException : ", + BackupType.FULL, conf); + return null; + } + // Updates incremental backup table set + backupManager.addIncrementalBackupTableSet(backupContext.getTables()); + setNextState(FullTableBackupState.BACKUP_COMPLETE); + break; + + case BACKUP_COMPLETE: + // set overall backup status: complete. Here we make sure to complete the backup. After this + // checkpoint, even if entering cancel process, will let the backup finished + backupContext.setState(BackupState.COMPLETE); + // The table list in backupContext is good for both full backup and incremental backup. + // For incremental backup, it contains the incremental backup table set. 
+ backupManager.writeRegionServerLogTimestamp(backupContext.getTables(), newTimestamps); + + HashMap> newTableSetTimestampMap = + backupManager.readLogTimestampMap(); + + Long newStartCode = + BackupUtil.getMinValue(BackupUtil.getRSLogTimestampMins(newTableSetTimestampMap)); + backupManager.writeBackupStartCode(newStartCode); + + // backup complete + completeBackup(backupContext, backupManager, BackupType.FULL, conf); + return Flow.NO_MORE_STATE; + + default: + throw new UnsupportedOperationException("unhandled state=" + state); + } + } catch (IOException e) { + setFailure("snapshot-table", e); + } + return Flow.HAS_MORE_STATE; + } + + @Override + protected void rollbackState(final MasterProcedureEnv env, final FullTableBackupState state) + throws IOException { + } + + @Override + protected FullTableBackupState getState(final int stateId) { + return FullTableBackupState.valueOf(stateId); + } + + @Override + protected int getStateId(final FullTableBackupState state) { + return state.getNumber(); + } + + @Override + protected FullTableBackupState getInitialState() { + return FullTableBackupState.PRE_SNAPSHOT_TABLE; + } + + @Override + protected void setNextState(final FullTableBackupState state) { + if (aborted.get()) { + setAbortFailure("snapshot-table", "abort requested"); + } else { + super.setNextState(state); + } + } + + @Override + public boolean abort(final MasterProcedureEnv env) { + aborted.set(true); + return true; + } + + @Override + public void toStringClassDetails(StringBuilder sb) { + sb.append(getClass().getSimpleName()); + sb.append(" (targetRootDir="); + sb.append(targetRootDir); + sb.append(")"); + } + + @Override + public void serializeStateData(final OutputStream stream) throws IOException { + super.serializeStateData(stream); + + backupContext.toBackupContext().writeDelimitedTo(stream); + } + + @Override + public void deserializeStateData(final InputStream stream) throws IOException { + super.deserializeStateData(stream); + + backupContext = BackupContext.fromStream(stream); + backupId = backupContext.getBackupId(); + targetRootDir = backupContext.getTargetRootDir(); + tableList = backupContext.getTableNames(); + } + + @Override + protected boolean acquireLock(final MasterProcedureEnv env) { + if (env.waitInitialized(this)) { + return false; + } + return env.getProcedureQueue().tryAcquireTableExclusiveLock(this, TableName.BACKUP_TABLE_NAME); + } + + @Override + protected void releaseLock(final MasterProcedureEnv env) { + env.getProcedureQueue().releaseTableExclusiveLock(this, TableName.BACKUP_TABLE_NAME); + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalTableBackupProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalTableBackupProcedure.java new file mode 100644 index 0000000..f05c314 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalTableBackupProcedure.java @@ -0,0 +1,284 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.backup.impl; + +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.concurrent.atomic.AtomicBoolean; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.backup.BackupRestoreFactory; +import org.apache.hadoop.hbase.backup.BackupType; +import org.apache.hadoop.hbase.backup.impl.BackupHandler.BackupPhase; +import org.apache.hadoop.hbase.backup.impl.BackupHandler.BackupState; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.client.Admin; +import org.apache.hadoop.hbase.client.ConnectionFactory; +import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv; +import org.apache.hadoop.hbase.procedure2.StateMachineProcedure; +import org.apache.hadoop.hbase.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.protobuf.generated.BackupProtos; +import org.apache.hadoop.hbase.protobuf.generated.BackupProtos.IncrementalTableBackupState; +import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos; +import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; + +@InterfaceAudience.Private +public class IncrementalTableBackupProcedure + extends StateMachineProcedure { + private static final Log LOG = LogFactory.getLog(SnapshotTableProcedure.class); + + private final AtomicBoolean aborted = new AtomicBoolean(false); + private Configuration conf; + private String backupId; + private List tableList; + private String targetRootDir; + + private BackupManager backupManager; + private BackupContext backupContext; + + public IncrementalTableBackupProcedure() { + // Required by the Procedure framework to create the procedure on replay + } + + public IncrementalTableBackupProcedure(final MasterProcedureEnv env, + final String backupId, BackupType type, + List tableList, String targetRootDir) + throws IOException { + backupManager = new BackupManager(env.getMasterConfiguration()); + this.backupId = backupId; + this.tableList = tableList; + this.targetRootDir = targetRootDir; + backupContext = + backupManager.createBackupContext(backupId, type, tableList, targetRootDir); + } + + private List filterMissingFiles(List incrBackupFileList) throws IOException { + FileSystem fs = FileSystem.get(conf); + List list = new ArrayList(); + for(String file : incrBackupFileList){ + if(fs.exists(new Path(file))){ + list.add(file); + } else{ + LOG.warn("Can't find file: "+file); + } + } + return list; + } + + /** + * Do incremental copy. 
+ * @param backupContext backup context + */ + private void incrementalCopy(BackupContext backupContext) throws Exception { + + LOG.info("Incremental copy is starting."); + + // set overall backup phase: incremental_copy + backupContext.setPhase(BackupPhase.INCREMENTAL_COPY); + + // avoid action if has been cancelled + if (backupContext.isCancelled()) { + return; + } + + // get incremental backup file list and prepare parms for DistCp + List incrBackupFileList = backupContext.getIncrBackupFileList(); + // filter missing files out (they have been copied by previous backups) + incrBackupFileList = filterMissingFiles(incrBackupFileList); + String[] strArr = incrBackupFileList.toArray(new String[incrBackupFileList.size() + 1]); + strArr[strArr.length - 1] = backupContext.getHLogTargetDir(); + + BackupCopyService copyService = BackupRestoreFactory.getBackupCopyService(conf); + int res = copyService.copy(backupContext, backupManager, conf, + BackupCopyService.Type.INCREMENTAL, strArr); + + if (res != 0) { + LOG.error("Copy incremental log files failed with return code: " + res + "."); + throw new IOException("Failed of Hadoop Distributed Copy from " + incrBackupFileList + " to " + + backupContext.getHLogTargetDir()); + } + LOG.info("Incremental copy from " + incrBackupFileList + " to " + + backupContext.getHLogTargetDir() + " finished."); + } + + @Override + protected Flow executeFromState(final MasterProcedureEnv env, final IncrementalTableBackupState state) + throws InterruptedException { + if (conf == null) { + conf = env.getMasterConfiguration(); + } + if (backupManager == null) { + try { + backupManager = new BackupManager(env.getMasterConfiguration()); + } catch (IOException ioe) { + setFailure("incremental backup", ioe); + } + } + if (LOG.isTraceEnabled()) { + LOG.trace(this + " execute state=" + state); + } + HashMap newTimestamps = null; + try { + switch (state) { + case PREPARE_INCREMENTAL: + FullTableBackupProcedure.beginBackup(backupManager, backupContext); + LOG.debug("For incremental backup, current table set is " + + backupManager.getIncrementalBackupTableSet()); + try { + IncrementalBackupManager incrBackupManager = new IncrementalBackupManager(backupManager); + + newTimestamps = incrBackupManager.getIncrBackupLogFileList(backupContext); + } catch (Exception e) { + // fail the overall backup and return + FullTableBackupProcedure.failBackup(backupContext, backupManager, e, + "Unexpected Exception : ", BackupType.INCREMENTAL, conf); + } + + setNextState(IncrementalTableBackupState.INCREMENTAL_COPY); + break; + case INCREMENTAL_COPY: + try { + // copy out the table and region info files for each table + BackupUtil.copyTableRegionInfo(backupContext, conf); + incrementalCopy(backupContext); + // Save list of WAL files copied + backupManager.recordWALFiles(backupContext.getIncrBackupFileList()); + } catch (Exception e) { + // fail the overall backup and return + FullTableBackupProcedure.failBackup(backupContext, backupManager, e, + "Unexpected exception doing incremental copy : ", BackupType.INCREMENTAL, conf); + } + setNextState(IncrementalTableBackupState.INCR_BACKUP_COMPLETE); + break; + case INCR_BACKUP_COMPLETE: + // set overall backup status: complete. Here we make sure to complete the backup. After this + // checkpoint, even if entering cancel process, will let the backup finished + backupContext.setState(BackupState.COMPLETE); + // Set the previousTimestampMap which is before this current log roll to the manifest. 
+ HashMap> previousTimestampMap = + backupManager.readLogTimestampMap(); + backupContext.setIncrTimestampMap(previousTimestampMap); + + // The table list in backupContext is good for both full backup and incremental backup. + // For incremental backup, it contains the incremental backup table set. + backupManager.writeRegionServerLogTimestamp(backupContext.getTables(), newTimestamps); + + HashMap> newTableSetTimestampMap = + backupManager.readLogTimestampMap(); + + Long newStartCode = + BackupUtil.getMinValue(BackupUtil.getRSLogTimestampMins(newTableSetTimestampMap)); + backupManager.writeBackupStartCode(newStartCode); + // backup complete + FullTableBackupProcedure.completeBackup(backupContext, backupManager, + BackupType.INCREMENTAL, conf); + return Flow.NO_MORE_STATE; + + default: + throw new UnsupportedOperationException("unhandled state=" + state); + } + } catch (IOException e) { + setFailure("snapshot-table", e); + } + return Flow.HAS_MORE_STATE; + } + + @Override + protected void rollbackState(final MasterProcedureEnv env, final IncrementalTableBackupState state) + throws IOException { + } + + @Override + protected IncrementalTableBackupState getState(final int stateId) { + return IncrementalTableBackupState.valueOf(stateId); + } + + @Override + protected int getStateId(final IncrementalTableBackupState state) { + return state.getNumber(); + } + + @Override + protected IncrementalTableBackupState getInitialState() { + return IncrementalTableBackupState.PREPARE_INCREMENTAL; + } + + @Override + protected void setNextState(final IncrementalTableBackupState state) { + if (aborted.get()) { + setAbortFailure("snapshot-table", "abort requested"); + } else { + super.setNextState(state); + } + } + + @Override + public boolean abort(final MasterProcedureEnv env) { + aborted.set(true); + return true; + } + + @Override + public void toStringClassDetails(StringBuilder sb) { + sb.append(getClass().getSimpleName()); + sb.append(" (targetRootDir="); + sb.append(targetRootDir); + sb.append(")"); + } + + @Override + public void serializeStateData(final OutputStream stream) throws IOException { + super.serializeStateData(stream); + + backupContext.toBackupContext().writeDelimitedTo(stream); + } + + @Override + public void deserializeStateData(final InputStream stream) throws IOException { + super.deserializeStateData(stream); + + backupContext = BackupContext.fromStream(stream); + backupId = backupContext.getBackupId(); + targetRootDir = backupContext.getTargetRootDir(); + tableList = backupContext.getTableNames(); + } + + @Override + protected boolean acquireLock(final MasterProcedureEnv env) { + if (env.waitInitialized(this)) { + return false; + } + return env.getProcedureQueue().tryAcquireTableExclusiveLock(this, TableName.BACKUP_TABLE_NAME); + } + + @Override + protected void releaseLock(final MasterProcedureEnv env) { + env.getProcedureQueue().releaseTableExclusiveLock(this, TableName.BACKUP_TABLE_NAME); + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/SnapshotTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/SnapshotTableProcedure.java new file mode 100644 index 0000000..8d8da2c --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/SnapshotTableProcedure.java @@ -0,0 +1,189 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.backup.impl; + +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.util.concurrent.atomic.AtomicBoolean; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.client.Admin; +import org.apache.hadoop.hbase.client.ConnectionFactory; +import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv; +import org.apache.hadoop.hbase.procedure2.StateMachineProcedure; +import org.apache.hadoop.hbase.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.protobuf.generated.BackupProtos; +import org.apache.hadoop.hbase.protobuf.generated.BackupProtos.SnapshotTableState; +import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos; +import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription; + +@InterfaceAudience.Private +public class SnapshotTableProcedure + extends StateMachineProcedure { + private static final Log LOG = LogFactory.getLog(SnapshotTableProcedure.class); + + private final AtomicBoolean aborted = new AtomicBoolean(false); + private TableName tableName; + private String snapshotName; + + public SnapshotTableProcedure() { + // Required by the Procedure framework to create the procedure on replay + } + + public SnapshotTableProcedure(final MasterProcedureEnv env, + final TableName tableName, final String snapshotName) + throws IOException { + this.snapshotName = snapshotName; + this.tableName = tableName; + } + + @Override + protected Flow executeFromState(final MasterProcedureEnv env, final SnapshotTableState state) + throws InterruptedException { + if (LOG.isTraceEnabled()) { + LOG.trace(this + " execute state=" + state); + } + try { + switch (state) { + case SNAPSHOT_TABLE: + HBaseProtos.SnapshotDescription backupSnapshot; + + // wrap a SnapshotDescription for offline/online snapshot + backupSnapshot = this.wrapSnapshotDescription(tableName, snapshotName); + + Admin admin = ConnectionFactory.createConnection(env.getMasterConfiguration()).getAdmin(); + // try to remove existing snapshot + try { + admin.deleteSnapshot(snapshotName); + } catch (IOException e) { + LOG.debug("Unable to delete " + snapshotName, e); + } + // Kick off snapshot for backup + admin.snapshot(backupSnapshot); + // set the snapshot name in BackupStatus of this table, only after snapshot success. + return Flow.NO_MORE_STATE; + default: + throw new UnsupportedOperationException("unhandled state=" + state); + } + } catch (IOException e) { + setFailure("snapshot-table", e); + } + return Flow.HAS_MORE_STATE; + } + + /** + * Wrap a SnapshotDescription for a target table. + * @param table table + * @return a SnapshotDescription especially for backup. 
+ */ + private SnapshotDescription wrapSnapshotDescription(TableName tableName, String snapshotName) { + // Mock a SnapshotDescription from backupContext to call SnapshotManager function, + // Name it in the format "snapshot__" + HBaseProtos.SnapshotDescription.Builder builder = HBaseProtos.SnapshotDescription.newBuilder(); + builder.setTable(tableName.getNameAsString()); + builder.setName(snapshotName); + HBaseProtos.SnapshotDescription backupSnapshot = builder.build(); + + LOG.debug("Wrapped a SnapshotDescription " + backupSnapshot.getName() + + " from backupContext to request snapshot for backup."); + + return backupSnapshot; + } + + @Override + protected void rollbackState(final MasterProcedureEnv env, final SnapshotTableState state) + throws IOException { + } + + @Override + protected SnapshotTableState getState(final int stateId) { + return SnapshotTableState.valueOf(stateId); + } + + @Override + protected int getStateId(final SnapshotTableState state) { + return state.getNumber(); + } + + @Override + protected SnapshotTableState getInitialState() { + return SnapshotTableState.SNAPSHOT_TABLE; + } + + @Override + protected void setNextState(final SnapshotTableState state) { + if (aborted.get()) { + setAbortFailure("snapshot-table", "abort requested"); + } else { + super.setNextState(state); + } + } + + @Override + public boolean abort(final MasterProcedureEnv env) { + aborted.set(true); + return true; + } + + @Override + public void toStringClassDetails(StringBuilder sb) { + sb.append(getClass().getSimpleName()); + sb.append(" (table="); + sb.append(tableName); + sb.append(")"); + } + + @Override + public void serializeStateData(final OutputStream stream) throws IOException { + super.serializeStateData(stream); + + BackupProtos.SnapshotTableStateData.Builder state = + BackupProtos.SnapshotTableStateData.newBuilder() + .setTable(ProtobufUtil.toProtoTableName(tableName)) + .setSnapshotName(snapshotName); + state.build().writeDelimitedTo(stream); + } + + @Override + public void deserializeStateData(final InputStream stream) throws IOException { + super.deserializeStateData(stream); + + BackupProtos.SnapshotTableStateData state = + BackupProtos.SnapshotTableStateData.parseDelimitedFrom(stream); + tableName = ProtobufUtil.toTableName(state.getTable()); + snapshotName = state.getSnapshotName(); + } + + @Override + protected boolean acquireLock(final MasterProcedureEnv env) { + if (!tableName.isSystemTable() && env.waitInitialized(this)) { + return false; + } + return env.getProcedureQueue().tryAcquireTableExclusiveLock(this, tableName); + } + + @Override + protected void releaseLock(final MasterProcedureEnv env) { + env.getProcedureQueue().releaseTableExclusiveLock(this, tableName); + } +}
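
Reviewer notes (illustrative sketches, not part of the patch):

The FullTableBackupRequest message added to Master.proto carries a required backup_id, a repeated hbase.pb.TableName list, and a required target_root_dir; the master-side handler in MasterRpcServices converts the proto table names back with ProtobufUtil.toTableName before calling HMaster.fullTableBackup (the optional proc_id field of the response is left unset by that handler). A minimal client-side sketch of filling the request with the standard generated builder, assuming the MasterProtos classes regenerated from this change are on the classpath; the backup id, table names, and target root dir below are placeholder values:

import java.util.Arrays;
import java.util.List;

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.FullTableBackupRequest;

public class FullTableBackupRequestSketch {
  public static void main(String[] args) {
    List<TableName> tables =
        Arrays.asList(TableName.valueOf("ns1", "t1"), TableName.valueOf("ns1", "t2"));

    FullTableBackupRequest.Builder builder = FullTableBackupRequest.newBuilder()
        .setBackupId("backup_1462387200000")         // required string backup_id = 1
        .setTargetRootDir("hdfs://nn:8020/backup");  // required string target_root_dir = 3
    for (TableName table : tables) {
      // repeated TableName tables = 2: entries are appended with addTables()
      builder.addTables(ProtobufUtil.toProtoTableName(table));
    }
    System.out.println(builder.build());
  }
}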
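Each procedure records its progress using the new Backup.proto state enums: getStateId() persists the enum's protobuf number and getState() maps that number back to the enum constant on replay. A small illustration under the same assumption that the regenerated BackupProtos enums are available; the value comes from the enum definition above (SNAPSHOT_COPY = 3):

import org.apache.hadoop.hbase.protobuf.generated.BackupProtos.FullTableBackupState;

public class BackupStateIdSketch {
  public static void main(String[] args) {
    // getStateId(state): the persisted id is simply the enum's wire number.
    int stateId = FullTableBackupState.SNAPSHOT_COPY.getNumber();  // 3

    // getState(stateId): on replay the id is resolved back to the enum constant.
    FullTableBackupState resumed = FullTableBackupState.valueOf(stateId);
    assert resumed == FullTableBackupState.SNAPSHOT_COPY;
  }
}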
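The procedures persist their backup state after the superclass data as length-delimited protobuf: FullTableBackupProcedure and IncrementalTableBackupProcedure write the whole BackupProtos.BackupContext via toBackupContext().writeDelimitedTo(stream) and read it back with BackupContext.fromStream(stream), while SnapshotTableProcedure uses the new SnapshotTableStateData message. A round-trip sketch of that mechanism, again assuming the regenerated BackupProtos classes; the in-memory buffer stands in for the procedure store stream, and the snapshot name follows the snapshot_<timestamp>_<namespace>_<table> pattern used in the SNAPSHOT_TABLES step:

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.BackupProtos.SnapshotTableStateData;

public class SnapshotStateRoundTripSketch {
  public static void main(String[] args) throws IOException {
    SnapshotTableStateData state = SnapshotTableStateData.newBuilder()
        .setTable(ProtobufUtil.toProtoTableName(TableName.valueOf("ns1", "t1")))
        .setSnapshotName("snapshot_1462387200000_ns1_t1")
        .build();

    // serializeStateData(): writeDelimitedTo prefixes the message with its length,
    // so it can be appended after whatever the superclass already wrote to the stream.
    ByteArrayOutputStream out = new ByteArrayOutputStream();
    state.writeDelimitedTo(out);

    // deserializeStateData(): read exactly one delimited message back from the same position.
    SnapshotTableStateData restored =
        SnapshotTableStateData.parseDelimitedFrom(new ByteArrayInputStream(out.toByteArray()));
    assert restored.getSnapshotName().equals(state.getSnapshotName());
  }
}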