diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java index c3b524b8f0741564d8821409f93ac02f12ea4b38..9b899740bbe8bb12afdfe6853e6ed139710fc893 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java @@ -37,6 +37,7 @@ import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableExistsException; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableNotFoundException; +import org.apache.hadoop.hbase.backup.BackupType; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.classification.InterfaceStability; import org.apache.hadoop.hbase.client.security.SecurityCapability; @@ -913,6 +914,47 @@ public interface Admin extends Abortable, Closeable { throws IOException; /** + * Backs up the given list of tables. Asynchronous operation. This means that + * it may be a while before all your tables are backed up. + * You can use Future.get(long, TimeUnit) to wait on the operation to complete. + * + * @param type whether the backup is full or incremental + * @param tableList list of tables to back up + * @param targetRootDir root dir for saving the backup + * @param workers number of parallel workers; -1 means system defined + * @param bandwidth bandwidth per worker in MB per second; -1 means unlimited + * @return the result of the async backup. You can use Future.get(long, TimeUnit) to wait on the + * operation to complete + */ + public Future<String> backupTables(final BackupType type, + final List<TableName> tableList, final String targetRootDir, final int workers, + final long bandwidth) throws IOException; + + /** + * Abort a backup. + * @param backupId ID of the backup + * @param mayInterruptIfRunning if the backup completed at least one step, should it be aborted? + * @return true if aborted, false if the backup already completed or does not exist + * @throws IOException + */ + public boolean abortBackup(final String backupId, final boolean mayInterruptIfRunning) + throws IOException; + + /** + * Backs up the given list of tables. Synchronous operation. + * + * @param type whether the backup is full or incremental + * @param tableList list of tables to back up + * @param targetRootDir root dir for saving the backup + * @param workers number of parallel workers; -1 means system defined + * @param bandwidth bandwidth per worker in MB per second; -1 means unlimited + * @return the backup ID + */ + public String backupTablesSync(final BackupType type, + final List<TableName> tableList, final String targetRootDir, final int workers, + final long bandwidth) throws IOException; + + /** + * Modify an existing table, more IRB friendly version. Asynchronous operation. This means that + * it may be a while before your schema change is updated across all of the table. + * You can use Future.get(long, TimeUnit) to wait on the operation to complete.
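For illustration, the new Admin backup API added above could be exercised from client code roughly as follows. This is a minimal sketch and not part of the patch: the table names, backup root directory, and worker/bandwidth values are made up, and it assumes the client-side BackupType enum exposes FULL and INCREMENTAL constants mirroring the BackupProtos.BackupType values.

import java.util.Arrays;
import java.util.List;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.backup.BackupType;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class BackupApiSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Hypothetical tables and backup target directory.
      List<TableName> tables =
          Arrays.asList(TableName.valueOf("t1"), TableName.valueOf("t2"));
      String backupRoot = "hdfs://nn:8020/backups";

      // Synchronous full backup: blocks until the master-side procedure finishes and
      // returns the backup id (-1 = system-defined workers, -1 = unlimited bandwidth).
      String fullId = admin.backupTablesSync(BackupType.FULL, tables, backupRoot, -1, -1L);

      // Asynchronous incremental backup: returns a Future that yields the backup id.
      Future<String> pending =
          admin.backupTables(BackupType.INCREMENTAL, tables, backupRoot, 4, 100L);
      String incrementalId = pending.get(30, TimeUnit.MINUTES);

      // abortBackup returns false here because the backup has already completed.
      boolean aborted = admin.abortBackup(incrementalId, true);
      System.out.println(fullId + " " + incrementalId + " aborted=" + aborted);
    }
  }
}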
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java index 64eb9fbd83a9f058cbf1a9edeee56dc359eb2579..bb6172730a5ac5929eb391c5aa9df19575fa7ec7 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java @@ -1394,6 +1394,13 @@ class ConnectionImplementation implements ClusterConnection, Closeable { } @Override + public MasterProtos.AbortBackupResponse abortBackup( + RpcController controller, + MasterProtos.AbortBackupRequest request) throws ServiceException { + return stub.abortBackup(controller, request); + } + + @Override public MasterProtos.ListProceduresResponse listProcedures( RpcController controller, MasterProtos.ListProceduresRequest request) throws ServiceException { @@ -1401,6 +1408,13 @@ class ConnectionImplementation implements ClusterConnection, Closeable { } @Override + public MasterProtos.BackupTablesResponse backupTables( + RpcController controller, + MasterProtos.BackupTablesRequest request) throws ServiceException { + return stub.backupTables(controller, request); + } + + @Override public MasterProtos.AddColumnResponse addColumn( RpcController controller, MasterProtos.AddColumnRequest request) throws ServiceException { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java index c2a0bb8695886b09484e04db16355efce8a294b5..8fada50c56fe1fc4ad2bc22157cacb9f92d17e9c 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java @@ -61,6 +61,8 @@ import org.apache.hadoop.hbase.TableNotDisabledException; import org.apache.hadoop.hbase.TableNotFoundException; import org.apache.hadoop.hbase.UnknownRegionException; import org.apache.hadoop.hbase.ZooKeeperConnectionException; +import org.apache.hadoop.hbase.backup.BackupType; +import org.apache.hadoop.hbase.backup.BackupClientUtil; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.classification.InterfaceStability; import org.apache.hadoop.hbase.client.security.SecurityCapability; @@ -83,6 +85,10 @@ import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RollWALWriterReque import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RollWALWriterResponse; import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.StopServerRequest; import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateConfigurationRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortBackupRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortBackupResponse; +import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription; @@ -258,6 +264,23 @@ public class HBaseAdmin implements Admin { } @Override + public boolean abortBackup( + final String backupId, + final boolean mayInterruptIfRunning) throws IOException { + Boolean abortBackupResponse = 
executeCallable( + new MasterCallable<AbortBackupResponse>(getConnection()) { + @Override + public AbortBackupResponse call(int callTimeout) throws ServiceException { + AbortBackupRequest abortBackupRequest = + AbortBackupRequest.newBuilder().setBackupId(backupId).build(); + return master.abortBackup(null, abortBackupRequest); + } + }).getIsBackupAborted(); + + return abortBackupResponse; + } + + @Override public boolean abortProcedure(final long procId, final boolean mayInterruptIfRunning) throws IOException { return get(abortProcedureAsync(procId, mayInterruptIfRunning), this.syncWaitTimeout, @@ -1568,6 +1591,69 @@ public class HBaseAdmin implements Admin { } @Override + public Future<String> backupTables(final BackupType type, + final List<TableName> tableList, final String targetRootDir, final int workers, + final long bandwidth) throws IOException { + BackupClientUtil.checkTargetDir(targetRootDir, conf); + if (tableList != null) { + for (TableName table : tableList) { + if (!tableExists(table)) { + throw new DoNotRetryIOException(table + " does not exist"); + } + } + } + + BackupTablesResponse response = executeCallable( + new MasterCallable<BackupTablesResponse>(getConnection()) { + @Override + public BackupTablesResponse call(int callTimeout) throws ServiceException { + BackupTablesRequest request = RequestConverter.buildBackupTablesRequest( + type, tableList, targetRootDir, workers, bandwidth); + return master.backupTables(null, request); + } + }); + return new TableBackupFuture(this, TableName.BACKUP_TABLE_NAME, response); + } + + @Override + public String backupTablesSync(final BackupType type, + final List<TableName> tableList, final String targetRootDir, final int workers, + final long bandwidth) throws IOException { + return get( + backupTables(type, tableList, targetRootDir, workers, bandwidth), + syncWaitTimeout, + TimeUnit.MILLISECONDS); + } + + private static class TableBackupFuture extends TableFuture<String> { + String backupId; + public TableBackupFuture(final HBaseAdmin admin, final TableName tableName, + final BackupTablesResponse response) { + super(admin, tableName, + (response != null && response.hasProcId()) ?
response.getProcId() : null); + backupId = response.getBackupId(); + } + + @Override + public String getOperationType() { + return "BACKUP"; + } + + @Override + protected String convertResult(final GetProcedureResultResponse response) throws IOException { + ByteString result = response.getResult(); + if (result == null) return null; + return Bytes.toStringBinary(result.toByteArray()); + } + + @Override + protected String postOperationResult(final String result, + final long deadlineTs) throws IOException, TimeoutException { + return result; + } + } + + @Override public Future modifyTable(final TableName tableName, final HTableDescriptor htd) throws IOException { if (!tableName.equals(htd.getTableName())) { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java index 261a9aae5e576da04227a77b76db6e73f31c86a3..1ba3af24b7f86c99eeab0c6427e777139bebb924 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java @@ -56,6 +56,7 @@ import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.Tag; import org.apache.hadoop.hbase.TagUtil; +import org.apache.hadoop.hbase.backup.BackupType; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.client.Append; import org.apache.hadoop.hbase.client.Consistency; @@ -93,6 +94,7 @@ import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ServerInfo; import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.SplitRegionRequest; import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WarmupRegionRequest; import org.apache.hadoop.hbase.protobuf.generated.AuthenticationProtos; +import org.apache.hadoop.hbase.protobuf.generated.BackupProtos; import org.apache.hadoop.hbase.protobuf.generated.CellProtos; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BulkLoadHFileRequest; @@ -2727,6 +2729,10 @@ public final class ProtobufUtil { return tableNames; } + public static BackupProtos.BackupType toProtoBackupType(BackupType type) { + return BackupProtos.BackupType.valueOf(type.name()); + } + /** * Convert a protocol buffer CellVisibility to a client CellVisibility * diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/RequestConverter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/RequestConverter.java index 99e993d98b6f3d8ae8fe1e28f251d84e175a3cb7..5babfb81542e0698d6801e95ce9d07f5a9bca46a 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/RequestConverter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/RequestConverter.java @@ -29,6 +29,7 @@ import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.backup.BackupType; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.client.Action; import org.apache.hadoop.hbase.client.Admin; @@ -62,6 +63,7 @@ import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.StopServerRequest; import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest; import 
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest.RegionUpdateInfo; import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WarmupRegionRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BulkLoadHFileRequest; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BulkLoadHFileRequest.FamilyPath; @@ -1268,6 +1270,22 @@ public final class RequestConverter { return builder.build(); } + public static BackupTablesRequest buildBackupTablesRequest( + final BackupType type, List tableList, String targetRootDir, final int workers, + final long bandwidth) { + BackupTablesRequest.Builder builder = BackupTablesRequest.newBuilder(); + builder.setType(ProtobufUtil.toProtoBackupType(type)); + builder.setTargetRootDir(targetRootDir); + builder.setWorkers(workers); + builder.setBandwidth(bandwidth); + if (tableList != null) { + for (TableName table : tableList) { + builder.addTables(ProtobufUtil.toProtoTableName(table)); + } + } + return builder.build(); + } + /** * Creates a protocol buffer GetSchemaAlterStatusRequest * diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/BackupProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/BackupProtos.java index 1a7a1ba10ab3aa2c20f0ffc9d56fd19e721cf059..13ce6103c13bf1702c5b75f38c125822eb22e544 100644 --- a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/BackupProtos.java +++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/BackupProtos.java @@ -9,6 +9,270 @@ public final class BackupProtos { com.google.protobuf.ExtensionRegistry registry) { } /** + * Protobuf enum {@code hbase.pb.FullTableBackupState} + */ + public enum FullTableBackupState + implements com.google.protobuf.ProtocolMessageEnum { + /** + * PRE_SNAPSHOT_TABLE = 1; + */ + PRE_SNAPSHOT_TABLE(0, 1), + /** + * SNAPSHOT_TABLES = 2; + */ + SNAPSHOT_TABLES(1, 2), + /** + * SNAPSHOT_COPY = 3; + */ + SNAPSHOT_COPY(2, 3), + /** + * BACKUP_COMPLETE = 4; + */ + BACKUP_COMPLETE(3, 4), + ; + + /** + * PRE_SNAPSHOT_TABLE = 1; + */ + public static final int PRE_SNAPSHOT_TABLE_VALUE = 1; + /** + * SNAPSHOT_TABLES = 2; + */ + public static final int SNAPSHOT_TABLES_VALUE = 2; + /** + * SNAPSHOT_COPY = 3; + */ + public static final int SNAPSHOT_COPY_VALUE = 3; + /** + * BACKUP_COMPLETE = 4; + */ + public static final int BACKUP_COMPLETE_VALUE = 4; + + + public final int getNumber() { return value; } + + public static FullTableBackupState valueOf(int value) { + switch (value) { + case 1: return PRE_SNAPSHOT_TABLE; + case 2: return SNAPSHOT_TABLES; + case 3: return SNAPSHOT_COPY; + case 4: return BACKUP_COMPLETE; + default: return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap + internalGetValueMap() { + return internalValueMap; + } + private static com.google.protobuf.Internal.EnumLiteMap + internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public FullTableBackupState findValueByNumber(int number) { + return FullTableBackupState.valueOf(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor + getValueDescriptor() { + return getDescriptor().getValues().get(index); + } + public final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptorForType() { + return getDescriptor(); + } + public static final 
com.google.protobuf.Descriptors.EnumDescriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.getDescriptor().getEnumTypes().get(0); + } + + private static final FullTableBackupState[] VALUES = values(); + + public static FullTableBackupState valueOf( + com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "EnumValueDescriptor is not for this type."); + } + return VALUES[desc.getIndex()]; + } + + private final int index; + private final int value; + + private FullTableBackupState(int index, int value) { + this.index = index; + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:hbase.pb.FullTableBackupState) + } + + /** + * Protobuf enum {@code hbase.pb.IncrementalTableBackupState} + */ + public enum IncrementalTableBackupState + implements com.google.protobuf.ProtocolMessageEnum { + /** + * PREPARE_INCREMENTAL = 1; + */ + PREPARE_INCREMENTAL(0, 1), + /** + * INCREMENTAL_COPY = 2; + */ + INCREMENTAL_COPY(1, 2), + /** + * INCR_BACKUP_COMPLETE = 3; + */ + INCR_BACKUP_COMPLETE(2, 3), + ; + + /** + * PREPARE_INCREMENTAL = 1; + */ + public static final int PREPARE_INCREMENTAL_VALUE = 1; + /** + * INCREMENTAL_COPY = 2; + */ + public static final int INCREMENTAL_COPY_VALUE = 2; + /** + * INCR_BACKUP_COMPLETE = 3; + */ + public static final int INCR_BACKUP_COMPLETE_VALUE = 3; + + + public final int getNumber() { return value; } + + public static IncrementalTableBackupState valueOf(int value) { + switch (value) { + case 1: return PREPARE_INCREMENTAL; + case 2: return INCREMENTAL_COPY; + case 3: return INCR_BACKUP_COMPLETE; + default: return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap + internalGetValueMap() { + return internalValueMap; + } + private static com.google.protobuf.Internal.EnumLiteMap + internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public IncrementalTableBackupState findValueByNumber(int number) { + return IncrementalTableBackupState.valueOf(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor + getValueDescriptor() { + return getDescriptor().getValues().get(index); + } + public final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptorForType() { + return getDescriptor(); + } + public static final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.getDescriptor().getEnumTypes().get(1); + } + + private static final IncrementalTableBackupState[] VALUES = values(); + + public static IncrementalTableBackupState valueOf( + com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "EnumValueDescriptor is not for this type."); + } + return VALUES[desc.getIndex()]; + } + + private final int index; + private final int value; + + private IncrementalTableBackupState(int index, int value) { + this.index = index; + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:hbase.pb.IncrementalTableBackupState) + } + + /** + * Protobuf enum {@code hbase.pb.SnapshotTableState} + */ + public enum SnapshotTableState + implements com.google.protobuf.ProtocolMessageEnum { + /** + * SNAPSHOT_TABLE = 1; + */ + SNAPSHOT_TABLE(0, 1), + ; + + /** + * SNAPSHOT_TABLE = 1; + */ + public static final int SNAPSHOT_TABLE_VALUE = 1; + + + public final int getNumber() { return value; } + + 
public static SnapshotTableState valueOf(int value) { + switch (value) { + case 1: return SNAPSHOT_TABLE; + default: return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap + internalGetValueMap() { + return internalValueMap; + } + private static com.google.protobuf.Internal.EnumLiteMap + internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public SnapshotTableState findValueByNumber(int number) { + return SnapshotTableState.valueOf(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor + getValueDescriptor() { + return getDescriptor().getValues().get(index); + } + public final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptorForType() { + return getDescriptor(); + } + public static final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.getDescriptor().getEnumTypes().get(2); + } + + private static final SnapshotTableState[] VALUES = values(); + + public static SnapshotTableState valueOf( + com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "EnumValueDescriptor is not for this type."); + } + return VALUES[desc.getIndex()]; + } + + private final int index; + private final int value; + + private SnapshotTableState(int index, int value) { + this.index = index; + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:hbase.pb.SnapshotTableState) + } + + /** * Protobuf enum {@code hbase.pb.BackupType} */ public enum BackupType @@ -65,7 +329,7 @@ public final class BackupProtos { } public static final com.google.protobuf.Descriptors.EnumDescriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.getDescriptor().getEnumTypes().get(0); + return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.getDescriptor().getEnumTypes().get(3); } private static final BackupType[] VALUES = values(); @@ -90,138 +354,57 @@ public final class BackupProtos { // @@protoc_insertion_point(enum_scope:hbase.pb.BackupType) } - public interface BackupImageOrBuilder + public interface SnapshotTableStateDataOrBuilder extends com.google.protobuf.MessageOrBuilder { - // required string backup_id = 1; - /** - * required string backup_id = 1; - */ - boolean hasBackupId(); - /** - * required string backup_id = 1; - */ - java.lang.String getBackupId(); + // required .hbase.pb.TableName table = 1; /** - * required string backup_id = 1; + * required .hbase.pb.TableName table = 1; */ - com.google.protobuf.ByteString - getBackupIdBytes(); - - // required .hbase.pb.BackupType backup_type = 2; + boolean hasTable(); /** - * required .hbase.pb.BackupType backup_type = 2; + * required .hbase.pb.TableName table = 1; */ - boolean hasBackupType(); + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTable(); /** - * required .hbase.pb.BackupType backup_type = 2; + * required .hbase.pb.TableName table = 1; */ - org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType getBackupType(); + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableOrBuilder(); - // required string root_dir = 3; + // required string snapshotName = 2; /** - * required string root_dir = 3; + * required string snapshotName = 2; */ - boolean hasRootDir(); + boolean hasSnapshotName(); /** - * required string root_dir = 3; + * required string snapshotName = 2; */ - java.lang.String getRootDir(); + 
java.lang.String getSnapshotName(); /** - * required string root_dir = 3; + * required string snapshotName = 2; */ com.google.protobuf.ByteString - getRootDirBytes(); + getSnapshotNameBytes(); + } + /** + * Protobuf type {@code hbase.pb.SnapshotTableStateData} + */ + public static final class SnapshotTableStateData extends + com.google.protobuf.GeneratedMessage + implements SnapshotTableStateDataOrBuilder { + // Use SnapshotTableStateData.newBuilder() to construct. + private SnapshotTableStateData(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private SnapshotTableStateData(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } - // repeated .hbase.pb.TableName table_list = 4; - /** - * repeated .hbase.pb.TableName table_list = 4; - */ - java.util.List - getTableListList(); - /** - * repeated .hbase.pb.TableName table_list = 4; - */ - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableList(int index); - /** - * repeated .hbase.pb.TableName table_list = 4; - */ - int getTableListCount(); - /** - * repeated .hbase.pb.TableName table_list = 4; - */ - java.util.List - getTableListOrBuilderList(); - /** - * repeated .hbase.pb.TableName table_list = 4; - */ - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableListOrBuilder( - int index); + private static final SnapshotTableStateData defaultInstance; + public static SnapshotTableStateData getDefaultInstance() { + return defaultInstance; + } - // required uint64 start_ts = 5; - /** - * required uint64 start_ts = 5; - */ - boolean hasStartTs(); - /** - * required uint64 start_ts = 5; - */ - long getStartTs(); - - // required uint64 complete_ts = 6; - /** - * required uint64 complete_ts = 6; - */ - boolean hasCompleteTs(); - /** - * required uint64 complete_ts = 6; - */ - long getCompleteTs(); - - // repeated .hbase.pb.BackupImage ancestors = 7; - /** - * repeated .hbase.pb.BackupImage ancestors = 7; - */ - java.util.List - getAncestorsList(); - /** - * repeated .hbase.pb.BackupImage ancestors = 7; - */ - org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage getAncestors(int index); - /** - * repeated .hbase.pb.BackupImage ancestors = 7; - */ - int getAncestorsCount(); - /** - * repeated .hbase.pb.BackupImage ancestors = 7; - */ - java.util.List - getAncestorsOrBuilderList(); - /** - * repeated .hbase.pb.BackupImage ancestors = 7; - */ - org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImageOrBuilder getAncestorsOrBuilder( - int index); - } - /** - * Protobuf type {@code hbase.pb.BackupImage} - */ - public static final class BackupImage extends - com.google.protobuf.GeneratedMessage - implements BackupImageOrBuilder { - // Use BackupImage.newBuilder() to construct. 
- private BackupImage(com.google.protobuf.GeneratedMessage.Builder builder) { - super(builder); - this.unknownFields = builder.getUnknownFields(); - } - private BackupImage(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } - - private static final BackupImage defaultInstance; - public static BackupImage getDefaultInstance() { - return defaultInstance; - } - - public BackupImage getDefaultInstanceForType() { + public SnapshotTableStateData getDefaultInstanceForType() { return defaultInstance; } @@ -231,7 +414,7 @@ public final class BackupProtos { getUnknownFields() { return this.unknownFields; } - private BackupImage( + private SnapshotTableStateData( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { @@ -255,50 +438,21 @@ public final class BackupProtos { break; } case 10: { - bitField0_ |= 0x00000001; - backupId_ = input.readBytes(); - break; - } - case 16: { - int rawValue = input.readEnum(); - org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType value = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType.valueOf(rawValue); - if (value == null) { - unknownFields.mergeVarintField(2, rawValue); - } else { - bitField0_ |= 0x00000002; - backupType_ = value; + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder subBuilder = null; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + subBuilder = table_.toBuilder(); } - break; - } - case 26: { - bitField0_ |= 0x00000004; - rootDir_ = input.readBytes(); - break; - } - case 34: { - if (!((mutable_bitField0_ & 0x00000008) == 0x00000008)) { - tableList_ = new java.util.ArrayList(); - mutable_bitField0_ |= 0x00000008; + table_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(table_); + table_ = subBuilder.buildPartial(); } - tableList_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.PARSER, extensionRegistry)); - break; - } - case 40: { - bitField0_ |= 0x00000008; - startTs_ = input.readUInt64(); - break; - } - case 48: { - bitField0_ |= 0x00000010; - completeTs_ = input.readUInt64(); + bitField0_ |= 0x00000001; break; } - case 58: { - if (!((mutable_bitField0_ & 0x00000040) == 0x00000040)) { - ancestors_ = new java.util.ArrayList(); - mutable_bitField0_ |= 0x00000040; - } - ancestors_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage.PARSER, extensionRegistry)); + case 18: { + bitField0_ |= 0x00000002; + snapshotName_ = input.readBytes(); break; } } @@ -309,117 +463,74 @@ public final class BackupProtos { throw new com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { - if (((mutable_bitField0_ & 0x00000008) == 0x00000008)) { - tableList_ = java.util.Collections.unmodifiableList(tableList_); - } - if (((mutable_bitField0_ & 0x00000040) == 0x00000040)) { - ancestors_ = java.util.Collections.unmodifiableList(ancestors_); - } this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_BackupImage_descriptor; + return 
org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_SnapshotTableStateData_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_BackupImage_fieldAccessorTable + return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_SnapshotTableStateData_fieldAccessorTable .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage.class, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage.Builder.class); + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.SnapshotTableStateData.class, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.SnapshotTableStateData.Builder.class); } - public static com.google.protobuf.Parser PARSER = - new com.google.protobuf.AbstractParser() { - public BackupImage parsePartialFrom( + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public SnapshotTableStateData parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { - return new BackupImage(input, extensionRegistry); + return new SnapshotTableStateData(input, extensionRegistry); } }; @java.lang.Override - public com.google.protobuf.Parser getParserForType() { + public com.google.protobuf.Parser getParserForType() { return PARSER; } private int bitField0_; - // required string backup_id = 1; - public static final int BACKUP_ID_FIELD_NUMBER = 1; - private java.lang.Object backupId_; + // required .hbase.pb.TableName table = 1; + public static final int TABLE_FIELD_NUMBER = 1; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName table_; /** - * required string backup_id = 1; + * required .hbase.pb.TableName table = 1; */ - public boolean hasBackupId() { + public boolean hasTable() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** - * required string backup_id = 1; + * required .hbase.pb.TableName table = 1; */ - public java.lang.String getBackupId() { - java.lang.Object ref = backupId_; - if (ref instanceof java.lang.String) { - return (java.lang.String) ref; - } else { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - if (bs.isValidUtf8()) { - backupId_ = s; - } - return s; - } + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTable() { + return table_; } /** - * required string backup_id = 1; + * required .hbase.pb.TableName table = 1; */ - public com.google.protobuf.ByteString - getBackupIdBytes() { - java.lang.Object ref = backupId_; - if (ref instanceof java.lang.String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - backupId_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableOrBuilder() { + return table_; } - // required .hbase.pb.BackupType backup_type = 2; - public static final int BACKUP_TYPE_FIELD_NUMBER = 2; - private org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType backupType_; + // required string snapshotName = 2; + public static final int SNAPSHOTNAME_FIELD_NUMBER = 2; + private java.lang.Object snapshotName_; /** - * required 
.hbase.pb.BackupType backup_type = 2; + * required string snapshotName = 2; */ - public boolean hasBackupType() { + public boolean hasSnapshotName() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** - * required .hbase.pb.BackupType backup_type = 2; - */ - public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType getBackupType() { - return backupType_; - } - - // required string root_dir = 3; - public static final int ROOT_DIR_FIELD_NUMBER = 3; - private java.lang.Object rootDir_; - /** - * required string root_dir = 3; - */ - public boolean hasRootDir() { - return ((bitField0_ & 0x00000004) == 0x00000004); - } - /** - * required string root_dir = 3; + * required string snapshotName = 2; */ - public java.lang.String getRootDir() { - java.lang.Object ref = rootDir_; + public java.lang.String getSnapshotName() { + java.lang.Object ref = snapshotName_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { @@ -427,205 +538,61 @@ public final class BackupProtos { (com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { - rootDir_ = s; + snapshotName_ = s; } return s; } } /** - * required string root_dir = 3; + * required string snapshotName = 2; */ public com.google.protobuf.ByteString - getRootDirBytes() { - java.lang.Object ref = rootDir_; + getSnapshotNameBytes() { + java.lang.Object ref = snapshotName_; if (ref instanceof java.lang.String) { com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); - rootDir_ = b; + snapshotName_ = b; return b; } else { return (com.google.protobuf.ByteString) ref; } } - // repeated .hbase.pb.TableName table_list = 4; - public static final int TABLE_LIST_FIELD_NUMBER = 4; - private java.util.List tableList_; - /** - * repeated .hbase.pb.TableName table_list = 4; - */ - public java.util.List getTableListList() { - return tableList_; - } - /** - * repeated .hbase.pb.TableName table_list = 4; - */ - public java.util.List - getTableListOrBuilderList() { - return tableList_; - } - /** - * repeated .hbase.pb.TableName table_list = 4; - */ - public int getTableListCount() { - return tableList_.size(); - } - /** - * repeated .hbase.pb.TableName table_list = 4; - */ - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableList(int index) { - return tableList_.get(index); + private void initFields() { + table_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); + snapshotName_ = ""; } - /** - * repeated .hbase.pb.TableName table_list = 4; - */ - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableListOrBuilder( - int index) { - return tableList_.get(index); - } - - // required uint64 start_ts = 5; - public static final int START_TS_FIELD_NUMBER = 5; - private long startTs_; - /** - * required uint64 start_ts = 5; - */ - public boolean hasStartTs() { - return ((bitField0_ & 0x00000008) == 0x00000008); - } - /** - * required uint64 start_ts = 5; - */ - public long getStartTs() { - return startTs_; - } - - // required uint64 complete_ts = 6; - public static final int COMPLETE_TS_FIELD_NUMBER = 6; - private long completeTs_; - /** - * required uint64 complete_ts = 6; - */ - public boolean hasCompleteTs() { - return ((bitField0_ & 0x00000010) == 0x00000010); - } - /** - * required uint64 complete_ts = 6; - */ - public long getCompleteTs() { - return completeTs_; - } - - // repeated .hbase.pb.BackupImage ancestors = 7; - public static 
final int ANCESTORS_FIELD_NUMBER = 7; - private java.util.List ancestors_; - /** - * repeated .hbase.pb.BackupImage ancestors = 7; - */ - public java.util.List getAncestorsList() { - return ancestors_; - } - /** - * repeated .hbase.pb.BackupImage ancestors = 7; - */ - public java.util.List - getAncestorsOrBuilderList() { - return ancestors_; - } - /** - * repeated .hbase.pb.BackupImage ancestors = 7; - */ - public int getAncestorsCount() { - return ancestors_.size(); - } - /** - * repeated .hbase.pb.BackupImage ancestors = 7; - */ - public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage getAncestors(int index) { - return ancestors_.get(index); - } - /** - * repeated .hbase.pb.BackupImage ancestors = 7; - */ - public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImageOrBuilder getAncestorsOrBuilder( - int index) { - return ancestors_.get(index); - } - - private void initFields() { - backupId_ = ""; - backupType_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType.FULL; - rootDir_ = ""; - tableList_ = java.util.Collections.emptyList(); - startTs_ = 0L; - completeTs_ = 0L; - ancestors_ = java.util.Collections.emptyList(); - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (!hasBackupId()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasBackupType()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasRootDir()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasStartTs()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasCompleteTs()) { - memoizedIsInitialized = 0; - return false; - } - for (int i = 0; i < getTableListCount(); i++) { - if (!getTableList(i).isInitialized()) { - memoizedIsInitialized = 0; - return false; - } - } - for (int i = 0; i < getAncestorsCount(); i++) { - if (!getAncestors(i).isInitialized()) { - memoizedIsInitialized = 0; - return false; - } - } - memoizedIsInitialized = 1; - return true; + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasTable()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasSnapshotName()) { + memoizedIsInitialized = 0; + return false; + } + if (!getTable().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; } public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeBytes(1, getBackupIdBytes()); + output.writeMessage(1, table_); } if (((bitField0_ & 0x00000002) == 0x00000002)) { - output.writeEnum(2, backupType_.getNumber()); - } - if (((bitField0_ & 0x00000004) == 0x00000004)) { - output.writeBytes(3, getRootDirBytes()); - } - for (int i = 0; i < tableList_.size(); i++) { - output.writeMessage(4, tableList_.get(i)); - } - if (((bitField0_ & 0x00000008) == 0x00000008)) { - output.writeUInt64(5, startTs_); - } - if (((bitField0_ & 0x00000010) == 0x00000010)) { - output.writeUInt64(6, completeTs_); - } - for (int i = 0; i < ancestors_.size(); i++) { - output.writeMessage(7, ancestors_.get(i)); + output.writeBytes(2, getSnapshotNameBytes()); } getUnknownFields().writeTo(output); } @@ -638,31 +605,11 @@ public final class BackupProtos { size = 0; if (((bitField0_ & 0x00000001) == 
0x00000001)) { size += com.google.protobuf.CodedOutputStream - .computeBytesSize(1, getBackupIdBytes()); + .computeMessageSize(1, table_); } if (((bitField0_ & 0x00000002) == 0x00000002)) { size += com.google.protobuf.CodedOutputStream - .computeEnumSize(2, backupType_.getNumber()); - } - if (((bitField0_ & 0x00000004) == 0x00000004)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(3, getRootDirBytes()); - } - for (int i = 0; i < tableList_.size(); i++) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(4, tableList_.get(i)); - } - if (((bitField0_ & 0x00000008) == 0x00000008)) { - size += com.google.protobuf.CodedOutputStream - .computeUInt64Size(5, startTs_); - } - if (((bitField0_ & 0x00000010) == 0x00000010)) { - size += com.google.protobuf.CodedOutputStream - .computeUInt64Size(6, completeTs_); - } - for (int i = 0; i < ancestors_.size(); i++) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(7, ancestors_.get(i)); + .computeBytesSize(2, getSnapshotNameBytes()); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; @@ -681,41 +628,22 @@ public final class BackupProtos { if (obj == this) { return true; } - if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage)) { + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.BackupProtos.SnapshotTableStateData)) { return super.equals(obj); } - org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage other = (org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage) obj; + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.SnapshotTableStateData other = (org.apache.hadoop.hbase.protobuf.generated.BackupProtos.SnapshotTableStateData) obj; boolean result = true; - result = result && (hasBackupId() == other.hasBackupId()); - if (hasBackupId()) { - result = result && getBackupId() - .equals(other.getBackupId()); - } - result = result && (hasBackupType() == other.hasBackupType()); - if (hasBackupType()) { - result = result && - (getBackupType() == other.getBackupType()); - } - result = result && (hasRootDir() == other.hasRootDir()); - if (hasRootDir()) { - result = result && getRootDir() - .equals(other.getRootDir()); - } - result = result && getTableListList() - .equals(other.getTableListList()); - result = result && (hasStartTs() == other.hasStartTs()); - if (hasStartTs()) { - result = result && (getStartTs() - == other.getStartTs()); + result = result && (hasTable() == other.hasTable()); + if (hasTable()) { + result = result && getTable() + .equals(other.getTable()); } - result = result && (hasCompleteTs() == other.hasCompleteTs()); - if (hasCompleteTs()) { - result = result && (getCompleteTs() - == other.getCompleteTs()); + result = result && (hasSnapshotName() == other.hasSnapshotName()); + if (hasSnapshotName()) { + result = result && getSnapshotName() + .equals(other.getSnapshotName()); } - result = result && getAncestorsList() - .equals(other.getAncestorsList()); result = result && getUnknownFields().equals(other.getUnknownFields()); return result; @@ -729,86 +657,66 @@ public final class BackupProtos { } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasBackupId()) { - hash = (37 * hash) + BACKUP_ID_FIELD_NUMBER; - hash = (53 * hash) + getBackupId().hashCode(); - } - if (hasBackupType()) { - hash = (37 * hash) + BACKUP_TYPE_FIELD_NUMBER; - hash = (53 * hash) + hashEnum(getBackupType()); - } - if (hasRootDir()) { - hash = (37 * hash) + 
ROOT_DIR_FIELD_NUMBER; - hash = (53 * hash) + getRootDir().hashCode(); - } - if (getTableListCount() > 0) { - hash = (37 * hash) + TABLE_LIST_FIELD_NUMBER; - hash = (53 * hash) + getTableListList().hashCode(); - } - if (hasStartTs()) { - hash = (37 * hash) + START_TS_FIELD_NUMBER; - hash = (53 * hash) + hashLong(getStartTs()); - } - if (hasCompleteTs()) { - hash = (37 * hash) + COMPLETE_TS_FIELD_NUMBER; - hash = (53 * hash) + hashLong(getCompleteTs()); + if (hasTable()) { + hash = (37 * hash) + TABLE_FIELD_NUMBER; + hash = (53 * hash) + getTable().hashCode(); } - if (getAncestorsCount() > 0) { - hash = (37 * hash) + ANCESTORS_FIELD_NUMBER; - hash = (53 * hash) + getAncestorsList().hashCode(); + if (hasSnapshotName()) { + hash = (37 * hash) + SNAPSHOTNAME_FIELD_NUMBER; + hash = (53 * hash) + getSnapshotName().hashCode(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } - public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.SnapshotTableStateData parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.SnapshotTableStateData parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage parseFrom(byte[] data) + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.SnapshotTableStateData parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.SnapshotTableStateData parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage parseFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.SnapshotTableStateData parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.SnapshotTableStateData parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage parseDelimitedFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.SnapshotTableStateData parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage parseDelimitedFrom( + public static 
org.apache.hadoop.hbase.protobuf.generated.BackupProtos.SnapshotTableStateData parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.SnapshotTableStateData parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.SnapshotTableStateData parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { @@ -817,7 +725,7 @@ public final class BackupProtos { public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage prototype) { + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.SnapshotTableStateData prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @@ -829,24 +737,24 @@ public final class BackupProtos { return builder; } /** - * Protobuf type {@code hbase.pb.BackupImage} + * Protobuf type {@code hbase.pb.SnapshotTableStateData} */ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImageOrBuilder { + implements org.apache.hadoop.hbase.protobuf.generated.BackupProtos.SnapshotTableStateDataOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_BackupImage_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_SnapshotTableStateData_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_BackupImage_fieldAccessorTable + return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_SnapshotTableStateData_fieldAccessorTable .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage.class, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage.Builder.class); + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.SnapshotTableStateData.class, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.SnapshotTableStateData.Builder.class); } - // Construct using org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage.newBuilder() + // Construct using org.apache.hadoop.hbase.protobuf.generated.BackupProtos.SnapshotTableStateData.newBuilder() private Builder() { maybeForceBuilderInitialization(); } @@ -858,8 +766,7 @@ public final class BackupProtos { } private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - getTableListFieldBuilder(); - getAncestorsFieldBuilder(); + getTableFieldBuilder(); } } private static Builder create() { @@ -868,28 +775,14 @@ 
public final class BackupProtos { public Builder clear() { super.clear(); - backupId_ = ""; - bitField0_ = (bitField0_ & ~0x00000001); - backupType_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType.FULL; - bitField0_ = (bitField0_ & ~0x00000002); - rootDir_ = ""; - bitField0_ = (bitField0_ & ~0x00000004); - if (tableListBuilder_ == null) { - tableList_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000008); + if (tableBuilder_ == null) { + table_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); } else { - tableListBuilder_.clear(); - } - startTs_ = 0L; - bitField0_ = (bitField0_ & ~0x00000010); - completeTs_ = 0L; - bitField0_ = (bitField0_ & ~0x00000020); - if (ancestorsBuilder_ == null) { - ancestors_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000040); - } else { - ancestorsBuilder_.clear(); + tableBuilder_.clear(); } + bitField0_ = (bitField0_ & ~0x00000001); + snapshotName_ = ""; + bitField0_ = (bitField0_ & ~0x00000002); return this; } @@ -899,187 +792,78 @@ public final class BackupProtos { public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { - return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_BackupImage_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_SnapshotTableStateData_descriptor; } - public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage getDefaultInstanceForType() { - return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage.getDefaultInstance(); + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.SnapshotTableStateData getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.SnapshotTableStateData.getDefaultInstance(); } - public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage build() { - org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage result = buildPartial(); + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.SnapshotTableStateData build() { + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.SnapshotTableStateData result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } - public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage buildPartial() { - org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage result = new org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage(this); + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.SnapshotTableStateData buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.SnapshotTableStateData result = new org.apache.hadoop.hbase.protobuf.generated.BackupProtos.SnapshotTableStateData(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } - result.backupId_ = backupId_; - if (((from_bitField0_ & 0x00000002) == 0x00000002)) { - to_bitField0_ |= 0x00000002; - } - result.backupType_ = backupType_; - if (((from_bitField0_ & 0x00000004) == 0x00000004)) { - to_bitField0_ |= 0x00000004; - } - result.rootDir_ = rootDir_; - if (tableListBuilder_ == null) { - if (((bitField0_ & 0x00000008) == 0x00000008)) { - tableList_ = java.util.Collections.unmodifiableList(tableList_); - bitField0_ = (bitField0_ & ~0x00000008); - } - result.tableList_ = tableList_; + 
if (tableBuilder_ == null) { + result.table_ = table_; } else { - result.tableList_ = tableListBuilder_.build(); - } - if (((from_bitField0_ & 0x00000010) == 0x00000010)) { - to_bitField0_ |= 0x00000008; - } - result.startTs_ = startTs_; - if (((from_bitField0_ & 0x00000020) == 0x00000020)) { - to_bitField0_ |= 0x00000010; + result.table_ = tableBuilder_.build(); } - result.completeTs_ = completeTs_; - if (ancestorsBuilder_ == null) { - if (((bitField0_ & 0x00000040) == 0x00000040)) { - ancestors_ = java.util.Collections.unmodifiableList(ancestors_); - bitField0_ = (bitField0_ & ~0x00000040); - } - result.ancestors_ = ancestors_; - } else { - result.ancestors_ = ancestorsBuilder_.build(); + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; } + result.snapshotName_ = snapshotName_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage) { - return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage)other); + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.BackupProtos.SnapshotTableStateData) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.BackupProtos.SnapshotTableStateData)other); } else { super.mergeFrom(other); return this; } } - public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage other) { - if (other == org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage.getDefaultInstance()) return this; - if (other.hasBackupId()) { - bitField0_ |= 0x00000001; - backupId_ = other.backupId_; - onChanged(); - } - if (other.hasBackupType()) { - setBackupType(other.getBackupType()); + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.SnapshotTableStateData other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.BackupProtos.SnapshotTableStateData.getDefaultInstance()) return this; + if (other.hasTable()) { + mergeTable(other.getTable()); } - if (other.hasRootDir()) { - bitField0_ |= 0x00000004; - rootDir_ = other.rootDir_; + if (other.hasSnapshotName()) { + bitField0_ |= 0x00000002; + snapshotName_ = other.snapshotName_; onChanged(); } - if (tableListBuilder_ == null) { - if (!other.tableList_.isEmpty()) { - if (tableList_.isEmpty()) { - tableList_ = other.tableList_; - bitField0_ = (bitField0_ & ~0x00000008); - } else { - ensureTableListIsMutable(); - tableList_.addAll(other.tableList_); - } - onChanged(); - } - } else { - if (!other.tableList_.isEmpty()) { - if (tableListBuilder_.isEmpty()) { - tableListBuilder_.dispose(); - tableListBuilder_ = null; - tableList_ = other.tableList_; - bitField0_ = (bitField0_ & ~0x00000008); - tableListBuilder_ = - com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? 
- getTableListFieldBuilder() : null; - } else { - tableListBuilder_.addAllMessages(other.tableList_); - } - } - } - if (other.hasStartTs()) { - setStartTs(other.getStartTs()); - } - if (other.hasCompleteTs()) { - setCompleteTs(other.getCompleteTs()); - } - if (ancestorsBuilder_ == null) { - if (!other.ancestors_.isEmpty()) { - if (ancestors_.isEmpty()) { - ancestors_ = other.ancestors_; - bitField0_ = (bitField0_ & ~0x00000040); - } else { - ensureAncestorsIsMutable(); - ancestors_.addAll(other.ancestors_); - } - onChanged(); - } - } else { - if (!other.ancestors_.isEmpty()) { - if (ancestorsBuilder_.isEmpty()) { - ancestorsBuilder_.dispose(); - ancestorsBuilder_ = null; - ancestors_ = other.ancestors_; - bitField0_ = (bitField0_ & ~0x00000040); - ancestorsBuilder_ = - com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? - getAncestorsFieldBuilder() : null; - } else { - ancestorsBuilder_.addAllMessages(other.ancestors_); - } - } - } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { - if (!hasBackupId()) { - - return false; - } - if (!hasBackupType()) { - - return false; - } - if (!hasRootDir()) { + if (!hasTable()) { return false; } - if (!hasStartTs()) { + if (!hasSnapshotName()) { return false; } - if (!hasCompleteTs()) { + if (!getTable().isInitialized()) { return false; } - for (int i = 0; i < getTableListCount(); i++) { - if (!getTableList(i).isInitialized()) { - - return false; - } - } - for (int i = 0; i < getAncestorsCount(); i++) { - if (!getAncestors(i).isInitialized()) { - - return false; - } - } return true; } @@ -1087,11 +871,11 @@ public final class BackupProtos { com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage parsedMessage = null; + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.SnapshotTableStateData parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage) e.getUnfinishedMessage(); + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.BackupProtos.SnapshotTableStateData) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { @@ -1102,367 +886,1581 @@ public final class BackupProtos { } private int bitField0_; - // required string backup_id = 1; - private java.lang.Object backupId_ = ""; + // required .hbase.pb.TableName table = 1; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName table_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> tableBuilder_; /** - * required string backup_id = 1; + * required .hbase.pb.TableName table = 1; */ - public boolean hasBackupId() { + public boolean hasTable() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** - * required string backup_id = 1; + * required .hbase.pb.TableName table = 1; */ - public java.lang.String getBackupId() { - java.lang.Object ref = backupId_; - if (!(ref instanceof java.lang.String)) { - java.lang.String s = ((com.google.protobuf.ByteString) 
ref) - .toStringUtf8(); - backupId_ = s; - return s; + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTable() { + if (tableBuilder_ == null) { + return table_; } else { - return (java.lang.String) ref; + return tableBuilder_.getMessage(); } } /** - * required string backup_id = 1; + * required .hbase.pb.TableName table = 1; */ - public com.google.protobuf.ByteString - getBackupIdBytes() { - java.lang.Object ref = backupId_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - backupId_ = b; - return b; + public Builder setTable(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { + if (tableBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + table_ = value; + onChanged(); } else { - return (com.google.protobuf.ByteString) ref; + tableBuilder_.setMessage(value); } + bitField0_ |= 0x00000001; + return this; } /** - * required string backup_id = 1; + * required .hbase.pb.TableName table = 1; */ - public Builder setBackupId( - java.lang.String value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000001; - backupId_ = value; - onChanged(); + public Builder setTable( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) { + if (tableBuilder_ == null) { + table_ = builderForValue.build(); + onChanged(); + } else { + tableBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; return this; } /** - * required string backup_id = 1; + * required .hbase.pb.TableName table = 1; */ - public Builder clearBackupId() { - bitField0_ = (bitField0_ & ~0x00000001); - backupId_ = getDefaultInstance().getBackupId(); - onChanged(); + public Builder mergeTable(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { + if (tableBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001) && + table_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance()) { + table_ = + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.newBuilder(table_).mergeFrom(value).buildPartial(); + } else { + table_ = value; + } + onChanged(); + } else { + tableBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000001; return this; } /** - * required string backup_id = 1; + * required .hbase.pb.TableName table = 1; */ - public Builder setBackupIdBytes( - com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000001; - backupId_ = value; - onChanged(); + public Builder clearTable() { + if (tableBuilder_ == null) { + table_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); + onChanged(); + } else { + tableBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); return this; } - - // required .hbase.pb.BackupType backup_type = 2; - private org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType backupType_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType.FULL; - /** - * required .hbase.pb.BackupType backup_type = 2; - */ - public boolean hasBackupType() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } /** - * required .hbase.pb.BackupType backup_type = 2; + * required .hbase.pb.TableName table = 1; */ - public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType getBackupType() { - return backupType_; + public 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder getTableBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return getTableFieldBuilder().getBuilder(); } /** - * required .hbase.pb.BackupType backup_type = 2; + * required .hbase.pb.TableName table = 1; */ - public Builder setBackupType(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType value) { - if (value == null) { - throw new NullPointerException(); + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableOrBuilder() { + if (tableBuilder_ != null) { + return tableBuilder_.getMessageOrBuilder(); + } else { + return table_; } - bitField0_ |= 0x00000002; - backupType_ = value; - onChanged(); - return this; } /** - * required .hbase.pb.BackupType backup_type = 2; + * required .hbase.pb.TableName table = 1; */ - public Builder clearBackupType() { - bitField0_ = (bitField0_ & ~0x00000002); - backupType_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType.FULL; - onChanged(); - return this; + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> + getTableFieldBuilder() { + if (tableBuilder_ == null) { + tableBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder>( + table_, + getParentForChildren(), + isClean()); + table_ = null; + } + return tableBuilder_; } - // required string root_dir = 3; - private java.lang.Object rootDir_ = ""; + // required string snapshotName = 2; + private java.lang.Object snapshotName_ = ""; /** - * required string root_dir = 3; + * required string snapshotName = 2; */ - public boolean hasRootDir() { - return ((bitField0_ & 0x00000004) == 0x00000004); + public boolean hasSnapshotName() { + return ((bitField0_ & 0x00000002) == 0x00000002); } /** - * required string root_dir = 3; + * required string snapshotName = 2; */ - public java.lang.String getRootDir() { - java.lang.Object ref = rootDir_; + public java.lang.String getSnapshotName() { + java.lang.Object ref = snapshotName_; if (!(ref instanceof java.lang.String)) { java.lang.String s = ((com.google.protobuf.ByteString) ref) .toStringUtf8(); - rootDir_ = s; + snapshotName_ = s; return s; } else { return (java.lang.String) ref; } } /** - * required string root_dir = 3; + * required string snapshotName = 2; */ public com.google.protobuf.ByteString - getRootDirBytes() { - java.lang.Object ref = rootDir_; + getSnapshotNameBytes() { + java.lang.Object ref = snapshotName_; if (ref instanceof String) { com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); - rootDir_ = b; + snapshotName_ = b; return b; } else { return (com.google.protobuf.ByteString) ref; } } /** - * required string root_dir = 3; + * required string snapshotName = 2; */ - public Builder setRootDir( + public Builder setSnapshotName( java.lang.String value) { if (value == null) { throw new NullPointerException(); } - bitField0_ |= 0x00000004; - rootDir_ = value; + bitField0_ |= 0x00000002; + snapshotName_ = value; onChanged(); return this; } /** - * required string root_dir = 3; + * required string snapshotName = 2; */ - public Builder 
clearRootDir() { - bitField0_ = (bitField0_ & ~0x00000004); - rootDir_ = getDefaultInstance().getRootDir(); + public Builder clearSnapshotName() { + bitField0_ = (bitField0_ & ~0x00000002); + snapshotName_ = getDefaultInstance().getSnapshotName(); onChanged(); return this; } /** - * required string root_dir = 3; + * required string snapshotName = 2; */ - public Builder setRootDirBytes( + public Builder setSnapshotNameBytes( com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } - bitField0_ |= 0x00000004; - rootDir_ = value; + bitField0_ |= 0x00000002; + snapshotName_ = value; onChanged(); return this; } - // repeated .hbase.pb.TableName table_list = 4; - private java.util.List tableList_ = - java.util.Collections.emptyList(); - private void ensureTableListIsMutable() { - if (!((bitField0_ & 0x00000008) == 0x00000008)) { - tableList_ = new java.util.ArrayList(tableList_); - bitField0_ |= 0x00000008; - } - } + // @@protoc_insertion_point(builder_scope:hbase.pb.SnapshotTableStateData) + } - private com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> tableListBuilder_; + static { + defaultInstance = new SnapshotTableStateData(true); + defaultInstance.initFields(); + } - /** - * repeated .hbase.pb.TableName table_list = 4; - */ - public java.util.List getTableListList() { - if (tableListBuilder_ == null) { - return java.util.Collections.unmodifiableList(tableList_); - } else { - return tableListBuilder_.getMessageList(); - } - } - /** - * repeated .hbase.pb.TableName table_list = 4; - */ - public int getTableListCount() { - if (tableListBuilder_ == null) { - return tableList_.size(); - } else { - return tableListBuilder_.getCount(); - } - } - /** - * repeated .hbase.pb.TableName table_list = 4; - */ - public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableList(int index) { - if (tableListBuilder_ == null) { - return tableList_.get(index); - } else { - return tableListBuilder_.getMessage(index); - } - } - /** - * repeated .hbase.pb.TableName table_list = 4; - */ - public Builder setTableList( - int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { - if (tableListBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureTableListIsMutable(); - tableList_.set(index, value); - onChanged(); - } else { - tableListBuilder_.setMessage(index, value); - } - return this; - } - /** - * repeated .hbase.pb.TableName table_list = 4; - */ - public Builder setTableList( - int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) { - if (tableListBuilder_ == null) { - ensureTableListIsMutable(); - tableList_.set(index, builderForValue.build()); - onChanged(); - } else { - tableListBuilder_.setMessage(index, builderForValue.build()); - } - return this; - } - /** - * repeated .hbase.pb.TableName table_list = 4; - */ - public Builder addTableList(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { - if (tableListBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureTableListIsMutable(); - tableList_.add(value); - onChanged(); - } else { - tableListBuilder_.addMessage(value); - } - return this; - } - /** - * repeated .hbase.pb.TableName table_list = 4; - */ - public Builder 
addTableList( - int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { - if (tableListBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureTableListIsMutable(); - tableList_.add(index, value); - onChanged(); - } else { - tableListBuilder_.addMessage(index, value); - } - return this; - } - /** - * repeated .hbase.pb.TableName table_list = 4; - */ - public Builder addTableList( - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) { - if (tableListBuilder_ == null) { - ensureTableListIsMutable(); - tableList_.add(builderForValue.build()); - onChanged(); - } else { - tableListBuilder_.addMessage(builderForValue.build()); - } - return this; - } - /** - * repeated .hbase.pb.TableName table_list = 4; - */ - public Builder addTableList( - int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) { - if (tableListBuilder_ == null) { - ensureTableListIsMutable(); - tableList_.add(index, builderForValue.build()); - onChanged(); - } else { - tableListBuilder_.addMessage(index, builderForValue.build()); - } - return this; - } - /** - * repeated .hbase.pb.TableName table_list = 4; - */ - public Builder addAllTableList( - java.lang.Iterable values) { - if (tableListBuilder_ == null) { - ensureTableListIsMutable(); - super.addAll(values, tableList_); - onChanged(); - } else { - tableListBuilder_.addAllMessages(values); - } - return this; - } - /** - * repeated .hbase.pb.TableName table_list = 4; - */ - public Builder clearTableList() { - if (tableListBuilder_ == null) { - tableList_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000008); - onChanged(); - } else { - tableListBuilder_.clear(); - } - return this; - } - /** - * repeated .hbase.pb.TableName table_list = 4; - */ - public Builder removeTableList(int index) { - if (tableListBuilder_ == null) { - ensureTableListIsMutable(); - tableList_.remove(index); - onChanged(); - } else { - tableListBuilder_.remove(index); - } - return this; - } - /** - * repeated .hbase.pb.TableName table_list = 4; + // @@protoc_insertion_point(class_scope:hbase.pb.SnapshotTableStateData) + } + + public interface BackupImageOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required string backup_id = 1; + /** + * required string backup_id = 1; + */ + boolean hasBackupId(); + /** + * required string backup_id = 1; + */ + java.lang.String getBackupId(); + /** + * required string backup_id = 1; + */ + com.google.protobuf.ByteString + getBackupIdBytes(); + + // required .hbase.pb.BackupType backup_type = 2; + /** + * required .hbase.pb.BackupType backup_type = 2; + */ + boolean hasBackupType(); + /** + * required .hbase.pb.BackupType backup_type = 2; + */ + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType getBackupType(); + + // required string root_dir = 3; + /** + * required string root_dir = 3; + */ + boolean hasRootDir(); + /** + * required string root_dir = 3; + */ + java.lang.String getRootDir(); + /** + * required string root_dir = 3; + */ + com.google.protobuf.ByteString + getRootDirBytes(); + + // repeated .hbase.pb.TableName table_list = 4; + /** + * repeated .hbase.pb.TableName table_list = 4; + */ + java.util.List + getTableListList(); + /** + * repeated .hbase.pb.TableName table_list = 4; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableList(int index); + /** + * repeated .hbase.pb.TableName table_list = 4; + */ + int 
getTableListCount(); + /** + * repeated .hbase.pb.TableName table_list = 4; + */ + java.util.List + getTableListOrBuilderList(); + /** + * repeated .hbase.pb.TableName table_list = 4; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableListOrBuilder( + int index); + + // required uint64 start_ts = 5; + /** + * required uint64 start_ts = 5; + */ + boolean hasStartTs(); + /** + * required uint64 start_ts = 5; + */ + long getStartTs(); + + // required uint64 complete_ts = 6; + /** + * required uint64 complete_ts = 6; + */ + boolean hasCompleteTs(); + /** + * required uint64 complete_ts = 6; + */ + long getCompleteTs(); + + // repeated .hbase.pb.BackupImage ancestors = 7; + /** + * repeated .hbase.pb.BackupImage ancestors = 7; + */ + java.util.List + getAncestorsList(); + /** + * repeated .hbase.pb.BackupImage ancestors = 7; + */ + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage getAncestors(int index); + /** + * repeated .hbase.pb.BackupImage ancestors = 7; + */ + int getAncestorsCount(); + /** + * repeated .hbase.pb.BackupImage ancestors = 7; + */ + java.util.List + getAncestorsOrBuilderList(); + /** + * repeated .hbase.pb.BackupImage ancestors = 7; + */ + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImageOrBuilder getAncestorsOrBuilder( + int index); + } + /** + * Protobuf type {@code hbase.pb.BackupImage} + */ + public static final class BackupImage extends + com.google.protobuf.GeneratedMessage + implements BackupImageOrBuilder { + // Use BackupImage.newBuilder() to construct. + private BackupImage(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private BackupImage(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final BackupImage defaultInstance; + public static BackupImage getDefaultInstance() { + return defaultInstance; + } + + public BackupImage getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private BackupImage( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + bitField0_ |= 0x00000001; + backupId_ = input.readBytes(); + break; + } + case 16: { + int rawValue = input.readEnum(); + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType value = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType.valueOf(rawValue); + if (value == null) { + unknownFields.mergeVarintField(2, rawValue); + } else { + bitField0_ |= 0x00000002; + backupType_ = value; + } + break; + } + case 26: { + bitField0_ |= 0x00000004; + rootDir_ = input.readBytes(); + break; + } + case 34: { + if (!((mutable_bitField0_ & 0x00000008) == 0x00000008)) { + tableList_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000008; 
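The BackupImage message whose interface and parser are generated above records the metadata of a single backup image: its id, whether it was a FULL or INCREMENTAL backup, the root directory it was written to, the tables it covers, start and completion timestamps, and, for incremental images, the ancestor images it builds on. A minimal sketch of how client code could assemble and round-trip one through this generated builder API follows; the table name, id format, and paths are hypothetical placeholders rather than values taken from this patch.

    import com.google.protobuf.ByteString;
    import org.apache.hadoop.hbase.protobuf.generated.BackupProtos;
    import org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage;
    import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;

    public class BackupImageSketch {
      public static void main(String[] args) throws Exception {
        // Hypothetical table; namespace and qualifier are the two bytes fields of hbase.pb.TableName.
        HBaseProtos.TableName table = HBaseProtos.TableName.newBuilder()
            .setNamespace(ByteString.copyFromUtf8("default"))
            .setQualifier(ByteString.copyFromUtf8("t1"))
            .build();
        long now = System.currentTimeMillis();
        // backup_id, backup_type, root_dir, start_ts and complete_ts are all required fields;
        // build() fails with an uninitialized-message error if any of them is missing.
        BackupImage image = BackupImage.newBuilder()
            .setBackupId("backup_" + now)                 // hypothetical id format
            .setBackupType(BackupProtos.BackupType.FULL)
            .setRootDir("hdfs://cluster/backup")          // hypothetical root dir
            .addTableList(table)
            .setStartTs(now)
            .setCompleteTs(now)
            .build();
        // Round-trip through the wire format to exercise the generated PARSER.
        BackupImage copy = BackupImage.parseFrom(image.toByteArray());
        System.out.println(copy.getBackupId() + " covers " + copy.getTableListCount() + " table(s)");
      }
    }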
+ } + tableList_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.PARSER, extensionRegistry)); + break; + } + case 40: { + bitField0_ |= 0x00000008; + startTs_ = input.readUInt64(); + break; + } + case 48: { + bitField0_ |= 0x00000010; + completeTs_ = input.readUInt64(); + break; + } + case 58: { + if (!((mutable_bitField0_ & 0x00000040) == 0x00000040)) { + ancestors_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000040; + } + ancestors_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage.PARSER, extensionRegistry)); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000008) == 0x00000008)) { + tableList_ = java.util.Collections.unmodifiableList(tableList_); + } + if (((mutable_bitField0_ & 0x00000040) == 0x00000040)) { + ancestors_ = java.util.Collections.unmodifiableList(ancestors_); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_BackupImage_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_BackupImage_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage.class, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public BackupImage parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new BackupImage(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required string backup_id = 1; + public static final int BACKUP_ID_FIELD_NUMBER = 1; + private java.lang.Object backupId_; + /** + * required string backup_id = 1; + */ + public boolean hasBackupId() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string backup_id = 1; + */ + public java.lang.String getBackupId() { + java.lang.Object ref = backupId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + backupId_ = s; + } + return s; + } + } + /** + * required string backup_id = 1; + */ + public com.google.protobuf.ByteString + getBackupIdBytes() { + java.lang.Object ref = backupId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + backupId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + // required .hbase.pb.BackupType backup_type = 2; + public static final int BACKUP_TYPE_FIELD_NUMBER = 2; + private 
org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType backupType_; + /** + * required .hbase.pb.BackupType backup_type = 2; + */ + public boolean hasBackupType() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required .hbase.pb.BackupType backup_type = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType getBackupType() { + return backupType_; + } + + // required string root_dir = 3; + public static final int ROOT_DIR_FIELD_NUMBER = 3; + private java.lang.Object rootDir_; + /** + * required string root_dir = 3; + */ + public boolean hasRootDir() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * required string root_dir = 3; + */ + public java.lang.String getRootDir() { + java.lang.Object ref = rootDir_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + rootDir_ = s; + } + return s; + } + } + /** + * required string root_dir = 3; + */ + public com.google.protobuf.ByteString + getRootDirBytes() { + java.lang.Object ref = rootDir_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + rootDir_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + // repeated .hbase.pb.TableName table_list = 4; + public static final int TABLE_LIST_FIELD_NUMBER = 4; + private java.util.List tableList_; + /** + * repeated .hbase.pb.TableName table_list = 4; + */ + public java.util.List getTableListList() { + return tableList_; + } + /** + * repeated .hbase.pb.TableName table_list = 4; + */ + public java.util.List + getTableListOrBuilderList() { + return tableList_; + } + /** + * repeated .hbase.pb.TableName table_list = 4; + */ + public int getTableListCount() { + return tableList_.size(); + } + /** + * repeated .hbase.pb.TableName table_list = 4; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableList(int index) { + return tableList_.get(index); + } + /** + * repeated .hbase.pb.TableName table_list = 4; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableListOrBuilder( + int index) { + return tableList_.get(index); + } + + // required uint64 start_ts = 5; + public static final int START_TS_FIELD_NUMBER = 5; + private long startTs_; + /** + * required uint64 start_ts = 5; + */ + public boolean hasStartTs() { + return ((bitField0_ & 0x00000008) == 0x00000008); + } + /** + * required uint64 start_ts = 5; + */ + public long getStartTs() { + return startTs_; + } + + // required uint64 complete_ts = 6; + public static final int COMPLETE_TS_FIELD_NUMBER = 6; + private long completeTs_; + /** + * required uint64 complete_ts = 6; + */ + public boolean hasCompleteTs() { + return ((bitField0_ & 0x00000010) == 0x00000010); + } + /** + * required uint64 complete_ts = 6; + */ + public long getCompleteTs() { + return completeTs_; + } + + // repeated .hbase.pb.BackupImage ancestors = 7; + public static final int ANCESTORS_FIELD_NUMBER = 7; + private java.util.List ancestors_; + /** + * repeated .hbase.pb.BackupImage ancestors = 7; + */ + public java.util.List getAncestorsList() { + return ancestors_; + } + /** + * repeated .hbase.pb.BackupImage ancestors = 7; + */ + public java.util.List + getAncestorsOrBuilderList() { + return ancestors_; + } + /** + * 
repeated .hbase.pb.BackupImage ancestors = 7; + */ + public int getAncestorsCount() { + return ancestors_.size(); + } + /** + * repeated .hbase.pb.BackupImage ancestors = 7; + */ + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage getAncestors(int index) { + return ancestors_.get(index); + } + /** + * repeated .hbase.pb.BackupImage ancestors = 7; + */ + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImageOrBuilder getAncestorsOrBuilder( + int index) { + return ancestors_.get(index); + } + + private void initFields() { + backupId_ = ""; + backupType_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType.FULL; + rootDir_ = ""; + tableList_ = java.util.Collections.emptyList(); + startTs_ = 0L; + completeTs_ = 0L; + ancestors_ = java.util.Collections.emptyList(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasBackupId()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasBackupType()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasRootDir()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasStartTs()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasCompleteTs()) { + memoizedIsInitialized = 0; + return false; + } + for (int i = 0; i < getTableListCount(); i++) { + if (!getTableList(i).isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + for (int i = 0; i < getAncestorsCount(); i++) { + if (!getAncestors(i).isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(1, getBackupIdBytes()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeEnum(2, backupType_.getNumber()); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + output.writeBytes(3, getRootDirBytes()); + } + for (int i = 0; i < tableList_.size(); i++) { + output.writeMessage(4, tableList_.get(i)); + } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + output.writeUInt64(5, startTs_); + } + if (((bitField0_ & 0x00000010) == 0x00000010)) { + output.writeUInt64(6, completeTs_); + } + for (int i = 0; i < ancestors_.size(); i++) { + output.writeMessage(7, ancestors_.get(i)); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(1, getBackupIdBytes()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeEnumSize(2, backupType_.getNumber()); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(3, getRootDirBytes()); + } + for (int i = 0; i < tableList_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(4, tableList_.get(i)); + } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt64Size(5, startTs_); + } + if (((bitField0_ & 0x00000010) == 0x00000010)) { + size += 
com.google.protobuf.CodedOutputStream + .computeUInt64Size(6, completeTs_); + } + for (int i = 0; i < ancestors_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(7, ancestors_.get(i)); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage other = (org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage) obj; + + boolean result = true; + result = result && (hasBackupId() == other.hasBackupId()); + if (hasBackupId()) { + result = result && getBackupId() + .equals(other.getBackupId()); + } + result = result && (hasBackupType() == other.hasBackupType()); + if (hasBackupType()) { + result = result && + (getBackupType() == other.getBackupType()); + } + result = result && (hasRootDir() == other.hasRootDir()); + if (hasRootDir()) { + result = result && getRootDir() + .equals(other.getRootDir()); + } + result = result && getTableListList() + .equals(other.getTableListList()); + result = result && (hasStartTs() == other.hasStartTs()); + if (hasStartTs()) { + result = result && (getStartTs() + == other.getStartTs()); + } + result = result && (hasCompleteTs() == other.hasCompleteTs()); + if (hasCompleteTs()) { + result = result && (getCompleteTs() + == other.getCompleteTs()); + } + result = result && getAncestorsList() + .equals(other.getAncestorsList()); + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasBackupId()) { + hash = (37 * hash) + BACKUP_ID_FIELD_NUMBER; + hash = (53 * hash) + getBackupId().hashCode(); + } + if (hasBackupType()) { + hash = (37 * hash) + BACKUP_TYPE_FIELD_NUMBER; + hash = (53 * hash) + hashEnum(getBackupType()); + } + if (hasRootDir()) { + hash = (37 * hash) + ROOT_DIR_FIELD_NUMBER; + hash = (53 * hash) + getRootDir().hashCode(); + } + if (getTableListCount() > 0) { + hash = (37 * hash) + TABLE_LIST_FIELD_NUMBER; + hash = (53 * hash) + getTableListList().hashCode(); + } + if (hasStartTs()) { + hash = (37 * hash) + START_TS_FIELD_NUMBER; + hash = (53 * hash) + hashLong(getStartTs()); + } + if (hasCompleteTs()) { + hash = (37 * hash) + COMPLETE_TS_FIELD_NUMBER; + hash = (53 * hash) + hashLong(getCompleteTs()); + } + if (getAncestorsCount() > 0) { + hash = (37 * hash) + ANCESTORS_FIELD_NUMBER; + hash = (53 * hash) + getAncestorsList().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage parseFrom( + com.google.protobuf.ByteString 
data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code hbase.pb.BackupImage} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImageOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_BackupImage_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_BackupImage_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage.class, 
org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getTableListFieldBuilder(); + getAncestorsFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + backupId_ = ""; + bitField0_ = (bitField0_ & ~0x00000001); + backupType_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType.FULL; + bitField0_ = (bitField0_ & ~0x00000002); + rootDir_ = ""; + bitField0_ = (bitField0_ & ~0x00000004); + if (tableListBuilder_ == null) { + tableList_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000008); + } else { + tableListBuilder_.clear(); + } + startTs_ = 0L; + bitField0_ = (bitField0_ & ~0x00000010); + completeTs_ = 0L; + bitField0_ = (bitField0_ & ~0x00000020); + if (ancestorsBuilder_ == null) { + ancestors_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000040); + } else { + ancestorsBuilder_.clear(); + } + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_BackupImage_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage build() { + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage result = new org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.backupId_ = backupId_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.backupType_ = backupType_; + if (((from_bitField0_ & 0x00000004) == 0x00000004)) { + to_bitField0_ |= 0x00000004; + } + result.rootDir_ = rootDir_; + if (tableListBuilder_ == null) { + if (((bitField0_ & 0x00000008) == 0x00000008)) { + tableList_ = java.util.Collections.unmodifiableList(tableList_); + bitField0_ = (bitField0_ & ~0x00000008); + } + result.tableList_ = tableList_; + } else { + result.tableList_ = tableListBuilder_.build(); + } + if (((from_bitField0_ & 0x00000010) == 0x00000010)) { + to_bitField0_ |= 0x00000008; + } + result.startTs_ = startTs_; + if (((from_bitField0_ & 0x00000020) == 0x00000020)) { + to_bitField0_ |= 0x00000010; + } + result.completeTs_ = completeTs_; + if (ancestorsBuilder_ == null) { + if (((bitField0_ & 0x00000040) == 0x00000040)) { + ancestors_ = 
java.util.Collections.unmodifiableList(ancestors_); + bitField0_ = (bitField0_ & ~0x00000040); + } + result.ancestors_ = ancestors_; + } else { + result.ancestors_ = ancestorsBuilder_.build(); + } + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage.getDefaultInstance()) return this; + if (other.hasBackupId()) { + bitField0_ |= 0x00000001; + backupId_ = other.backupId_; + onChanged(); + } + if (other.hasBackupType()) { + setBackupType(other.getBackupType()); + } + if (other.hasRootDir()) { + bitField0_ |= 0x00000004; + rootDir_ = other.rootDir_; + onChanged(); + } + if (tableListBuilder_ == null) { + if (!other.tableList_.isEmpty()) { + if (tableList_.isEmpty()) { + tableList_ = other.tableList_; + bitField0_ = (bitField0_ & ~0x00000008); + } else { + ensureTableListIsMutable(); + tableList_.addAll(other.tableList_); + } + onChanged(); + } + } else { + if (!other.tableList_.isEmpty()) { + if (tableListBuilder_.isEmpty()) { + tableListBuilder_.dispose(); + tableListBuilder_ = null; + tableList_ = other.tableList_; + bitField0_ = (bitField0_ & ~0x00000008); + tableListBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? + getTableListFieldBuilder() : null; + } else { + tableListBuilder_.addAllMessages(other.tableList_); + } + } + } + if (other.hasStartTs()) { + setStartTs(other.getStartTs()); + } + if (other.hasCompleteTs()) { + setCompleteTs(other.getCompleteTs()); + } + if (ancestorsBuilder_ == null) { + if (!other.ancestors_.isEmpty()) { + if (ancestors_.isEmpty()) { + ancestors_ = other.ancestors_; + bitField0_ = (bitField0_ & ~0x00000040); + } else { + ensureAncestorsIsMutable(); + ancestors_.addAll(other.ancestors_); + } + onChanged(); + } + } else { + if (!other.ancestors_.isEmpty()) { + if (ancestorsBuilder_.isEmpty()) { + ancestorsBuilder_.dispose(); + ancestorsBuilder_ = null; + ancestors_ = other.ancestors_; + bitField0_ = (bitField0_ & ~0x00000040); + ancestorsBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? 
+ getAncestorsFieldBuilder() : null; + } else { + ancestorsBuilder_.addAllMessages(other.ancestors_); + } + } + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasBackupId()) { + + return false; + } + if (!hasBackupType()) { + + return false; + } + if (!hasRootDir()) { + + return false; + } + if (!hasStartTs()) { + + return false; + } + if (!hasCompleteTs()) { + + return false; + } + for (int i = 0; i < getTableListCount(); i++) { + if (!getTableList(i).isInitialized()) { + + return false; + } + } + for (int i = 0; i < getAncestorsCount(); i++) { + if (!getAncestors(i).isInitialized()) { + + return false; + } + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required string backup_id = 1; + private java.lang.Object backupId_ = ""; + /** + * required string backup_id = 1; + */ + public boolean hasBackupId() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string backup_id = 1; + */ + public java.lang.String getBackupId() { + java.lang.Object ref = backupId_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + backupId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * required string backup_id = 1; + */ + public com.google.protobuf.ByteString + getBackupIdBytes() { + java.lang.Object ref = backupId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + backupId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * required string backup_id = 1; + */ + public Builder setBackupId( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + backupId_ = value; + onChanged(); + return this; + } + /** + * required string backup_id = 1; + */ + public Builder clearBackupId() { + bitField0_ = (bitField0_ & ~0x00000001); + backupId_ = getDefaultInstance().getBackupId(); + onChanged(); + return this; + } + /** + * required string backup_id = 1; + */ + public Builder setBackupIdBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + backupId_ = value; + onChanged(); + return this; + } + + // required .hbase.pb.BackupType backup_type = 2; + private org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType backupType_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType.FULL; + /** + * required .hbase.pb.BackupType backup_type = 2; + */ + public boolean hasBackupType() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required .hbase.pb.BackupType backup_type = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType getBackupType() { 
+ return backupType_; + } + /** + * required .hbase.pb.BackupType backup_type = 2; + */ + public Builder setBackupType(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000002; + backupType_ = value; + onChanged(); + return this; + } + /** + * required .hbase.pb.BackupType backup_type = 2; + */ + public Builder clearBackupType() { + bitField0_ = (bitField0_ & ~0x00000002); + backupType_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType.FULL; + onChanged(); + return this; + } + + // required string root_dir = 3; + private java.lang.Object rootDir_ = ""; + /** + * required string root_dir = 3; + */ + public boolean hasRootDir() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * required string root_dir = 3; + */ + public java.lang.String getRootDir() { + java.lang.Object ref = rootDir_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + rootDir_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * required string root_dir = 3; + */ + public com.google.protobuf.ByteString + getRootDirBytes() { + java.lang.Object ref = rootDir_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + rootDir_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * required string root_dir = 3; + */ + public Builder setRootDir( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000004; + rootDir_ = value; + onChanged(); + return this; + } + /** + * required string root_dir = 3; + */ + public Builder clearRootDir() { + bitField0_ = (bitField0_ & ~0x00000004); + rootDir_ = getDefaultInstance().getRootDir(); + onChanged(); + return this; + } + /** + * required string root_dir = 3; + */ + public Builder setRootDirBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000004; + rootDir_ = value; + onChanged(); + return this; + } + + // repeated .hbase.pb.TableName table_list = 4; + private java.util.List tableList_ = + java.util.Collections.emptyList(); + private void ensureTableListIsMutable() { + if (!((bitField0_ & 0x00000008) == 0x00000008)) { + tableList_ = new java.util.ArrayList(tableList_); + bitField0_ |= 0x00000008; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> tableListBuilder_; + + /** + * repeated .hbase.pb.TableName table_list = 4; + */ + public java.util.List getTableListList() { + if (tableListBuilder_ == null) { + return java.util.Collections.unmodifiableList(tableList_); + } else { + return tableListBuilder_.getMessageList(); + } + } + /** + * repeated .hbase.pb.TableName table_list = 4; + */ + public int getTableListCount() { + if (tableListBuilder_ == null) { + return tableList_.size(); + } else { + return tableListBuilder_.getCount(); + } + } + /** + * repeated .hbase.pb.TableName table_list = 4; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableList(int index) { + if (tableListBuilder_ == null) { + return tableList_.get(index); + 
} else { + return tableListBuilder_.getMessage(index); + } + } + /** + * repeated .hbase.pb.TableName table_list = 4; + */ + public Builder setTableList( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { + if (tableListBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureTableListIsMutable(); + tableList_.set(index, value); + onChanged(); + } else { + tableListBuilder_.setMessage(index, value); + } + return this; + } + /** + * repeated .hbase.pb.TableName table_list = 4; + */ + public Builder setTableList( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) { + if (tableListBuilder_ == null) { + ensureTableListIsMutable(); + tableList_.set(index, builderForValue.build()); + onChanged(); + } else { + tableListBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.TableName table_list = 4; + */ + public Builder addTableList(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { + if (tableListBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureTableListIsMutable(); + tableList_.add(value); + onChanged(); + } else { + tableListBuilder_.addMessage(value); + } + return this; + } + /** + * repeated .hbase.pb.TableName table_list = 4; + */ + public Builder addTableList( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { + if (tableListBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureTableListIsMutable(); + tableList_.add(index, value); + onChanged(); + } else { + tableListBuilder_.addMessage(index, value); + } + return this; + } + /** + * repeated .hbase.pb.TableName table_list = 4; + */ + public Builder addTableList( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) { + if (tableListBuilder_ == null) { + ensureTableListIsMutable(); + tableList_.add(builderForValue.build()); + onChanged(); + } else { + tableListBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.TableName table_list = 4; + */ + public Builder addTableList( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) { + if (tableListBuilder_ == null) { + ensureTableListIsMutable(); + tableList_.add(index, builderForValue.build()); + onChanged(); + } else { + tableListBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.TableName table_list = 4; + */ + public Builder addAllTableList( + java.lang.Iterable values) { + if (tableListBuilder_ == null) { + ensureTableListIsMutable(); + super.addAll(values, tableList_); + onChanged(); + } else { + tableListBuilder_.addAllMessages(values); + } + return this; + } + /** + * repeated .hbase.pb.TableName table_list = 4; + */ + public Builder clearTableList() { + if (tableListBuilder_ == null) { + tableList_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000008); + onChanged(); + } else { + tableListBuilder_.clear(); + } + return this; + } + /** + * repeated .hbase.pb.TableName table_list = 4; + */ + public Builder removeTableList(int index) { + if (tableListBuilder_ == null) { + ensureTableListIsMutable(); + tableList_.remove(index); + onChanged(); + } else { + tableListBuilder_.remove(index); + } + return this; + } + /** + * repeated .hbase.pb.TableName table_list = 4; */ public 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder getTableListBuilder( int index) { @@ -7616,1400 +8614,2346 @@ public final class BackupProtos { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; - if (!hasBackupId()) { - memoizedIsInitialized = 0; - return false; + if (!hasBackupId()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasType()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasTargetRootDir()) { + memoizedIsInitialized = 0; + return false; + } + for (int i = 0; i < getTableBackupStatusCount(); i++) { + if (!getTableBackupStatus(i).isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(1, getBackupIdBytes()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeEnum(2, type_.getNumber()); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + output.writeBytes(3, getTargetRootDirBytes()); + } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + output.writeEnum(4, state_.getNumber()); + } + if (((bitField0_ & 0x00000010) == 0x00000010)) { + output.writeEnum(5, phase_.getNumber()); + } + if (((bitField0_ & 0x00000020) == 0x00000020)) { + output.writeBytes(6, getFailedMessageBytes()); + } + for (int i = 0; i < tableBackupStatus_.size(); i++) { + output.writeMessage(7, tableBackupStatus_.get(i)); + } + if (((bitField0_ & 0x00000040) == 0x00000040)) { + output.writeUInt64(8, startTs_); + } + if (((bitField0_ & 0x00000080) == 0x00000080)) { + output.writeUInt64(9, endTs_); + } + if (((bitField0_ & 0x00000100) == 0x00000100)) { + output.writeInt64(10, totalBytesCopied_); + } + if (((bitField0_ & 0x00000200) == 0x00000200)) { + output.writeBytes(11, getHlogTargetDirBytes()); + } + if (((bitField0_ & 0x00000400) == 0x00000400)) { + output.writeUInt32(12, progress_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(1, getBackupIdBytes()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeEnumSize(2, type_.getNumber()); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(3, getTargetRootDirBytes()); + } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + size += com.google.protobuf.CodedOutputStream + .computeEnumSize(4, state_.getNumber()); + } + if (((bitField0_ & 0x00000010) == 0x00000010)) { + size += com.google.protobuf.CodedOutputStream + .computeEnumSize(5, phase_.getNumber()); + } + if (((bitField0_ & 0x00000020) == 0x00000020)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(6, getFailedMessageBytes()); + } + for (int i = 0; i < tableBackupStatus_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(7, tableBackupStatus_.get(i)); + } + if (((bitField0_ & 0x00000040) == 0x00000040)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt64Size(8, startTs_); + } + if (((bitField0_ & 0x00000080) == 0x00000080)) { + size += 
com.google.protobuf.CodedOutputStream + .computeUInt64Size(9, endTs_); + } + if (((bitField0_ & 0x00000100) == 0x00000100)) { + size += com.google.protobuf.CodedOutputStream + .computeInt64Size(10, totalBytesCopied_); + } + if (((bitField0_ & 0x00000200) == 0x00000200)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(11, getHlogTargetDirBytes()); + } + if (((bitField0_ & 0x00000400) == 0x00000400)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt32Size(12, progress_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext other = (org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext) obj; + + boolean result = true; + result = result && (hasBackupId() == other.hasBackupId()); + if (hasBackupId()) { + result = result && getBackupId() + .equals(other.getBackupId()); } - if (!hasType()) { - memoizedIsInitialized = 0; - return false; + result = result && (hasType() == other.hasType()); + if (hasType()) { + result = result && + (getType() == other.getType()); } - if (!hasTargetRootDir()) { - memoizedIsInitialized = 0; - return false; + result = result && (hasTargetRootDir() == other.hasTargetRootDir()); + if (hasTargetRootDir()) { + result = result && getTargetRootDir() + .equals(other.getTargetRootDir()); } - for (int i = 0; i < getTableBackupStatusCount(); i++) { - if (!getTableBackupStatus(i).isInitialized()) { - memoizedIsInitialized = 0; - return false; - } + result = result && (hasState() == other.hasState()); + if (hasState()) { + result = result && + (getState() == other.getState()); } - memoizedIsInitialized = 1; - return true; + result = result && (hasPhase() == other.hasPhase()); + if (hasPhase()) { + result = result && + (getPhase() == other.getPhase()); + } + result = result && (hasFailedMessage() == other.hasFailedMessage()); + if (hasFailedMessage()) { + result = result && getFailedMessage() + .equals(other.getFailedMessage()); + } + result = result && getTableBackupStatusList() + .equals(other.getTableBackupStatusList()); + result = result && (hasStartTs() == other.hasStartTs()); + if (hasStartTs()) { + result = result && (getStartTs() + == other.getStartTs()); + } + result = result && (hasEndTs() == other.hasEndTs()); + if (hasEndTs()) { + result = result && (getEndTs() + == other.getEndTs()); + } + result = result && (hasTotalBytesCopied() == other.hasTotalBytesCopied()); + if (hasTotalBytesCopied()) { + result = result && (getTotalBytesCopied() + == other.getTotalBytesCopied()); + } + result = result && (hasHlogTargetDir() == other.hasHlogTargetDir()); + if (hasHlogTargetDir()) { + result = result && getHlogTargetDir() + .equals(other.getHlogTargetDir()); + } + result = result && (hasProgress() == other.hasProgress()); + if (hasProgress()) { + result = result && (getProgress() + == other.getProgress()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; } - public void writeTo(com.google.protobuf.CodedOutputStream output) - 
throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeBytes(1, getBackupIdBytes()); + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - output.writeEnum(2, type_.getNumber()); + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasBackupId()) { + hash = (37 * hash) + BACKUP_ID_FIELD_NUMBER; + hash = (53 * hash) + getBackupId().hashCode(); } - if (((bitField0_ & 0x00000004) == 0x00000004)) { - output.writeBytes(3, getTargetRootDirBytes()); + if (hasType()) { + hash = (37 * hash) + TYPE_FIELD_NUMBER; + hash = (53 * hash) + hashEnum(getType()); } - if (((bitField0_ & 0x00000008) == 0x00000008)) { - output.writeEnum(4, state_.getNumber()); + if (hasTargetRootDir()) { + hash = (37 * hash) + TARGET_ROOT_DIR_FIELD_NUMBER; + hash = (53 * hash) + getTargetRootDir().hashCode(); } - if (((bitField0_ & 0x00000010) == 0x00000010)) { - output.writeEnum(5, phase_.getNumber()); + if (hasState()) { + hash = (37 * hash) + STATE_FIELD_NUMBER; + hash = (53 * hash) + hashEnum(getState()); } - if (((bitField0_ & 0x00000020) == 0x00000020)) { - output.writeBytes(6, getFailedMessageBytes()); + if (hasPhase()) { + hash = (37 * hash) + PHASE_FIELD_NUMBER; + hash = (53 * hash) + hashEnum(getPhase()); } - for (int i = 0; i < tableBackupStatus_.size(); i++) { - output.writeMessage(7, tableBackupStatus_.get(i)); + if (hasFailedMessage()) { + hash = (37 * hash) + FAILED_MESSAGE_FIELD_NUMBER; + hash = (53 * hash) + getFailedMessage().hashCode(); } - if (((bitField0_ & 0x00000040) == 0x00000040)) { - output.writeUInt64(8, startTs_); + if (getTableBackupStatusCount() > 0) { + hash = (37 * hash) + TABLE_BACKUP_STATUS_FIELD_NUMBER; + hash = (53 * hash) + getTableBackupStatusList().hashCode(); } - if (((bitField0_ & 0x00000080) == 0x00000080)) { - output.writeUInt64(9, endTs_); + if (hasStartTs()) { + hash = (37 * hash) + START_TS_FIELD_NUMBER; + hash = (53 * hash) + hashLong(getStartTs()); } - if (((bitField0_ & 0x00000100) == 0x00000100)) { - output.writeInt64(10, totalBytesCopied_); + if (hasEndTs()) { + hash = (37 * hash) + END_TS_FIELD_NUMBER; + hash = (53 * hash) + hashLong(getEndTs()); } - if (((bitField0_ & 0x00000200) == 0x00000200)) { - output.writeBytes(11, getHlogTargetDirBytes()); + if (hasTotalBytesCopied()) { + hash = (37 * hash) + TOTAL_BYTES_COPIED_FIELD_NUMBER; + hash = (53 * hash) + hashLong(getTotalBytesCopied()); } - if (((bitField0_ & 0x00000400) == 0x00000400)) { - output.writeUInt32(12, progress_); + if (hasHlogTargetDir()) { + hash = (37 * hash) + HLOG_TARGET_DIR_FIELD_NUMBER; + hash = (53 * hash) + getHlogTargetDir().hashCode(); } - getUnknownFields().writeTo(output); + if (hasProgress()) { + hash = (37 * hash) + PROGRESS_FIELD_NUMBER; + hash = (53 * hash) + getProgress(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; } - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext parseFrom( + 
com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(1, getBackupIdBytes()); + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code hbase.pb.BackupContext} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContextOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_BackupContext_descriptor; } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - size += com.google.protobuf.CodedOutputStream - .computeEnumSize(2, type_.getNumber()); + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable 
+ internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_BackupContext_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext.class, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext.Builder.class); } - if (((bitField0_ & 0x00000004) == 0x00000004)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(3, getTargetRootDirBytes()); + + // Construct using org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); } - if (((bitField0_ & 0x00000008) == 0x00000008)) { - size += com.google.protobuf.CodedOutputStream - .computeEnumSize(4, state_.getNumber()); + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); } - if (((bitField0_ & 0x00000010) == 0x00000010)) { - size += com.google.protobuf.CodedOutputStream - .computeEnumSize(5, phase_.getNumber()); + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getTableBackupStatusFieldBuilder(); + } } - if (((bitField0_ & 0x00000020) == 0x00000020)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(6, getFailedMessageBytes()); + private static Builder create() { + return new Builder(); } - for (int i = 0; i < tableBackupStatus_.size(); i++) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(7, tableBackupStatus_.get(i)); + + public Builder clear() { + super.clear(); + backupId_ = ""; + bitField0_ = (bitField0_ & ~0x00000001); + type_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType.FULL; + bitField0_ = (bitField0_ & ~0x00000002); + targetRootDir_ = ""; + bitField0_ = (bitField0_ & ~0x00000004); + state_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext.BackupState.WAITING; + bitField0_ = (bitField0_ & ~0x00000008); + phase_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext.BackupPhase.REQUEST; + bitField0_ = (bitField0_ & ~0x00000010); + failedMessage_ = ""; + bitField0_ = (bitField0_ & ~0x00000020); + if (tableBackupStatusBuilder_ == null) { + tableBackupStatus_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000040); + } else { + tableBackupStatusBuilder_.clear(); + } + startTs_ = 0L; + bitField0_ = (bitField0_ & ~0x00000080); + endTs_ = 0L; + bitField0_ = (bitField0_ & ~0x00000100); + totalBytesCopied_ = 0L; + bitField0_ = (bitField0_ & ~0x00000200); + hlogTargetDir_ = ""; + bitField0_ = (bitField0_ & ~0x00000400); + progress_ = 0; + bitField0_ = (bitField0_ & ~0x00000800); + return this; } - if (((bitField0_ & 0x00000040) == 0x00000040)) { - size += com.google.protobuf.CodedOutputStream - .computeUInt64Size(8, startTs_); + + public Builder clone() { + return create().mergeFrom(buildPartial()); } - if (((bitField0_ & 0x00000080) == 0x00000080)) { - size += com.google.protobuf.CodedOutputStream - .computeUInt64Size(9, endTs_); + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_BackupContext_descriptor; } - if (((bitField0_ & 0x00000100) == 0x00000100)) { - size += com.google.protobuf.CodedOutputStream - .computeInt64Size(10, totalBytesCopied_); + + public 
org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext.getDefaultInstance(); } - if (((bitField0_ & 0x00000200) == 0x00000200)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(11, getHlogTargetDirBytes()); + + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext build() { + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; } - if (((bitField0_ & 0x00000400) == 0x00000400)) { - size += com.google.protobuf.CodedOutputStream - .computeUInt32Size(12, progress_); + + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext result = new org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.backupId_ = backupId_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.type_ = type_; + if (((from_bitField0_ & 0x00000004) == 0x00000004)) { + to_bitField0_ |= 0x00000004; + } + result.targetRootDir_ = targetRootDir_; + if (((from_bitField0_ & 0x00000008) == 0x00000008)) { + to_bitField0_ |= 0x00000008; + } + result.state_ = state_; + if (((from_bitField0_ & 0x00000010) == 0x00000010)) { + to_bitField0_ |= 0x00000010; + } + result.phase_ = phase_; + if (((from_bitField0_ & 0x00000020) == 0x00000020)) { + to_bitField0_ |= 0x00000020; + } + result.failedMessage_ = failedMessage_; + if (tableBackupStatusBuilder_ == null) { + if (((bitField0_ & 0x00000040) == 0x00000040)) { + tableBackupStatus_ = java.util.Collections.unmodifiableList(tableBackupStatus_); + bitField0_ = (bitField0_ & ~0x00000040); + } + result.tableBackupStatus_ = tableBackupStatus_; + } else { + result.tableBackupStatus_ = tableBackupStatusBuilder_.build(); + } + if (((from_bitField0_ & 0x00000080) == 0x00000080)) { + to_bitField0_ |= 0x00000040; + } + result.startTs_ = startTs_; + if (((from_bitField0_ & 0x00000100) == 0x00000100)) { + to_bitField0_ |= 0x00000080; + } + result.endTs_ = endTs_; + if (((from_bitField0_ & 0x00000200) == 0x00000200)) { + to_bitField0_ |= 0x00000100; + } + result.totalBytesCopied_ = totalBytesCopied_; + if (((from_bitField0_ & 0x00000400) == 0x00000400)) { + to_bitField0_ |= 0x00000200; + } + result.hlogTargetDir_ = hlogTargetDir_; + if (((from_bitField0_ & 0x00000800) == 0x00000800)) { + to_bitField0_ |= 0x00000400; + } + result.progress_ = progress_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext)other); + } else { + super.mergeFrom(other); + return this; + } + } - @java.lang.Override - public boolean 
equals(final java.lang.Object obj) { - if (obj == this) { - return true; + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext.getDefaultInstance()) return this; + if (other.hasBackupId()) { + bitField0_ |= 0x00000001; + backupId_ = other.backupId_; + onChanged(); + } + if (other.hasType()) { + setType(other.getType()); + } + if (other.hasTargetRootDir()) { + bitField0_ |= 0x00000004; + targetRootDir_ = other.targetRootDir_; + onChanged(); + } + if (other.hasState()) { + setState(other.getState()); + } + if (other.hasPhase()) { + setPhase(other.getPhase()); + } + if (other.hasFailedMessage()) { + bitField0_ |= 0x00000020; + failedMessage_ = other.failedMessage_; + onChanged(); + } + if (tableBackupStatusBuilder_ == null) { + if (!other.tableBackupStatus_.isEmpty()) { + if (tableBackupStatus_.isEmpty()) { + tableBackupStatus_ = other.tableBackupStatus_; + bitField0_ = (bitField0_ & ~0x00000040); + } else { + ensureTableBackupStatusIsMutable(); + tableBackupStatus_.addAll(other.tableBackupStatus_); + } + onChanged(); + } + } else { + if (!other.tableBackupStatus_.isEmpty()) { + if (tableBackupStatusBuilder_.isEmpty()) { + tableBackupStatusBuilder_.dispose(); + tableBackupStatusBuilder_ = null; + tableBackupStatus_ = other.tableBackupStatus_; + bitField0_ = (bitField0_ & ~0x00000040); + tableBackupStatusBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? + getTableBackupStatusFieldBuilder() : null; + } else { + tableBackupStatusBuilder_.addAllMessages(other.tableBackupStatus_); + } + } + } + if (other.hasStartTs()) { + setStartTs(other.getStartTs()); + } + if (other.hasEndTs()) { + setEndTs(other.getEndTs()); + } + if (other.hasTotalBytesCopied()) { + setTotalBytesCopied(other.getTotalBytesCopied()); + } + if (other.hasHlogTargetDir()) { + bitField0_ |= 0x00000400; + hlogTargetDir_ = other.hlogTargetDir_; + onChanged(); + } + if (other.hasProgress()) { + setProgress(other.getProgress()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; } - if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext)) { - return super.equals(obj); + + public final boolean isInitialized() { + if (!hasBackupId()) { + + return false; + } + if (!hasType()) { + + return false; + } + if (!hasTargetRootDir()) { + + return false; + } + for (int i = 0; i < getTableBackupStatusCount(); i++) { + if (!getTableBackupStatus(i).isInitialized()) { + + return false; + } + } + return true; } - org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext other = (org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext) obj; - boolean result = true; - result = result && (hasBackupId() == other.hasBackupId()); - if (hasBackupId()) { - result = result && getBackupId() - .equals(other.getBackupId()); + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + 
} + return this; } - result = result && (hasType() == other.hasType()); - if (hasType()) { - result = result && - (getType() == other.getType()); + private int bitField0_; + + // required string backup_id = 1; + private java.lang.Object backupId_ = ""; + /** + * required string backup_id = 1; + */ + public boolean hasBackupId() { + return ((bitField0_ & 0x00000001) == 0x00000001); } - result = result && (hasTargetRootDir() == other.hasTargetRootDir()); - if (hasTargetRootDir()) { - result = result && getTargetRootDir() - .equals(other.getTargetRootDir()); + /** + * required string backup_id = 1; + */ + public java.lang.String getBackupId() { + java.lang.Object ref = backupId_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + backupId_ = s; + return s; + } else { + return (java.lang.String) ref; + } } - result = result && (hasState() == other.hasState()); - if (hasState()) { - result = result && - (getState() == other.getState()); + /** + * required string backup_id = 1; + */ + public com.google.protobuf.ByteString + getBackupIdBytes() { + java.lang.Object ref = backupId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + backupId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } } - result = result && (hasPhase() == other.hasPhase()); - if (hasPhase()) { - result = result && - (getPhase() == other.getPhase()); + /** + * required string backup_id = 1; + */ + public Builder setBackupId( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + backupId_ = value; + onChanged(); + return this; } - result = result && (hasFailedMessage() == other.hasFailedMessage()); - if (hasFailedMessage()) { - result = result && getFailedMessage() - .equals(other.getFailedMessage()); + /** + * required string backup_id = 1; + */ + public Builder clearBackupId() { + bitField0_ = (bitField0_ & ~0x00000001); + backupId_ = getDefaultInstance().getBackupId(); + onChanged(); + return this; } - result = result && getTableBackupStatusList() - .equals(other.getTableBackupStatusList()); - result = result && (hasStartTs() == other.hasStartTs()); - if (hasStartTs()) { - result = result && (getStartTs() - == other.getStartTs()); + /** + * required string backup_id = 1; + */ + public Builder setBackupIdBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + backupId_ = value; + onChanged(); + return this; } - result = result && (hasEndTs() == other.hasEndTs()); - if (hasEndTs()) { - result = result && (getEndTs() - == other.getEndTs()); + + // required .hbase.pb.BackupType type = 2; + private org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType type_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType.FULL; + /** + * required .hbase.pb.BackupType type = 2; + */ + public boolean hasType() { + return ((bitField0_ & 0x00000002) == 0x00000002); } - result = result && (hasTotalBytesCopied() == other.hasTotalBytesCopied()); - if (hasTotalBytesCopied()) { - result = result && (getTotalBytesCopied() - == other.getTotalBytesCopied()); + /** + * required .hbase.pb.BackupType type = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType getType() { + return type_; } - result = result && (hasHlogTargetDir() == 
other.hasHlogTargetDir()); - if (hasHlogTargetDir()) { - result = result && getHlogTargetDir() - .equals(other.getHlogTargetDir()); + /** + * required .hbase.pb.BackupType type = 2; + */ + public Builder setType(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000002; + type_ = value; + onChanged(); + return this; } - result = result && (hasProgress() == other.hasProgress()); - if (hasProgress()) { - result = result && (getProgress() - == other.getProgress()); + /** + * required .hbase.pb.BackupType type = 2; + */ + public Builder clearType() { + bitField0_ = (bitField0_ & ~0x00000002); + type_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType.FULL; + onChanged(); + return this; } - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - private int memoizedHashCode = 0; - @java.lang.Override - public int hashCode() { - if (memoizedHashCode != 0) { - return memoizedHashCode; - } - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasBackupId()) { - hash = (37 * hash) + BACKUP_ID_FIELD_NUMBER; - hash = (53 * hash) + getBackupId().hashCode(); - } - if (hasType()) { - hash = (37 * hash) + TYPE_FIELD_NUMBER; - hash = (53 * hash) + hashEnum(getType()); - } - if (hasTargetRootDir()) { - hash = (37 * hash) + TARGET_ROOT_DIR_FIELD_NUMBER; - hash = (53 * hash) + getTargetRootDir().hashCode(); + // required string target_root_dir = 3; + private java.lang.Object targetRootDir_ = ""; + /** + * required string target_root_dir = 3; + */ + public boolean hasTargetRootDir() { + return ((bitField0_ & 0x00000004) == 0x00000004); } - if (hasState()) { - hash = (37 * hash) + STATE_FIELD_NUMBER; - hash = (53 * hash) + hashEnum(getState()); + /** + * required string target_root_dir = 3; + */ + public java.lang.String getTargetRootDir() { + java.lang.Object ref = targetRootDir_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + targetRootDir_ = s; + return s; + } else { + return (java.lang.String) ref; + } } - if (hasPhase()) { - hash = (37 * hash) + PHASE_FIELD_NUMBER; - hash = (53 * hash) + hashEnum(getPhase()); + /** + * required string target_root_dir = 3; + */ + public com.google.protobuf.ByteString + getTargetRootDirBytes() { + java.lang.Object ref = targetRootDir_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + targetRootDir_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } } - if (hasFailedMessage()) { - hash = (37 * hash) + FAILED_MESSAGE_FIELD_NUMBER; - hash = (53 * hash) + getFailedMessage().hashCode(); + /** + * required string target_root_dir = 3; + */ + public Builder setTargetRootDir( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000004; + targetRootDir_ = value; + onChanged(); + return this; } - if (getTableBackupStatusCount() > 0) { - hash = (37 * hash) + TABLE_BACKUP_STATUS_FIELD_NUMBER; - hash = (53 * hash) + getTableBackupStatusList().hashCode(); + /** + * required string target_root_dir = 3; + */ + public Builder clearTargetRootDir() { + bitField0_ = (bitField0_ & ~0x00000004); + targetRootDir_ = getDefaultInstance().getTargetRootDir(); + onChanged(); + return this; } - if (hasStartTs()) { - hash = (37 * hash) + 
START_TS_FIELD_NUMBER; - hash = (53 * hash) + hashLong(getStartTs()); + /** + * required string target_root_dir = 3; + */ + public Builder setTargetRootDirBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000004; + targetRootDir_ = value; + onChanged(); + return this; } - if (hasEndTs()) { - hash = (37 * hash) + END_TS_FIELD_NUMBER; - hash = (53 * hash) + hashLong(getEndTs()); + + // optional .hbase.pb.BackupContext.BackupState state = 4; + private org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext.BackupState state_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext.BackupState.WAITING; + /** + * optional .hbase.pb.BackupContext.BackupState state = 4; + */ + public boolean hasState() { + return ((bitField0_ & 0x00000008) == 0x00000008); } - if (hasTotalBytesCopied()) { - hash = (37 * hash) + TOTAL_BYTES_COPIED_FIELD_NUMBER; - hash = (53 * hash) + hashLong(getTotalBytesCopied()); + /** + * optional .hbase.pb.BackupContext.BackupState state = 4; + */ + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext.BackupState getState() { + return state_; } - if (hasHlogTargetDir()) { - hash = (37 * hash) + HLOG_TARGET_DIR_FIELD_NUMBER; - hash = (53 * hash) + getHlogTargetDir().hashCode(); + /** + * optional .hbase.pb.BackupContext.BackupState state = 4; + */ + public Builder setState(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext.BackupState value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000008; + state_ = value; + onChanged(); + return this; } - if (hasProgress()) { - hash = (37 * hash) + PROGRESS_FIELD_NUMBER; - hash = (53 * hash) + getProgress(); + /** + * optional .hbase.pb.BackupContext.BackupState state = 4; + */ + public Builder clearState() { + bitField0_ = (bitField0_ & ~0x00000008); + state_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext.BackupState.WAITING; + onChanged(); + return this; } - hash = (29 * hash) + getUnknownFields().hashCode(); - memoizedHashCode = hash; - return hash; - } - - public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext parseFrom(java.io.InputStream input) - throws java.io.IOException { - return PARSER.parseFrom(input); - } - public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext parseFrom( - java.io.InputStream input, - 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseFrom(input, extensionRegistry); - } - public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - return PARSER.parseDelimitedFrom(input); - } - public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseDelimitedFrom(input, extensionRegistry); - } - public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return PARSER.parseFrom(input); - } - public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseFrom(input, extensionRegistry); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - /** - * Protobuf type {@code hbase.pb.BackupContext} - */ - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContextOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_BackupContext_descriptor; + // optional .hbase.pb.BackupContext.BackupPhase phase = 5; + private org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext.BackupPhase phase_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext.BackupPhase.REQUEST; + /** + * optional .hbase.pb.BackupContext.BackupPhase phase = 5; + */ + public boolean hasPhase() { + return ((bitField0_ & 0x00000010) == 0x00000010); } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_BackupContext_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext.class, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext.Builder.class); + /** + * optional .hbase.pb.BackupContext.BackupPhase phase = 5; + */ + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext.BackupPhase getPhase() { + return phase_; } - - // Construct using org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); + /** + * optional .hbase.pb.BackupContext.BackupPhase phase = 5; + */ + public Builder setPhase(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext.BackupPhase value) { + if (value == null) { 
+ throw new NullPointerException(); + } + bitField0_ |= 0x00000010; + phase_ = value; + onChanged(); + return this; + } + /** + * optional .hbase.pb.BackupContext.BackupPhase phase = 5; + */ + public Builder clearPhase() { + bitField0_ = (bitField0_ & ~0x00000010); + phase_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext.BackupPhase.REQUEST; + onChanged(); + return this; } - private Builder( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); + // optional string failed_message = 6; + private java.lang.Object failedMessage_ = ""; + /** + * optional string failed_message = 6; + */ + public boolean hasFailedMessage() { + return ((bitField0_ & 0x00000020) == 0x00000020); } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - getTableBackupStatusFieldBuilder(); + /** + * optional string failed_message = 6; + */ + public java.lang.String getFailedMessage() { + java.lang.Object ref = failedMessage_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + failedMessage_ = s; + return s; + } else { + return (java.lang.String) ref; } } - private static Builder create() { - return new Builder(); + /** + * optional string failed_message = 6; + */ + public com.google.protobuf.ByteString + getFailedMessageBytes() { + java.lang.Object ref = failedMessage_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + failedMessage_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } } - - public Builder clear() { - super.clear(); - backupId_ = ""; - bitField0_ = (bitField0_ & ~0x00000001); - type_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType.FULL; - bitField0_ = (bitField0_ & ~0x00000002); - targetRootDir_ = ""; - bitField0_ = (bitField0_ & ~0x00000004); - state_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext.BackupState.WAITING; - bitField0_ = (bitField0_ & ~0x00000008); - phase_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext.BackupPhase.REQUEST; - bitField0_ = (bitField0_ & ~0x00000010); - failedMessage_ = ""; + /** + * optional string failed_message = 6; + */ + public Builder setFailedMessage( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000020; + failedMessage_ = value; + onChanged(); + return this; + } + /** + * optional string failed_message = 6; + */ + public Builder clearFailedMessage() { bitField0_ = (bitField0_ & ~0x00000020); - if (tableBackupStatusBuilder_ == null) { - tableBackupStatus_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000040); - } else { - tableBackupStatusBuilder_.clear(); - } - startTs_ = 0L; - bitField0_ = (bitField0_ & ~0x00000080); - endTs_ = 0L; - bitField0_ = (bitField0_ & ~0x00000100); - totalBytesCopied_ = 0L; - bitField0_ = (bitField0_ & ~0x00000200); - hlogTargetDir_ = ""; - bitField0_ = (bitField0_ & ~0x00000400); - progress_ = 0; - bitField0_ = (bitField0_ & ~0x00000800); + failedMessage_ = getDefaultInstance().getFailedMessage(); + onChanged(); return this; } - - public Builder clone() { - return create().mergeFrom(buildPartial()); + /** + * optional string failed_message = 6; + */ + public Builder setFailedMessageBytes( + com.google.protobuf.ByteString value) { + if 
(value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000020; + failedMessage_ = value; + onChanged(); + return this; } - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_BackupContext_descriptor; + // repeated .hbase.pb.TableBackupStatus table_backup_status = 7; + private java.util.List tableBackupStatus_ = + java.util.Collections.emptyList(); + private void ensureTableBackupStatusIsMutable() { + if (!((bitField0_ & 0x00000040) == 0x00000040)) { + tableBackupStatus_ = new java.util.ArrayList(tableBackupStatus_); + bitField0_ |= 0x00000040; + } } - public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext getDefaultInstanceForType() { - return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext.getDefaultInstance(); - } + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus.Builder, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatusOrBuilder> tableBackupStatusBuilder_; - public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext build() { - org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); + /** + * repeated .hbase.pb.TableBackupStatus table_backup_status = 7; + */ + public java.util.List getTableBackupStatusList() { + if (tableBackupStatusBuilder_ == null) { + return java.util.Collections.unmodifiableList(tableBackupStatus_); + } else { + return tableBackupStatusBuilder_.getMessageList(); } - return result; } - - public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext buildPartial() { - org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext result = new org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - result.backupId_ = backupId_; - if (((from_bitField0_ & 0x00000002) == 0x00000002)) { - to_bitField0_ |= 0x00000002; - } - result.type_ = type_; - if (((from_bitField0_ & 0x00000004) == 0x00000004)) { - to_bitField0_ |= 0x00000004; - } - result.targetRootDir_ = targetRootDir_; - if (((from_bitField0_ & 0x00000008) == 0x00000008)) { - to_bitField0_ |= 0x00000008; - } - result.state_ = state_; - if (((from_bitField0_ & 0x00000010) == 0x00000010)) { - to_bitField0_ |= 0x00000010; - } - result.phase_ = phase_; - if (((from_bitField0_ & 0x00000020) == 0x00000020)) { - to_bitField0_ |= 0x00000020; - } - result.failedMessage_ = failedMessage_; + /** + * repeated .hbase.pb.TableBackupStatus table_backup_status = 7; + */ + public int getTableBackupStatusCount() { if (tableBackupStatusBuilder_ == null) { - if (((bitField0_ & 0x00000040) == 0x00000040)) { - tableBackupStatus_ = java.util.Collections.unmodifiableList(tableBackupStatus_); - bitField0_ = (bitField0_ & ~0x00000040); - } - result.tableBackupStatus_ = tableBackupStatus_; + return tableBackupStatus_.size(); } else { - result.tableBackupStatus_ = tableBackupStatusBuilder_.build(); - } - if (((from_bitField0_ & 0x00000080) == 0x00000080)) { - to_bitField0_ |= 0x00000040; - } - result.startTs_ = startTs_; - if (((from_bitField0_ & 0x00000100) == 
0x00000100)) { - to_bitField0_ |= 0x00000080; - } - result.endTs_ = endTs_; - if (((from_bitField0_ & 0x00000200) == 0x00000200)) { - to_bitField0_ |= 0x00000100; - } - result.totalBytesCopied_ = totalBytesCopied_; - if (((from_bitField0_ & 0x00000400) == 0x00000400)) { - to_bitField0_ |= 0x00000200; - } - result.hlogTargetDir_ = hlogTargetDir_; - if (((from_bitField0_ & 0x00000800) == 0x00000800)) { - to_bitField0_ |= 0x00000400; + return tableBackupStatusBuilder_.getCount(); } - result.progress_ = progress_; - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext) { - return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext)other); + /** + * repeated .hbase.pb.TableBackupStatus table_backup_status = 7; + */ + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus getTableBackupStatus(int index) { + if (tableBackupStatusBuilder_ == null) { + return tableBackupStatus_.get(index); } else { - super.mergeFrom(other); - return this; + return tableBackupStatusBuilder_.getMessage(index); } } - - public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext other) { - if (other == org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext.getDefaultInstance()) return this; - if (other.hasBackupId()) { - bitField0_ |= 0x00000001; - backupId_ = other.backupId_; + /** + * repeated .hbase.pb.TableBackupStatus table_backup_status = 7; + */ + public Builder setTableBackupStatus( + int index, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus value) { + if (tableBackupStatusBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureTableBackupStatusIsMutable(); + tableBackupStatus_.set(index, value); onChanged(); + } else { + tableBackupStatusBuilder_.setMessage(index, value); } - if (other.hasType()) { - setType(other.getType()); - } - if (other.hasTargetRootDir()) { - bitField0_ |= 0x00000004; - targetRootDir_ = other.targetRootDir_; + return this; + } + /** + * repeated .hbase.pb.TableBackupStatus table_backup_status = 7; + */ + public Builder setTableBackupStatus( + int index, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus.Builder builderForValue) { + if (tableBackupStatusBuilder_ == null) { + ensureTableBackupStatusIsMutable(); + tableBackupStatus_.set(index, builderForValue.build()); onChanged(); + } else { + tableBackupStatusBuilder_.setMessage(index, builderForValue.build()); } - if (other.hasState()) { - setState(other.getState()); - } - if (other.hasPhase()) { - setPhase(other.getPhase()); - } - if (other.hasFailedMessage()) { - bitField0_ |= 0x00000020; - failedMessage_ = other.failedMessage_; + return this; + } + /** + * repeated .hbase.pb.TableBackupStatus table_backup_status = 7; + */ + public Builder addTableBackupStatus(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus value) { + if (tableBackupStatusBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureTableBackupStatusIsMutable(); + tableBackupStatus_.add(value); onChanged(); + } else { + tableBackupStatusBuilder_.addMessage(value); } + return this; + } + /** + * repeated .hbase.pb.TableBackupStatus table_backup_status = 7; + */ + public Builder addTableBackupStatus( + int index, 
org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus value) { if (tableBackupStatusBuilder_ == null) { - if (!other.tableBackupStatus_.isEmpty()) { - if (tableBackupStatus_.isEmpty()) { - tableBackupStatus_ = other.tableBackupStatus_; - bitField0_ = (bitField0_ & ~0x00000040); - } else { - ensureTableBackupStatusIsMutable(); - tableBackupStatus_.addAll(other.tableBackupStatus_); - } - onChanged(); + if (value == null) { + throw new NullPointerException(); } + ensureTableBackupStatusIsMutable(); + tableBackupStatus_.add(index, value); + onChanged(); } else { - if (!other.tableBackupStatus_.isEmpty()) { - if (tableBackupStatusBuilder_.isEmpty()) { - tableBackupStatusBuilder_.dispose(); - tableBackupStatusBuilder_ = null; - tableBackupStatus_ = other.tableBackupStatus_; - bitField0_ = (bitField0_ & ~0x00000040); - tableBackupStatusBuilder_ = - com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? - getTableBackupStatusFieldBuilder() : null; - } else { - tableBackupStatusBuilder_.addAllMessages(other.tableBackupStatus_); - } - } - } - if (other.hasStartTs()) { - setStartTs(other.getStartTs()); + tableBackupStatusBuilder_.addMessage(index, value); } - if (other.hasEndTs()) { - setEndTs(other.getEndTs()); + return this; + } + /** + * repeated .hbase.pb.TableBackupStatus table_backup_status = 7; + */ + public Builder addTableBackupStatus( + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus.Builder builderForValue) { + if (tableBackupStatusBuilder_ == null) { + ensureTableBackupStatusIsMutable(); + tableBackupStatus_.add(builderForValue.build()); + onChanged(); + } else { + tableBackupStatusBuilder_.addMessage(builderForValue.build()); } - if (other.hasTotalBytesCopied()) { - setTotalBytesCopied(other.getTotalBytesCopied()); + return this; + } + /** + * repeated .hbase.pb.TableBackupStatus table_backup_status = 7; + */ + public Builder addTableBackupStatus( + int index, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus.Builder builderForValue) { + if (tableBackupStatusBuilder_ == null) { + ensureTableBackupStatusIsMutable(); + tableBackupStatus_.add(index, builderForValue.build()); + onChanged(); + } else { + tableBackupStatusBuilder_.addMessage(index, builderForValue.build()); } - if (other.hasHlogTargetDir()) { - bitField0_ |= 0x00000400; - hlogTargetDir_ = other.hlogTargetDir_; + return this; + } + /** + * repeated .hbase.pb.TableBackupStatus table_backup_status = 7; + */ + public Builder addAllTableBackupStatus( + java.lang.Iterable values) { + if (tableBackupStatusBuilder_ == null) { + ensureTableBackupStatusIsMutable(); + super.addAll(values, tableBackupStatus_); onChanged(); + } else { + tableBackupStatusBuilder_.addAllMessages(values); } - if (other.hasProgress()) { - setProgress(other.getProgress()); + return this; + } + /** + * repeated .hbase.pb.TableBackupStatus table_backup_status = 7; + */ + public Builder clearTableBackupStatus() { + if (tableBackupStatusBuilder_ == null) { + tableBackupStatus_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000040); + onChanged(); + } else { + tableBackupStatusBuilder_.clear(); } - this.mergeUnknownFields(other.getUnknownFields()); return this; } - - public final boolean isInitialized() { - if (!hasBackupId()) { - - return false; + /** + * repeated .hbase.pb.TableBackupStatus table_backup_status = 7; + */ + public Builder removeTableBackupStatus(int index) { + if (tableBackupStatusBuilder_ == null) { + ensureTableBackupStatusIsMutable(); + 
tableBackupStatus_.remove(index); + onChanged(); + } else { + tableBackupStatusBuilder_.remove(index); } - if (!hasType()) { - - return false; + return this; + } + /** + * repeated .hbase.pb.TableBackupStatus table_backup_status = 7; + */ + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus.Builder getTableBackupStatusBuilder( + int index) { + return getTableBackupStatusFieldBuilder().getBuilder(index); + } + /** + * repeated .hbase.pb.TableBackupStatus table_backup_status = 7; + */ + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatusOrBuilder getTableBackupStatusOrBuilder( + int index) { + if (tableBackupStatusBuilder_ == null) { + return tableBackupStatus_.get(index); } else { + return tableBackupStatusBuilder_.getMessageOrBuilder(index); } - if (!hasTargetRootDir()) { - - return false; + } + /** + * repeated .hbase.pb.TableBackupStatus table_backup_status = 7; + */ + public java.util.List + getTableBackupStatusOrBuilderList() { + if (tableBackupStatusBuilder_ != null) { + return tableBackupStatusBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(tableBackupStatus_); } - for (int i = 0; i < getTableBackupStatusCount(); i++) { - if (!getTableBackupStatus(i).isInitialized()) { - - return false; - } + } + /** + * repeated .hbase.pb.TableBackupStatus table_backup_status = 7; + */ + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus.Builder addTableBackupStatusBuilder() { + return getTableBackupStatusFieldBuilder().addBuilder( + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus.getDefaultInstance()); + } + /** + * repeated .hbase.pb.TableBackupStatus table_backup_status = 7; + */ + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus.Builder addTableBackupStatusBuilder( + int index) { + return getTableBackupStatusFieldBuilder().addBuilder( + index, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus.getDefaultInstance()); + } + /** + * repeated .hbase.pb.TableBackupStatus table_backup_status = 7; + */ + public java.util.List + getTableBackupStatusBuilderList() { + return getTableBackupStatusFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus.Builder, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatusOrBuilder> + getTableBackupStatusFieldBuilder() { + if (tableBackupStatusBuilder_ == null) { + tableBackupStatusBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus.Builder, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatusOrBuilder>( + tableBackupStatus_, + ((bitField0_ & 0x00000040) == 0x00000040), + getParentForChildren(), + isClean()); + tableBackupStatus_ = null; } - return true; + return tableBackupStatusBuilder_; } - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext parsedMessage = null; - try { - parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); - } catch 
(com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext) e.getUnfinishedMessage(); - throw e; - } finally { - if (parsedMessage != null) { - mergeFrom(parsedMessage); - } - } + // optional uint64 start_ts = 8; + private long startTs_ ; + /** + * optional uint64 start_ts = 8; + */ + public boolean hasStartTs() { + return ((bitField0_ & 0x00000080) == 0x00000080); + } + /** + * optional uint64 start_ts = 8; + */ + public long getStartTs() { + return startTs_; + } + /** + * optional uint64 start_ts = 8; + */ + public Builder setStartTs(long value) { + bitField0_ |= 0x00000080; + startTs_ = value; + onChanged(); + return this; + } + /** + * optional uint64 start_ts = 8; + */ + public Builder clearStartTs() { + bitField0_ = (bitField0_ & ~0x00000080); + startTs_ = 0L; + onChanged(); return this; } - private int bitField0_; - // required string backup_id = 1; - private java.lang.Object backupId_ = ""; + // optional uint64 end_ts = 9; + private long endTs_ ; /** - * required string backup_id = 1; + * optional uint64 end_ts = 9; */ - public boolean hasBackupId() { - return ((bitField0_ & 0x00000001) == 0x00000001); + public boolean hasEndTs() { + return ((bitField0_ & 0x00000100) == 0x00000100); } /** - * required string backup_id = 1; + * optional uint64 end_ts = 9; */ - public java.lang.String getBackupId() { - java.lang.Object ref = backupId_; + public long getEndTs() { + return endTs_; + } + /** + * optional uint64 end_ts = 9; + */ + public Builder setEndTs(long value) { + bitField0_ |= 0x00000100; + endTs_ = value; + onChanged(); + return this; + } + /** + * optional uint64 end_ts = 9; + */ + public Builder clearEndTs() { + bitField0_ = (bitField0_ & ~0x00000100); + endTs_ = 0L; + onChanged(); + return this; + } + + // optional int64 total_bytes_copied = 10; + private long totalBytesCopied_ ; + /** + * optional int64 total_bytes_copied = 10; + */ + public boolean hasTotalBytesCopied() { + return ((bitField0_ & 0x00000200) == 0x00000200); + } + /** + * optional int64 total_bytes_copied = 10; + */ + public long getTotalBytesCopied() { + return totalBytesCopied_; + } + /** + * optional int64 total_bytes_copied = 10; + */ + public Builder setTotalBytesCopied(long value) { + bitField0_ |= 0x00000200; + totalBytesCopied_ = value; + onChanged(); + return this; + } + /** + * optional int64 total_bytes_copied = 10; + */ + public Builder clearTotalBytesCopied() { + bitField0_ = (bitField0_ & ~0x00000200); + totalBytesCopied_ = 0L; + onChanged(); + return this; + } + + // optional string hlog_target_dir = 11; + private java.lang.Object hlogTargetDir_ = ""; + /** + * optional string hlog_target_dir = 11; + */ + public boolean hasHlogTargetDir() { + return ((bitField0_ & 0x00000400) == 0x00000400); + } + /** + * optional string hlog_target_dir = 11; + */ + public java.lang.String getHlogTargetDir() { + java.lang.Object ref = hlogTargetDir_; if (!(ref instanceof java.lang.String)) { java.lang.String s = ((com.google.protobuf.ByteString) ref) .toStringUtf8(); - backupId_ = s; + hlogTargetDir_ = s; return s; } else { return (java.lang.String) ref; } } /** - * required string backup_id = 1; + * optional string hlog_target_dir = 11; */ public com.google.protobuf.ByteString - getBackupIdBytes() { - java.lang.Object ref = backupId_; + getHlogTargetDirBytes() { + java.lang.Object ref = hlogTargetDir_; if (ref instanceof String) { com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8( 
(java.lang.String) ref); - backupId_ = b; + hlogTargetDir_ = b; return b; } else { return (com.google.protobuf.ByteString) ref; } } /** - * required string backup_id = 1; + * optional string hlog_target_dir = 11; */ - public Builder setBackupId( + public Builder setHlogTargetDir( java.lang.String value) { if (value == null) { throw new NullPointerException(); } - bitField0_ |= 0x00000001; - backupId_ = value; + bitField0_ |= 0x00000400; + hlogTargetDir_ = value; + onChanged(); + return this; + } + /** + * optional string hlog_target_dir = 11; + */ + public Builder clearHlogTargetDir() { + bitField0_ = (bitField0_ & ~0x00000400); + hlogTargetDir_ = getDefaultInstance().getHlogTargetDir(); + onChanged(); + return this; + } + /** + * optional string hlog_target_dir = 11; + */ + public Builder setHlogTargetDirBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000400; + hlogTargetDir_ = value; + onChanged(); + return this; + } + + // optional uint32 progress = 12; + private int progress_ ; + /** + * optional uint32 progress = 12; + */ + public boolean hasProgress() { + return ((bitField0_ & 0x00000800) == 0x00000800); + } + /** + * optional uint32 progress = 12; + */ + public int getProgress() { + return progress_; + } + /** + * optional uint32 progress = 12; + */ + public Builder setProgress(int value) { + bitField0_ |= 0x00000800; + progress_ = value; + onChanged(); + return this; + } + /** + * optional uint32 progress = 12; + */ + public Builder clearProgress() { + bitField0_ = (bitField0_ & ~0x00000800); + progress_ = 0; onChanged(); return this; } - /** - * required string backup_id = 1; - */ - public Builder clearBackupId() { - bitField0_ = (bitField0_ & ~0x00000001); - backupId_ = getDefaultInstance().getBackupId(); - onChanged(); - return this; + + // @@protoc_insertion_point(builder_scope:hbase.pb.BackupContext) + } + + static { + defaultInstance = new BackupContext(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:hbase.pb.BackupContext) + } + + public interface BackupProcContextOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required .hbase.pb.BackupContext ctx = 1; + /** + * required .hbase.pb.BackupContext ctx = 1; + */ + boolean hasCtx(); + /** + * required .hbase.pb.BackupContext ctx = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext getCtx(); + /** + * required .hbase.pb.BackupContext ctx = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContextOrBuilder getCtxOrBuilder(); + + // repeated .hbase.pb.ServerTimestamp server_timestamp = 2; + /** + * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; + */ + java.util.List + getServerTimestampList(); + /** + * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; + */ + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp getServerTimestamp(int index); + /** + * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; + */ + int getServerTimestampCount(); + /** + * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; + */ + java.util.List + getServerTimestampOrBuilderList(); + /** + * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; + */ + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestampOrBuilder getServerTimestampOrBuilder( + int index); + } + /** + * Protobuf type {@code hbase.pb.BackupProcContext} + */ + public static final class BackupProcContext extends + 
com.google.protobuf.GeneratedMessage + implements BackupProcContextOrBuilder { + // Use BackupProcContext.newBuilder() to construct. + private BackupProcContext(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private BackupProcContext(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final BackupProcContext defaultInstance; + public static BackupProcContext getDefaultInstance() { + return defaultInstance; + } + + public BackupProcContext getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private BackupProcContext( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext.Builder subBuilder = null; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + subBuilder = ctx_.toBuilder(); + } + ctx_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(ctx_); + ctx_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000001; + break; + } + case 18: { + if (!((mutable_bitField0_ & 0x00000002) == 0x00000002)) { + serverTimestamp_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000002; + } + serverTimestamp_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp.PARSER, extensionRegistry)); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000002) == 0x00000002)) { + serverTimestamp_ = java.util.Collections.unmodifiableList(serverTimestamp_); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_BackupProcContext_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_BackupProcContext_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupProcContext.class, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupProcContext.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public BackupProcContext parsePartialFrom( + 
com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new BackupProcContext(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required .hbase.pb.BackupContext ctx = 1; + public static final int CTX_FIELD_NUMBER = 1; + private org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext ctx_; + /** + * required .hbase.pb.BackupContext ctx = 1; + */ + public boolean hasCtx() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .hbase.pb.BackupContext ctx = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext getCtx() { + return ctx_; + } + /** + * required .hbase.pb.BackupContext ctx = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContextOrBuilder getCtxOrBuilder() { + return ctx_; + } + + // repeated .hbase.pb.ServerTimestamp server_timestamp = 2; + public static final int SERVER_TIMESTAMP_FIELD_NUMBER = 2; + private java.util.List serverTimestamp_; + /** + * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; + */ + public java.util.List getServerTimestampList() { + return serverTimestamp_; + } + /** + * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; + */ + public java.util.List + getServerTimestampOrBuilderList() { + return serverTimestamp_; + } + /** + * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; + */ + public int getServerTimestampCount() { + return serverTimestamp_.size(); + } + /** + * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp getServerTimestamp(int index) { + return serverTimestamp_.get(index); + } + /** + * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestampOrBuilder getServerTimestampOrBuilder( + int index) { + return serverTimestamp_.get(index); + } + + private void initFields() { + ctx_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext.getDefaultInstance(); + serverTimestamp_ = java.util.Collections.emptyList(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasCtx()) { + memoizedIsInitialized = 0; + return false; + } + if (!getCtx().isInitialized()) { + memoizedIsInitialized = 0; + return false; } - /** - * required string backup_id = 1; - */ - public Builder setBackupIdBytes( - com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000001; - backupId_ = value; - onChanged(); - return this; + for (int i = 0; i < getServerTimestampCount(); i++) { + if (!getServerTimestamp(i).isInitialized()) { + memoizedIsInitialized = 0; + return false; + } } + memoizedIsInitialized = 1; + return true; + } - // required .hbase.pb.BackupType type = 2; - private org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType type_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType.FULL; - /** - * required .hbase.pb.BackupType type = 2; - */ - public boolean hasType() { - return ((bitField0_ & 0x00000002) == 0x00000002); + public void writeTo(com.google.protobuf.CodedOutputStream output) 
+ throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeMessage(1, ctx_); } - /** - * required .hbase.pb.BackupType type = 2; - */ - public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType getType() { - return type_; + for (int i = 0; i < serverTimestamp_.size(); i++) { + output.writeMessage(2, serverTimestamp_.get(i)); } - /** - * required .hbase.pb.BackupType type = 2; - */ - public Builder setType(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000002; - type_ = value; - onChanged(); - return this; + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(1, ctx_); } - /** - * required .hbase.pb.BackupType type = 2; - */ - public Builder clearType() { - bitField0_ = (bitField0_ & ~0x00000002); - type_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType.FULL; - onChanged(); - return this; + for (int i = 0; i < serverTimestamp_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(2, serverTimestamp_.get(i)); } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } - // required string target_root_dir = 3; - private java.lang.Object targetRootDir_ = ""; - /** - * required string target_root_dir = 3; - */ - public boolean hasTargetRootDir() { - return ((bitField0_ & 0x00000004) == 0x00000004); + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; } - /** - * required string target_root_dir = 3; - */ - public java.lang.String getTargetRootDir() { - java.lang.Object ref = targetRootDir_; - if (!(ref instanceof java.lang.String)) { - java.lang.String s = ((com.google.protobuf.ByteString) ref) - .toStringUtf8(); - targetRootDir_ = s; - return s; - } else { - return (java.lang.String) ref; - } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupProcContext)) { + return super.equals(obj); } - /** - * required string target_root_dir = 3; - */ - public com.google.protobuf.ByteString - getTargetRootDirBytes() { - java.lang.Object ref = targetRootDir_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - targetRootDir_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupProcContext other = (org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupProcContext) obj; + + boolean result = true; + result = result && (hasCtx() == other.hasCtx()); + if (hasCtx()) { + result = result && getCtx() + .equals(other.getCtx()); } - /** - * required string target_root_dir = 3; - */ - public Builder setTargetRootDir( - java.lang.String value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000004; - targetRootDir_ = value; - onChanged(); - return this; + result = result 
&& getServerTimestampList() + .equals(other.getServerTimestampList()); + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; } - /** - * required string target_root_dir = 3; - */ - public Builder clearTargetRootDir() { - bitField0_ = (bitField0_ & ~0x00000004); - targetRootDir_ = getDefaultInstance().getTargetRootDir(); - onChanged(); - return this; + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasCtx()) { + hash = (37 * hash) + CTX_FIELD_NUMBER; + hash = (53 * hash) + getCtx().hashCode(); } - /** - * required string target_root_dir = 3; - */ - public Builder setTargetRootDirBytes( - com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000004; - targetRootDir_ = value; - onChanged(); - return this; + if (getServerTimestampCount() > 0) { + hash = (37 * hash) + SERVER_TIMESTAMP_FIELD_NUMBER; + hash = (53 * hash) + getServerTimestampList().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupProcContext parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupProcContext parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupProcContext parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupProcContext parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupProcContext parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupProcContext parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupProcContext parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupProcContext parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupProcContext parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static 
org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupProcContext parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupProcContext prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code hbase.pb.BackupProcContext} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupProcContextOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_BackupProcContext_descriptor; } - // optional .hbase.pb.BackupContext.BackupState state = 4; - private org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext.BackupState state_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext.BackupState.WAITING; - /** - * optional .hbase.pb.BackupContext.BackupState state = 4; - */ - public boolean hasState() { - return ((bitField0_ & 0x00000008) == 0x00000008); - } - /** - * optional .hbase.pb.BackupContext.BackupState state = 4; - */ - public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext.BackupState getState() { - return state_; - } - /** - * optional .hbase.pb.BackupContext.BackupState state = 4; - */ - public Builder setState(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext.BackupState value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000008; - state_ = value; - onChanged(); - return this; - } - /** - * optional .hbase.pb.BackupContext.BackupState state = 4; - */ - public Builder clearState() { - bitField0_ = (bitField0_ & ~0x00000008); - state_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext.BackupState.WAITING; - onChanged(); - return this; + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_BackupProcContext_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupProcContext.class, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupProcContext.Builder.class); } - // optional .hbase.pb.BackupContext.BackupPhase phase = 5; - private org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext.BackupPhase phase_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext.BackupPhase.REQUEST; - /** - * optional .hbase.pb.BackupContext.BackupPhase phase = 5; - */ - public boolean hasPhase() { - return ((bitField0_ & 0x00000010) == 0x00000010); + // Construct using org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupProcContext.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); } - /** - * optional 
.hbase.pb.BackupContext.BackupPhase phase = 5; - */ - public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext.BackupPhase getPhase() { - return phase_; + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); } - /** - * optional .hbase.pb.BackupContext.BackupPhase phase = 5; - */ - public Builder setPhase(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext.BackupPhase value) { - if (value == null) { - throw new NullPointerException(); + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getCtxFieldBuilder(); + getServerTimestampFieldBuilder(); } - bitField0_ |= 0x00000010; - phase_ = value; - onChanged(); - return this; } - /** - * optional .hbase.pb.BackupContext.BackupPhase phase = 5; - */ - public Builder clearPhase() { - bitField0_ = (bitField0_ & ~0x00000010); - phase_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext.BackupPhase.REQUEST; - onChanged(); - return this; + private static Builder create() { + return new Builder(); } - // optional string failed_message = 6; - private java.lang.Object failedMessage_ = ""; - /** - * optional string failed_message = 6; - */ - public boolean hasFailedMessage() { - return ((bitField0_ & 0x00000020) == 0x00000020); - } - /** - * optional string failed_message = 6; - */ - public java.lang.String getFailedMessage() { - java.lang.Object ref = failedMessage_; - if (!(ref instanceof java.lang.String)) { - java.lang.String s = ((com.google.protobuf.ByteString) ref) - .toStringUtf8(); - failedMessage_ = s; - return s; + public Builder clear() { + super.clear(); + if (ctxBuilder_ == null) { + ctx_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext.getDefaultInstance(); } else { - return (java.lang.String) ref; + ctxBuilder_.clear(); } - } - /** - * optional string failed_message = 6; - */ - public com.google.protobuf.ByteString - getFailedMessageBytes() { - java.lang.Object ref = failedMessage_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - failedMessage_ = b; - return b; + bitField0_ = (bitField0_ & ~0x00000001); + if (serverTimestampBuilder_ == null) { + serverTimestamp_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000002); } else { - return (com.google.protobuf.ByteString) ref; + serverTimestampBuilder_.clear(); } - } - /** - * optional string failed_message = 6; - */ - public Builder setFailedMessage( - java.lang.String value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000020; - failedMessage_ = value; - onChanged(); return this; } - /** - * optional string failed_message = 6; - */ - public Builder clearFailedMessage() { - bitField0_ = (bitField0_ & ~0x00000020); - failedMessage_ = getDefaultInstance().getFailedMessage(); - onChanged(); - return this; + + public Builder clone() { + return create().mergeFrom(buildPartial()); } - /** - * optional string failed_message = 6; - */ - public Builder setFailedMessageBytes( - com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000020; - failedMessage_ = value; - onChanged(); - return this; + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return 
org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_BackupProcContext_descriptor; } - // repeated .hbase.pb.TableBackupStatus table_backup_status = 7; - private java.util.List tableBackupStatus_ = - java.util.Collections.emptyList(); - private void ensureTableBackupStatusIsMutable() { - if (!((bitField0_ & 0x00000040) == 0x00000040)) { - tableBackupStatus_ = new java.util.ArrayList(tableBackupStatus_); - bitField0_ |= 0x00000040; - } + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupProcContext getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupProcContext.getDefaultInstance(); } - private com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus.Builder, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatusOrBuilder> tableBackupStatusBuilder_; + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupProcContext build() { + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupProcContext result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } - /** - * repeated .hbase.pb.TableBackupStatus table_backup_status = 7; - */ - public java.util.List getTableBackupStatusList() { - if (tableBackupStatusBuilder_ == null) { - return java.util.Collections.unmodifiableList(tableBackupStatus_); + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupProcContext buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupProcContext result = new org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupProcContext(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + if (ctxBuilder_ == null) { + result.ctx_ = ctx_; } else { - return tableBackupStatusBuilder_.getMessageList(); + result.ctx_ = ctxBuilder_.build(); } - } - /** - * repeated .hbase.pb.TableBackupStatus table_backup_status = 7; - */ - public int getTableBackupStatusCount() { - if (tableBackupStatusBuilder_ == null) { - return tableBackupStatus_.size(); + if (serverTimestampBuilder_ == null) { + if (((bitField0_ & 0x00000002) == 0x00000002)) { + serverTimestamp_ = java.util.Collections.unmodifiableList(serverTimestamp_); + bitField0_ = (bitField0_ & ~0x00000002); + } + result.serverTimestamp_ = serverTimestamp_; } else { - return tableBackupStatusBuilder_.getCount(); + result.serverTimestamp_ = serverTimestampBuilder_.build(); } + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; } - /** - * repeated .hbase.pb.TableBackupStatus table_backup_status = 7; - */ - public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus getTableBackupStatus(int index) { - if (tableBackupStatusBuilder_ == null) { - return tableBackupStatus_.get(index); + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupProcContext) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupProcContext)other); } else { - return tableBackupStatusBuilder_.getMessage(index); + super.mergeFrom(other); + return this; } } - /** - * repeated .hbase.pb.TableBackupStatus table_backup_status = 7; - */ - public Builder setTableBackupStatus( - 
int index, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus value) { - if (tableBackupStatusBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupProcContext other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupProcContext.getDefaultInstance()) return this; + if (other.hasCtx()) { + mergeCtx(other.getCtx()); + } + if (serverTimestampBuilder_ == null) { + if (!other.serverTimestamp_.isEmpty()) { + if (serverTimestamp_.isEmpty()) { + serverTimestamp_ = other.serverTimestamp_; + bitField0_ = (bitField0_ & ~0x00000002); + } else { + ensureServerTimestampIsMutable(); + serverTimestamp_.addAll(other.serverTimestamp_); + } + onChanged(); + } + } else { + if (!other.serverTimestamp_.isEmpty()) { + if (serverTimestampBuilder_.isEmpty()) { + serverTimestampBuilder_.dispose(); + serverTimestampBuilder_ = null; + serverTimestamp_ = other.serverTimestamp_; + bitField0_ = (bitField0_ & ~0x00000002); + serverTimestampBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? + getServerTimestampFieldBuilder() : null; + } else { + serverTimestampBuilder_.addAllMessages(other.serverTimestamp_); + } } - ensureTableBackupStatusIsMutable(); - tableBackupStatus_.set(index, value); - onChanged(); - } else { - tableBackupStatusBuilder_.setMessage(index, value); } + this.mergeUnknownFields(other.getUnknownFields()); return this; } - /** - * repeated .hbase.pb.TableBackupStatus table_backup_status = 7; - */ - public Builder setTableBackupStatus( - int index, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus.Builder builderForValue) { - if (tableBackupStatusBuilder_ == null) { - ensureTableBackupStatusIsMutable(); - tableBackupStatus_.set(index, builderForValue.build()); - onChanged(); - } else { - tableBackupStatusBuilder_.setMessage(index, builderForValue.build()); + + public final boolean isInitialized() { + if (!hasCtx()) { + + return false; } - return this; + if (!getCtx().isInitialized()) { + + return false; + } + for (int i = 0; i < getServerTimestampCount(); i++) { + if (!getServerTimestamp(i).isInitialized()) { + + return false; + } + } + return true; } - /** - * repeated .hbase.pb.TableBackupStatus table_backup_status = 7; - */ - public Builder addTableBackupStatus(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus value) { - if (tableBackupStatusBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupProcContext parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupProcContext) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); } - ensureTableBackupStatusIsMutable(); - tableBackupStatus_.add(value); - onChanged(); - } else { - tableBackupStatusBuilder_.addMessage(value); } return this; } + private int bitField0_; + + // required .hbase.pb.BackupContext ctx = 1; + private org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext ctx_ = 
org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext.Builder, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContextOrBuilder> ctxBuilder_; /** - * repeated .hbase.pb.TableBackupStatus table_backup_status = 7; + * required .hbase.pb.BackupContext ctx = 1; */ - public Builder addTableBackupStatus( - int index, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus value) { - if (tableBackupStatusBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureTableBackupStatusIsMutable(); - tableBackupStatus_.add(index, value); - onChanged(); - } else { - tableBackupStatusBuilder_.addMessage(index, value); - } - return this; + public boolean hasCtx() { + return ((bitField0_ & 0x00000001) == 0x00000001); } /** - * repeated .hbase.pb.TableBackupStatus table_backup_status = 7; + * required .hbase.pb.BackupContext ctx = 1; */ - public Builder addTableBackupStatus( - org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus.Builder builderForValue) { - if (tableBackupStatusBuilder_ == null) { - ensureTableBackupStatusIsMutable(); - tableBackupStatus_.add(builderForValue.build()); - onChanged(); + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext getCtx() { + if (ctxBuilder_ == null) { + return ctx_; } else { - tableBackupStatusBuilder_.addMessage(builderForValue.build()); + return ctxBuilder_.getMessage(); } - return this; } /** - * repeated .hbase.pb.TableBackupStatus table_backup_status = 7; + * required .hbase.pb.BackupContext ctx = 1; */ - public Builder addTableBackupStatus( - int index, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus.Builder builderForValue) { - if (tableBackupStatusBuilder_ == null) { - ensureTableBackupStatusIsMutable(); - tableBackupStatus_.add(index, builderForValue.build()); + public Builder setCtx(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext value) { + if (ctxBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ctx_ = value; onChanged(); } else { - tableBackupStatusBuilder_.addMessage(index, builderForValue.build()); + ctxBuilder_.setMessage(value); } + bitField0_ |= 0x00000001; return this; } /** - * repeated .hbase.pb.TableBackupStatus table_backup_status = 7; + * required .hbase.pb.BackupContext ctx = 1; */ - public Builder addAllTableBackupStatus( - java.lang.Iterable values) { - if (tableBackupStatusBuilder_ == null) { - ensureTableBackupStatusIsMutable(); - super.addAll(values, tableBackupStatus_); + public Builder setCtx( + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext.Builder builderForValue) { + if (ctxBuilder_ == null) { + ctx_ = builderForValue.build(); onChanged(); } else { - tableBackupStatusBuilder_.addAllMessages(values); + ctxBuilder_.setMessage(builderForValue.build()); } + bitField0_ |= 0x00000001; return this; } /** - * repeated .hbase.pb.TableBackupStatus table_backup_status = 7; + * required .hbase.pb.BackupContext ctx = 1; */ - public Builder clearTableBackupStatus() { - if (tableBackupStatusBuilder_ == null) { - tableBackupStatus_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000040); + public Builder mergeCtx(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext value) 
{ + if (ctxBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001) && + ctx_ != org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext.getDefaultInstance()) { + ctx_ = + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext.newBuilder(ctx_).mergeFrom(value).buildPartial(); + } else { + ctx_ = value; + } onChanged(); } else { - tableBackupStatusBuilder_.clear(); + ctxBuilder_.mergeFrom(value); } + bitField0_ |= 0x00000001; return this; } /** - * repeated .hbase.pb.TableBackupStatus table_backup_status = 7; + * required .hbase.pb.BackupContext ctx = 1; */ - public Builder removeTableBackupStatus(int index) { - if (tableBackupStatusBuilder_ == null) { - ensureTableBackupStatusIsMutable(); - tableBackupStatus_.remove(index); + public Builder clearCtx() { + if (ctxBuilder_ == null) { + ctx_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext.getDefaultInstance(); onChanged(); } else { - tableBackupStatusBuilder_.remove(index); + ctxBuilder_.clear(); } + bitField0_ = (bitField0_ & ~0x00000001); return this; } /** - * repeated .hbase.pb.TableBackupStatus table_backup_status = 7; - */ - public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus.Builder getTableBackupStatusBuilder( - int index) { - return getTableBackupStatusFieldBuilder().getBuilder(index); - } - /** - * repeated .hbase.pb.TableBackupStatus table_backup_status = 7; + * required .hbase.pb.BackupContext ctx = 1; */ - public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatusOrBuilder getTableBackupStatusOrBuilder( - int index) { - if (tableBackupStatusBuilder_ == null) { - return tableBackupStatus_.get(index); } else { - return tableBackupStatusBuilder_.getMessageOrBuilder(index); - } + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext.Builder getCtxBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return getCtxFieldBuilder().getBuilder(); } /** - * repeated .hbase.pb.TableBackupStatus table_backup_status = 7; + * required .hbase.pb.BackupContext ctx = 1; */ - public java.util.List - getTableBackupStatusOrBuilderList() { - if (tableBackupStatusBuilder_ != null) { - return tableBackupStatusBuilder_.getMessageOrBuilderList(); + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContextOrBuilder getCtxOrBuilder() { + if (ctxBuilder_ != null) { + return ctxBuilder_.getMessageOrBuilder(); } else { - return java.util.Collections.unmodifiableList(tableBackupStatus_); + return ctx_; } } /** - * repeated .hbase.pb.TableBackupStatus table_backup_status = 7; - */ - public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus.Builder addTableBackupStatusBuilder() { - return getTableBackupStatusFieldBuilder().addBuilder( - org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus.getDefaultInstance()); - } - /** - * repeated .hbase.pb.TableBackupStatus table_backup_status = 7; - */ - public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus.Builder addTableBackupStatusBuilder( - int index) { - return getTableBackupStatusFieldBuilder().addBuilder( - index, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus.getDefaultInstance()); - } - /** - * repeated .hbase.pb.TableBackupStatus table_backup_status = 7; + * required .hbase.pb.BackupContext ctx = 1; */ - public java.util.List - getTableBackupStatusBuilderList() { - return getTableBackupStatusFieldBuilder().getBuilderList(); - } - private 
com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus.Builder, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatusOrBuilder> - getTableBackupStatusFieldBuilder() { - if (tableBackupStatusBuilder_ == null) { - tableBackupStatusBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus.Builder, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatusOrBuilder>( - tableBackupStatus_, - ((bitField0_ & 0x00000040) == 0x00000040), + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext.Builder, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContextOrBuilder> + getCtxFieldBuilder() { + if (ctxBuilder_ == null) { + ctxBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext.Builder, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContextOrBuilder>( + ctx_, getParentForChildren(), isClean()); - tableBackupStatus_ = null; + ctx_ = null; } - return tableBackupStatusBuilder_; + return ctxBuilder_; } - // optional uint64 start_ts = 8; - private long startTs_ ; - /** - * optional uint64 start_ts = 8; - */ - public boolean hasStartTs() { - return ((bitField0_ & 0x00000080) == 0x00000080); - } - /** - * optional uint64 start_ts = 8; - */ - public long getStartTs() { - return startTs_; - } - /** - * optional uint64 start_ts = 8; - */ - public Builder setStartTs(long value) { - bitField0_ |= 0x00000080; - startTs_ = value; - onChanged(); - return this; + // repeated .hbase.pb.ServerTimestamp server_timestamp = 2; + private java.util.List serverTimestamp_ = + java.util.Collections.emptyList(); + private void ensureServerTimestampIsMutable() { + if (!((bitField0_ & 0x00000002) == 0x00000002)) { + serverTimestamp_ = new java.util.ArrayList(serverTimestamp_); + bitField0_ |= 0x00000002; + } } + + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp.Builder, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestampOrBuilder> serverTimestampBuilder_; + /** - * optional uint64 start_ts = 8; + * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; */ - public Builder clearStartTs() { - bitField0_ = (bitField0_ & ~0x00000080); - startTs_ = 0L; - onChanged(); - return this; + public java.util.List getServerTimestampList() { + if (serverTimestampBuilder_ == null) { + return java.util.Collections.unmodifiableList(serverTimestamp_); + } else { + return serverTimestampBuilder_.getMessageList(); + } } - - // optional uint64 end_ts = 9; - private long endTs_ ; /** - * optional uint64 end_ts = 9; + * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; */ - public boolean hasEndTs() { - return ((bitField0_ & 0x00000100) == 0x00000100); + public int getServerTimestampCount() { + if (serverTimestampBuilder_ == null) { + return serverTimestamp_.size(); + } else { + return serverTimestampBuilder_.getCount(); + } } /** - * optional uint64 
end_ts = 9; + * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; */ - public long getEndTs() { - return endTs_; + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp getServerTimestamp(int index) { + if (serverTimestampBuilder_ == null) { + return serverTimestamp_.get(index); + } else { + return serverTimestampBuilder_.getMessage(index); + } } /** - * optional uint64 end_ts = 9; + * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; */ - public Builder setEndTs(long value) { - bitField0_ |= 0x00000100; - endTs_ = value; - onChanged(); + public Builder setServerTimestamp( + int index, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp value) { + if (serverTimestampBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureServerTimestampIsMutable(); + serverTimestamp_.set(index, value); + onChanged(); + } else { + serverTimestampBuilder_.setMessage(index, value); + } return this; } /** - * optional uint64 end_ts = 9; + * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; */ - public Builder clearEndTs() { - bitField0_ = (bitField0_ & ~0x00000100); - endTs_ = 0L; - onChanged(); + public Builder setServerTimestamp( + int index, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp.Builder builderForValue) { + if (serverTimestampBuilder_ == null) { + ensureServerTimestampIsMutable(); + serverTimestamp_.set(index, builderForValue.build()); + onChanged(); + } else { + serverTimestampBuilder_.setMessage(index, builderForValue.build()); + } return this; } - - // optional int64 total_bytes_copied = 10; - private long totalBytesCopied_ ; - /** - * optional int64 total_bytes_copied = 10; - */ - public boolean hasTotalBytesCopied() { - return ((bitField0_ & 0x00000200) == 0x00000200); - } /** - * optional int64 total_bytes_copied = 10; + * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; */ - public long getTotalBytesCopied() { - return totalBytesCopied_; + public Builder addServerTimestamp(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp value) { + if (serverTimestampBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureServerTimestampIsMutable(); + serverTimestamp_.add(value); + onChanged(); + } else { + serverTimestampBuilder_.addMessage(value); + } + return this; } /** - * optional int64 total_bytes_copied = 10; + * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; */ - public Builder setTotalBytesCopied(long value) { - bitField0_ |= 0x00000200; - totalBytesCopied_ = value; - onChanged(); + public Builder addServerTimestamp( + int index, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp value) { + if (serverTimestampBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureServerTimestampIsMutable(); + serverTimestamp_.add(index, value); + onChanged(); + } else { + serverTimestampBuilder_.addMessage(index, value); + } return this; } /** - * optional int64 total_bytes_copied = 10; + * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; */ - public Builder clearTotalBytesCopied() { - bitField0_ = (bitField0_ & ~0x00000200); - totalBytesCopied_ = 0L; - onChanged(); + public Builder addServerTimestamp( + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp.Builder builderForValue) { + if (serverTimestampBuilder_ == null) { + ensureServerTimestampIsMutable(); + serverTimestamp_.add(builderForValue.build()); + onChanged(); + } else { + 
serverTimestampBuilder_.addMessage(builderForValue.build()); + } return this; } - - // optional string hlog_target_dir = 11; - private java.lang.Object hlogTargetDir_ = ""; /** - * optional string hlog_target_dir = 11; + * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; */ - public boolean hasHlogTargetDir() { - return ((bitField0_ & 0x00000400) == 0x00000400); + public Builder addServerTimestamp( + int index, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp.Builder builderForValue) { + if (serverTimestampBuilder_ == null) { + ensureServerTimestampIsMutable(); + serverTimestamp_.add(index, builderForValue.build()); + onChanged(); + } else { + serverTimestampBuilder_.addMessage(index, builderForValue.build()); + } + return this; } /** - * optional string hlog_target_dir = 11; + * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; */ - public java.lang.String getHlogTargetDir() { - java.lang.Object ref = hlogTargetDir_; - if (!(ref instanceof java.lang.String)) { - java.lang.String s = ((com.google.protobuf.ByteString) ref) - .toStringUtf8(); - hlogTargetDir_ = s; - return s; + public Builder addAllServerTimestamp( + java.lang.Iterable values) { + if (serverTimestampBuilder_ == null) { + ensureServerTimestampIsMutable(); + super.addAll(values, serverTimestamp_); + onChanged(); } else { - return (java.lang.String) ref; + serverTimestampBuilder_.addAllMessages(values); } + return this; } /** - * optional string hlog_target_dir = 11; + * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; */ - public com.google.protobuf.ByteString - getHlogTargetDirBytes() { - java.lang.Object ref = hlogTargetDir_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - hlogTargetDir_ = b; - return b; + public Builder clearServerTimestamp() { + if (serverTimestampBuilder_ == null) { + serverTimestamp_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); } else { - return (com.google.protobuf.ByteString) ref; + serverTimestampBuilder_.clear(); } + return this; } /** - * optional string hlog_target_dir = 11; + * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; */ - public Builder setHlogTargetDir( - java.lang.String value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000400; - hlogTargetDir_ = value; - onChanged(); + public Builder removeServerTimestamp(int index) { + if (serverTimestampBuilder_ == null) { + ensureServerTimestampIsMutable(); + serverTimestamp_.remove(index); + onChanged(); + } else { + serverTimestampBuilder_.remove(index); + } return this; } /** - * optional string hlog_target_dir = 11; + * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; */ - public Builder clearHlogTargetDir() { - bitField0_ = (bitField0_ & ~0x00000400); - hlogTargetDir_ = getDefaultInstance().getHlogTargetDir(); - onChanged(); - return this; + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp.Builder getServerTimestampBuilder( + int index) { + return getServerTimestampFieldBuilder().getBuilder(index); } /** - * optional string hlog_target_dir = 11; + * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; */ - public Builder setHlogTargetDirBytes( - com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000400; - hlogTargetDir_ = value; - onChanged(); - return this; + public 
org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestampOrBuilder getServerTimestampOrBuilder( + int index) { + if (serverTimestampBuilder_ == null) { + return serverTimestamp_.get(index); } else { + return serverTimestampBuilder_.getMessageOrBuilder(index); + } } - - // optional uint32 progress = 12; - private int progress_ ; /** - * optional uint32 progress = 12; + * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; */ - public boolean hasProgress() { - return ((bitField0_ & 0x00000800) == 0x00000800); + public java.util.List + getServerTimestampOrBuilderList() { + if (serverTimestampBuilder_ != null) { + return serverTimestampBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(serverTimestamp_); + } } /** - * optional uint32 progress = 12; + * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; */ - public int getProgress() { - return progress_; + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp.Builder addServerTimestampBuilder() { + return getServerTimestampFieldBuilder().addBuilder( + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp.getDefaultInstance()); } /** - * optional uint32 progress = 12; + * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; */ - public Builder setProgress(int value) { - bitField0_ |= 0x00000800; - progress_ = value; - onChanged(); - return this; + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp.Builder addServerTimestampBuilder( + int index) { + return getServerTimestampFieldBuilder().addBuilder( + index, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp.getDefaultInstance()); } /** - * optional uint32 progress = 12; + * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; */ - public Builder clearProgress() { - bitField0_ = (bitField0_ & ~0x00000800); - progress_ = 0; - onChanged(); - return this; + public java.util.List + getServerTimestampBuilderList() { + return getServerTimestampFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp.Builder, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestampOrBuilder> + getServerTimestampFieldBuilder() { + if (serverTimestampBuilder_ == null) { + serverTimestampBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp.Builder, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestampOrBuilder>( + serverTimestamp_, + ((bitField0_ & 0x00000002) == 0x00000002), + getParentForChildren(), + isClean()); + serverTimestamp_ = null; + } + return serverTimestampBuilder_; } - // @@protoc_insertion_point(builder_scope:hbase.pb.BackupContext) + // @@protoc_insertion_point(builder_scope:hbase.pb.BackupProcContext) } static { - defaultInstance = new BackupContext(true); + defaultInstance = new BackupProcContext(true); defaultInstance.initFields(); } - // @@protoc_insertion_point(class_scope:hbase.pb.BackupContext) + // @@protoc_insertion_point(class_scope:hbase.pb.BackupProcContext) } private static com.google.protobuf.Descriptors.Descriptor + internal_static_hbase_pb_SnapshotTableStateData_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + 
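For orientation (an editorial sketch, not part of the regenerated sources): the new BackupProcContext message simply pairs a BackupContext with the per-region-server WAL timestamps recorded for the backup procedure. Using only the builder methods visible in the generated code above, a construction could look like the following; every literal value is hypothetical.

import org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupContext;
import org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupProcContext;
import org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType;
import org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp;

public class BackupProcContextSketch {
  // Builds a BackupProcContext via the generated builders; field values are
  // made up for illustration only.
  public static BackupProcContext example() {
    BackupContext ctx = BackupContext.newBuilder()
        .setBackupId("backup_1450000000000")     // required string backup_id = 1
        .setType(BackupType.FULL)                // required .hbase.pb.BackupType type = 2
        .setTargetRootDir("hdfs://ns1/backup")   // required string target_root_dir = 3
        .setProgress(0)                          // optional uint32 progress = 12 (new field)
        .build();

    ServerTimestamp st = ServerTimestamp.newBuilder()
        .setServer("rs1.example.com,16020")      // required string server = 1
        .setTimestamp(1450000000123L)            // required uint64 timestamp = 2
        .build();

    return BackupProcContext.newBuilder()
        .setCtx(ctx)                             // required .hbase.pb.BackupContext ctx = 1
        .addServerTimestamp(st)                  // repeated .hbase.pb.ServerTimestamp server_timestamp = 2
        .build();
  }
}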
internal_static_hbase_pb_SnapshotTableStateData_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor internal_static_hbase_pb_BackupImage_descriptor; private static com.google.protobuf.GeneratedMessage.FieldAccessorTable @@ -9039,6 +10983,11 @@ public final class BackupProtos { private static com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hbase_pb_BackupContext_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_hbase_pb_BackupProcContext_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_hbase_pb_BackupProcContext_fieldAccessorTable; public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { @@ -9048,87 +10997,111 @@ public final class BackupProtos { descriptor; static { java.lang.String[] descriptorData = { - "\n\014Backup.proto\022\010hbase.pb\032\013HBase.proto\"\327\001" + - "\n\013BackupImage\022\021\n\tbackup_id\030\001 \002(\t\022)\n\013back" + - "up_type\030\002 \002(\0162\024.hbase.pb.BackupType\022\020\n\010r" + - "oot_dir\030\003 \002(\t\022\'\n\ntable_list\030\004 \003(\0132\023.hbas" + - "e.pb.TableName\022\020\n\010start_ts\030\005 \002(\004\022\023\n\013comp" + - "lete_ts\030\006 \002(\004\022(\n\tancestors\030\007 \003(\0132\025.hbase" + - ".pb.BackupImage\"4\n\017ServerTimestamp\022\016\n\006se" + - "rver\030\001 \002(\t\022\021\n\ttimestamp\030\002 \002(\004\"o\n\024TableSe" + - "rverTimestamp\022\"\n\005table\030\001 \002(\0132\023.hbase.pb." + - "TableName\0223\n\020server_timestamp\030\002 \003(\0132\031.hb", - "ase.pb.ServerTimestamp\"\313\002\n\016BackupManifes" + - "t\022\017\n\007version\030\001 \002(\t\022\021\n\tbackup_id\030\002 \002(\t\022\"\n" + - "\004type\030\003 \002(\0162\024.hbase.pb.BackupType\022\'\n\ntab" + - "le_list\030\004 \003(\0132\023.hbase.pb.TableName\022\020\n\010st" + - "art_ts\030\005 \002(\004\022\023\n\013complete_ts\030\006 \002(\004\022\023\n\013tot" + - "al_bytes\030\007 \002(\003\022\021\n\tlog_bytes\030\010 \001(\003\022/\n\007tst" + - "_map\030\t \003(\0132\036.hbase.pb.TableServerTimesta" + - "mp\0225\n\026dependent_backup_image\030\n \003(\0132\025.hba" + - "se.pb.BackupImage\022\021\n\tcompacted\030\013 \002(\010\"]\n\021" + - "TableBackupStatus\022\"\n\005table\030\001 \002(\0132\023.hbase", - ".pb.TableName\022\022\n\ntarget_dir\030\002 \002(\t\022\020\n\010sna" + - "pshot\030\003 \001(\t\"\323\004\n\rBackupContext\022\021\n\tbackup_" + - "id\030\001 \002(\t\022\"\n\004type\030\002 \002(\0162\024.hbase.pb.Backup" + - "Type\022\027\n\017target_root_dir\030\003 \002(\t\0222\n\005state\030\004" + - " \001(\0162#.hbase.pb.BackupContext.BackupStat" + - "e\0222\n\005phase\030\005 \001(\0162#.hbase.pb.BackupContex" + - "t.BackupPhase\022\026\n\016failed_message\030\006 \001(\t\0228\n" + - "\023table_backup_status\030\007 \003(\0132\033.hbase.pb.Ta" + - "bleBackupStatus\022\020\n\010start_ts\030\010 \001(\004\022\016\n\006end" + - "_ts\030\t \001(\004\022\032\n\022total_bytes_copied\030\n \001(\003\022\027\n", - "\017hlog_target_dir\030\013 \001(\t\022\020\n\010progress\030\014 \001(\r" + - "\"P\n\013BackupState\022\013\n\007WAITING\020\000\022\013\n\007RUNNING\020" + - "\001\022\014\n\010COMPLETE\020\002\022\n\n\006FAILED\020\003\022\r\n\tCANCELLED" + - "\020\004\"}\n\013BackupPhase\022\013\n\007REQUEST\020\000\022\014\n\010SNAPSH" + - "OT\020\001\022\027\n\023PREPARE_INCREMENTAL\020\002\022\020\n\014SNAPSHO" + - 
"TCOPY\020\003\022\024\n\020INCREMENTAL_COPY\020\004\022\022\n\016STORE_M" + - "ANIFEST\020\005*\'\n\nBackupType\022\010\n\004FULL\020\000\022\017\n\013INC" + - "REMENTAL\020\001BB\n*org.apache.hadoop.hbase.pr" + - "otobuf.generatedB\014BackupProtosH\001\210\001\001\240\001\001" + "\n\014Backup.proto\022\010hbase.pb\032\013HBase.proto\"R\n" + + "\026SnapshotTableStateData\022\"\n\005table\030\001 \002(\0132\023" + + ".hbase.pb.TableName\022\024\n\014snapshotName\030\002 \002(" + + "\t\"\327\001\n\013BackupImage\022\021\n\tbackup_id\030\001 \002(\t\022)\n\013" + + "backup_type\030\002 \002(\0162\024.hbase.pb.BackupType\022" + + "\020\n\010root_dir\030\003 \002(\t\022\'\n\ntable_list\030\004 \003(\0132\023." + + "hbase.pb.TableName\022\020\n\010start_ts\030\005 \002(\004\022\023\n\013" + + "complete_ts\030\006 \002(\004\022(\n\tancestors\030\007 \003(\0132\025.h" + + "base.pb.BackupImage\"4\n\017ServerTimestamp\022\016" + + "\n\006server\030\001 \002(\t\022\021\n\ttimestamp\030\002 \002(\004\"o\n\024Tab", + "leServerTimestamp\022\"\n\005table\030\001 \002(\0132\023.hbase" + + ".pb.TableName\0223\n\020server_timestamp\030\002 \003(\0132" + + "\031.hbase.pb.ServerTimestamp\"\313\002\n\016BackupMan" + + "ifest\022\017\n\007version\030\001 \002(\t\022\021\n\tbackup_id\030\002 \002(" + + "\t\022\"\n\004type\030\003 \002(\0162\024.hbase.pb.BackupType\022\'\n" + + "\ntable_list\030\004 \003(\0132\023.hbase.pb.TableName\022\020" + + "\n\010start_ts\030\005 \002(\004\022\023\n\013complete_ts\030\006 \002(\004\022\023\n" + + "\013total_bytes\030\007 \002(\003\022\021\n\tlog_bytes\030\010 \001(\003\022/\n" + + "\007tst_map\030\t \003(\0132\036.hbase.pb.TableServerTim" + + "estamp\0225\n\026dependent_backup_image\030\n \003(\0132\025", + ".hbase.pb.BackupImage\022\021\n\tcompacted\030\013 \002(\010" + + "\"]\n\021TableBackupStatus\022\"\n\005table\030\001 \002(\0132\023.h" + + "base.pb.TableName\022\022\n\ntarget_dir\030\002 \002(\t\022\020\n" + + "\010snapshot\030\003 \001(\t\"\323\004\n\rBackupContext\022\021\n\tbac" + + "kup_id\030\001 \002(\t\022\"\n\004type\030\002 \002(\0162\024.hbase.pb.Ba" + + "ckupType\022\027\n\017target_root_dir\030\003 \002(\t\0222\n\005sta" + + "te\030\004 \001(\0162#.hbase.pb.BackupContext.Backup" + + "State\0222\n\005phase\030\005 \001(\0162#.hbase.pb.BackupCo" + + "ntext.BackupPhase\022\026\n\016failed_message\030\006 \001(" + + "\t\0228\n\023table_backup_status\030\007 \003(\0132\033.hbase.p", + "b.TableBackupStatus\022\020\n\010start_ts\030\010 \001(\004\022\016\n" + + "\006end_ts\030\t \001(\004\022\032\n\022total_bytes_copied\030\n \001(" + + "\003\022\027\n\017hlog_target_dir\030\013 \001(\t\022\020\n\010progress\030\014" + + " \001(\r\"P\n\013BackupState\022\013\n\007WAITING\020\000\022\013\n\007RUNN" + + "ING\020\001\022\014\n\010COMPLETE\020\002\022\n\n\006FAILED\020\003\022\r\n\tCANCE" + + "LLED\020\004\"}\n\013BackupPhase\022\013\n\007REQUEST\020\000\022\014\n\010SN" + + "APSHOT\020\001\022\027\n\023PREPARE_INCREMENTAL\020\002\022\020\n\014SNA" + + "PSHOTCOPY\020\003\022\024\n\020INCREMENTAL_COPY\020\004\022\022\n\016STO" + + "RE_MANIFEST\020\005\"n\n\021BackupProcContext\022$\n\003ct" + + "x\030\001 \002(\0132\027.hbase.pb.BackupContext\0223\n\020serv", + "er_timestamp\030\002 \003(\0132\031.hbase.pb.ServerTime" + + "stamp*k\n\024FullTableBackupState\022\026\n\022PRE_SNA" + + "PSHOT_TABLE\020\001\022\023\n\017SNAPSHOT_TABLES\020\002\022\021\n\rSN" + + "APSHOT_COPY\020\003\022\023\n\017BACKUP_COMPLETE\020\004*f\n\033In" + + 
"crementalTableBackupState\022\027\n\023PREPARE_INC" + + "REMENTAL\020\001\022\024\n\020INCREMENTAL_COPY\020\002\022\030\n\024INCR" + + "_BACKUP_COMPLETE\020\003*(\n\022SnapshotTableState" + + "\022\022\n\016SNAPSHOT_TABLE\020\001*\'\n\nBackupType\022\010\n\004FU" + + "LL\020\000\022\017\n\013INCREMENTAL\020\001BB\n*org.apache.hado" + + "op.hbase.protobuf.generatedB\014BackupProto", + "sH\001\210\001\001\240\001\001" }; com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { public com.google.protobuf.ExtensionRegistry assignDescriptors( com.google.protobuf.Descriptors.FileDescriptor root) { descriptor = root; - internal_static_hbase_pb_BackupImage_descriptor = + internal_static_hbase_pb_SnapshotTableStateData_descriptor = getDescriptor().getMessageTypes().get(0); + internal_static_hbase_pb_SnapshotTableStateData_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_hbase_pb_SnapshotTableStateData_descriptor, + new java.lang.String[] { "Table", "SnapshotName", }); + internal_static_hbase_pb_BackupImage_descriptor = + getDescriptor().getMessageTypes().get(1); internal_static_hbase_pb_BackupImage_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_BackupImage_descriptor, new java.lang.String[] { "BackupId", "BackupType", "RootDir", "TableList", "StartTs", "CompleteTs", "Ancestors", }); internal_static_hbase_pb_ServerTimestamp_descriptor = - getDescriptor().getMessageTypes().get(1); + getDescriptor().getMessageTypes().get(2); internal_static_hbase_pb_ServerTimestamp_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_ServerTimestamp_descriptor, new java.lang.String[] { "Server", "Timestamp", }); internal_static_hbase_pb_TableServerTimestamp_descriptor = - getDescriptor().getMessageTypes().get(2); + getDescriptor().getMessageTypes().get(3); internal_static_hbase_pb_TableServerTimestamp_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_TableServerTimestamp_descriptor, new java.lang.String[] { "Table", "ServerTimestamp", }); internal_static_hbase_pb_BackupManifest_descriptor = - getDescriptor().getMessageTypes().get(3); + getDescriptor().getMessageTypes().get(4); internal_static_hbase_pb_BackupManifest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_BackupManifest_descriptor, new java.lang.String[] { "Version", "BackupId", "Type", "TableList", "StartTs", "CompleteTs", "TotalBytes", "LogBytes", "TstMap", "DependentBackupImage", "Compacted", }); internal_static_hbase_pb_TableBackupStatus_descriptor = - getDescriptor().getMessageTypes().get(4); + getDescriptor().getMessageTypes().get(5); internal_static_hbase_pb_TableBackupStatus_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_TableBackupStatus_descriptor, new java.lang.String[] { "Table", "TargetDir", "Snapshot", }); internal_static_hbase_pb_BackupContext_descriptor = - getDescriptor().getMessageTypes().get(5); + getDescriptor().getMessageTypes().get(6); internal_static_hbase_pb_BackupContext_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_BackupContext_descriptor, new java.lang.String[] { "BackupId", "Type", "TargetRootDir", "State", "Phase", 
"FailedMessage", "TableBackupStatus", "StartTs", "EndTs", "TotalBytesCopied", "HlogTargetDir", "Progress", }); + internal_static_hbase_pb_BackupProcContext_descriptor = + getDescriptor().getMessageTypes().get(7); + internal_static_hbase_pb_BackupProcContext_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_hbase_pb_BackupProcContext_descriptor, + new java.lang.String[] { "Ctx", "ServerTimestamp", }); return null; } }; diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java index 073eba964d993e14fcf148ef0e176f8d573e08a8..b7cbdc16679dcaa1d3bf081bc4b2c8b791f2a4c5 100644 --- a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java +++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java @@ -59215,6 +59215,2838 @@ public final class MasterProtos { // @@protoc_insertion_point(class_scope:hbase.pb.SecurityCapabilitiesResponse) } + public interface BackupTablesRequestOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required .hbase.pb.BackupType type = 1; + /** + * required .hbase.pb.BackupType type = 1; + */ + boolean hasType(); + /** + * required .hbase.pb.BackupType type = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType getType(); + + // repeated .hbase.pb.TableName tables = 2; + /** + * repeated .hbase.pb.TableName tables = 2; + */ + java.util.List + getTablesList(); + /** + * repeated .hbase.pb.TableName tables = 2; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTables(int index); + /** + * repeated .hbase.pb.TableName tables = 2; + */ + int getTablesCount(); + /** + * repeated .hbase.pb.TableName tables = 2; + */ + java.util.List + getTablesOrBuilderList(); + /** + * repeated .hbase.pb.TableName tables = 2; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTablesOrBuilder( + int index); + + // required string target_root_dir = 3; + /** + * required string target_root_dir = 3; + */ + boolean hasTargetRootDir(); + /** + * required string target_root_dir = 3; + */ + java.lang.String getTargetRootDir(); + /** + * required string target_root_dir = 3; + */ + com.google.protobuf.ByteString + getTargetRootDirBytes(); + + // optional int64 workers = 4; + /** + * optional int64 workers = 4; + */ + boolean hasWorkers(); + /** + * optional int64 workers = 4; + */ + long getWorkers(); + + // optional int64 bandwidth = 5; + /** + * optional int64 bandwidth = 5; + */ + boolean hasBandwidth(); + /** + * optional int64 bandwidth = 5; + */ + long getBandwidth(); + } + /** + * Protobuf type {@code hbase.pb.BackupTablesRequest} + */ + public static final class BackupTablesRequest extends + com.google.protobuf.GeneratedMessage + implements BackupTablesRequestOrBuilder { + // Use BackupTablesRequest.newBuilder() to construct. 
+ private BackupTablesRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private BackupTablesRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final BackupTablesRequest defaultInstance; + public static BackupTablesRequest getDefaultInstance() { + return defaultInstance; + } + + public BackupTablesRequest getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private BackupTablesRequest( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 8: { + int rawValue = input.readEnum(); + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType value = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType.valueOf(rawValue); + if (value == null) { + unknownFields.mergeVarintField(1, rawValue); + } else { + bitField0_ |= 0x00000001; + type_ = value; + } + break; + } + case 18: { + if (!((mutable_bitField0_ & 0x00000002) == 0x00000002)) { + tables_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000002; + } + tables_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.PARSER, extensionRegistry)); + break; + } + case 26: { + bitField0_ |= 0x00000002; + targetRootDir_ = input.readBytes(); + break; + } + case 32: { + bitField0_ |= 0x00000004; + workers_ = input.readInt64(); + break; + } + case 40: { + bitField0_ |= 0x00000008; + bandwidth_ = input.readInt64(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000002) == 0x00000002)) { + tables_ = java.util.Collections.unmodifiableList(tables_); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_BackupTablesRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_BackupTablesRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public BackupTablesRequest parsePartialFrom( + 
com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new BackupTablesRequest(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required .hbase.pb.BackupType type = 1; + public static final int TYPE_FIELD_NUMBER = 1; + private org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType type_; + /** + * required .hbase.pb.BackupType type = 1; + */ + public boolean hasType() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .hbase.pb.BackupType type = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType getType() { + return type_; + } + + // repeated .hbase.pb.TableName tables = 2; + public static final int TABLES_FIELD_NUMBER = 2; + private java.util.List tables_; + /** + * repeated .hbase.pb.TableName tables = 2; + */ + public java.util.List getTablesList() { + return tables_; + } + /** + * repeated .hbase.pb.TableName tables = 2; + */ + public java.util.List + getTablesOrBuilderList() { + return tables_; + } + /** + * repeated .hbase.pb.TableName tables = 2; + */ + public int getTablesCount() { + return tables_.size(); + } + /** + * repeated .hbase.pb.TableName tables = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTables(int index) { + return tables_.get(index); + } + /** + * repeated .hbase.pb.TableName tables = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTablesOrBuilder( + int index) { + return tables_.get(index); + } + + // required string target_root_dir = 3; + public static final int TARGET_ROOT_DIR_FIELD_NUMBER = 3; + private java.lang.Object targetRootDir_; + /** + * required string target_root_dir = 3; + */ + public boolean hasTargetRootDir() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required string target_root_dir = 3; + */ + public java.lang.String getTargetRootDir() { + java.lang.Object ref = targetRootDir_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + targetRootDir_ = s; + } + return s; + } + } + /** + * required string target_root_dir = 3; + */ + public com.google.protobuf.ByteString + getTargetRootDirBytes() { + java.lang.Object ref = targetRootDir_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + targetRootDir_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + // optional int64 workers = 4; + public static final int WORKERS_FIELD_NUMBER = 4; + private long workers_; + /** + * optional int64 workers = 4; + */ + public boolean hasWorkers() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * optional int64 workers = 4; + */ + public long getWorkers() { + return workers_; + } + + // optional int64 bandwidth = 5; + public static final int BANDWIDTH_FIELD_NUMBER = 5; + private long bandwidth_; + /** + * optional int64 bandwidth = 5; + */ + public boolean hasBandwidth() { + return ((bitField0_ & 0x00000008) == 0x00000008); + } + /** + * optional int64 bandwidth = 5; + */ + public long getBandwidth() { + return bandwidth_; 
+ } + + private void initFields() { + type_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType.FULL; + tables_ = java.util.Collections.emptyList(); + targetRootDir_ = ""; + workers_ = 0L; + bandwidth_ = 0L; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasType()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasTargetRootDir()) { + memoizedIsInitialized = 0; + return false; + } + for (int i = 0; i < getTablesCount(); i++) { + if (!getTables(i).isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeEnum(1, type_.getNumber()); + } + for (int i = 0; i < tables_.size(); i++) { + output.writeMessage(2, tables_.get(i)); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeBytes(3, getTargetRootDirBytes()); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + output.writeInt64(4, workers_); + } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + output.writeInt64(5, bandwidth_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeEnumSize(1, type_.getNumber()); + } + for (int i = 0; i < tables_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(2, tables_.get(i)); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(3, getTargetRootDirBytes()); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + size += com.google.protobuf.CodedOutputStream + .computeInt64Size(4, workers_); + } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + size += com.google.protobuf.CodedOutputStream + .computeInt64Size(5, bandwidth_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest) obj; + + boolean result = true; + result = result && (hasType() == other.hasType()); + if (hasType()) { + result = result && + (getType() == other.getType()); + } + result = result && getTablesList() + .equals(other.getTablesList()); + result = result && (hasTargetRootDir() == other.hasTargetRootDir()); + if (hasTargetRootDir()) { + result = result && getTargetRootDir() + .equals(other.getTargetRootDir()); + } + result = result && (hasWorkers() == other.hasWorkers()); + if (hasWorkers()) { + result = result && (getWorkers() + == other.getWorkers()); + } + result = result 
&& (hasBandwidth() == other.hasBandwidth()); + if (hasBandwidth()) { + result = result && (getBandwidth() + == other.getBandwidth()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasType()) { + hash = (37 * hash) + TYPE_FIELD_NUMBER; + hash = (53 * hash) + hashEnum(getType()); + } + if (getTablesCount() > 0) { + hash = (37 * hash) + TABLES_FIELD_NUMBER; + hash = (53 * hash) + getTablesList().hashCode(); + } + if (hasTargetRootDir()) { + hash = (37 * hash) + TARGET_ROOT_DIR_FIELD_NUMBER; + hash = (53 * hash) + getTargetRootDir().hashCode(); + } + if (hasWorkers()) { + hash = (37 * hash) + WORKERS_FIELD_NUMBER; + hash = (53 * hash) + hashLong(getWorkers()); + } + if (hasBandwidth()) { + hash = (37 * hash) + BANDWIDTH_FIELD_NUMBER; + hash = (53 * hash) + hashLong(getBandwidth()); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code hbase.pb.BackupTablesRequest} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_BackupTablesRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_BackupTablesRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getTablesFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + type_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType.FULL; + bitField0_ = (bitField0_ & ~0x00000001); + if (tablesBuilder_ == null) { + tables_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000002); + } else { + tablesBuilder_.clear(); + } + targetRootDir_ = ""; + bitField0_ = (bitField0_ & ~0x00000004); + workers_ = 0L; + bitField0_ = (bitField0_ & ~0x00000008); + bandwidth_ = 0L; + bitField0_ = (bitField0_ & ~0x00000010); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_BackupTablesRequest_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest build() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest result = buildPartial(); + if 
(!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.type_ = type_; + if (tablesBuilder_ == null) { + if (((bitField0_ & 0x00000002) == 0x00000002)) { + tables_ = java.util.Collections.unmodifiableList(tables_); + bitField0_ = (bitField0_ & ~0x00000002); + } + result.tables_ = tables_; + } else { + result.tables_ = tablesBuilder_.build(); + } + if (((from_bitField0_ & 0x00000004) == 0x00000004)) { + to_bitField0_ |= 0x00000002; + } + result.targetRootDir_ = targetRootDir_; + if (((from_bitField0_ & 0x00000008) == 0x00000008)) { + to_bitField0_ |= 0x00000004; + } + result.workers_ = workers_; + if (((from_bitField0_ & 0x00000010) == 0x00000010)) { + to_bitField0_ |= 0x00000008; + } + result.bandwidth_ = bandwidth_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest.getDefaultInstance()) return this; + if (other.hasType()) { + setType(other.getType()); + } + if (tablesBuilder_ == null) { + if (!other.tables_.isEmpty()) { + if (tables_.isEmpty()) { + tables_ = other.tables_; + bitField0_ = (bitField0_ & ~0x00000002); + } else { + ensureTablesIsMutable(); + tables_.addAll(other.tables_); + } + onChanged(); + } + } else { + if (!other.tables_.isEmpty()) { + if (tablesBuilder_.isEmpty()) { + tablesBuilder_.dispose(); + tablesBuilder_ = null; + tables_ = other.tables_; + bitField0_ = (bitField0_ & ~0x00000002); + tablesBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? 
+ getTablesFieldBuilder() : null; + } else { + tablesBuilder_.addAllMessages(other.tables_); + } + } + } + if (other.hasTargetRootDir()) { + bitField0_ |= 0x00000004; + targetRootDir_ = other.targetRootDir_; + onChanged(); + } + if (other.hasWorkers()) { + setWorkers(other.getWorkers()); + } + if (other.hasBandwidth()) { + setBandwidth(other.getBandwidth()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasType()) { + + return false; + } + if (!hasTargetRootDir()) { + + return false; + } + for (int i = 0; i < getTablesCount(); i++) { + if (!getTables(i).isInitialized()) { + + return false; + } + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required .hbase.pb.BackupType type = 1; + private org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType type_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType.FULL; + /** + * required .hbase.pb.BackupType type = 1; + */ + public boolean hasType() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .hbase.pb.BackupType type = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType getType() { + return type_; + } + /** + * required .hbase.pb.BackupType type = 1; + */ + public Builder setType(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + type_ = value; + onChanged(); + return this; + } + /** + * required .hbase.pb.BackupType type = 1; + */ + public Builder clearType() { + bitField0_ = (bitField0_ & ~0x00000001); + type_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType.FULL; + onChanged(); + return this; + } + + // repeated .hbase.pb.TableName tables = 2; + private java.util.List tables_ = + java.util.Collections.emptyList(); + private void ensureTablesIsMutable() { + if (!((bitField0_ & 0x00000002) == 0x00000002)) { + tables_ = new java.util.ArrayList(tables_); + bitField0_ |= 0x00000002; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> tablesBuilder_; + + /** + * repeated .hbase.pb.TableName tables = 2; + */ + public java.util.List getTablesList() { + if (tablesBuilder_ == null) { + return java.util.Collections.unmodifiableList(tables_); + } else { + return tablesBuilder_.getMessageList(); + } + } + /** + * repeated .hbase.pb.TableName tables = 2; + */ + public int getTablesCount() { + if (tablesBuilder_ == null) { + return tables_.size(); + } else { + return tablesBuilder_.getCount(); + } + } + /** + * repeated .hbase.pb.TableName tables = 2; + */ + public 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTables(int index) { + if (tablesBuilder_ == null) { + return tables_.get(index); + } else { + return tablesBuilder_.getMessage(index); + } + } + /** + * repeated .hbase.pb.TableName tables = 2; + */ + public Builder setTables( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { + if (tablesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureTablesIsMutable(); + tables_.set(index, value); + onChanged(); + } else { + tablesBuilder_.setMessage(index, value); + } + return this; + } + /** + * repeated .hbase.pb.TableName tables = 2; + */ + public Builder setTables( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) { + if (tablesBuilder_ == null) { + ensureTablesIsMutable(); + tables_.set(index, builderForValue.build()); + onChanged(); + } else { + tablesBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.TableName tables = 2; + */ + public Builder addTables(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { + if (tablesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureTablesIsMutable(); + tables_.add(value); + onChanged(); + } else { + tablesBuilder_.addMessage(value); + } + return this; + } + /** + * repeated .hbase.pb.TableName tables = 2; + */ + public Builder addTables( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { + if (tablesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureTablesIsMutable(); + tables_.add(index, value); + onChanged(); + } else { + tablesBuilder_.addMessage(index, value); + } + return this; + } + /** + * repeated .hbase.pb.TableName tables = 2; + */ + public Builder addTables( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) { + if (tablesBuilder_ == null) { + ensureTablesIsMutable(); + tables_.add(builderForValue.build()); + onChanged(); + } else { + tablesBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.TableName tables = 2; + */ + public Builder addTables( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) { + if (tablesBuilder_ == null) { + ensureTablesIsMutable(); + tables_.add(index, builderForValue.build()); + onChanged(); + } else { + tablesBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.TableName tables = 2; + */ + public Builder addAllTables( + java.lang.Iterable values) { + if (tablesBuilder_ == null) { + ensureTablesIsMutable(); + super.addAll(values, tables_); + onChanged(); + } else { + tablesBuilder_.addAllMessages(values); + } + return this; + } + /** + * repeated .hbase.pb.TableName tables = 2; + */ + public Builder clearTables() { + if (tablesBuilder_ == null) { + tables_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + } else { + tablesBuilder_.clear(); + } + return this; + } + /** + * repeated .hbase.pb.TableName tables = 2; + */ + public Builder removeTables(int index) { + if (tablesBuilder_ == null) { + ensureTablesIsMutable(); + tables_.remove(index); + onChanged(); + } else { + tablesBuilder_.remove(index); + } + return this; + } + /** + * repeated .hbase.pb.TableName tables = 2; + */ + public 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder getTablesBuilder( + int index) { + return getTablesFieldBuilder().getBuilder(index); + } + /** + * repeated .hbase.pb.TableName tables = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTablesOrBuilder( + int index) { + if (tablesBuilder_ == null) { + return tables_.get(index); } else { + return tablesBuilder_.getMessageOrBuilder(index); + } + } + /** + * repeated .hbase.pb.TableName tables = 2; + */ + public java.util.List + getTablesOrBuilderList() { + if (tablesBuilder_ != null) { + return tablesBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(tables_); + } + } + /** + * repeated .hbase.pb.TableName tables = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder addTablesBuilder() { + return getTablesFieldBuilder().addBuilder( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance()); + } + /** + * repeated .hbase.pb.TableName tables = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder addTablesBuilder( + int index) { + return getTablesFieldBuilder().addBuilder( + index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance()); + } + /** + * repeated .hbase.pb.TableName tables = 2; + */ + public java.util.List + getTablesBuilderList() { + return getTablesFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> + getTablesFieldBuilder() { + if (tablesBuilder_ == null) { + tablesBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder>( + tables_, + ((bitField0_ & 0x00000002) == 0x00000002), + getParentForChildren(), + isClean()); + tables_ = null; + } + return tablesBuilder_; + } + + // required string target_root_dir = 3; + private java.lang.Object targetRootDir_ = ""; + /** + * required string target_root_dir = 3; + */ + public boolean hasTargetRootDir() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * required string target_root_dir = 3; + */ + public java.lang.String getTargetRootDir() { + java.lang.Object ref = targetRootDir_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + targetRootDir_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * required string target_root_dir = 3; + */ + public com.google.protobuf.ByteString + getTargetRootDirBytes() { + java.lang.Object ref = targetRootDir_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + targetRootDir_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * required string target_root_dir = 3; + */ + public Builder setTargetRootDir( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000004; + targetRootDir_ = value; + onChanged(); + return this; + } + /** + * required string 
target_root_dir = 3; + */ + public Builder clearTargetRootDir() { + bitField0_ = (bitField0_ & ~0x00000004); + targetRootDir_ = getDefaultInstance().getTargetRootDir(); + onChanged(); + return this; + } + /** + * required string target_root_dir = 3; + */ + public Builder setTargetRootDirBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000004; + targetRootDir_ = value; + onChanged(); + return this; + } + + // optional int64 workers = 4; + private long workers_ ; + /** + * optional int64 workers = 4; + */ + public boolean hasWorkers() { + return ((bitField0_ & 0x00000008) == 0x00000008); + } + /** + * optional int64 workers = 4; + */ + public long getWorkers() { + return workers_; + } + /** + * optional int64 workers = 4; + */ + public Builder setWorkers(long value) { + bitField0_ |= 0x00000008; + workers_ = value; + onChanged(); + return this; + } + /** + * optional int64 workers = 4; + */ + public Builder clearWorkers() { + bitField0_ = (bitField0_ & ~0x00000008); + workers_ = 0L; + onChanged(); + return this; + } + + // optional int64 bandwidth = 5; + private long bandwidth_ ; + /** + * optional int64 bandwidth = 5; + */ + public boolean hasBandwidth() { + return ((bitField0_ & 0x00000010) == 0x00000010); + } + /** + * optional int64 bandwidth = 5; + */ + public long getBandwidth() { + return bandwidth_; + } + /** + * optional int64 bandwidth = 5; + */ + public Builder setBandwidth(long value) { + bitField0_ |= 0x00000010; + bandwidth_ = value; + onChanged(); + return this; + } + /** + * optional int64 bandwidth = 5; + */ + public Builder clearBandwidth() { + bitField0_ = (bitField0_ & ~0x00000010); + bandwidth_ = 0L; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:hbase.pb.BackupTablesRequest) + } + + static { + defaultInstance = new BackupTablesRequest(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:hbase.pb.BackupTablesRequest) + } + + public interface BackupTablesResponseOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // optional uint64 proc_id = 1; + /** + * optional uint64 proc_id = 1; + */ + boolean hasProcId(); + /** + * optional uint64 proc_id = 1; + */ + long getProcId(); + + // optional string backup_id = 2; + /** + * optional string backup_id = 2; + */ + boolean hasBackupId(); + /** + * optional string backup_id = 2; + */ + java.lang.String getBackupId(); + /** + * optional string backup_id = 2; + */ + com.google.protobuf.ByteString + getBackupIdBytes(); + } + /** + * Protobuf type {@code hbase.pb.BackupTablesResponse} + */ + public static final class BackupTablesResponse extends + com.google.protobuf.GeneratedMessage + implements BackupTablesResponseOrBuilder { + // Use BackupTablesResponse.newBuilder() to construct. 
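    /*
     * Editor's sketch (not part of the generated file): a minimal, hypothetical example of how a
     * caller could assemble a BackupTablesRequest with the builder generated above and read the
     * matching BackupTablesResponse. Only methods visible in this patch are used (newBuilder,
     * setType, addTables, setTargetRootDir, setWorkers, setBandwidth, build, getProcId,
     * getBackupId); the table name and target directory are placeholder values.
     *
     *   HBaseProtos.TableName table = HBaseProtos.TableName.newBuilder()
     *       .setNamespace(ByteString.copyFromUtf8("default"))
     *       .setQualifier(ByteString.copyFromUtf8("t1"))
     *       .build();
     *   MasterProtos.BackupTablesRequest request = MasterProtos.BackupTablesRequest.newBuilder()
     *       .setType(BackupProtos.BackupType.FULL)
     *       .addTables(table)
     *       .setTargetRootDir("hdfs://nn:8020/backup")
     *       .setWorkers(3)
     *       .setBandwidth(100)
     *       .build();
     *
     *   // A BackupTablesResponse returned by the master carries an optional procedure id and
     *   // the assigned backup id:  response.getProcId();  response.getBackupId();
     */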
+ private BackupTablesResponse(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private BackupTablesResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final BackupTablesResponse defaultInstance; + public static BackupTablesResponse getDefaultInstance() { + return defaultInstance; + } + + public BackupTablesResponse getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private BackupTablesResponse( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 8: { + bitField0_ |= 0x00000001; + procId_ = input.readUInt64(); + break; + } + case 18: { + bitField0_ |= 0x00000002; + backupId_ = input.readBytes(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_BackupTablesResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_BackupTablesResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public BackupTablesResponse parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new BackupTablesResponse(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // optional uint64 proc_id = 1; + public static final int PROC_ID_FIELD_NUMBER = 1; + private long procId_; + /** + * optional uint64 proc_id = 1; + */ + public boolean hasProcId() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional uint64 proc_id = 1; + */ + public long getProcId() { + return procId_; + } + + // optional string backup_id = 2; + public static final int BACKUP_ID_FIELD_NUMBER = 2; + private java.lang.Object backupId_; + /** + * optional string 
backup_id = 2; + */ + public boolean hasBackupId() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * optional string backup_id = 2; + */ + public java.lang.String getBackupId() { + java.lang.Object ref = backupId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + backupId_ = s; + } + return s; + } + } + /** + * optional string backup_id = 2; + */ + public com.google.protobuf.ByteString + getBackupIdBytes() { + java.lang.Object ref = backupId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + backupId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private void initFields() { + procId_ = 0L; + backupId_ = ""; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeUInt64(1, procId_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeBytes(2, getBackupIdBytes()); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt64Size(1, procId_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(2, getBackupIdBytes()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse) obj; + + boolean result = true; + result = result && (hasProcId() == other.hasProcId()); + if (hasProcId()) { + result = result && (getProcId() + == other.getProcId()); + } + result = result && (hasBackupId() == other.hasBackupId()); + if (hasBackupId()) { + result = result && getBackupId() + .equals(other.getBackupId()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasProcId()) { + hash = (37 * hash) + PROC_ID_FIELD_NUMBER; + hash = (53 * hash) + hashLong(getProcId()); + } + if (hasBackupId()) { + hash = (37 * hash) + BACKUP_ID_FIELD_NUMBER; + 
hash = (53 * hash) + getBackupId().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code hbase.pb.BackupTablesResponse} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_BackupTablesResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_BackupTablesResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + procId_ = 0L; + bitField0_ = (bitField0_ & ~0x00000001); + backupId_ = ""; + bitField0_ = (bitField0_ & ~0x00000002); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_BackupTablesResponse_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse build() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.procId_ = procId_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.backupId_ = backupId_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse.getDefaultInstance()) return this; + if (other.hasProcId()) { + 
setProcId(other.getProcId()); + } + if (other.hasBackupId()) { + bitField0_ |= 0x00000002; + backupId_ = other.backupId_; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // optional uint64 proc_id = 1; + private long procId_ ; + /** + * optional uint64 proc_id = 1; + */ + public boolean hasProcId() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional uint64 proc_id = 1; + */ + public long getProcId() { + return procId_; + } + /** + * optional uint64 proc_id = 1; + */ + public Builder setProcId(long value) { + bitField0_ |= 0x00000001; + procId_ = value; + onChanged(); + return this; + } + /** + * optional uint64 proc_id = 1; + */ + public Builder clearProcId() { + bitField0_ = (bitField0_ & ~0x00000001); + procId_ = 0L; + onChanged(); + return this; + } + + // optional string backup_id = 2; + private java.lang.Object backupId_ = ""; + /** + * optional string backup_id = 2; + */ + public boolean hasBackupId() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * optional string backup_id = 2; + */ + public java.lang.String getBackupId() { + java.lang.Object ref = backupId_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + backupId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * optional string backup_id = 2; + */ + public com.google.protobuf.ByteString + getBackupIdBytes() { + java.lang.Object ref = backupId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + backupId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * optional string backup_id = 2; + */ + public Builder setBackupId( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000002; + backupId_ = value; + onChanged(); + return this; + } + /** + * optional string backup_id = 2; + */ + public Builder clearBackupId() { + bitField0_ = (bitField0_ & ~0x00000002); + backupId_ = getDefaultInstance().getBackupId(); + onChanged(); + return this; + } + /** + * optional string backup_id = 2; + */ + public Builder setBackupIdBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000002; + backupId_ = value; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:hbase.pb.BackupTablesResponse) + } + + static { + defaultInstance = new BackupTablesResponse(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:hbase.pb.BackupTablesResponse) + } + + public interface AbortBackupRequestOrBuilder + extends 
com.google.protobuf.MessageOrBuilder { + + // required string backup_id = 1; + /** + * required string backup_id = 1; + */ + boolean hasBackupId(); + /** + * required string backup_id = 1; + */ + java.lang.String getBackupId(); + /** + * required string backup_id = 1; + */ + com.google.protobuf.ByteString + getBackupIdBytes(); + + // optional bool mayInterruptIfRunning = 2 [default = true]; + /** + * optional bool mayInterruptIfRunning = 2 [default = true]; + */ + boolean hasMayInterruptIfRunning(); + /** + * optional bool mayInterruptIfRunning = 2 [default = true]; + */ + boolean getMayInterruptIfRunning(); + } + /** + * Protobuf type {@code hbase.pb.AbortBackupRequest} + */ + public static final class AbortBackupRequest extends + com.google.protobuf.GeneratedMessage + implements AbortBackupRequestOrBuilder { + // Use AbortBackupRequest.newBuilder() to construct. + private AbortBackupRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private AbortBackupRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final AbortBackupRequest defaultInstance; + public static AbortBackupRequest getDefaultInstance() { + return defaultInstance; + } + + public AbortBackupRequest getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private AbortBackupRequest( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + bitField0_ |= 0x00000001; + backupId_ = input.readBytes(); + break; + } + case 16: { + bitField0_ |= 0x00000002; + mayInterruptIfRunning_ = input.readBool(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_AbortBackupRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_AbortBackupRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortBackupRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortBackupRequest.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public AbortBackupRequest 
parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new AbortBackupRequest(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required string backup_id = 1; + public static final int BACKUP_ID_FIELD_NUMBER = 1; + private java.lang.Object backupId_; + /** + * required string backup_id = 1; + */ + public boolean hasBackupId() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string backup_id = 1; + */ + public java.lang.String getBackupId() { + java.lang.Object ref = backupId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + backupId_ = s; + } + return s; + } + } + /** + * required string backup_id = 1; + */ + public com.google.protobuf.ByteString + getBackupIdBytes() { + java.lang.Object ref = backupId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + backupId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + // optional bool mayInterruptIfRunning = 2 [default = true]; + public static final int MAYINTERRUPTIFRUNNING_FIELD_NUMBER = 2; + private boolean mayInterruptIfRunning_; + /** + * optional bool mayInterruptIfRunning = 2 [default = true]; + */ + public boolean hasMayInterruptIfRunning() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * optional bool mayInterruptIfRunning = 2 [default = true]; + */ + public boolean getMayInterruptIfRunning() { + return mayInterruptIfRunning_; + } + + private void initFields() { + backupId_ = ""; + mayInterruptIfRunning_ = true; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasBackupId()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(1, getBackupIdBytes()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeBool(2, mayInterruptIfRunning_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(1, getBackupIdBytes()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeBoolSize(2, mayInterruptIfRunning_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == 
this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortBackupRequest)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortBackupRequest other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortBackupRequest) obj; + + boolean result = true; + result = result && (hasBackupId() == other.hasBackupId()); + if (hasBackupId()) { + result = result && getBackupId() + .equals(other.getBackupId()); + } + result = result && (hasMayInterruptIfRunning() == other.hasMayInterruptIfRunning()); + if (hasMayInterruptIfRunning()) { + result = result && (getMayInterruptIfRunning() + == other.getMayInterruptIfRunning()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasBackupId()) { + hash = (37 * hash) + BACKUP_ID_FIELD_NUMBER; + hash = (53 * hash) + getBackupId().hashCode(); + } + if (hasMayInterruptIfRunning()) { + hash = (37 * hash) + MAYINTERRUPTIFRUNNING_FIELD_NUMBER; + hash = (53 * hash) + hashBoolean(getMayInterruptIfRunning()); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortBackupRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortBackupRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortBackupRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortBackupRequest parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortBackupRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortBackupRequest parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortBackupRequest parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortBackupRequest parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortBackupRequest parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortBackupRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortBackupRequest prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code hbase.pb.AbortBackupRequest} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortBackupRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_AbortBackupRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_AbortBackupRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortBackupRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortBackupRequest.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortBackupRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + backupId_ = ""; + bitField0_ = (bitField0_ & ~0x00000001); + mayInterruptIfRunning_ = true; + bitField0_ = (bitField0_ & ~0x00000002); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_AbortBackupRequest_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortBackupRequest getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortBackupRequest.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortBackupRequest build() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortBackupRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortBackupRequest buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortBackupRequest result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortBackupRequest(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.backupId_ = backupId_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.mayInterruptIfRunning_ = mayInterruptIfRunning_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortBackupRequest) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortBackupRequest)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortBackupRequest other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortBackupRequest.getDefaultInstance()) return this; + if (other.hasBackupId()) { + bitField0_ |= 0x00000001; + backupId_ = other.backupId_; + onChanged(); + } + if (other.hasMayInterruptIfRunning()) { + setMayInterruptIfRunning(other.getMayInterruptIfRunning()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasBackupId()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortBackupRequest parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortBackupRequest) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required string backup_id = 1; + private java.lang.Object backupId_ = ""; + /** + * required string backup_id = 1; + */ + public boolean hasBackupId() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string backup_id = 1; + */ + public java.lang.String getBackupId() { + java.lang.Object ref = backupId_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + backupId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * required string backup_id = 1; + */ + public com.google.protobuf.ByteString + getBackupIdBytes() { + java.lang.Object ref = backupId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + backupId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * required string backup_id = 1; + */ + public Builder setBackupId( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + backupId_ = value; + onChanged(); + return this; + } + /** + * required string backup_id = 1; + */ + public Builder 
clearBackupId() { + bitField0_ = (bitField0_ & ~0x00000001); + backupId_ = getDefaultInstance().getBackupId(); + onChanged(); + return this; + } + /** + * required string backup_id = 1; + */ + public Builder setBackupIdBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + backupId_ = value; + onChanged(); + return this; + } + + // optional bool mayInterruptIfRunning = 2 [default = true]; + private boolean mayInterruptIfRunning_ = true; + /** + * optional bool mayInterruptIfRunning = 2 [default = true]; + */ + public boolean hasMayInterruptIfRunning() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * optional bool mayInterruptIfRunning = 2 [default = true]; + */ + public boolean getMayInterruptIfRunning() { + return mayInterruptIfRunning_; + } + /** + * optional bool mayInterruptIfRunning = 2 [default = true]; + */ + public Builder setMayInterruptIfRunning(boolean value) { + bitField0_ |= 0x00000002; + mayInterruptIfRunning_ = value; + onChanged(); + return this; + } + /** + * optional bool mayInterruptIfRunning = 2 [default = true]; + */ + public Builder clearMayInterruptIfRunning() { + bitField0_ = (bitField0_ & ~0x00000002); + mayInterruptIfRunning_ = true; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:hbase.pb.AbortBackupRequest) + } + + static { + defaultInstance = new AbortBackupRequest(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:hbase.pb.AbortBackupRequest) + } + + public interface AbortBackupResponseOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required bool is_backup_aborted = 1; + /** + * required bool is_backup_aborted = 1; + */ + boolean hasIsBackupAborted(); + /** + * required bool is_backup_aborted = 1; + */ + boolean getIsBackupAborted(); + } + /** + * Protobuf type {@code hbase.pb.AbortBackupResponse} + */ + public static final class AbortBackupResponse extends + com.google.protobuf.GeneratedMessage + implements AbortBackupResponseOrBuilder { + // Use AbortBackupResponse.newBuilder() to construct. 
+ private AbortBackupResponse(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private AbortBackupResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final AbortBackupResponse defaultInstance; + public static AbortBackupResponse getDefaultInstance() { + return defaultInstance; + } + + public AbortBackupResponse getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private AbortBackupResponse( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 8: { + bitField0_ |= 0x00000001; + isBackupAborted_ = input.readBool(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_AbortBackupResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_AbortBackupResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortBackupResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortBackupResponse.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public AbortBackupResponse parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new AbortBackupResponse(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required bool is_backup_aborted = 1; + public static final int IS_BACKUP_ABORTED_FIELD_NUMBER = 1; + private boolean isBackupAborted_; + /** + * required bool is_backup_aborted = 1; + */ + public boolean hasIsBackupAborted() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required bool is_backup_aborted = 1; + */ + public boolean getIsBackupAborted() { + return isBackupAborted_; + } + + private void initFields() { + isBackupAborted_ = false; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = 
memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasIsBackupAborted()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBool(1, isBackupAborted_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBoolSize(1, isBackupAborted_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortBackupResponse)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortBackupResponse other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortBackupResponse) obj; + + boolean result = true; + result = result && (hasIsBackupAborted() == other.hasIsBackupAborted()); + if (hasIsBackupAborted()) { + result = result && (getIsBackupAborted() + == other.getIsBackupAborted()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasIsBackupAborted()) { + hash = (37 * hash) + IS_BACKUP_ABORTED_FIELD_NUMBER; + hash = (53 * hash) + hashBoolean(getIsBackupAborted()); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortBackupResponse parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortBackupResponse parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortBackupResponse parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortBackupResponse parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortBackupResponse parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + 
public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortBackupResponse parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortBackupResponse parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortBackupResponse parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortBackupResponse parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortBackupResponse parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortBackupResponse prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code hbase.pb.AbortBackupResponse} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortBackupResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_AbortBackupResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_AbortBackupResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortBackupResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortBackupResponse.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortBackupResponse.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + isBackupAborted_ = false; + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public 
com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_AbortBackupResponse_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortBackupResponse getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortBackupResponse.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortBackupResponse build() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortBackupResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortBackupResponse buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortBackupResponse result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortBackupResponse(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.isBackupAborted_ = isBackupAborted_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortBackupResponse) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortBackupResponse)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortBackupResponse other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortBackupResponse.getDefaultInstance()) return this; + if (other.hasIsBackupAborted()) { + setIsBackupAborted(other.getIsBackupAborted()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasIsBackupAborted()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortBackupResponse parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortBackupResponse) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required bool is_backup_aborted = 1; + private boolean isBackupAborted_ ; + /** + * required bool is_backup_aborted = 1; + */ + public boolean hasIsBackupAborted() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required bool is_backup_aborted = 1; + */ + public boolean getIsBackupAborted() { + return isBackupAborted_; + } + /** + * required bool is_backup_aborted = 1; + */ + public Builder setIsBackupAborted(boolean value) { + bitField0_ |= 0x00000001; + isBackupAborted_ = value; + onChanged(); + return this; + } + /** + * required bool is_backup_aborted = 1; + */ + public Builder clearIsBackupAborted() { + bitField0_ = (bitField0_ & ~0x00000001); + isBackupAborted_ = false; + onChanged(); + return this; + } + + // 
@@protoc_insertion_point(builder_scope:hbase.pb.AbortBackupResponse) + } + + static { + defaultInstance = new AbortBackupResponse(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:hbase.pb.AbortBackupResponse) + } + /** * Protobuf service {@code hbase.pb.MasterService} */ @@ -59936,6 +62768,30 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest request, com.google.protobuf.RpcCallback done); + /** + * rpc backupTables(.hbase.pb.BackupTablesRequest) returns (.hbase.pb.BackupTablesResponse); + * + *
+       ** backup table set 
+       * 
+ */ + public abstract void backupTables( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest request, + com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse> done); + + /** + * <code>rpc AbortBackup(.hbase.pb.AbortBackupRequest) returns (.hbase.pb.AbortBackupResponse);</code> + * + * <pre>
+       ** Abort a backup
+       * </pre>
+ */ + public abstract void abortBackup( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortBackupRequest request, + com.google.protobuf.RpcCallback done); + } public static com.google.protobuf.Service newReflectiveService( @@ -60397,6 +63253,22 @@ public final class MasterProtos { impl.listProcedures(controller, request, done); } + @java.lang.Override + public void backupTables( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest request, + com.google.protobuf.RpcCallback done) { + impl.backupTables(controller, request, done); + } + + @java.lang.Override + public void abortBackup( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortBackupRequest request, + com.google.protobuf.RpcCallback done) { + impl.abortBackup(controller, request, done); + } + }; } @@ -60533,6 +63405,10 @@ public final class MasterProtos { return impl.abortProcedure(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest)request); case 56: return impl.listProcedures(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest)request); + case 57: + return impl.backupTables(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest)request); + case 58: + return impl.abortBackup(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortBackupRequest)request); default: throw new java.lang.AssertionError("Can't get here."); } @@ -60661,6 +63537,10 @@ public final class MasterProtos { return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest.getDefaultInstance(); case 56: return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest.getDefaultInstance(); + case 57: + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest.getDefaultInstance(); + case 58: + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortBackupRequest.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); } @@ -60789,6 +63669,10 @@ public final class MasterProtos { return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse.getDefaultInstance(); case 56: return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse.getDefaultInstance(); + case 57: + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse.getDefaultInstance(); + case 58: + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortBackupResponse.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); } @@ -61510,6 +64394,30 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest request, com.google.protobuf.RpcCallback done); + /** + * rpc backupTables(.hbase.pb.BackupTablesRequest) returns (.hbase.pb.BackupTablesResponse); + * + *
+     ** backup table set 
+     * 
+ */ + public abstract void backupTables( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest request, + com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse> done); + + /** + * <code>rpc AbortBackup(.hbase.pb.AbortBackupRequest) returns (.hbase.pb.AbortBackupResponse);</code> + * + * <pre>
+     ** Abort a backup
+     * </pre>
+ */ + public abstract void abortBackup( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortBackupRequest request, + com.google.protobuf.RpcCallback done); + public static final com.google.protobuf.Descriptors.ServiceDescriptor getDescriptor() { @@ -61817,6 +64725,16 @@ public final class MasterProtos { com.google.protobuf.RpcUtil.specializeCallback( done)); return; + case 57: + this.backupTables(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest)request, + com.google.protobuf.RpcUtil.specializeCallback( + done)); + return; + case 58: + this.abortBackup(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortBackupRequest)request, + com.google.protobuf.RpcUtil.specializeCallback( + done)); + return; default: throw new java.lang.AssertionError("Can't get here."); } @@ -61945,6 +64863,10 @@ public final class MasterProtos { return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest.getDefaultInstance(); case 56: return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest.getDefaultInstance(); + case 57: + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest.getDefaultInstance(); + case 58: + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortBackupRequest.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); } @@ -62073,6 +64995,10 @@ public final class MasterProtos { return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse.getDefaultInstance(); case 56: return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse.getDefaultInstance(); + case 57: + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse.getDefaultInstance(); + case 58: + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortBackupResponse.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); } @@ -62948,6 +65874,36 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse.getDefaultInstance())); } + + public void backupTables( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest request, + com.google.protobuf.RpcCallback done) { + channel.callMethod( + getDescriptor().getMethods().get(57), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse.getDefaultInstance(), + com.google.protobuf.RpcUtil.generalizeCallback( + done, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse.class, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse.getDefaultInstance())); + } + + public void abortBackup( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortBackupRequest request, + com.google.protobuf.RpcCallback done) { + channel.callMethod( + getDescriptor().getMethods().get(58), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortBackupResponse.getDefaultInstance(), + com.google.protobuf.RpcUtil.generalizeCallback( + done, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortBackupResponse.class, + 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortBackupResponse.getDefaultInstance())); + } } public static BlockingInterface newBlockingStub( @@ -63240,6 +66196,16 @@ public final class MasterProtos { com.google.protobuf.RpcController controller, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest request) throws com.google.protobuf.ServiceException; + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse backupTables( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest request) + throws com.google.protobuf.ServiceException; + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortBackupResponse abortBackup( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortBackupRequest request) + throws com.google.protobuf.ServiceException; } private static final class BlockingStub implements BlockingInterface { @@ -63932,6 +66898,30 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse.getDefaultInstance()); } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse backupTables( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest request) + throws com.google.protobuf.ServiceException { + return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse) channel.callBlockingMethod( + getDescriptor().getMethods().get(57), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse.getDefaultInstance()); + } + + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortBackupResponse abortBackup( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortBackupRequest request) + throws com.google.protobuf.ServiceException { + return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortBackupResponse) channel.callBlockingMethod( + getDescriptor().getMethods().get(58), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortBackupResponse.getDefaultInstance()); + } + } // @@protoc_insertion_point(class_scope:hbase.pb.MasterService) @@ -64482,6 +67472,26 @@ public final class MasterProtos { private static com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hbase_pb_SecurityCapabilitiesResponse_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_hbase_pb_BackupTablesRequest_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_hbase_pb_BackupTablesRequest_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_hbase_pb_BackupTablesResponse_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_hbase_pb_BackupTablesResponse_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_hbase_pb_AbortBackupRequest_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_hbase_pb_AbortBackupRequest_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_hbase_pb_AbortBackupResponse_descriptor; + private static + 
com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_hbase_pb_AbortBackupResponse_fieldAccessorTable; public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { @@ -64491,207 +67501,216 @@ public final class MasterProtos { descriptor; static { java.lang.String[] descriptorData = { - "\n\014Master.proto\022\010hbase.pb\032\013HBase.proto\032\014C" + - "lient.proto\032\023ClusterStatus.proto\032\023ErrorH" + - "andling.proto\032\017Procedure.proto\032\013Quota.pr" + - "oto\"\234\001\n\020AddColumnRequest\022\'\n\ntable_name\030\001" + - " \002(\0132\023.hbase.pb.TableName\0225\n\017column_fami" + - "lies\030\002 \002(\0132\034.hbase.pb.ColumnFamilySchema" + - "\022\026\n\013nonce_group\030\003 \001(\004:\0010\022\020\n\005nonce\030\004 \001(\004:" + - "\0010\"$\n\021AddColumnResponse\022\017\n\007proc_id\030\001 \001(\004" + - "\"}\n\023DeleteColumnRequest\022\'\n\ntable_name\030\001 " + - "\002(\0132\023.hbase.pb.TableName\022\023\n\013column_name\030", - "\002 \002(\014\022\026\n\013nonce_group\030\003 \001(\004:\0010\022\020\n\005nonce\030\004" + - " \001(\004:\0010\"\'\n\024DeleteColumnResponse\022\017\n\007proc_" + - "id\030\001 \001(\004\"\237\001\n\023ModifyColumnRequest\022\'\n\ntabl" + - "e_name\030\001 \002(\0132\023.hbase.pb.TableName\0225\n\017col" + - "umn_families\030\002 \002(\0132\034.hbase.pb.ColumnFami" + - "lySchema\022\026\n\013nonce_group\030\003 \001(\004:\0010\022\020\n\005nonc" + - "e\030\004 \001(\004:\0010\"\'\n\024ModifyColumnResponse\022\017\n\007pr" + - "oc_id\030\001 \001(\004\"n\n\021MoveRegionRequest\022)\n\006regi" + - "on\030\001 \002(\0132\031.hbase.pb.RegionSpecifier\022.\n\020d" + - "est_server_name\030\002 \001(\0132\024.hbase.pb.ServerN", - "ame\"\024\n\022MoveRegionResponse\"\222\001\n\035DispatchMe" + - "rgingRegionsRequest\022+\n\010region_a\030\001 \002(\0132\031." + - "hbase.pb.RegionSpecifier\022+\n\010region_b\030\002 \002" + - "(\0132\031.hbase.pb.RegionSpecifier\022\027\n\010forcibl" + - "e\030\003 \001(\010:\005false\" \n\036DispatchMergingRegions" + - "Response\"@\n\023AssignRegionRequest\022)\n\006regio" + - "n\030\001 \002(\0132\031.hbase.pb.RegionSpecifier\"\026\n\024As" + - "signRegionResponse\"X\n\025UnassignRegionRequ" + - "est\022)\n\006region\030\001 \002(\0132\031.hbase.pb.RegionSpe" + - "cifier\022\024\n\005force\030\002 \001(\010:\005false\"\030\n\026Unassign", - "RegionResponse\"A\n\024OfflineRegionRequest\022)" + - "\n\006region\030\001 \002(\0132\031.hbase.pb.RegionSpecifie" + - "r\"\027\n\025OfflineRegionResponse\"\177\n\022CreateTabl" + - "eRequest\022+\n\014table_schema\030\001 \002(\0132\025.hbase.p" + - "b.TableSchema\022\022\n\nsplit_keys\030\002 \003(\014\022\026\n\013non" + - "ce_group\030\003 \001(\004:\0010\022\020\n\005nonce\030\004 \001(\004:\0010\"&\n\023C" + - "reateTableResponse\022\017\n\007proc_id\030\001 \001(\004\"g\n\022D" + - "eleteTableRequest\022\'\n\ntable_name\030\001 \002(\0132\023." 
+ - "hbase.pb.TableName\022\026\n\013nonce_group\030\002 \001(\004:" + - "\0010\022\020\n\005nonce\030\003 \001(\004:\0010\"&\n\023DeleteTableRespo", - "nse\022\017\n\007proc_id\030\001 \001(\004\"\207\001\n\024TruncateTableRe" + - "quest\022&\n\ttableName\030\001 \002(\0132\023.hbase.pb.Tabl" + - "eName\022\035\n\016preserveSplits\030\002 \001(\010:\005false\022\026\n\013" + - "nonce_group\030\003 \001(\004:\0010\022\020\n\005nonce\030\004 \001(\004:\0010\"(" + - "\n\025TruncateTableResponse\022\017\n\007proc_id\030\001 \001(\004" + - "\"g\n\022EnableTableRequest\022\'\n\ntable_name\030\001 \002" + + "\n\014Master.proto\022\010hbase.pb\032\013HBase.proto\032\014B" + + "ackup.proto\032\014Client.proto\032\023ClusterStatus" + + ".proto\032\023ErrorHandling.proto\032\017Procedure.p" + + "roto\032\013Quota.proto\"\234\001\n\020AddColumnRequest\022\'" + + "\n\ntable_name\030\001 \002(\0132\023.hbase.pb.TableName\022" + + "5\n\017column_families\030\002 \002(\0132\034.hbase.pb.Colu" + + "mnFamilySchema\022\026\n\013nonce_group\030\003 \001(\004:\0010\022\020" + + "\n\005nonce\030\004 \001(\004:\0010\"$\n\021AddColumnResponse\022\017\n" + + "\007proc_id\030\001 \001(\004\"}\n\023DeleteColumnRequest\022\'\n" + + "\ntable_name\030\001 \002(\0132\023.hbase.pb.TableName\022\023", + "\n\013column_name\030\002 \002(\014\022\026\n\013nonce_group\030\003 \001(\004" + + ":\0010\022\020\n\005nonce\030\004 \001(\004:\0010\"\'\n\024DeleteColumnRes" + + "ponse\022\017\n\007proc_id\030\001 \001(\004\"\237\001\n\023ModifyColumnR" + + "equest\022\'\n\ntable_name\030\001 \002(\0132\023.hbase.pb.Ta" + + "bleName\0225\n\017column_families\030\002 \002(\0132\034.hbase" + + ".pb.ColumnFamilySchema\022\026\n\013nonce_group\030\003 " + + "\001(\004:\0010\022\020\n\005nonce\030\004 \001(\004:\0010\"\'\n\024ModifyColumn" + + "Response\022\017\n\007proc_id\030\001 \001(\004\"n\n\021MoveRegionR" + + "equest\022)\n\006region\030\001 \002(\0132\031.hbase.pb.Region" + + "Specifier\022.\n\020dest_server_name\030\002 \001(\0132\024.hb", + "ase.pb.ServerName\"\024\n\022MoveRegionResponse\"" + + "\222\001\n\035DispatchMergingRegionsRequest\022+\n\010reg" + + "ion_a\030\001 \002(\0132\031.hbase.pb.RegionSpecifier\022+" + + "\n\010region_b\030\002 \002(\0132\031.hbase.pb.RegionSpecif" + + "ier\022\027\n\010forcible\030\003 \001(\010:\005false\" \n\036Dispatch" + + "MergingRegionsResponse\"@\n\023AssignRegionRe" + + "quest\022)\n\006region\030\001 \002(\0132\031.hbase.pb.RegionS" + + "pecifier\"\026\n\024AssignRegionResponse\"X\n\025Unas" + + "signRegionRequest\022)\n\006region\030\001 \002(\0132\031.hbas" + + "e.pb.RegionSpecifier\022\024\n\005force\030\002 \001(\010:\005fal", + "se\"\030\n\026UnassignRegionResponse\"A\n\024OfflineR" + + "egionRequest\022)\n\006region\030\001 \002(\0132\031.hbase.pb." 
+ + "RegionSpecifier\"\027\n\025OfflineRegionResponse" + + "\"\177\n\022CreateTableRequest\022+\n\014table_schema\030\001" + + " \002(\0132\025.hbase.pb.TableSchema\022\022\n\nsplit_key" + + "s\030\002 \003(\014\022\026\n\013nonce_group\030\003 \001(\004:\0010\022\020\n\005nonce" + + "\030\004 \001(\004:\0010\"&\n\023CreateTableResponse\022\017\n\007proc" + + "_id\030\001 \001(\004\"g\n\022DeleteTableRequest\022\'\n\ntable" + + "_name\030\001 \002(\0132\023.hbase.pb.TableName\022\026\n\013nonc" + + "e_group\030\002 \001(\004:\0010\022\020\n\005nonce\030\003 \001(\004:\0010\"&\n\023De", + "leteTableResponse\022\017\n\007proc_id\030\001 \001(\004\"\207\001\n\024T" + + "runcateTableRequest\022&\n\ttableName\030\001 \002(\0132\023" + + ".hbase.pb.TableName\022\035\n\016preserveSplits\030\002 " + + "\001(\010:\005false\022\026\n\013nonce_group\030\003 \001(\004:\0010\022\020\n\005no" + + "nce\030\004 \001(\004:\0010\"(\n\025TruncateTableResponse\022\017\n" + + "\007proc_id\030\001 \001(\004\"g\n\022EnableTableRequest\022\'\n\n" + + "table_name\030\001 \002(\0132\023.hbase.pb.TableName\022\026\n" + + "\013nonce_group\030\002 \001(\004:\0010\022\020\n\005nonce\030\003 \001(\004:\0010\"" + + "&\n\023EnableTableResponse\022\017\n\007proc_id\030\001 \001(\004\"" + + "h\n\023DisableTableRequest\022\'\n\ntable_name\030\001 \002", "(\0132\023.hbase.pb.TableName\022\026\n\013nonce_group\030\002" + - " \001(\004:\0010\022\020\n\005nonce\030\003 \001(\004:\0010\"&\n\023EnableTable" + - "Response\022\017\n\007proc_id\030\001 \001(\004\"h\n\023DisableTabl" + - "eRequest\022\'\n\ntable_name\030\001 \002(\0132\023.hbase.pb.", - "TableName\022\026\n\013nonce_group\030\002 \001(\004:\0010\022\020\n\005non" + - "ce\030\003 \001(\004:\0010\"\'\n\024DisableTableResponse\022\017\n\007p" + - "roc_id\030\001 \001(\004\"\224\001\n\022ModifyTableRequest\022\'\n\nt" + - "able_name\030\001 \002(\0132\023.hbase.pb.TableName\022+\n\014" + - "table_schema\030\002 \002(\0132\025.hbase.pb.TableSchem" + - "a\022\026\n\013nonce_group\030\003 \001(\004:\0010\022\020\n\005nonce\030\004 \001(\004" + - ":\0010\"&\n\023ModifyTableResponse\022\017\n\007proc_id\030\001 " + - "\001(\004\"~\n\026CreateNamespaceRequest\022:\n\023namespa" + - "ceDescriptor\030\001 \002(\0132\035.hbase.pb.NamespaceD" + - "escriptor\022\026\n\013nonce_group\030\002 \001(\004:\0010\022\020\n\005non", - "ce\030\003 \001(\004:\0010\"*\n\027CreateNamespaceResponse\022\017" + - "\n\007proc_id\030\001 \001(\004\"Y\n\026DeleteNamespaceReques" + - "t\022\025\n\rnamespaceName\030\001 \002(\t\022\026\n\013nonce_group\030" + - "\002 \001(\004:\0010\022\020\n\005nonce\030\003 \001(\004:\0010\"*\n\027DeleteName" + - "spaceResponse\022\017\n\007proc_id\030\001 \001(\004\"~\n\026Modify" + - "NamespaceRequest\022:\n\023namespaceDescriptor\030" + - "\001 \002(\0132\035.hbase.pb.NamespaceDescriptor\022\026\n\013" + - "nonce_group\030\002 \001(\004:\0010\022\020\n\005nonce\030\003 \001(\004:\0010\"*" + - "\n\027ModifyNamespaceResponse\022\017\n\007proc_id\030\001 \001" + - "(\004\"6\n\035GetNamespaceDescriptorRequest\022\025\n\rn", - "amespaceName\030\001 \002(\t\"\\\n\036GetNamespaceDescri" + - "ptorResponse\022:\n\023namespaceDescriptor\030\001 \002(" + - "\0132\035.hbase.pb.NamespaceDescriptor\"!\n\037List" + - "NamespaceDescriptorsRequest\"^\n ListNames" + - "paceDescriptorsResponse\022:\n\023namespaceDesc" + - "riptor\030\001 \003(\0132\035.hbase.pb.NamespaceDescrip" + - "tor\"?\n&ListTableDescriptorsByNamespaceRe" + - "quest\022\025\n\rnamespaceName\030\001 
\002(\t\"U\n\'ListTabl" + - "eDescriptorsByNamespaceResponse\022*\n\013table" + - "Schema\030\001 \003(\0132\025.hbase.pb.TableSchema\"9\n L", - "istTableNamesByNamespaceRequest\022\025\n\rnames" + - "paceName\030\001 \002(\t\"K\n!ListTableNamesByNamesp" + - "aceResponse\022&\n\ttableName\030\001 \003(\0132\023.hbase.p" + - "b.TableName\"\021\n\017ShutdownRequest\"\022\n\020Shutdo" + - "wnResponse\"\023\n\021StopMasterRequest\"\024\n\022StopM" + - "asterResponse\"\037\n\016BalanceRequest\022\r\n\005force" + - "\030\001 \001(\010\"\'\n\017BalanceResponse\022\024\n\014balancer_ra" + - "n\030\001 \002(\010\"<\n\031SetBalancerRunningRequest\022\n\n\002" + - "on\030\001 \002(\010\022\023\n\013synchronous\030\002 \001(\010\"8\n\032SetBala" + - "ncerRunningResponse\022\032\n\022prev_balance_valu", - "e\030\001 \001(\010\"\032\n\030IsBalancerEnabledRequest\",\n\031I" + - "sBalancerEnabledResponse\022\017\n\007enabled\030\001 \002(" + - "\010\"w\n\035SetSplitOrMergeEnabledRequest\022\017\n\007en" + - "abled\030\001 \002(\010\022\023\n\013synchronous\030\002 \001(\010\0220\n\014swit" + - "ch_types\030\003 \003(\0162\032.hbase.pb.MasterSwitchTy" + - "pe\"4\n\036SetSplitOrMergeEnabledResponse\022\022\n\n" + - "prev_value\030\001 \003(\010\"O\n\034IsSplitOrMergeEnable" + - "dRequest\022/\n\013switch_type\030\001 \002(\0162\032.hbase.pb" + - ".MasterSwitchType\"0\n\035IsSplitOrMergeEnabl" + - "edResponse\022\017\n\007enabled\030\001 \002(\010\"\022\n\020Normalize", - "Request\"+\n\021NormalizeResponse\022\026\n\016normaliz" + - "er_ran\030\001 \002(\010\")\n\033SetNormalizerRunningRequ" + - "est\022\n\n\002on\030\001 \002(\010\"=\n\034SetNormalizerRunningR" + - "esponse\022\035\n\025prev_normalizer_value\030\001 \001(\010\"\034" + - "\n\032IsNormalizerEnabledRequest\".\n\033IsNormal" + - "izerEnabledResponse\022\017\n\007enabled\030\001 \002(\010\"\027\n\025" + - "RunCatalogScanRequest\"-\n\026RunCatalogScanR" + - "esponse\022\023\n\013scan_result\030\001 \001(\005\"-\n\033EnableCa" + - "talogJanitorRequest\022\016\n\006enable\030\001 \002(\010\"2\n\034E" + - "nableCatalogJanitorResponse\022\022\n\nprev_valu", - "e\030\001 \001(\010\" \n\036IsCatalogJanitorEnabledReques" + - "t\"0\n\037IsCatalogJanitorEnabledResponse\022\r\n\005" + - "value\030\001 \002(\010\"B\n\017SnapshotRequest\022/\n\010snapsh" + - "ot\030\001 \002(\0132\035.hbase.pb.SnapshotDescription\"" + - ",\n\020SnapshotResponse\022\030\n\020expected_timeout\030" + - "\001 \002(\003\"\036\n\034GetCompletedSnapshotsRequest\"Q\n" + - "\035GetCompletedSnapshotsResponse\0220\n\tsnapsh" + - "ots\030\001 \003(\0132\035.hbase.pb.SnapshotDescription" + - "\"H\n\025DeleteSnapshotRequest\022/\n\010snapshot\030\001 " + - "\002(\0132\035.hbase.pb.SnapshotDescription\"\030\n\026De", - "leteSnapshotResponse\"I\n\026RestoreSnapshotR" + - "equest\022/\n\010snapshot\030\001 \002(\0132\035.hbase.pb.Snap" + - "shotDescription\"\031\n\027RestoreSnapshotRespon" + - "se\"H\n\025IsSnapshotDoneRequest\022/\n\010snapshot\030" + - "\001 \001(\0132\035.hbase.pb.SnapshotDescription\"^\n\026" + - "IsSnapshotDoneResponse\022\023\n\004done\030\001 \001(\010:\005fa" + - "lse\022/\n\010snapshot\030\002 \001(\0132\035.hbase.pb.Snapsho" + - "tDescription\"O\n\034IsRestoreSnapshotDoneReq" + - "uest\022/\n\010snapshot\030\001 \001(\0132\035.hbase.pb.Snapsh" + - "otDescription\"4\n\035IsRestoreSnapshotDoneRe", - "sponse\022\023\n\004done\030\001 \001(\010:\005false\"F\n\033GetSchema" + - "AlterStatusRequest\022\'\n\ntable_name\030\001 \002(\0132\023" + - 
".hbase.pb.TableName\"T\n\034GetSchemaAlterSta" + - "tusResponse\022\035\n\025yet_to_update_regions\030\001 \001" + - "(\r\022\025\n\rtotal_regions\030\002 \001(\r\"\213\001\n\032GetTableDe" + - "scriptorsRequest\022(\n\013table_names\030\001 \003(\0132\023." + - "hbase.pb.TableName\022\r\n\005regex\030\002 \001(\t\022!\n\022inc" + - "lude_sys_tables\030\003 \001(\010:\005false\022\021\n\tnamespac" + - "e\030\004 \001(\t\"J\n\033GetTableDescriptorsResponse\022+" + - "\n\014table_schema\030\001 \003(\0132\025.hbase.pb.TableSch", - "ema\"[\n\024GetTableNamesRequest\022\r\n\005regex\030\001 \001" + - "(\t\022!\n\022include_sys_tables\030\002 \001(\010:\005false\022\021\n" + - "\tnamespace\030\003 \001(\t\"A\n\025GetTableNamesRespons" + - "e\022(\n\013table_names\030\001 \003(\0132\023.hbase.pb.TableN" + - "ame\"?\n\024GetTableStateRequest\022\'\n\ntable_nam" + - "e\030\001 \002(\0132\023.hbase.pb.TableName\"B\n\025GetTable" + - "StateResponse\022)\n\013table_state\030\001 \002(\0132\024.hba" + - "se.pb.TableState\"\031\n\027GetClusterStatusRequ" + - "est\"K\n\030GetClusterStatusResponse\022/\n\016clust" + - "er_status\030\001 \002(\0132\027.hbase.pb.ClusterStatus", - "\"\030\n\026IsMasterRunningRequest\"4\n\027IsMasterRu" + - "nningResponse\022\031\n\021is_master_running\030\001 \002(\010" + - "\"I\n\024ExecProcedureRequest\0221\n\tprocedure\030\001 " + - "\002(\0132\036.hbase.pb.ProcedureDescription\"F\n\025E" + - "xecProcedureResponse\022\030\n\020expected_timeout" + - "\030\001 \001(\003\022\023\n\013return_data\030\002 \001(\014\"K\n\026IsProcedu" + - "reDoneRequest\0221\n\tprocedure\030\001 \001(\0132\036.hbase" + - ".pb.ProcedureDescription\"`\n\027IsProcedureD" + - "oneResponse\022\023\n\004done\030\001 \001(\010:\005false\0220\n\010snap" + - "shot\030\002 \001(\0132\036.hbase.pb.ProcedureDescripti", - "on\",\n\031GetProcedureResultRequest\022\017\n\007proc_" + - "id\030\001 \002(\004\"\371\001\n\032GetProcedureResultResponse\022" + - "9\n\005state\030\001 \002(\0162*.hbase.pb.GetProcedureRe" + - "sultResponse.State\022\022\n\nstart_time\030\002 \001(\004\022\023" + - "\n\013last_update\030\003 \001(\004\022\016\n\006result\030\004 \001(\014\0224\n\te" + - "xception\030\005 \001(\0132!.hbase.pb.ForeignExcepti" + - "onMessage\"1\n\005State\022\r\n\tNOT_FOUND\020\000\022\013\n\007RUN" + - "NING\020\001\022\014\n\010FINISHED\020\002\"M\n\025AbortProcedureRe" + - "quest\022\017\n\007proc_id\030\001 \002(\004\022#\n\025mayInterruptIf" + - "Running\030\002 \001(\010:\004true\"6\n\026AbortProcedureRes", - "ponse\022\034\n\024is_procedure_aborted\030\001 \002(\010\"\027\n\025L" + - "istProceduresRequest\"@\n\026ListProceduresRe" + - "sponse\022&\n\tprocedure\030\001 \003(\0132\023.hbase.pb.Pro" + - "cedure\"\315\001\n\017SetQuotaRequest\022\021\n\tuser_name\030" + - "\001 \001(\t\022\022\n\nuser_group\030\002 \001(\t\022\021\n\tnamespace\030\003" + - " \001(\t\022\'\n\ntable_name\030\004 \001(\0132\023.hbase.pb.Tabl" + - "eName\022\022\n\nremove_all\030\005 \001(\010\022\026\n\016bypass_glob" + - "als\030\006 \001(\010\022+\n\010throttle\030\007 \001(\0132\031.hbase.pb.T" + - "hrottleRequest\"\022\n\020SetQuotaResponse\"J\n\037Ma" + - "jorCompactionTimestampRequest\022\'\n\ntable_n", - "ame\030\001 \002(\0132\023.hbase.pb.TableName\"U\n(MajorC" + - "ompactionTimestampForRegionRequest\022)\n\006re" + - "gion\030\001 \002(\0132\031.hbase.pb.RegionSpecifier\"@\n" + - " MajorCompactionTimestampResponse\022\034\n\024com" + - "paction_timestamp\030\001 \002(\003\"\035\n\033SecurityCapab" + - 
"ilitiesRequest\"\354\001\n\034SecurityCapabilitiesR" + - "esponse\022G\n\014capabilities\030\001 \003(\01621.hbase.pb" + - ".SecurityCapabilitiesResponse.Capability" + - "\"\202\001\n\nCapability\022\031\n\025SIMPLE_AUTHENTICATION" + - "\020\000\022\031\n\025SECURE_AUTHENTICATION\020\001\022\021\n\rAUTHORI", - "ZATION\020\002\022\026\n\022CELL_AUTHORIZATION\020\003\022\023\n\017CELL" + - "_VISIBILITY\020\004*(\n\020MasterSwitchType\022\t\n\005SPL" + - "IT\020\000\022\t\n\005MERGE\020\0012\323(\n\rMasterService\022e\n\024Get" + + " \001(\004:\0010\022\020\n\005nonce\030\003 \001(\004:\0010\"\'\n\024DisableTabl" + + "eResponse\022\017\n\007proc_id\030\001 \001(\004\"\224\001\n\022ModifyTab" + + "leRequest\022\'\n\ntable_name\030\001 \002(\0132\023.hbase.pb" + + ".TableName\022+\n\014table_schema\030\002 \002(\0132\025.hbase" + + ".pb.TableSchema\022\026\n\013nonce_group\030\003 \001(\004:\0010\022" + + "\020\n\005nonce\030\004 \001(\004:\0010\"&\n\023ModifyTableResponse" + + "\022\017\n\007proc_id\030\001 \001(\004\"~\n\026CreateNamespaceRequ" + + "est\022:\n\023namespaceDescriptor\030\001 \002(\0132\035.hbase" + + ".pb.NamespaceDescriptor\022\026\n\013nonce_group\030\002", + " \001(\004:\0010\022\020\n\005nonce\030\003 \001(\004:\0010\"*\n\027CreateNames" + + "paceResponse\022\017\n\007proc_id\030\001 \001(\004\"Y\n\026DeleteN" + + "amespaceRequest\022\025\n\rnamespaceName\030\001 \002(\t\022\026" + + "\n\013nonce_group\030\002 \001(\004:\0010\022\020\n\005nonce\030\003 \001(\004:\0010" + + "\"*\n\027DeleteNamespaceResponse\022\017\n\007proc_id\030\001" + + " \001(\004\"~\n\026ModifyNamespaceRequest\022:\n\023namesp" + + "aceDescriptor\030\001 \002(\0132\035.hbase.pb.Namespace" + + "Descriptor\022\026\n\013nonce_group\030\002 \001(\004:\0010\022\020\n\005no" + + "nce\030\003 \001(\004:\0010\"*\n\027ModifyNamespaceResponse\022" + + "\017\n\007proc_id\030\001 \001(\004\"6\n\035GetNamespaceDescript", + "orRequest\022\025\n\rnamespaceName\030\001 \002(\t\"\\\n\036GetN" + + "amespaceDescriptorResponse\022:\n\023namespaceD" + + "escriptor\030\001 \002(\0132\035.hbase.pb.NamespaceDesc" + + "riptor\"!\n\037ListNamespaceDescriptorsReques" + + "t\"^\n ListNamespaceDescriptorsResponse\022:\n" + + "\023namespaceDescriptor\030\001 \003(\0132\035.hbase.pb.Na" + + "mespaceDescriptor\"?\n&ListTableDescriptor" + + "sByNamespaceRequest\022\025\n\rnamespaceName\030\001 \002" + + "(\t\"U\n\'ListTableDescriptorsByNamespaceRes" + + "ponse\022*\n\013tableSchema\030\001 \003(\0132\025.hbase.pb.Ta", + "bleSchema\"9\n ListTableNamesByNamespaceRe" + + "quest\022\025\n\rnamespaceName\030\001 \002(\t\"K\n!ListTabl" + + "eNamesByNamespaceResponse\022&\n\ttableName\030\001" + + " \003(\0132\023.hbase.pb.TableName\"\021\n\017ShutdownReq" + + "uest\"\022\n\020ShutdownResponse\"\023\n\021StopMasterRe" + + "quest\"\024\n\022StopMasterResponse\"\037\n\016BalanceRe" + + "quest\022\r\n\005force\030\001 \001(\010\"\'\n\017BalanceResponse\022" + + "\024\n\014balancer_ran\030\001 \002(\010\"<\n\031SetBalancerRunn" + + "ingRequest\022\n\n\002on\030\001 \002(\010\022\023\n\013synchronous\030\002 " + + "\001(\010\"8\n\032SetBalancerRunningResponse\022\032\n\022pre", + "v_balance_value\030\001 \001(\010\"\032\n\030IsBalancerEnabl" + + "edRequest\",\n\031IsBalancerEnabledResponse\022\017" + + "\n\007enabled\030\001 \002(\010\"w\n\035SetSplitOrMergeEnable" + + "dRequest\022\017\n\007enabled\030\001 \002(\010\022\023\n\013synchronous" + + "\030\002 \001(\010\0220\n\014switch_types\030\003 \003(\0162\032.hbase.pb." 
+ + "MasterSwitchType\"4\n\036SetSplitOrMergeEnabl" + + "edResponse\022\022\n\nprev_value\030\001 \003(\010\"O\n\034IsSpli" + + "tOrMergeEnabledRequest\022/\n\013switch_type\030\001 " + + "\002(\0162\032.hbase.pb.MasterSwitchType\"0\n\035IsSpl" + + "itOrMergeEnabledResponse\022\017\n\007enabled\030\001 \002(", + "\010\"\022\n\020NormalizeRequest\"+\n\021NormalizeRespon" + + "se\022\026\n\016normalizer_ran\030\001 \002(\010\")\n\033SetNormali" + + "zerRunningRequest\022\n\n\002on\030\001 \002(\010\"=\n\034SetNorm" + + "alizerRunningResponse\022\035\n\025prev_normalizer" + + "_value\030\001 \001(\010\"\034\n\032IsNormalizerEnabledReque" + + "st\".\n\033IsNormalizerEnabledResponse\022\017\n\007ena" + + "bled\030\001 \002(\010\"\027\n\025RunCatalogScanRequest\"-\n\026R" + + "unCatalogScanResponse\022\023\n\013scan_result\030\001 \001" + + "(\005\"-\n\033EnableCatalogJanitorRequest\022\016\n\006ena" + + "ble\030\001 \002(\010\"2\n\034EnableCatalogJanitorRespons", + "e\022\022\n\nprev_value\030\001 \001(\010\" \n\036IsCatalogJanito" + + "rEnabledRequest\"0\n\037IsCatalogJanitorEnabl" + + "edResponse\022\r\n\005value\030\001 \002(\010\"B\n\017SnapshotReq" + + "uest\022/\n\010snapshot\030\001 \002(\0132\035.hbase.pb.Snapsh" + + "otDescription\",\n\020SnapshotResponse\022\030\n\020exp" + + "ected_timeout\030\001 \002(\003\"\036\n\034GetCompletedSnaps" + + "hotsRequest\"Q\n\035GetCompletedSnapshotsResp" + + "onse\0220\n\tsnapshots\030\001 \003(\0132\035.hbase.pb.Snaps" + + "hotDescription\"H\n\025DeleteSnapshotRequest\022" + + "/\n\010snapshot\030\001 \002(\0132\035.hbase.pb.SnapshotDes", + "cription\"\030\n\026DeleteSnapshotResponse\"I\n\026Re" + + "storeSnapshotRequest\022/\n\010snapshot\030\001 \002(\0132\035" + + ".hbase.pb.SnapshotDescription\"\031\n\027Restore" + + "SnapshotResponse\"H\n\025IsSnapshotDoneReques" + + "t\022/\n\010snapshot\030\001 \001(\0132\035.hbase.pb.SnapshotD" + + "escription\"^\n\026IsSnapshotDoneResponse\022\023\n\004" + + "done\030\001 \001(\010:\005false\022/\n\010snapshot\030\002 \001(\0132\035.hb" + + "ase.pb.SnapshotDescription\"O\n\034IsRestoreS" + + "napshotDoneRequest\022/\n\010snapshot\030\001 \001(\0132\035.h" + + "base.pb.SnapshotDescription\"4\n\035IsRestore", + "SnapshotDoneResponse\022\023\n\004done\030\001 \001(\010:\005fals" + + "e\"F\n\033GetSchemaAlterStatusRequest\022\'\n\ntabl" + + "e_name\030\001 \002(\0132\023.hbase.pb.TableName\"T\n\034Get" + + "SchemaAlterStatusResponse\022\035\n\025yet_to_upda" + + "te_regions\030\001 \001(\r\022\025\n\rtotal_regions\030\002 \001(\r\"" + + "\213\001\n\032GetTableDescriptorsRequest\022(\n\013table_" + + "names\030\001 \003(\0132\023.hbase.pb.TableName\022\r\n\005rege" + + "x\030\002 \001(\t\022!\n\022include_sys_tables\030\003 \001(\010:\005fal" + + "se\022\021\n\tnamespace\030\004 \001(\t\"J\n\033GetTableDescrip" + + "torsResponse\022+\n\014table_schema\030\001 \003(\0132\025.hba", + "se.pb.TableSchema\"[\n\024GetTableNamesReques" + + "t\022\r\n\005regex\030\001 \001(\t\022!\n\022include_sys_tables\030\002" + + " \001(\010:\005false\022\021\n\tnamespace\030\003 \001(\t\"A\n\025GetTab" + + "leNamesResponse\022(\n\013table_names\030\001 \003(\0132\023.h" + + "base.pb.TableName\"?\n\024GetTableStateReques" + + "t\022\'\n\ntable_name\030\001 \002(\0132\023.hbase.pb.TableNa" + + "me\"B\n\025GetTableStateResponse\022)\n\013table_sta" + + "te\030\001 \002(\0132\024.hbase.pb.TableState\"\031\n\027GetClu" + + "sterStatusRequest\"K\n\030GetClusterStatusRes" + + "ponse\022/\n\016cluster_status\030\001 
\002(\0132\027.hbase.pb", + ".ClusterStatus\"\030\n\026IsMasterRunningRequest" + + "\"4\n\027IsMasterRunningResponse\022\031\n\021is_master" + + "_running\030\001 \002(\010\"I\n\024ExecProcedureRequest\0221" + + "\n\tprocedure\030\001 \002(\0132\036.hbase.pb.ProcedureDe" + + "scription\"F\n\025ExecProcedureResponse\022\030\n\020ex" + + "pected_timeout\030\001 \001(\003\022\023\n\013return_data\030\002 \001(" + + "\014\"K\n\026IsProcedureDoneRequest\0221\n\tprocedure" + + "\030\001 \001(\0132\036.hbase.pb.ProcedureDescription\"`" + + "\n\027IsProcedureDoneResponse\022\023\n\004done\030\001 \001(\010:" + + "\005false\0220\n\010snapshot\030\002 \001(\0132\036.hbase.pb.Proc", + "edureDescription\",\n\031GetProcedureResultRe" + + "quest\022\017\n\007proc_id\030\001 \002(\004\"\371\001\n\032GetProcedureR" + + "esultResponse\0229\n\005state\030\001 \002(\0162*.hbase.pb." + + "GetProcedureResultResponse.State\022\022\n\nstar" + + "t_time\030\002 \001(\004\022\023\n\013last_update\030\003 \001(\004\022\016\n\006res" + + "ult\030\004 \001(\014\0224\n\texception\030\005 \001(\0132!.hbase.pb." + + "ForeignExceptionMessage\"1\n\005State\022\r\n\tNOT_" + + "FOUND\020\000\022\013\n\007RUNNING\020\001\022\014\n\010FINISHED\020\002\"M\n\025Ab" + + "ortProcedureRequest\022\017\n\007proc_id\030\001 \002(\004\022#\n\025" + + "mayInterruptIfRunning\030\002 \001(\010:\004true\"6\n\026Abo", + "rtProcedureResponse\022\034\n\024is_procedure_abor" + + "ted\030\001 \002(\010\"\027\n\025ListProceduresRequest\"@\n\026Li" + + "stProceduresResponse\022&\n\tprocedure\030\001 \003(\0132" + + "\023.hbase.pb.Procedure\"\315\001\n\017SetQuotaRequest" + + "\022\021\n\tuser_name\030\001 \001(\t\022\022\n\nuser_group\030\002 \001(\t\022" + + "\021\n\tnamespace\030\003 \001(\t\022\'\n\ntable_name\030\004 \001(\0132\023" + + ".hbase.pb.TableName\022\022\n\nremove_all\030\005 \001(\010\022" + + "\026\n\016bypass_globals\030\006 \001(\010\022+\n\010throttle\030\007 \001(" + + "\0132\031.hbase.pb.ThrottleRequest\"\022\n\020SetQuota" + + "Response\"J\n\037MajorCompactionTimestampRequ", + "est\022\'\n\ntable_name\030\001 \002(\0132\023.hbase.pb.Table" + + "Name\"U\n(MajorCompactionTimestampForRegio" + + "nRequest\022)\n\006region\030\001 \002(\0132\031.hbase.pb.Regi" + + "onSpecifier\"@\n MajorCompactionTimestampR" + + "esponse\022\034\n\024compaction_timestamp\030\001 \002(\003\"\035\n" + + "\033SecurityCapabilitiesRequest\"\354\001\n\034Securit" + + "yCapabilitiesResponse\022G\n\014capabilities\030\001 " + + "\003(\01621.hbase.pb.SecurityCapabilitiesRespo" + + "nse.Capability\"\202\001\n\nCapability\022\031\n\025SIMPLE_" + + "AUTHENTICATION\020\000\022\031\n\025SECURE_AUTHENTICATIO", + "N\020\001\022\021\n\rAUTHORIZATION\020\002\022\026\n\022CELL_AUTHORIZA" + + "TION\020\003\022\023\n\017CELL_VISIBILITY\020\004\"\233\001\n\023BackupTa" + + "blesRequest\022\"\n\004type\030\001 \002(\0162\024.hbase.pb.Bac" + + "kupType\022#\n\006tables\030\002 \003(\0132\023.hbase.pb.Table" + + "Name\022\027\n\017target_root_dir\030\003 \002(\t\022\017\n\007workers" + + "\030\004 \001(\003\022\021\n\tbandwidth\030\005 \001(\003\":\n\024BackupTable" + + "sResponse\022\017\n\007proc_id\030\001 \001(\004\022\021\n\tbackup_id\030" + + "\002 \001(\t\"L\n\022AbortBackupRequest\022\021\n\tbackup_id" + + "\030\001 \002(\t\022#\n\025mayInterruptIfRunning\030\002 \001(\010:\004t" + + "rue\"0\n\023AbortBackupResponse\022\031\n\021is_backup_", + "aborted\030\001 \002(\010*(\n\020MasterSwitchType\022\t\n\005SPL" + + 
"IT\020\000\022\t\n\005MERGE\020\0012\356)\n\rMasterService\022e\n\024Get" + "SchemaAlterStatus\022%.hbase.pb.GetSchemaAl" + "terStatusRequest\032&.hbase.pb.GetSchemaAlt" + "erStatusResponse\022b\n\023GetTableDescriptors\022" + "$.hbase.pb.GetTableDescriptorsRequest\032%." + "hbase.pb.GetTableDescriptorsResponse\022P\n\r" + "GetTableNames\022\036.hbase.pb.GetTableNamesRe" + - "quest\032\037.hbase.pb.GetTableNamesResponse\022Y", - "\n\020GetClusterStatus\022!.hbase.pb.GetCluster" + + "quest\032\037.hbase.pb.GetTableNamesResponse\022Y" + + "\n\020GetClusterStatus\022!.hbase.pb.GetCluster", "StatusRequest\032\".hbase.pb.GetClusterStatu" + "sResponse\022V\n\017IsMasterRunning\022 .hbase.pb." + "IsMasterRunningRequest\032!.hbase.pb.IsMast" + @@ -64700,8 +67719,8 @@ public final class MasterProtos { "esponse\022M\n\014DeleteColumn\022\035.hbase.pb.Delet" + "eColumnRequest\032\036.hbase.pb.DeleteColumnRe" + "sponse\022M\n\014ModifyColumn\022\035.hbase.pb.Modify" + - "ColumnRequest\032\036.hbase.pb.ModifyColumnRes", - "ponse\022G\n\nMoveRegion\022\033.hbase.pb.MoveRegio" + + "ColumnRequest\032\036.hbase.pb.ModifyColumnRes" + + "ponse\022G\n\nMoveRegion\022\033.hbase.pb.MoveRegio", "nRequest\032\034.hbase.pb.MoveRegionResponse\022k" + "\n\026DispatchMergingRegions\022\'.hbase.pb.Disp" + "atchMergingRegionsRequest\032(.hbase.pb.Dis" + @@ -64710,8 +67729,8 @@ public final class MasterProtos { "se.pb.AssignRegionResponse\022S\n\016UnassignRe" + "gion\022\037.hbase.pb.UnassignRegionRequest\032 ." + "hbase.pb.UnassignRegionResponse\022P\n\rOffli" + - "neRegion\022\036.hbase.pb.OfflineRegionRequest", - "\032\037.hbase.pb.OfflineRegionResponse\022J\n\013Del" + + "neRegion\022\036.hbase.pb.OfflineRegionRequest" + + "\032\037.hbase.pb.OfflineRegionResponse\022J\n\013Del", "eteTable\022\034.hbase.pb.DeleteTableRequest\032\035" + ".hbase.pb.DeleteTableResponse\022P\n\rtruncat" + "eTable\022\036.hbase.pb.TruncateTableRequest\032\037" + @@ -64720,8 +67739,8 @@ public final class MasterProtos { "base.pb.EnableTableResponse\022M\n\014DisableTa" + "ble\022\035.hbase.pb.DisableTableRequest\032\036.hba" + "se.pb.DisableTableResponse\022J\n\013ModifyTabl" + - "e\022\034.hbase.pb.ModifyTableRequest\032\035.hbase.", - "pb.ModifyTableResponse\022J\n\013CreateTable\022\034." + + "e\022\034.hbase.pb.ModifyTableRequest\032\035.hbase." 
+ + "pb.ModifyTableResponse\022J\n\013CreateTable\022\034.", "hbase.pb.CreateTableRequest\032\035.hbase.pb.C" + "reateTableResponse\022A\n\010Shutdown\022\031.hbase.p" + "b.ShutdownRequest\032\032.hbase.pb.ShutdownRes" + @@ -64730,8 +67749,8 @@ public final class MasterProtos { "\n\007Balance\022\030.hbase.pb.BalanceRequest\032\031.hb" + "ase.pb.BalanceResponse\022_\n\022SetBalancerRun" + "ning\022#.hbase.pb.SetBalancerRunningReques" + - "t\032$.hbase.pb.SetBalancerRunningResponse\022", - "\\\n\021IsBalancerEnabled\022\".hbase.pb.IsBalanc" + + "t\032$.hbase.pb.SetBalancerRunningResponse\022" + + "\\\n\021IsBalancerEnabled\022\".hbase.pb.IsBalanc", "erEnabledRequest\032#.hbase.pb.IsBalancerEn" + "abledResponse\022k\n\026SetSplitOrMergeEnabled\022" + "\'.hbase.pb.SetSplitOrMergeEnabledRequest" + @@ -64740,8 +67759,8 @@ public final class MasterProtos { "sSplitOrMergeEnabledRequest\032\'.hbase.pb.I" + "sSplitOrMergeEnabledResponse\022D\n\tNormaliz" + "e\022\032.hbase.pb.NormalizeRequest\032\033.hbase.pb" + - ".NormalizeResponse\022e\n\024SetNormalizerRunni", - "ng\022%.hbase.pb.SetNormalizerRunningReques" + + ".NormalizeResponse\022e\n\024SetNormalizerRunni" + + "ng\022%.hbase.pb.SetNormalizerRunningReques", "t\032&.hbase.pb.SetNormalizerRunningRespons" + "e\022b\n\023IsNormalizerEnabled\022$.hbase.pb.IsNo" + "rmalizerEnabledRequest\032%.hbase.pb.IsNorm" + @@ -64750,8 +67769,8 @@ public final class MasterProtos { ".pb.RunCatalogScanResponse\022e\n\024EnableCata" + "logJanitor\022%.hbase.pb.EnableCatalogJanit" + "orRequest\032&.hbase.pb.EnableCatalogJanito" + - "rResponse\022n\n\027IsCatalogJanitorEnabled\022(.h", - "base.pb.IsCatalogJanitorEnabledRequest\032)" + + "rResponse\022n\n\027IsCatalogJanitorEnabled\022(.h" + + "base.pb.IsCatalogJanitorEnabledRequest\032)", ".hbase.pb.IsCatalogJanitorEnabledRespons" + "e\022^\n\021ExecMasterService\022#.hbase.pb.Coproc" + "essorServiceRequest\032$.hbase.pb.Coprocess" + @@ -64760,8 +67779,8 @@ public final class MasterProtos { "onse\022h\n\025GetCompletedSnapshots\022&.hbase.pb" + ".GetCompletedSnapshotsRequest\032\'.hbase.pb" + ".GetCompletedSnapshotsResponse\022S\n\016Delete" + - "Snapshot\022\037.hbase.pb.DeleteSnapshotReques", - "t\032 .hbase.pb.DeleteSnapshotResponse\022S\n\016I" + + "Snapshot\022\037.hbase.pb.DeleteSnapshotReques" + + "t\032 .hbase.pb.DeleteSnapshotResponse\022S\n\016I", "sSnapshotDone\022\037.hbase.pb.IsSnapshotDoneR" + "equest\032 .hbase.pb.IsSnapshotDoneResponse" + "\022V\n\017RestoreSnapshot\022 .hbase.pb.RestoreSn" + @@ -64770,8 +67789,8 @@ public final class MasterProtos { "e.pb.IsRestoreSnapshotDoneRequest\032\'.hbas" + "e.pb.IsRestoreSnapshotDoneResponse\022P\n\rEx" + "ecProcedure\022\036.hbase.pb.ExecProcedureRequ" + - "est\032\037.hbase.pb.ExecProcedureResponse\022W\n\024", - "ExecProcedureWithRet\022\036.hbase.pb.ExecProc" + + "est\032\037.hbase.pb.ExecProcedureResponse\022W\n\024" + + "ExecProcedureWithRet\022\036.hbase.pb.ExecProc", "edureRequest\032\037.hbase.pb.ExecProcedureRes" + "ponse\022V\n\017IsProcedureDone\022 .hbase.pb.IsPr" + "ocedureDoneRequest\032!.hbase.pb.IsProcedur" + @@ -64780,8 +67799,8 @@ public final class MasterProtos { "difyNamespaceResponse\022V\n\017CreateNamespace" + "\022 .hbase.pb.CreateNamespaceRequest\032!.hba" + "se.pb.CreateNamespaceResponse\022V\n\017DeleteN" + - "amespace\022 .hbase.pb.DeleteNamespaceReque", - "st\032!.hbase.pb.DeleteNamespaceResponse\022k\n" + + "amespace\022 .hbase.pb.DeleteNamespaceReque" + + "st\032!.hbase.pb.DeleteNamespaceResponse\022k\n", 
"\026GetNamespaceDescriptor\022\'.hbase.pb.GetNa" + "mespaceDescriptorRequest\032(.hbase.pb.GetN" + "amespaceDescriptorResponse\022q\n\030ListNamesp" + @@ -64790,8 +67809,8 @@ public final class MasterProtos { "ceDescriptorsResponse\022\206\001\n\037ListTableDescr" + "iptorsByNamespace\0220.hbase.pb.ListTableDe" + "scriptorsByNamespaceRequest\0321.hbase.pb.L" + - "istTableDescriptorsByNamespaceResponse\022t", - "\n\031ListTableNamesByNamespace\022*.hbase.pb.L" + + "istTableDescriptorsByNamespaceResponse\022t" + + "\n\031ListTableNamesByNamespace\022*.hbase.pb.L", "istTableNamesByNamespaceRequest\032+.hbase." + "pb.ListTableNamesByNamespaceResponse\022P\n\r" + "GetTableState\022\036.hbase.pb.GetTableStateRe" + @@ -64800,8 +67819,8 @@ public final class MasterProtos { "hbase.pb.SetQuotaResponse\022x\n\037getLastMajo" + "rCompactionTimestamp\022).hbase.pb.MajorCom" + "pactionTimestampRequest\032*.hbase.pb.Major" + - "CompactionTimestampResponse\022\212\001\n(getLastM", - "ajorCompactionTimestampForRegion\0222.hbase" + + "CompactionTimestampResponse\022\212\001\n(getLastM" + + "ajorCompactionTimestampForRegion\0222.hbase", ".pb.MajorCompactionTimestampForRegionReq" + "uest\032*.hbase.pb.MajorCompactionTimestamp" + "Response\022_\n\022getProcedureResult\022#.hbase.p" + @@ -64810,12 +67829,16 @@ public final class MasterProtos { "Capabilities\022%.hbase.pb.SecurityCapabili" + "tiesRequest\032&.hbase.pb.SecurityCapabilit" + "iesResponse\022S\n\016AbortProcedure\022\037.hbase.pb" + - ".AbortProcedureRequest\032 .hbase.pb.AbortP", - "rocedureResponse\022S\n\016ListProcedures\022\037.hba" + + ".AbortProcedureRequest\032 .hbase.pb.AbortP" + + "rocedureResponse\022S\n\016ListProcedures\022\037.hba", "se.pb.ListProceduresRequest\032 .hbase.pb.L" + - "istProceduresResponseBB\n*org.apache.hado" + - "op.hbase.protobuf.generatedB\014MasterProto" + - "sH\001\210\001\001\240\001\001" + "istProceduresResponse\022M\n\014backupTables\022\035." + + "hbase.pb.BackupTablesRequest\032\036.hbase.pb." 
+ + "BackupTablesResponse\022J\n\013AbortBackup\022\034.hb" + + "ase.pb.AbortBackupRequest\032\035.hbase.pb.Abo" + + "rtBackupResponseBB\n*org.apache.hadoop.hb" + + "ase.protobuf.generatedB\014MasterProtosH\001\210\001" + + "\001\240\001\001" }; com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { @@ -65476,6 +68499,30 @@ public final class MasterProtos { com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_SecurityCapabilitiesResponse_descriptor, new java.lang.String[] { "Capabilities", }); + internal_static_hbase_pb_BackupTablesRequest_descriptor = + getDescriptor().getMessageTypes().get(109); + internal_static_hbase_pb_BackupTablesRequest_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_hbase_pb_BackupTablesRequest_descriptor, + new java.lang.String[] { "Type", "Tables", "TargetRootDir", "Workers", "Bandwidth", }); + internal_static_hbase_pb_BackupTablesResponse_descriptor = + getDescriptor().getMessageTypes().get(110); + internal_static_hbase_pb_BackupTablesResponse_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_hbase_pb_BackupTablesResponse_descriptor, + new java.lang.String[] { "ProcId", "BackupId", }); + internal_static_hbase_pb_AbortBackupRequest_descriptor = + getDescriptor().getMessageTypes().get(111); + internal_static_hbase_pb_AbortBackupRequest_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_hbase_pb_AbortBackupRequest_descriptor, + new java.lang.String[] { "BackupId", "MayInterruptIfRunning", }); + internal_static_hbase_pb_AbortBackupResponse_descriptor = + getDescriptor().getMessageTypes().get(112); + internal_static_hbase_pb_AbortBackupResponse_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_hbase_pb_AbortBackupResponse_descriptor, + new java.lang.String[] { "IsBackupAborted", }); return null; } }; @@ -65483,6 +68530,7 @@ public final class MasterProtos { .internalBuildGeneratedFileFrom(descriptorData, new com.google.protobuf.Descriptors.FileDescriptor[] { org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.getDescriptor(), + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.getDescriptor(), org.apache.hadoop.hbase.protobuf.generated.ClientProtos.getDescriptor(), org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.getDescriptor(), org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.getDescriptor(), diff --git a/hbase-protocol/src/main/protobuf/Backup.proto b/hbase-protocol/src/main/protobuf/Backup.proto index 383b990971ba27ed81db4d8775026639f8362f79..9b7cf1fbd725bf94f6376c50bde6bfa1d2052805 100644 --- a/hbase-protocol/src/main/protobuf/Backup.proto +++ b/hbase-protocol/src/main/protobuf/Backup.proto @@ -27,6 +27,27 @@ option optimize_for = SPEED; import "HBase.proto"; +enum FullTableBackupState { + PRE_SNAPSHOT_TABLE = 1; + SNAPSHOT_TABLES = 2; + SNAPSHOT_COPY = 3; + BACKUP_COMPLETE = 4; +} + +enum IncrementalTableBackupState { + PREPARE_INCREMENTAL = 1; + INCREMENTAL_COPY = 2; + INCR_BACKUP_COMPLETE = 3; +} + +enum SnapshotTableState { + SNAPSHOT_TABLE = 1; +} +message SnapshotTableStateData { + required TableName table = 1; + required string snapshotName = 2; +} + enum BackupType { FULL = 0; INCREMENTAL = 1; @@ -103,3 +124,9 @@ message BackupContext { STORE_MANIFEST = 5; } } + +message 
BackupProcContext { + required BackupContext ctx = 1; + repeated ServerTimestamp server_timestamp = 2; +} + diff --git a/hbase-protocol/src/main/protobuf/Master.proto b/hbase-protocol/src/main/protobuf/Master.proto index 79bb862f381b3a9831acede460e4a3a4bb7a2409..1cd369d4f615b815b1978ba38637249cb333f9bc 100644 --- a/hbase-protocol/src/main/protobuf/Master.proto +++ b/hbase-protocol/src/main/protobuf/Master.proto @@ -27,6 +27,7 @@ option java_generate_equals_and_hash = true; option optimize_for = SPEED; import "HBase.proto"; +import "Backup.proto"; import "Client.proto"; import "ClusterStatus.proto"; import "ErrorHandling.proto"; @@ -540,6 +541,28 @@ message SecurityCapabilitiesResponse { repeated Capability capabilities = 1; } +message BackupTablesRequest { + required BackupType type = 1; + repeated TableName tables = 2; + required string target_root_dir = 3; + optional int64 workers = 4; + optional int64 bandwidth = 5; +} + +message BackupTablesResponse { + optional uint64 proc_id = 1; + optional string backup_id = 2; +} + +message AbortBackupRequest { + required string backup_id = 1; + optional bool mayInterruptIfRunning = 2 [default = true]; +} + +message AbortBackupResponse { + required bool is_backup_aborted = 1; +} + service MasterService { /** Used by the client to get the number of regions that have received the updated schema */ rpc GetSchemaAlterStatus(GetSchemaAlterStatusRequest) @@ -814,4 +837,12 @@ service MasterService { /** returns a list of procedures */ rpc ListProcedures(ListProceduresRequest) returns(ListProceduresResponse); + + /** backup table set */ + rpc backupTables(BackupTablesRequest) + returns(BackupTablesResponse); + + /** Abort a backup */ + rpc AbortBackup(AbortBackupRequest) + returns(AbortBackupResponse); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupClient.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupClient.java deleted file mode 100644 index 7c8ea39681c0e2b3bb2f6f67dc9794392ad97e94..0000000000000000000000000000000000000000 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupClient.java +++ /dev/null @@ -1,40 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
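[Editor's illustrative sketch — not part of the patch.] The Master.proto additions above introduce BackupTablesRequest/BackupTablesResponse and AbortBackupRequest/AbortBackupResponse together with the backupTables and AbortBackup RPCs. The Java snippet below shows how a caller might assemble a BackupTablesRequest with the protobuf-generated builders; it assumes the existing ProtobufUtil.toProtoTableName(...) helper, and the class name BackupTablesRequestSketch and the sample arguments are hypothetical.

import java.util.List;

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.BackupProtos;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest;

public class BackupTablesRequestSketch {

  /**
   * Builds a full-backup request for the given tables. Workers and bandwidth are left at -1,
   * the "system defined / unlimited" convention used elsewhere in this patch.
   */
  static BackupTablesRequest buildFullBackupRequest(List<TableName> tables, String targetRootDir) {
    BackupTablesRequest.Builder builder = BackupTablesRequest.newBuilder()
        .setType(BackupProtos.BackupType.FULL)   // required BackupType type = 1
        .setTargetRootDir(targetRootDir)         // required string target_root_dir = 3
        .setWorkers(-1)                          // optional int64 workers = 4
        .setBandwidth(-1);                       // optional int64 bandwidth = 5
    for (TableName table : tables) {
      builder.addTables(ProtobufUtil.toProtoTableName(table)); // repeated TableName tables = 2
    }
    return builder.build();
  }
}

An AbortBackupRequest would be assembled the same way: backup_id comes from the earlier BackupTablesResponse, and mayInterruptIfRunning defaults to true as declared above.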
- */ -package org.apache.hadoop.hbase.backup; - -import java.io.IOException; -import java.util.List; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.TableName; - -public interface BackupClient { - - public void setConf(Configuration conf); - - /** - * Send backup request to server, and monitor the progress if necessary - * @param backupType : full or incremental - * @param targetRootDir : the root path specified by user - * @param tableList : the table list specified by user - * @return backupId backup id - * @throws IOException exception - */ - public String create(BackupType backupType, List tableList, - String targetRootDir) throws IOException; - } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupRestoreFactory.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupRestoreFactory.java index 6fbfe189f14909d56ec73ed2b03bdd8bf8fb5217..e0c6483073a5ba6f85c54be52355e527d607e806 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupRestoreFactory.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupRestoreFactory.java @@ -18,7 +18,6 @@ package org.apache.hadoop.hbase.backup; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.backup.impl.BackupClientImpl; import org.apache.hadoop.hbase.backup.impl.BackupCopyService; import org.apache.hadoop.hbase.backup.impl.IncrementalRestoreService; import org.apache.hadoop.hbase.backup.impl.RestoreClientImpl; @@ -34,7 +33,6 @@ public final class BackupRestoreFactory { public final static String HBASE_INCR_RESTORE_IMPL_CLASS = "hbase.incremental.restore.class"; public final static String HBASE_BACKUP_COPY_IMPL_CLASS = "hbase.backup.copy.class"; - public final static String HBASE_BACKUP_CLIENT_IMPL_CLASS = "hbase.backup.client.class"; public final static String HBASE_RESTORE_CLIENT_IMPL_CLASS = "hbase.restore.client.class"; private BackupRestoreFactory(){ @@ -66,20 +64,6 @@ public final class BackupRestoreFactory { } /** - * Gets backup client implementation - * @param conf - configuration - * @return backup client - */ - public static BackupClient getBackupClient(Configuration conf) { - Class cls = - conf.getClass(HBASE_BACKUP_CLIENT_IMPL_CLASS, BackupClientImpl.class, - BackupClient.class); - BackupClient client = ReflectionUtils.newInstance(cls, conf); - client.setConf(conf); - return client; - } - - /** * Gets restore client implementation * @param conf - configuration * @return backup client diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HBackupFileSystem.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HBackupFileSystem.java index 6e5a3556bf7d7e6196c88fc9b9c8d2329050969f..4423bccf183f8ee6dd47728632d61bb53c6a5a3b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HBackupFileSystem.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HBackupFileSystem.java @@ -19,36 +19,19 @@ package org.apache.hadoop.hbase.backup; -import java.io.FileNotFoundException; import java.io.IOException; -import java.util.ArrayList; import java.util.HashMap; -import java.util.TreeMap; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.HTableDescriptor; import 
org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.backup.impl.BackupManifest; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.classification.InterfaceStability; -import org.apache.hadoop.hbase.io.HFileLink; -import org.apache.hadoop.hbase.io.hfile.CacheConfig; -import org.apache.hadoop.hbase.io.hfile.HFile; -import org.apache.hadoop.hbase.mapreduce.LoadIncrementalHFiles; -import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription; -import org.apache.hadoop.hbase.regionserver.HRegionFileSystem; -import org.apache.hadoop.hbase.regionserver.HStore; -import org.apache.hadoop.hbase.regionserver.StoreFileInfo; -import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils; -import org.apache.hadoop.hbase.snapshot.SnapshotManifest; -import org.apache.hadoop.hbase.util.Bytes; /** * View to an on-disk Backup Image FileSytem @@ -60,8 +43,6 @@ public class HBackupFileSystem { public static final Log LOG = LogFactory.getLog(HBackupFileSystem.class); private final String RESTORE_TMP_PATH = "/tmp"; - private final String[] ignoreDirs = { "recovered.edits" }; - private final Configuration conf; private final FileSystem fs; private final Path backupRootPath; @@ -69,10 +50,7 @@ public class HBackupFileSystem { private final String backupId; /** - * Create a view to the on-disk Backup Image. - * @param conf to use - * @param backupPath to where the backup Image stored - * @param backupId represent backup Image + * This is utility class. */ public HBackupFileSystem(final Configuration conf, final Path backupRootPath, final String backupId) throws IOException { @@ -81,36 +59,10 @@ public class HBackupFileSystem { this.backupRootPath = backupRootPath; this.backupId = backupId; // the backup ID for the lead backup Image this.restoreTmpPath = new Path(conf.get("hbase.fs.tmp.dir") != null? - conf.get("hbase.fs.tmp.dir"): RESTORE_TMP_PATH, + conf.get("hbase.fs.tmp.dir"): RESTORE_TMP_PATH, "restore"); } - public Path getBackupRootPath() { - return backupRootPath; - } - - public String getBackupId() { - return backupId; - } - - /** - * @param tableName is the table backed up - * @return {@link HTableDescriptor} saved in backup image of the table - */ - public HTableDescriptor getTableDesc(TableName tableName) - throws FileNotFoundException, IOException { - Path tableInfoPath = this.getTableInfoPath(tableName); - SnapshotDescription desc = SnapshotDescriptionUtils.readSnapshotInfo(fs, tableInfoPath); - SnapshotManifest manifest = SnapshotManifest.open(conf, fs, tableInfoPath, desc); - HTableDescriptor tableDescriptor = manifest.getTableDescriptor(); - if (!tableDescriptor.getNameAsString().equals(tableName)) { - LOG.error("couldn't find Table Desc for table: " + tableName + " under tableInfoPath: " - + tableInfoPath.toString()); - LOG.error("tableDescriptor.getNameAsString() = " + tableDescriptor.getNameAsString()); - } - return tableDescriptor; - } - /** * Given the backup root dir, backup id and the table name, return the backup image location, * which is also where the backup manifest file is. 
return value look like: @@ -141,69 +93,6 @@ public class HBackupFileSystem { } /** - * return value represent path for: - * ".../user/biadmin/backup1/default/t1_dn/backup_1396650096738/.hbase-snapshot" - * @param backupRootPath backup root path - * @param tableName table name - * @param backupId backup Id - * @return path for snapshot - */ - public static Path getTableSnapshotPath(Path backupRootPath, TableName tableName, - String backupId) { - return new Path(getTableBackupPath(backupRootPath, tableName, backupId), - HConstants.SNAPSHOT_DIR_NAME); - } - - /** - * return value represent path for: - * "..../default/t1_dn/backup_1396650096738/.hbase-snapshot/snapshot_1396650097621_default_t1_dn" - * this path contains .snapshotinfo, .tabledesc (0.96 and 0.98) this path contains .snapshotinfo, - * .data.manifest (trunk) - * @param tableName table name - * @return path to table info - * @throws FileNotFoundException exception - * @throws IOException exception - */ - public Path getTableInfoPath(TableName tableName) - throws FileNotFoundException, IOException { - Path tableSnapShotPath = getTableSnapshotPath(backupRootPath, tableName, backupId); - Path tableInfoPath = null; - - // can't build the path directly as the timestamp values are different - FileStatus[] snapshots = fs.listStatus(tableSnapShotPath); - for (FileStatus snapshot : snapshots) { - tableInfoPath = snapshot.getPath(); - // SnapshotManifest.DATA_MANIFEST_NAME = "data.manifest"; - if (tableInfoPath.getName().endsWith("data.manifest")) { - break; - } - } - return tableInfoPath; - } - - /** - * return value represent path for: - * ".../user/biadmin/backup1/default/t1_dn/backup_1396650096738/archive/data/default/t1_dn" - * @param tabelName table name - * @return path to table archive - * @throws IOException exception - */ - public Path getTableArchivePath(TableName tableName) - throws IOException { - Path baseDir = new Path(getTableBackupPath(backupRootPath, tableName, backupId), - HConstants.HFILE_ARCHIVE_DIRECTORY); - Path dataDir = new Path(baseDir, HConstants.BASE_NAMESPACE_DIR); - Path archivePath = new Path(dataDir, tableName.getNamespaceAsString()); - Path tableArchivePath = - new Path(archivePath, tableName.getQualifierAsString()); - if (!fs.exists(tableArchivePath) || !fs.getFileStatus(tableArchivePath).isDirectory()) { - LOG.debug("Folder tableArchivePath: " + tableArchivePath.toString() + " does not exists"); - tableArchivePath = null; // empty table has no archive - } - return tableArchivePath; - } - - /** * Given the backup root dir and the backup id, return the log file location for an incremental * backup. 
* @param backupRootDir backup root directory @@ -246,216 +135,6 @@ public class HBackupFileSystem { } /** - * Gets region list - * @param tableName table name - * @return RegionList region list - * @throws FileNotFoundException exception - * @throws IOException exception - */ - - public ArrayList getRegionList(TableName tableName) - throws FileNotFoundException, IOException { - Path tableArchivePath = this.getTableArchivePath(tableName); - ArrayList regionDirList = new ArrayList(); - FileStatus[] children = fs.listStatus(tableArchivePath); - for (FileStatus childStatus : children) { - // here child refer to each region(Name) - Path child = childStatus.getPath(); - regionDirList.add(child); - } - return regionDirList; - } - - /** - * Gets region list - * @param tableArchivePath table archive path - * @return RegionList region list - * @throws FileNotFoundException exception - * @throws IOException exception - */ - public ArrayList getRegionList(Path tableArchivePath) throws FileNotFoundException, - IOException { - ArrayList regionDirList = new ArrayList(); - FileStatus[] children = fs.listStatus(tableArchivePath); - for (FileStatus childStatus : children) { - // here child refer to each region(Name) - Path child = childStatus.getPath(); - regionDirList.add(child); - } - return regionDirList; - } - - /** - * Counts the number of files in all subdirectories of an HBase tables, i.e. HFiles. And finds the - * maximum number of files in one HBase table. - * @param tableArchivePath archive path - * @return the maximum number of files found in 1 HBase table - * @throws IOException exception - */ - public int getMaxNumberOfFilesInSubDir(Path tableArchivePath) throws IOException { - int result = 1; - ArrayList regionPathList = this.getRegionList(tableArchivePath); - // tableArchivePath = this.getTableArchivePath(tableName); - - if (regionPathList == null || regionPathList.size() == 0) { - throw new IllegalStateException("Cannot restore hbase table because directory '" - + tableArchivePath + "' is not a directory."); - } - - for (Path regionPath : regionPathList) { - result = Math.max(result, getNumberOfFilesInDir(regionPath)); - } - return result; - } - - /** - * Counts the number of files in all subdirectories of an HBase table, i.e. HFiles. 
- * @param regionPath Path to an HBase table directory - * @return the number of files all directories - * @throws IOException exception - */ - public int getNumberOfFilesInDir(Path regionPath) throws IOException { - int result = 0; - - if (!fs.exists(regionPath) || !fs.getFileStatus(regionPath).isDirectory()) { - throw new IllegalStateException("Cannot restore hbase table because directory '" - + regionPath.toString() + "' is not a directory."); - } - - FileStatus[] tableDirContent = fs.listStatus(regionPath); - for (FileStatus subDirStatus : tableDirContent) { - FileStatus[] colFamilies = fs.listStatus(subDirStatus.getPath()); - for (FileStatus colFamilyStatus : colFamilies) { - FileStatus[] colFamilyContent = fs.listStatus(colFamilyStatus.getPath()); - result += colFamilyContent.length; - } - } - return result; - } - - /** - * Duplicate the backup image if it's on local cluster - * @see HStore#bulkLoadHFile(String, long) - * @see HRegionFileSystem#bulkLoadStoreFile(String familyName, Path srcPath, long seqNum) - * @param tableArchivePath archive path - * @return the new tableArchivePath - * @throws IOException exception - */ - public Path checkLocalAndBackup(Path tableArchivePath) throws IOException { - // Move the file if it's on local cluster - boolean isCopyNeeded = false; - - FileSystem srcFs = tableArchivePath.getFileSystem(conf); - FileSystem desFs = FileSystem.get(conf); - if (tableArchivePath.getName().startsWith("/")) { - isCopyNeeded = true; - } else { - // This should match what is done in @see HRegionFileSystem#bulkLoadStoreFile(String, Path, - // long) - if (srcFs.getUri().equals(desFs.getUri())) { - LOG.debug("cluster hold the backup image: " + srcFs.getUri() + "; local cluster node: " - + desFs.getUri()); - isCopyNeeded = true; - } - } - if (isCopyNeeded) { - LOG.debug("File " + tableArchivePath + " on local cluster, back it up before restore"); - if (desFs.exists(restoreTmpPath)) { - try { - desFs.delete(restoreTmpPath, true); - } catch (IOException e) { - LOG.debug("Failed to delete path: " + restoreTmpPath - + ", need to check whether restore target DFS cluster is healthy"); - } - } - FileUtil.copy(srcFs, tableArchivePath, desFs, restoreTmpPath, false, conf); - LOG.debug("Copied to temporary path on local cluster: " + restoreTmpPath); - tableArchivePath = restoreTmpPath; - } - return tableArchivePath; - } - - /** - * Calculate region boundaries and add all the column families to the table descriptor - * @param regionDirList region dir list - * @return a set of keys to store the boundaries - */ - public byte[][] generateBoundaryKeys(ArrayList regionDirList) - throws FileNotFoundException, IOException { - TreeMap map = new TreeMap(Bytes.BYTES_COMPARATOR); - // Build a set of keys to store the boundaries - byte[][] keys = null; - // calculate region boundaries and add all the column families to the table descriptor - for (Path regionDir : regionDirList) { - LOG.debug("Parsing region dir: " + regionDir); - Path hfofDir = regionDir; - - if (!fs.exists(hfofDir)) { - LOG.warn("HFileOutputFormat dir " + hfofDir + " not found"); - } - - FileStatus[] familyDirStatuses = fs.listStatus(hfofDir); - if (familyDirStatuses == null) { - throw new IOException("No families found in " + hfofDir); - } - - for (FileStatus stat : familyDirStatuses) { - if (!stat.isDirectory()) { - LOG.warn("Skipping non-directory " + stat.getPath()); - continue; - } - boolean isIgnore = false; - String pathName = stat.getPath().getName(); - for (String ignore : ignoreDirs) { - if (pathName.contains(ignore)) 
{ - LOG.warn("Skipping non-family directory" + pathName); - isIgnore = true; - break; - } - } - if (isIgnore) { - continue; - } - Path familyDir = stat.getPath(); - LOG.debug("Parsing family dir [" + familyDir.toString() + " in region [" + regionDir + "]"); - // Skip _logs, etc - if (familyDir.getName().startsWith("_") || familyDir.getName().startsWith(".")) { - continue; - } - - // start to parse hfile inside one family dir - Path[] hfiles = FileUtil.stat2Paths(fs.listStatus(familyDir)); - for (Path hfile : hfiles) { - if (hfile.getName().startsWith("_") || hfile.getName().startsWith(".") - || StoreFileInfo.isReference(hfile.getName()) - || HFileLink.isHFileLink(hfile.getName())) { - continue; - } - HFile.Reader reader = HFile.createReader(fs, hfile, new CacheConfig(conf), conf); - final byte[] first, last; - try { - reader.loadFileInfo(); - first = reader.getFirstRowKey(); - last = reader.getLastRowKey(); - LOG.debug("Trying to figure out region boundaries hfile=" + hfile + " first=" - + Bytes.toStringBinary(first) + " last=" + Bytes.toStringBinary(last)); - - // To eventually infer start key-end key boundaries - Integer value = map.containsKey(first) ? (Integer) map.get(first) : 0; - map.put(first, value + 1); - value = map.containsKey(last) ? (Integer) map.get(last) : 0; - map.put(last, value - 1); - } finally { - reader.close(); - } - } - } - } - keys = LoadIncrementalHFiles.inferBoundaries(map); - return keys; - } - - /** * Check whether the backup image path and there is manifest file in the path. * @param backupManifestMap If all the manifests are found, then they are put into this map * @param tableArray the tables involved diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupClientImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupClientImpl.java deleted file mode 100644 index bf33cfb26df5f241f996af2d622ca21488979d5b..0000000000000000000000000000000000000000 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupClientImpl.java +++ /dev/null @@ -1,183 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
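[Editor's illustrative sketch — not part of the patch.] With the region-listing, HFile-counting and boundary-key helpers removed above (several of them reappear as package-private helpers in RestoreUtil later in this patch), HBackupFileSystem is reduced to the path-layout utilities. The snippet below exercises the retained static getTableBackupPath(...) helper that the restore code relies on; the class name BackupPathSketch is hypothetical and the root dir, table name and backup id are sample values taken from the javadoc examples.

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.backup.HBackupFileSystem;

public class BackupPathSketch {
  public static void main(String[] args) {
    Path backupRootPath = new Path("/user/biadmin/backup1");  // sample backup root dir
    TableName table = TableName.valueOf("t1_dn");             // sample table
    String backupId = "backup_1396650096738";                 // sample backup id

    // Resolves to /user/biadmin/backup1/default/t1_dn/backup_1396650096738,
    // the backup image location that also holds the backup manifest file.
    Path imageDir = HBackupFileSystem.getTableBackupPath(backupRootPath, table, backupId);
    System.out.println("Backup image dir: " + imageDir);
  }
}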
- */ - -package org.apache.hadoop.hbase.backup.impl; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.List; -import java.util.Set; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hbase.DoNotRetryIOException; -import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.backup.BackupClient; -import org.apache.hadoop.hbase.backup.BackupType; -import org.apache.hadoop.hbase.backup.BackupClientUtil; -import org.apache.hadoop.hbase.backup.HBackupFileSystem; -import org.apache.hadoop.hbase.classification.InterfaceAudience; -import org.apache.hadoop.hbase.classification.InterfaceStability; -import org.apache.hadoop.hbase.client.Connection; -import org.apache.hadoop.hbase.client.ConnectionFactory; -import org.apache.hadoop.hbase.client.HBaseAdmin; -import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; - -import com.google.common.collect.Lists; - -/** - * Backup HBase tables locally or on a remote cluster Serve as client entry point for the following - * features: - Full Backup provide local and remote back/restore for a list of tables - Incremental - * backup to build on top of full backup as daily/weekly backup - Convert incremental backup WAL - * files into hfiles - Merge several backup images into one(like merge weekly into monthly) - Add - * and remove table to and from Backup image - Cancel a backup process - Describe information of - * a backup image - */ -@InterfaceAudience.Public -@InterfaceStability.Evolving -public final class BackupClientImpl implements BackupClient { - private static final Log LOG = LogFactory.getLog(BackupClientImpl.class); - private Configuration conf; - private BackupManager backupManager; - - public BackupClientImpl() { - } - - @Override - public void setConf(Configuration conf) { - this.conf = conf; - } - - /** - * Prepare and submit Backup request - * @param backupId : backup_timestame (something like backup_1398729212626) - * @param backupType : full or incremental - * @param tableList : tables to be backuped - * @param targetRootDir : specified by user - * @throws IOException exception - */ - protected void requestBackup(String backupId, BackupType backupType, List tableList, - String targetRootDir) throws IOException { - - BackupContext backupContext = null; - - HBaseAdmin hbadmin = null; - Connection conn = null; - try { - backupManager = new BackupManager(conf); - if (backupType == BackupType.INCREMENTAL) { - Set incrTableSet = backupManager.getIncrementalBackupTableSet(); - if (incrTableSet.isEmpty()) { - LOG.warn("Incremental backup table set contains no table.\n" - + "Use 'backup create full' or 'backup stop' to \n " - + "change the tables covered by incremental backup."); - throw new DoNotRetryIOException("No table covered by incremental backup."); - } - - LOG.info("Incremental backup for the following table set: " + incrTableSet); - tableList = Lists.newArrayList(incrTableSet); - } - - // check whether table exists first before starting real request - if (tableList != null) { - ArrayList nonExistingTableList = null; - conn = ConnectionFactory.createConnection(conf); - hbadmin = (HBaseAdmin) conn.getAdmin(); - for (TableName tableName : tableList) { - if (!hbadmin.tableExists(tableName)) { - if (nonExistingTableList == null) { - nonExistingTableList = new ArrayList<>(); - } - nonExistingTableList.add(tableName); - } - } - 
if (nonExistingTableList != null) { - if (backupType == BackupType.INCREMENTAL ) { - LOG.warn("Incremental backup table set contains non-exising table: " - + nonExistingTableList); - } else { - // Throw exception only in full mode - we try to backup non-existing table - throw new DoNotRetryIOException("Non-existing tables found in the table list: " - + nonExistingTableList); - } - } - } - - // if any target table backup dir already exist, then no backup action taken - if (tableList != null) { - for (TableName table : tableList) { - String targetTableBackupDir = - HBackupFileSystem.getTableBackupDir(targetRootDir, backupId, table); - Path targetTableBackupDirPath = new Path(targetTableBackupDir); - FileSystem outputFs = FileSystem.get(targetTableBackupDirPath.toUri(), conf); - if (outputFs.exists(targetTableBackupDirPath)) { - throw new DoNotRetryIOException("Target backup directory " + targetTableBackupDir - + " exists already."); - } - } - } - backupContext = - backupManager.createBackupContext(backupId, backupType, tableList, targetRootDir); - backupManager.initialize(); - backupManager.dispatchRequest(backupContext); - } catch (BackupException e) { - // suppress the backup exception wrapped within #initialize or #dispatchRequest, backup - // exception has already been handled normally - LOG.error("Backup Exception ", e); - } finally { - if (hbadmin != null) { - hbadmin.close(); - } - if (conn != null) { - conn.close(); - } - } - } - - @Override - public String create(BackupType backupType, List tableList, String backupRootPath) - throws IOException { - - String backupId = BackupRestoreConstants.BACKUPID_PREFIX + EnvironmentEdgeManager.currentTime(); - BackupClientUtil.checkTargetDir(backupRootPath, conf); - - // table list specified for backup, trigger backup on specified tables - try { - requestBackup(backupId, backupType, tableList, backupRootPath); - } catch (RuntimeException e) { - String errMsg = e.getMessage(); - if (errMsg != null - && (errMsg.startsWith("Non-existing tables found") || errMsg - .startsWith("Snapshot is not found"))) { - LOG.error(errMsg + ", please check your command"); - throw e; - } else { - throw e; - } - } finally{ - if(backupManager != null) { - backupManager.close(); - } - } - return backupId; - } - -} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupCommands.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupCommands.java index 56e26fa148bed49d61693301e1d076ab78f6444d..9b54f6b7748f00501704d3c5cd393bc22ef1a4dc 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupCommands.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupCommands.java @@ -22,12 +22,13 @@ import java.io.IOException; import org.apache.commons.cli.CommandLine; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configured; -import org.apache.hadoop.hbase.backup.BackupClient; -import org.apache.hadoop.hbase.backup.BackupRestoreFactory; import org.apache.hadoop.hbase.backup.BackupType; import org.apache.hadoop.hbase.backup.impl.BackupRestoreConstants.BackupCommand; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.classification.InterfaceStability; +import org.apache.hadoop.hbase.client.Admin; +import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.client.ConnectionFactory; import com.google.common.collect.Lists; @@ -108,12 +109,12 @@ public final class BackupCommands { String tables = 
(args.length == 3) ? args[2] : null; - try { - BackupClient client = BackupRestoreFactory.getBackupClient(getConf()); - client.create(BackupType.valueOf(args[0].toUpperCase()), - Lists.newArrayList(BackupUtil.parseTableNames(tables)), args[1]); - } catch (RuntimeException e) { - System.out.println("ERROR: " + e.getMessage()); + try (Connection conn = ConnectionFactory.createConnection(getConf()); + Admin admin = conn.getAdmin();) { + admin.backupTables(BackupType.valueOf(args[0].toUpperCase()), + Lists.newArrayList(BackupUtil.parseTableNames(tables)), args[1], -1, -1); + } catch (IOException e) { + System.err.println("ERROR: " + e.getMessage()); System.exit(-1); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupContext.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupContext.java index 1be0c3b77507742479775edf45c76b8ba4946788..d4369317a56f2bd163e61c0975fb128302fc3c7d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupContext.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupContext.java @@ -19,6 +19,7 @@ package org.apache.hadoop.hbase.backup.impl; import java.io.IOException; +import java.io.InputStream; import java.util.ArrayList; import java.util.HashMap; import java.util.List; @@ -308,7 +309,7 @@ public class BackupContext { return null; } - public byte[] toByteArray() throws IOException { + BackupProtos.BackupContext toBackupContext() { BackupProtos.BackupContext.Builder builder = BackupProtos.BackupContext.newBuilder(); builder.setBackupId(getBackupId()); @@ -332,8 +333,11 @@ public class BackupContext { builder.setTargetRootDir(getTargetRootDir()); builder.setTotalBytesCopied(getTotalBytesCopied()); builder.setType(BackupProtos.BackupType.valueOf(getType().name())); - byte[] data = builder.build().toByteArray(); - return data; + return builder.build(); + } + + public byte[] toByteArray() throws IOException { + return toBackupContext().toByteArray(); } private void setBackupStatusMap(Builder builder) { @@ -343,9 +347,15 @@ public class BackupContext { } public static BackupContext fromByteArray(byte[] data) throws IOException { + return fromProto(BackupProtos.BackupContext.parseFrom(data)); + } + + public static BackupContext fromStream(final InputStream stream) throws IOException { + return fromProto(BackupProtos.BackupContext.parseDelimitedFrom(stream)); + } + static BackupContext fromProto(BackupProtos.BackupContext proto) { BackupContext context = new BackupContext(); - BackupProtos.BackupContext proto = BackupProtos.BackupContext.parseFrom(data); context.setBackupId(proto.getBackupId()); context.setBackupStatusMap(toMap(proto.getTableBackupStatusList())); context.setEndTs(proto.getEndTs()); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupHandler.java index b9d71f60b5cfc4b6c244f1e13060579cc10798c1..78e767a7dbbf0bd284fb3329d5adf3917c56654e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupHandler.java @@ -53,7 +53,8 @@ import org.apache.zookeeper.KeeperException.NoNodeException; */ @InterfaceAudience.Private @InterfaceStability.Evolving -public class BackupHandler implements Callable { +public class BackupHandler // implements Callable +{ private static final Log LOG = LogFactory.getLog(BackupHandler.class); // backup phase @@ 
-83,7 +84,7 @@ public class BackupHandler implements Callable { public BackupContext getBackupContext() { return backupContext; } - + /* @Override public Void call() throws Exception { try(Admin admin = conn.getAdmin()) { @@ -207,7 +208,7 @@ public class BackupHandler implements Callable { } return null; } - + */ /** * Begin the overall backup. * @param backupContext backup context diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManager.java index a4b0a0aace332f3aa2857f7839cf0c3aee6de500..7efddfd526e7c427113a84f74dd132dd4356552a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManager.java @@ -270,37 +270,8 @@ public class BackupManager implements Closeable { ((ThreadPoolExecutor) pool).allowCoreThreadTimeOut(true); } - /** - * Dispatch and handle a backup request. - * @param backupContext backup context - * @throws BackupException exception - */ - public void dispatchRequest(BackupContext backupContext) throws BackupException { - + public void setBackupContext(BackupContext backupContext) { this.backupContext = backupContext; - - LOG.info("Got a backup request: " + "Type: " + backupContext.getType() + "; Tables: " - + backupContext.getTableNames() + "; TargetRootDir: " + backupContext.getTargetRootDir()); - - // dispatch the request to a backup handler and put it handler map - - BackupHandler handler = new BackupHandler(this.backupContext, this, conf, this.conn); - Future future = this.pool.submit(handler); - // wait for the execution to complete - try { - future.get(); - } catch (InterruptedException e) { - throw new BackupException(e); - } catch (CancellationException e) { - throw new BackupException(e); - } catch (ExecutionException e) { - throw new BackupException(e); - } - - // mark the backup complete for exit handler's processing - backupComplete = true; - - LOG.info("Backup request " + backupContext.getBackupId() + " has been executed."); } /** @@ -476,7 +447,7 @@ public class BackupManager implements Closeable { * @throws IOException exception */ public Set getIncrementalBackupTableSet() throws IOException { - return systemTable.getIncrementalBackupTableSet(); + return BackupSystemTable.getIncrementalBackupTableSet(getConnection()); } /** diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java index f4d67611953fb219201e3d29ba235e81fd6c7a3f..02cc9c5a372c8a88a80ca257c0fecd1fd5a240b6 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java @@ -416,7 +416,8 @@ public final class BackupSystemTable implements Closeable { * @return set of tableNames * @throws IOException exception */ - public Set getIncrementalBackupTableSet() throws IOException { + public static Set getIncrementalBackupTableSet(Connection connection) + throws IOException { if (LOG.isDebugEnabled()) { LOG.debug("get incr backup table set from hbase:backup"); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/RestoreClientImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/RestoreClientImpl.java index 
66175658481b8d91feeb87c5632d388476b0c3ba..c0c5220d73ea24c9c55b6733a9bad1477fe199e9 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/RestoreClientImpl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/RestoreClientImpl.java @@ -286,8 +286,7 @@ public final class RestoreClientImpl implements RestoreClient { String backupId = image.getBackupId(); Path rootPath = new Path(rootDir); - HBackupFileSystem hFS = new HBackupFileSystem(conf, rootPath, backupId); - RestoreUtil restoreTool = new RestoreUtil(conf, hFS); + RestoreUtil restoreTool = new RestoreUtil(conf, rootPath, backupId); BackupManifest manifest = HBackupFileSystem.getManifest(sTable, conf, rootPath, backupId); Path tableBackupPath = HBackupFileSystem.getTableBackupPath(rootPath, sTable, backupId); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/RestoreUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/RestoreUtil.java index 3882e953e0431915f00d2e221a058b52f9704fe7..9139b6ede0f2b079c1e55c6a46636abb5ff39a89 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/RestoreUtil.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/RestoreUtil.java @@ -18,15 +18,20 @@ package org.apache.hadoop.hbase.backup.impl; +import java.io.FileNotFoundException; import java.io.IOException; import java.util.ArrayList; import java.util.HashMap; +import java.util.TreeMap; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.backup.BackupRestoreFactory; @@ -37,10 +42,17 @@ import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.HBaseAdmin; +import org.apache.hadoop.hbase.io.HFileLink; +import org.apache.hadoop.hbase.io.hfile.CacheConfig; +import org.apache.hadoop.hbase.io.hfile.HFile; import org.apache.hadoop.hbase.mapreduce.LoadIncrementalHFiles; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription; +import org.apache.hadoop.hbase.regionserver.HRegionFileSystem; +import org.apache.hadoop.hbase.regionserver.HStore; +import org.apache.hadoop.hbase.regionserver.StoreFileInfo; import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils; import org.apache.hadoop.hbase.snapshot.SnapshotManifest; +import org.apache.hadoop.hbase.util.Bytes; /** * A collection for methods used by multiple classes to restore HBase tables. 
@@ -51,22 +63,72 @@ public class RestoreUtil { public static final Log LOG = LogFactory.getLog(RestoreUtil.class); - protected Configuration conf = null; + private final String[] ignoreDirs = { "recovered.edits" }; - protected HBackupFileSystem hBackupFS = null; + protected Configuration conf = null; protected Path backupRootPath; protected String backupId; + protected FileSystem fs; + private final String RESTORE_TMP_PATH = "/tmp"; + private final Path restoreTmpPath; + // store table name and snapshot dir mapping private final HashMap snapshotMap = new HashMap<>(); - public RestoreUtil(Configuration conf, HBackupFileSystem hBackupFS) throws IOException { + public RestoreUtil(Configuration conf, final Path backupRootPath, final String backupId) + throws IOException { this.conf = conf; - this.hBackupFS = hBackupFS; - this.backupRootPath = hBackupFS.getBackupRootPath(); - this.backupId = hBackupFS.getBackupId(); + this.backupRootPath = backupRootPath; + this.backupId = backupId; + this.fs = backupRootPath.getFileSystem(conf); + this.restoreTmpPath = new Path(conf.get("hbase.fs.tmp.dir") != null? + conf.get("hbase.fs.tmp.dir"): RESTORE_TMP_PATH, + "restore"); + } + + /** + * return value represent path for: + * ".../user/biadmin/backup1/default/t1_dn/backup_1396650096738/archive/data/default/t1_dn" + * @param tabelName table name + * @return path to table archive + * @throws IOException exception + */ + Path getTableArchivePath(TableName tableName) + throws IOException { + Path baseDir = new Path(HBackupFileSystem.getTableBackupPath(backupRootPath, tableName, + backupId), HConstants.HFILE_ARCHIVE_DIRECTORY); + Path dataDir = new Path(baseDir, HConstants.BASE_NAMESPACE_DIR); + Path archivePath = new Path(dataDir, tableName.getNamespaceAsString()); + Path tableArchivePath = + new Path(archivePath, tableName.getQualifierAsString()); + if (!fs.exists(tableArchivePath) || !fs.getFileStatus(tableArchivePath).isDirectory()) { + LOG.debug("Folder tableArchivePath: " + tableArchivePath.toString() + " does not exists"); + tableArchivePath = null; // empty table has no archive + } + return tableArchivePath; + } + + /** + * Gets region list + * @param tableName table name + * @return RegionList region list + * @throws FileNotFoundException exception + * @throws IOException exception + */ + ArrayList getRegionList(TableName tableName) + throws FileNotFoundException, IOException { + Path tableArchivePath = this.getTableArchivePath(tableName); + ArrayList regionDirList = new ArrayList(); + FileStatus[] children = fs.listStatus(tableArchivePath); + for (FileStatus childStatus : children) { + // here child refer to each region(Name) + Path child = childStatus.getPath(); + regionDirList.add(child); + } + return regionDirList; } /** @@ -78,7 +140,7 @@ public class RestoreUtil { * @param newTableNames : target tableNames(table names to be restored to) * @throws IOException exception */ - public void incrementalRestoreTable(String logDir, + void incrementalRestoreTable(String logDir, TableName[] tableNames, TableName[] newTableNames) throws IOException { if (tableNames.length != newTableNames.length) { @@ -103,11 +165,112 @@ public class RestoreUtil { } } - public void fullRestoreTable(Path tableBackupPath, TableName tableName, TableName newTableName, + void fullRestoreTable(Path tableBackupPath, TableName tableName, TableName newTableName, boolean converted) throws IOException { restoreTableAndCreate(tableName, newTableName, tableBackupPath, converted); } + /** + * return value represent path for: + * 
+ /** + * Returns the path for: + * ".../user/biadmin/backup1/default/t1_dn/backup_1396650096738/.hbase-snapshot" + * @param backupRootPath backup root path + * @param tableName table name + * @param backupId backup Id + * @return path for snapshot + */ + static Path getTableSnapshotPath(Path backupRootPath, TableName tableName, + String backupId) { + return new Path(HBackupFileSystem.getTableBackupPath(backupRootPath, tableName, backupId), + HConstants.SNAPSHOT_DIR_NAME); + } + + /** + * Returns the path for: + * "..../default/t1_dn/backup_1396650096738/.hbase-snapshot/snapshot_1396650097621_default_t1_dn" + * this path contains .snapshotinfo and .tabledesc (0.96 and 0.98), or .snapshotinfo and + * .data.manifest (trunk) + * @param tableName table name + * @return path to table info + * @throws FileNotFoundException exception + * @throws IOException exception + */ + Path getTableInfoPath(TableName tableName) + throws FileNotFoundException, IOException { + Path tableSnapShotPath = getTableSnapshotPath(backupRootPath, tableName, backupId); + Path tableInfoPath = null; + + // can't build the path directly as the timestamp values are different + FileStatus[] snapshots = fs.listStatus(tableSnapShotPath); + for (FileStatus snapshot : snapshots) { + tableInfoPath = snapshot.getPath(); + // SnapshotManifest.DATA_MANIFEST_NAME = "data.manifest"; + if (tableInfoPath.getName().endsWith("data.manifest")) { + break; + } + } + return tableInfoPath; + } + + /** + * @param tableName is the table backed up + * @return {@link HTableDescriptor} saved in backup image of the table + */ + HTableDescriptor getTableDesc(TableName tableName) + throws FileNotFoundException, IOException { + Path tableInfoPath = this.getTableInfoPath(tableName); + SnapshotDescription desc = SnapshotDescriptionUtils.readSnapshotInfo(fs, tableInfoPath); + SnapshotManifest manifest = SnapshotManifest.open(conf, fs, tableInfoPath, desc); + HTableDescriptor tableDescriptor = manifest.getTableDescriptor(); + if (!tableDescriptor.getNameAsString().equals(tableName.getNameAsString())) { + LOG.error("couldn't find Table Desc for table: " + tableName + " under tableInfoPath: " + + tableInfoPath.toString()); + LOG.error("tableDescriptor.getNameAsString() = " + tableDescriptor.getNameAsString()); + } + return tableDescriptor; + } + + /** + * Duplicate the backup image if it's on local cluster + * @see HStore#bulkLoadHFile(String, long) + * @see HRegionFileSystem#bulkLoadStoreFile(String familyName, Path srcPath, long seqNum) + * @param tableArchivePath archive path + * @return the new tableArchivePath + * @throws IOException exception + */ + Path checkLocalAndBackup(Path tableArchivePath) throws IOException { + // Move the file if it's on local cluster + boolean isCopyNeeded = false; + + FileSystem srcFs = tableArchivePath.getFileSystem(conf); + FileSystem desFs = FileSystem.get(conf); + if (tableArchivePath.getName().startsWith("/")) { + isCopyNeeded = true; + } else { + // This should match what is done in @see HRegionFileSystem#bulkLoadStoreFile(String, Path, + // long) + if (srcFs.getUri().equals(desFs.getUri())) { + LOG.debug("cluster holds the backup image: " + srcFs.getUri() + "; local cluster node: " + + desFs.getUri()); + isCopyNeeded = true; + } + } + if (isCopyNeeded) { + LOG.debug("File " + tableArchivePath + " on local cluster, back it up before restore"); + if (desFs.exists(restoreTmpPath)) { + try { + desFs.delete(restoreTmpPath, true); + } catch (IOException e) { + LOG.debug("Failed to delete path: " + restoreTmpPath + + ", need to check whether restore target 
DFS cluster is healthy"); + } + } + FileUtil.copy(srcFs, tableArchivePath, desFs, restoreTmpPath, false, conf); + LOG.debug("Copied to temporary path on local cluster: " + restoreTmpPath); + tableArchivePath = restoreTmpPath; + } + return tableArchivePath; + } + private void restoreTableAndCreate(TableName tableName, TableName newTableName, Path tableBackupPath, boolean converted) throws IOException { if (newTableName == null || newTableName.equals("")) { @@ -119,8 +282,7 @@ public class RestoreUtil { // get table descriptor first HTableDescriptor tableDescriptor = null; - Path tableSnapshotPath = HBackupFileSystem.getTableSnapshotPath(backupRootPath, tableName, - backupId); + Path tableSnapshotPath = getTableSnapshotPath(backupRootPath, tableName, backupId); if (fileSys.exists(tableSnapshotPath)) { // snapshot path exist means the backup path is in HDFS @@ -130,11 +292,9 @@ public class RestoreUtil { SnapshotDescriptionUtils.readSnapshotInfo(fileSys, tableSnapshotPath); SnapshotManifest manifest = SnapshotManifest.open(conf, fileSys, tableSnapshotPath, desc); tableDescriptor = manifest.getTableDescriptor(); - - } else { - tableDescriptor = hBackupFS.getTableDesc(tableName); - snapshotMap.put(tableName, hBackupFS.getTableInfoPath(tableName)); + tableDescriptor = getTableDesc(tableName); + snapshotMap.put(tableName, getTableInfoPath(tableName)); } if (tableDescriptor == null) { LOG.debug("Found no table descriptor in the snapshot dir, previous schema would be lost"); @@ -144,7 +304,7 @@ public class RestoreUtil { LOG.error("convert will be supported in a future jira"); } - Path tableArchivePath = hBackupFS.getTableArchivePath(tableName); + Path tableArchivePath = getTableArchivePath(tableName); if (tableArchivePath == null) { if (tableDescriptor != null) { // find table descriptor but no archive dir means the table is empty, create table and exit @@ -171,7 +331,7 @@ public class RestoreUtil { // record all region dirs: // load all files in dir try { - ArrayList regionPathList = hBackupFS.getRegionList(tableName); + ArrayList regionPathList = getRegionList(tableName); // should only try to create the table with all region informations, so we could pre-split // the regions in fine grain @@ -180,13 +340,13 @@ public class RestoreUtil { if (tableArchivePath != null) { // start real restore through bulkload // if the backup target is on local cluster, special action needed - Path tempTableArchivePath = hBackupFS.checkLocalAndBackup(tableArchivePath); + Path tempTableArchivePath = checkLocalAndBackup(tableArchivePath); if (tempTableArchivePath.equals(tableArchivePath)) { if(LOG.isDebugEnabled()) { LOG.debug("TableArchivePath for bulkload using existPath: " + tableArchivePath); } } else { - regionPathList = hBackupFS.getRegionList(tempTableArchivePath); // point to the tempDir + regionPathList = getRegionList(tempTableArchivePath); // point to the tempDir if(LOG.isDebugEnabled()) { LOG.debug("TableArchivePath for bulkload using tempPath: " + tempTableArchivePath); } @@ -211,7 +371,72 @@ public class RestoreUtil { } } + /** + * Gets region list + * @param tableArchivePath table archive path + * @return RegionList region list + * @throws FileNotFoundException exception + * @throws IOException exception + */ + ArrayList getRegionList(Path tableArchivePath) throws FileNotFoundException, + IOException { + ArrayList regionDirList = new ArrayList(); + FileStatus[] children = fs.listStatus(tableArchivePath); + for (FileStatus childStatus : children) { + // here child refer to each region(Name) + Path 
child = childStatus.getPath(); + regionDirList.add(child); + } + return regionDirList; + } + /** + * Counts the number of files in all subdirectories of an HBase table, i.e. HFiles. + * @param regionPath Path to an HBase table directory + * @return the number of files in all directories + * @throws IOException exception + */ + int getNumberOfFilesInDir(Path regionPath) throws IOException { + int result = 0; + + if (!fs.exists(regionPath) || !fs.getFileStatus(regionPath).isDirectory()) { + throw new IllegalStateException("Cannot restore hbase table because directory '" + + regionPath.toString() + "' is not a directory."); + } + + FileStatus[] tableDirContent = fs.listStatus(regionPath); + for (FileStatus subDirStatus : tableDirContent) { + FileStatus[] colFamilies = fs.listStatus(subDirStatus.getPath()); + for (FileStatus colFamilyStatus : colFamilies) { + FileStatus[] colFamilyContent = fs.listStatus(colFamilyStatus.getPath()); + result += colFamilyContent.length; + } + } + return result; + } + + /** + * Counts the number of files in all subdirectories of HBase tables, i.e. HFiles, and finds the + * maximum number of files in one HBase table. + * @param tableArchivePath archive path + * @return the maximum number of files found in one HBase table + * @throws IOException exception + */ + int getMaxNumberOfFilesInSubDir(Path tableArchivePath) throws IOException { + int result = 1; + ArrayList<Path> regionPathList = getRegionList(tableArchivePath); + // tableArchivePath = this.getTableArchivePath(tableName); + + if (regionPathList == null || regionPathList.size() == 0) { + throw new IllegalStateException("Cannot restore hbase table because directory '" + + tableArchivePath + "' does not contain any region directories."); + } + + for (Path regionPath : regionPathList) { + result = Math.max(result, getNumberOfFilesInDir(regionPath)); + } + return result; + } /** * Create a {@link LoadIncrementalHFiles} instance to be used to restore the HFiles of a full * @@ -228,8 +453,8 @@ Integer milliSecInMin = 60000; Integer previousMillis = this.conf.getInt("hbase.rpc.timeout", 0); Integer numberOfFilesInDir = - multipleTables ? hBackupFS.getMaxNumberOfFilesInSubDir(tableArchivePath) : hBackupFS - .getNumberOfFilesInDir(tableArchivePath); + multipleTables ? 
getMaxNumberOfFilesInSubDir(tableArchivePath) : + getNumberOfFilesInDir(tableArchivePath); Integer calculatedMillis = numberOfFilesInDir * milliSecInMin; // 1 minute per file Integer resultMillis = Math.max(calculatedMillis, previousMillis); if (resultMillis > previousMillis) { @@ -249,6 +474,86 @@ public class RestoreUtil { } /** + * Calculate region boundaries and add all the column families to the table descriptor + * @param regionDirList region dir list + * @return a set of keys to store the boundaries + */ + byte[][] generateBoundaryKeys(ArrayList regionDirList) + throws FileNotFoundException, IOException { + TreeMap map = new TreeMap(Bytes.BYTES_COMPARATOR); + // Build a set of keys to store the boundaries + byte[][] keys = null; + // calculate region boundaries and add all the column families to the table descriptor + for (Path regionDir : regionDirList) { + LOG.debug("Parsing region dir: " + regionDir); + Path hfofDir = regionDir; + + if (!fs.exists(hfofDir)) { + LOG.warn("HFileOutputFormat dir " + hfofDir + " not found"); + } + + FileStatus[] familyDirStatuses = fs.listStatus(hfofDir); + if (familyDirStatuses == null) { + throw new IOException("No families found in " + hfofDir); + } + + for (FileStatus stat : familyDirStatuses) { + if (!stat.isDirectory()) { + LOG.warn("Skipping non-directory " + stat.getPath()); + continue; + } + boolean isIgnore = false; + String pathName = stat.getPath().getName(); + for (String ignore : ignoreDirs) { + if (pathName.contains(ignore)) { + LOG.warn("Skipping non-family directory" + pathName); + isIgnore = true; + break; + } + } + if (isIgnore) { + continue; + } + Path familyDir = stat.getPath(); + LOG.debug("Parsing family dir [" + familyDir.toString() + " in region [" + regionDir + "]"); + // Skip _logs, etc + if (familyDir.getName().startsWith("_") || familyDir.getName().startsWith(".")) { + continue; + } + + // start to parse hfile inside one family dir + Path[] hfiles = FileUtil.stat2Paths(fs.listStatus(familyDir)); + for (Path hfile : hfiles) { + if (hfile.getName().startsWith("_") || hfile.getName().startsWith(".") + || StoreFileInfo.isReference(hfile.getName()) + || HFileLink.isHFileLink(hfile.getName())) { + continue; + } + HFile.Reader reader = HFile.createReader(fs, hfile, new CacheConfig(conf), conf); + final byte[] first, last; + try { + reader.loadFileInfo(); + first = reader.getFirstRowKey(); + last = reader.getLastRowKey(); + LOG.debug("Trying to figure out region boundaries hfile=" + hfile + " first=" + + Bytes.toStringBinary(first) + " last=" + Bytes.toStringBinary(last)); + + // To eventually infer start key-end key boundaries + Integer value = map.containsKey(first) ? (Integer) map.get(first) : 0; + map.put(first, value + 1); + value = map.containsKey(last) ? 
(Integer) map.get(last) : 0; + map.put(last, value - 1); + } finally { + reader.close(); + } + } + } + } + keys = LoadIncrementalHFiles.inferBoundaries(map); + return keys; + } + + /** * Prepare the table for bulkload, most codes copied from * {@link LoadIncrementalHFiles#createTable(String, String)} * @param tableBackupPath path @@ -278,7 +583,7 @@ public class RestoreUtil { return; } - byte[][] keys = hBackupFS.generateBoundaryKeys(regionDirList); + byte[][] keys = generateBoundaryKeys(regionDirList); // create table using table decriptor and region boundaries hbadmin.createTable(htd, keys); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java index 89cfd18fb152ec58811b4dad62c495af255b794e..eefc8ee6617d1a2585eb8037ab5ddd05f29300ca 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java @@ -49,6 +49,7 @@ import javax.servlet.http.HttpServletResponse; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.ClusterStatus; import org.apache.hadoop.hbase.CoordinatedStateException; @@ -75,7 +76,13 @@ import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableNotDisabledException; import org.apache.hadoop.hbase.TableNotFoundException; import org.apache.hadoop.hbase.UnknownRegionException; +import org.apache.hadoop.hbase.backup.BackupType; +import org.apache.hadoop.hbase.backup.HBackupFileSystem; import org.apache.hadoop.hbase.backup.impl.BackupManager; +import org.apache.hadoop.hbase.backup.impl.BackupRestoreConstants; +import org.apache.hadoop.hbase.backup.impl.BackupSystemTable; +import org.apache.hadoop.hbase.backup.impl.FullTableBackupProcedure; +import org.apache.hadoop.hbase.backup.impl.IncrementalTableBackupProcedure; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.RegionReplicaUtil; @@ -142,6 +149,7 @@ import org.apache.hadoop.hbase.util.Addressing; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.CompressionTest; import org.apache.hadoop.hbase.util.EncryptionTest; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.FSUtils; import org.apache.hadoop.hbase.util.HFileArchiveUtil; import org.apache.hadoop.hbase.util.HasThread; @@ -167,6 +175,9 @@ import org.mortbay.jetty.nio.SelectChannelConnector; import org.mortbay.jetty.servlet.Context; import com.google.common.annotations.VisibleForTesting; +import com.google.common.cache.Cache; +import com.google.common.cache.CacheBuilder; +import com.google.common.collect.Lists; import com.google.common.collect.Maps; import com.google.protobuf.Descriptors; import com.google.protobuf.Service; @@ -303,6 +314,7 @@ public class HMaster extends HRegionServer implements MasterServices { private ClusterStatusPublisher clusterStatusPublisherChore = null; CatalogJanitor catalogJanitorChore; + private Cache backupId2ProcId; private LogCleaner logCleaner; private HFileCleaner hfileCleaner; private ExpiredMobFileCleanerChore expiredMobFileCleanerChore; @@ -785,6 +797,11 @@ public class HMaster extends HRegionServer implements MasterServices { status.setStatus("Starting cluster schema 
service"); initClusterSchemaService(); + this.backupId2ProcId = CacheBuilder.newBuilder() + .expireAfterWrite(getConfiguration().getInt("hbase.master.cache.procId.expirySec", 5000), + TimeUnit.SECONDS).initialCapacity(10).maximumSize(1000) + .build(); + if (this.cpHost != null) { try { this.cpHost.preMasterInitialization(); @@ -2565,6 +2582,16 @@ public class HMaster extends HRegionServer implements MasterServices { } @Override + public boolean abortBackup(final String backupId, final boolean mayInterruptIfRunning) { + Long procId = backupId2ProcId.getIfPresent(backupId); + if (procId == null) { + return false; + } + final boolean result = this.procedureExecutor.abort(procId, mayInterruptIfRunning); + return result; + } + + @Override public boolean abortProcedure(final long procId, final boolean mayInterruptIfRunning) throws IOException { if (cpHost != null) { @@ -2595,6 +2622,50 @@ public class HMaster extends HRegionServer implements MasterServices { return procInfoList; } + @Override + public Pair backupTables(final BackupType type, + List tableList, final String targetRootDir, final int workers, + final long bandwidth) throws IOException { + long procId; + String backupId = BackupRestoreConstants.BACKUPID_PREFIX + EnvironmentEdgeManager.currentTime(); + if (type == BackupType.INCREMENTAL) { + Set incrTableSet = + BackupSystemTable.getIncrementalBackupTableSet(clusterConnection); + if (incrTableSet.isEmpty()) { + LOG.warn("Incremental backup table set contains no table.\n" + + "Use 'backup create full' or 'backup stop' to \n " + + "change the tables covered by incremental backup."); + throw new DoNotRetryIOException("No table covered by incremental backup."); + } + + LOG.info("Incremental backup for the following table set: " + incrTableSet); + tableList = Lists.newArrayList(incrTableSet); + } + if (tableList != null) { + for (TableName table : tableList) { + String targetTableBackupDir = + HBackupFileSystem.getTableBackupDir(targetRootDir, backupId, table); + Path targetTableBackupDirPath = new Path(targetTableBackupDir); + FileSystem outputFs = FileSystem.get(targetTableBackupDirPath.toUri(), conf); + if (outputFs.exists(targetTableBackupDirPath)) { + throw new DoNotRetryIOException("Target backup directory " + targetTableBackupDir + + " exists already."); + } + } + } + if (type == BackupType.FULL) { + procId = this.procedureExecutor.submitProcedure( + new FullTableBackupProcedure(procedureExecutor.getEnvironment(), backupId, + tableList, targetRootDir, workers, bandwidth)); + } else { + procId = this.procedureExecutor.submitProcedure( + new IncrementalTableBackupProcedure(procedureExecutor.getEnvironment(), backupId, + tableList, targetRootDir, workers, bandwidth)); + } + this.backupId2ProcId.put(backupId, procId); + return new Pair<>(procId, backupId); + } + /** * Returns the list of table descriptors that match the specified request * @param namespace the namespace to query, or null if querying for all diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java index d6a53cd0c69514e3e71843dd872e2ec6d64e5414..e29f70fa9486bb15325f50801bbfea3615d316fe 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java @@ -40,6 +40,7 @@ import org.apache.hadoop.hbase.ServerLoad; import org.apache.hadoop.hbase.ServerName; import 
org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.UnknownRegionException; +import org.apache.hadoop.hbase.backup.BackupType; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.TableState; @@ -1021,6 +1022,17 @@ public class MasterRpcServices extends RSRpcServices } @Override + public AbortBackupResponse abortBackup( + RpcController rpcController, + AbortBackupRequest request) throws ServiceException { + AbortBackupResponse.Builder response = AbortBackupResponse.newBuilder(); + boolean abortResult = + master.abortBackup(request.getBackupId(), request.getMayInterruptIfRunning()); + response.setIsBackupAborted(abortResult); + return response.build(); + } + + @Override public ListNamespaceDescriptorsResponse listNamespaceDescriptors(RpcController c, ListNamespaceDescriptorsRequest request) throws ServiceException { try { @@ -1052,6 +1064,25 @@ public class MasterRpcServices extends RSRpcServices } @Override + public MasterProtos.BackupTablesResponse backupTables( + RpcController controller, + MasterProtos.BackupTablesRequest request) throws ServiceException { + try { + BackupTablesResponse.Builder response = BackupTablesResponse.newBuilder(); + List tablesList = new ArrayList<>(request.getTablesList().size()); + for (HBaseProtos.TableName table : request.getTablesList()) { + tablesList.add(ProtobufUtil.toTableName(table)); + } + Pair pair = master.backupTables( + BackupType.valueOf(request.getType().name()), tablesList, request.getTargetRootDir(), + (int)request.getWorkers(), request.getBandwidth()); + return response.setProcId(pair.getFirst()).setBackupId(pair.getSecond()).build(); + } catch (IOException e) { + throw new ServiceException(e); + } + } + + @Override public ListTableDescriptorsByNamespaceResponse listTableDescriptorsByNamespace(RpcController c, ListTableDescriptorsByNamespaceRequest request) throws ServiceException { try { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java index 59c7a88e1e9efe159c2cb8d7f61cfec022ab9baf..3beb5eeefb0e61e3edb886d0ff06b23827fcdf84 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java @@ -20,6 +20,7 @@ package org.apache.hadoop.hbase.master; import java.io.IOException; import java.util.List; +import java.util.concurrent.Future; import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HRegionInfo; @@ -30,6 +31,7 @@ import org.apache.hadoop.hbase.TableDescriptors; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableNotDisabledException; import org.apache.hadoop.hbase.TableNotFoundException; +import org.apache.hadoop.hbase.backup.BackupType; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.executor.ExecutorService; import org.apache.hadoop.hbase.master.normalizer.RegionNormalizer; @@ -37,6 +39,7 @@ import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv; import org.apache.hadoop.hbase.procedure2.ProcedureExecutor; import org.apache.hadoop.hbase.quotas.MasterQuotaManager; import org.apache.hadoop.hbase.security.User; +import org.apache.hadoop.hbase.util.Pair; import com.google.protobuf.Service; @@ -167,6 +170,23 @@ public interface MasterServices extends Server { throws IOException; /** + * Full backup 
of the given list of tables + * @param type whether the backup is full or incremental + * @param tableList list of tables to backup + * @param targetRootDir root dir for saving the backup + * @param workers number of parallel workers. -1 - system defined + * @param bandwidth bandwidth per worker in MB per sec. -1 - unlimited + * @return pair of procedure Id and backupId + * @throws IOException + */ + public Pair<Long, String> backupTables( + final BackupType type, + List<TableName> tableList, + final String targetRootDir, + final int workers, + final long bandwidth) throws IOException; + + /** * Enable an existing table * @param tableName The table name * @param nonceGroup @@ -291,6 +311,16 @@ public interface MasterServices extends Server { throws IOException; /** + * Abort a backup. + * @param backupId ID of the backup + * @param mayInterruptIfRunning if the backup completed at least one step, should it be aborted? + * @return true if aborted, false if backup already completed or does not exist + * @throws IOException + */ + public boolean abortBackup(final String backupId, final boolean mayInterruptIfRunning) + throws IOException; + + /** * List procedures * @return procedure list * @throws IOException diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TableProcedureInterface.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TableProcedureInterface.java index cc088f35f0bfa3e69dd9f2b72fc4cb7c796f47f3..56983a9ce76070ed1d48fb7108499fea3174b3f6 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TableProcedureInterface.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TableProcedureInterface.java @@ -30,7 +30,7 @@ import org.apache.hadoop.hbase.classification.InterfaceStability; @InterfaceStability.Evolving public interface TableProcedureInterface { public enum TableOperationType { - CREATE, DELETE, DISABLE, EDIT, ENABLE, READ, + CREATE, DELETE, DISABLE, EDIT, ENABLE, READ, BACKUP, }; /** diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java index 84b7c78381849fae23dcac1b58831e2fc66aeecd..ba1081f4e40bfe4871927355574096ca80b164a2 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java @@ -126,6 +126,30 @@ public class TestBackupBase { TEST_UTIL.shutdownMiniMapReduceCluster(); } + protected String backupTables(BackupType type, List<TableName> tables, String path) + throws IOException { + Connection conn = null; + HBaseAdmin admin = null; + String backupId; + try { + conn = ConnectionFactory.createConnection(conf1); + admin = (HBaseAdmin) conn.getAdmin(); + backupId = admin.backupTablesSync(type, tables, path, -1, -1); + } finally { + if (admin != null) { + admin.close(); + } + if (conn != null) { + conn.close(); + } + } + return backupId; + } + + protected String fullTableBackup(List<TableName> tables) throws IOException { + return backupTables(BackupType.FULL, tables, BACKUP_ROOT_DIR); + } + protected static void loadTable(HTable table) throws Exception { Put p; // 100 + 1 row to t1_syncup @@ -140,6 +164,7 @@ public class TestBackupBase { long tid = System.currentTimeMillis(); table1 = TableName.valueOf("test-" + tid); + BackupSystemTable backupTable = new BackupSystemTable(TEST_UTIL.getConnection()); HBaseAdmin ha = TEST_UTIL.getHBaseAdmin(); HTableDescriptor desc = new HTableDescriptor(table1);
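// Aside, not part of the patch: on the master side, the MasterServices additions earlier in
// this diff work in terms of a (procedure id, backup id) pair, which is what lets abortBackup
// find the procedure to kill. A compressed sketch under that assumption, where "master" stands
// for the running HMaster and the table list and root dir are illustrative placeholders:
//   List<TableName> tables = Arrays.asList(TableName.valueOf("t1"), TableName.valueOf("t2"));
//   Pair<Long, String> submitted =
//       master.backupTables(BackupType.FULL, tables, "hdfs://ns1/backup", -1, -1L);
//   // HMaster remembers backupId -> procId in the backupId2ProcId cache, so an abort needs
//   // only the backup id; it returns false when the id is unknown or has expired from the cache.
//   boolean aborted = master.abortBackup(submitted.getSecond(), true);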
HColumnDescriptor fam = new HColumnDescriptor(famName); @@ -187,10 +212,6 @@ public class TestBackupBase { } } - protected BackupClient getBackupClient(){ - return BackupRestoreFactory.getBackupClient(conf1); - } - protected RestoreClient getRestoreClient() { return BackupRestoreFactory.getRestoreClient(conf1); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBoundaryTests.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBoundaryTests.java index 21bf63c162fa0d3ab10d9cb9aa0fb29d3efdd93a..7f5846cb841fd15730dc975a64f07d5946645bbf 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBoundaryTests.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBoundaryTests.java @@ -46,9 +46,7 @@ public class TestBackupBoundaryTests extends TestBackupBase { LOG.info("create full backup image on single table"); List tables = Lists.newArrayList(table3); - String backupId = getBackupClient().create(BackupType.FULL, tables, BACKUP_ROOT_DIR); - LOG.info("Finished Backup"); - assertTrue(checkSucceeded(backupId)); + LOG.info("Finished Backup " + fullTableBackup(tables)); } /** @@ -60,8 +58,7 @@ public class TestBackupBoundaryTests extends TestBackupBase { LOG.info("create full backup image on mulitple empty tables"); List tables = Lists.newArrayList(table3, table4); - String backupId = getBackupClient().create(BackupType.FULL, tables, BACKUP_ROOT_DIR); - assertTrue(checkSucceeded(backupId)); + fullTableBackup(tables); } /** @@ -73,8 +70,7 @@ public class TestBackupBoundaryTests extends TestBackupBase { LOG.info("test full backup fails on a single table that does not exist"); List tables = toList("tabledne"); - String backupId = getBackupClient().create(BackupType.FULL, tables, BACKUP_ROOT_DIR); - assertTrue(checkSucceeded(backupId)); + fullTableBackup(tables); } /** @@ -86,8 +82,7 @@ public class TestBackupBoundaryTests extends TestBackupBase { LOG.info("test full backup fails on multiple tables that do not exist"); List tables = toList("table1dne", "table2dne"); - String backupId = getBackupClient().create(BackupType.FULL, tables, BACKUP_ROOT_DIR); - assertTrue(checkSucceeded(backupId)); + fullTableBackup(tables); } /** @@ -99,7 +94,6 @@ public class TestBackupBoundaryTests extends TestBackupBase { LOG.info("create full backup fails on tableset containing real and fake table"); List tables = toList(table1.getNameAsString(), "tabledne"); - String backupId = getBackupClient().create(BackupType.FULL, tables, BACKUP_ROOT_DIR); - //assertTrue(checkSucceeded(backupId)); // TODO + fullTableBackup(tables); } } \ No newline at end of file diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupLogCleaner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupLogCleaner.java index 899f53bdaf8ec9b4149b1f486f410d906c77795f..175bce06df0b131817c155d1086aa01918e8dade 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupLogCleaner.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupLogCleaner.java @@ -80,9 +80,8 @@ public class TestBackupLogCleaner extends TestBackupBase { assertTrue(Iterables.size(deletable) == walFiles.size()); systemTable.addWALFiles(swalFiles, "backup"); - String backupIdFull = getBackupClient().create(BackupType.FULL, tableSetFullList, - BACKUP_ROOT_DIR); - assertTrue(checkSucceeded(backupIdFull)); + String backupIdFull = fullTableBackup(tableSetFullList); + // 
getBackupClient().create(BackupType.FULL, tableSetFullList, BACKUP_ROOT_DIR); // Check one more time deletable = cleaner.getDeletableFiles(walFiles); // We can delete wal files because they were saved into hbase:backup table @@ -122,9 +121,8 @@ public class TestBackupLogCleaner extends TestBackupBase { // #3 - incremental backup for multiple tables List tableSetIncList = Lists.newArrayList(table1, table2, table3); - String backupIdIncMultiple = - getBackupClient().create(BackupType.INCREMENTAL, tableSetIncList, BACKUP_ROOT_DIR); - assertTrue(checkSucceeded(backupIdIncMultiple)); + String backupIdIncMultiple = backupTables(BackupType.INCREMENTAL, tableSetIncList, + BACKUP_ROOT_DIR); deletable = cleaner.getDeletableFiles(newWalFiles); assertTrue(Iterables.size(deletable) == newWalFiles.size()); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupSystemTable.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupSystemTable.java index 17d5013fff24f5dcad8a7db72aa97afa88c2acb4..bf902e98fbd9417033f0c308379d317b0abd8f69 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupSystemTable.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupSystemTable.java @@ -188,7 +188,8 @@ public class TestBackupSystemTable { tables2.add(TableName.valueOf("t5")); table.addIncrementalBackupTableSet(tables1); - TreeSet res1 = (TreeSet) table.getIncrementalBackupTableSet(); + TreeSet res1 = (TreeSet) BackupSystemTable + .getIncrementalBackupTableSet(conn); assertTrue(tables1.size() == res1.size()); Iterator desc1 = tables1.descendingIterator(); Iterator desc2 = res1.descendingIterator(); @@ -197,7 +198,8 @@ public class TestBackupSystemTable { } table.addIncrementalBackupTableSet(tables2); - TreeSet res2 = (TreeSet) table.getIncrementalBackupTableSet(); + TreeSet res2 = (TreeSet) BackupSystemTable + .getIncrementalBackupTableSet(conn); assertTrue((tables2.size() + tables1.size() - 1) == res2.size()); tables1.addAll(tables2); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestFullBackup.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestFullBackup.java index d9bade1769a04c63e871432916ca0e6c069f6ca0..9dda9673bc67a717bd66dbacbd92b66d92708b1e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestFullBackup.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestFullBackup.java @@ -11,8 +11,6 @@ package org.apache.hadoop.hbase.backup; -import static org.junit.Assert.assertTrue; - import java.util.List; import org.apache.commons.logging.Log; @@ -37,9 +35,8 @@ public class TestFullBackup extends TestBackupBase { public void testFullBackupSingle() throws Exception { LOG.info("test full backup on a single table with data"); List tables = Lists.newArrayList(table1); - String backupId = getBackupClient().create(BackupType.FULL, tables, BACKUP_ROOT_DIR); - assertTrue(checkSucceeded(backupId)); - LOG.info("backup complete"); + String backupId = fullTableBackup(tables); + LOG.info("backup complete for " + backupId); } /** @@ -50,8 +47,7 @@ public class TestFullBackup extends TestBackupBase { public void testFullBackupMultiple() throws Exception { LOG.info("create full backup image on multiple tables with data"); List tables = Lists.newArrayList(table1, table1); - String backupId = getBackupClient().create(BackupType.FULL, tables, BACKUP_ROOT_DIR); - assertTrue(checkSucceeded(backupId)); + String backupId = fullTableBackup(tables); } /** @@ -61,7 +57,6 
@@ public class TestFullBackup extends TestBackupBase { @Test public void testFullBackupAll() throws Exception { LOG.info("create full backup image on all tables"); - String backupId = getBackupClient().create(BackupType.FULL, null, BACKUP_ROOT_DIR); - assertTrue(checkSucceeded(backupId)); + String backupId = fullTableBackup(null); } } \ No newline at end of file diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestFullRestore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestFullRestore.java index e4c4a07b4959e4298231eaff914e1f23fc6cd312..3139134f9fb02f4ccab0d462249ec6b4e1c71529 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestFullRestore.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestFullRestore.java @@ -18,7 +18,6 @@ import java.util.List; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.HBaseAdmin; import org.apache.hadoop.hbase.testclassification.LargeTests; @@ -42,14 +41,11 @@ public class TestFullRestore extends TestBackupBase { LOG.info("test full restore on a single table empty table"); List tables = Lists.newArrayList(table1); - String backupId = getBackupClient().create(BackupType.FULL, tables, BACKUP_ROOT_DIR); - assertTrue(checkSucceeded(backupId)); + String backupId = fullTableBackup(tables); LOG.info("backup complete"); TableName[] tableset = new TableName[] { table1 }; TableName[] tablemap = new TableName[] { table1_restore }; - Path path = new Path(BACKUP_ROOT_DIR); - HBackupFileSystem hbfs = new HBackupFileSystem(conf1, path, backupId); RestoreClient client = getRestoreClient(); client.restore(BACKUP_ROOT_DIR, backupId, false, false, tableset, tablemap, false); HBaseAdmin hba = TEST_UTIL.getHBaseAdmin(); @@ -66,13 +62,10 @@ public class TestFullRestore extends TestBackupBase { public void testFullRestoreMultiple() throws Exception { LOG.info("create full backup image on multiple tables"); List tables = Lists.newArrayList(table2, table3); - String backupId = getBackupClient().create(BackupType.FULL, tables, BACKUP_ROOT_DIR); - assertTrue(checkSucceeded(backupId)); + String backupId = fullTableBackup(tables); TableName[] restore_tableset = new TableName[] { table2, table3 }; TableName[] tablemap = new TableName[] { table2_restore, table3_restore }; - Path path = new Path(BACKUP_ROOT_DIR); - HBackupFileSystem hbfs = new HBackupFileSystem(conf1, path, backupId); RestoreClient client = getRestoreClient(); client.restore(BACKUP_ROOT_DIR, backupId, false, false, restore_tableset, tablemap, false); @@ -93,13 +86,10 @@ public class TestFullRestore extends TestBackupBase { LOG.info("test full restore on a single table empty table"); List tables = Lists.newArrayList(table1); - String backupId = getBackupClient().create(BackupType.FULL, tables, BACKUP_ROOT_DIR); - assertTrue(checkSucceeded(backupId)); + String backupId = fullTableBackup(tables); LOG.info("backup complete"); TableName[] tableset = new TableName[] { table1 }; - Path path = new Path(BACKUP_ROOT_DIR); - HBackupFileSystem hbfs = new HBackupFileSystem(conf1, path, backupId); RestoreClient client = getRestoreClient(); client.restore(BACKUP_ROOT_DIR, backupId, false, false, tableset, null, true); @@ -114,12 +104,9 @@ public class TestFullRestore extends TestBackupBase { LOG.info("create full backup image on multiple tables"); List tables = Lists.newArrayList(table2, table3); - String 
backupId = getBackupClient().create(BackupType.FULL, tables, BACKUP_ROOT_DIR); - assertTrue(checkSucceeded(backupId)); + String backupId = fullTableBackup(tables); TableName[] restore_tableset = new TableName[] { table2, table3 }; - Path path = new Path(BACKUP_ROOT_DIR); - HBackupFileSystem hbfs = new HBackupFileSystem(conf1, path, backupId); RestoreClient client = getRestoreClient(); client.restore(BACKUP_ROOT_DIR, backupId, false, false, restore_tableset, null, true); @@ -134,14 +121,12 @@ public class TestFullRestore extends TestBackupBase { LOG.info("test restore fails on a single table that does not exist"); List tables = Lists.newArrayList(table1); - String backupId = getBackupClient().create(BackupType.FULL, tables, BACKUP_ROOT_DIR); - assertTrue(checkSucceeded(backupId)); + String backupId = fullTableBackup(tables); + LOG.info("backup complete"); TableName[] tableset = new TableName[] { TableName.valueOf("faketable") }; TableName[] tablemap = new TableName[] { table1_restore }; - Path path = new Path(BACKUP_ROOT_DIR); - HBackupFileSystem hbfs = new HBackupFileSystem(conf1, path, backupId); RestoreClient client = getRestoreClient(); client.restore(BACKUP_ROOT_DIR, backupId, false, false, tableset, tablemap, false); @@ -157,14 +142,11 @@ public class TestFullRestore extends TestBackupBase { LOG.info("test restore fails on multiple tables that do not exist"); List tables = Lists.newArrayList(table2, table3); - String backupId = getBackupClient().create(BackupType.FULL, tables, BACKUP_ROOT_DIR); - assertTrue(checkSucceeded(backupId)); + String backupId = fullTableBackup(tables); TableName[] restore_tableset = new TableName[] { TableName.valueOf("faketable1"), TableName.valueOf("faketable2") }; TableName[] tablemap = new TableName[] { table2_restore, table3_restore }; - Path path = new Path(BACKUP_ROOT_DIR); - HBackupFileSystem hbfs = new HBackupFileSystem(conf1, path, backupId); RestoreClient client = getRestoreClient(); client.restore(BACKUP_ROOT_DIR, backupId, false, false, restore_tableset, tablemap, false); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackup.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackup.java index 23b1af172a2bc0163a4551184aef39a1a6067a98..63e864c2bd74065d0d3f988b44d7d2b38f1e7223 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackup.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackup.java @@ -46,17 +46,18 @@ public class TestIncrementalBackup extends TestBackupBase { //implement all testcases in 1 test since incremental backup/restore has dependencies @Test public void TestIncBackupRestore() throws Exception { - HBackupFileSystem hbfs; - // #1 - create full backup for all tables LOG.info("create full backup image for all tables"); List tables = Lists.newArrayList(table1, table2, table3, table4); - String backupIdFull = getBackupClient().create(BackupType.FULL, tables, BACKUP_ROOT_DIR); + HBaseAdmin admin = null; + Connection conn = ConnectionFactory.createConnection(conf1); + admin = (HBaseAdmin) conn.getAdmin(); + + String backupIdFull = admin.backupTablesSync(BackupType.FULL, tables, BACKUP_ROOT_DIR, -1, -1); assertTrue(checkSucceeded(backupIdFull)); - Connection conn = ConnectionFactory.createConnection(conf1); // #2 - insert some data to table HTable t1 = (HTable) conn.getTable(table1); Put p1; @@ -81,13 +82,9 @@ public class TestIncrementalBackup extends TestBackupBase { t2.close(); // #3 - incremental 
backup for multiple tables - - tables = Lists.newArrayList(table1, table2, table3); - String backupIdIncMultiple = getBackupClient().create(BackupType.INCREMENTAL, - tables, BACKUP_ROOT_DIR); - assertTrue(checkSucceeded(backupIdIncMultiple)); - + String backupIdIncMultiple = admin.backupTablesSync(BackupType.INCREMENTAL, + tables, BACKUP_ROOT_DIR, -1, -1); // #4 - restore full backup for all tables, without overwrite TableName[] tablesRestoreFull = @@ -96,7 +93,6 @@ public class TestIncrementalBackup extends TestBackupBase { TableName[] tablesMapFull = new TableName[] { table1_restore, table2_restore, table3_restore, table4_restore }; - hbfs = new HBackupFileSystem(conf1, new Path(BACKUP_ROOT_DIR), backupIdFull); RestoreClient client = getRestoreClient(); client.restore(BACKUP_ROOT_DIR, backupIdFull, false, false, tablesRestoreFull, @@ -133,7 +129,7 @@ public class TestIncrementalBackup extends TestBackupBase { new TableName[] { table1, table2, table3 }; TableName[] tablesMapIncMultiple = new TableName[] { table1_restore, table2_restore, table3_restore }; - hbfs = new HBackupFileSystem(conf1, new Path(BACKUP_ROOT_DIR), backupIdIncMultiple); + LOG.info("restore inc backup " + backupIdIncMultiple); client = getRestoreClient(); client.restore(BACKUP_ROOT_DIR, backupIdIncMultiple, false, false, tablesRestoreIncMultiple, tablesMapIncMultiple, true); @@ -154,14 +150,12 @@ public class TestIncrementalBackup extends TestBackupBase { tables = toList(table4.getNameAsString()); String backupIdIncEmpty = - getBackupClient().create(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR); - assertTrue(checkSucceeded(backupIdIncEmpty)); + admin.backupTablesSync(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR, -1, -1); // #8 - restore incremental backup for single empty table, with overwrite TableName[] tablesRestoreIncEmpty = new TableName[] { table4 }; TableName[] tablesMapIncEmpty = new TableName[] { table4_restore }; - hbfs = new HBackupFileSystem(conf1, new Path(BACKUP_ROOT_DIR), backupIdIncEmpty); getRestoreClient().restore(BACKUP_ROOT_DIR, backupIdIncEmpty, false, false, tablesRestoreIncEmpty, @@ -170,8 +164,8 @@ public class TestIncrementalBackup extends TestBackupBase { hTable = (HTable) conn.getTable(table4_restore); Assert.assertThat(TEST_UTIL.countRows(hTable), CoreMatchers.equalTo(0)); hTable.close(); + admin.close(); conn.close(); - } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestRemoteBackup.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestRemoteBackup.java index 035188c3a21d6a0fddc6fbbb8a7acb935332f65b..f97f30a06bc2afa9235e4699a439cc5f2e9a3f80 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestRemoteBackup.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestRemoteBackup.java @@ -11,8 +11,6 @@ package org.apache.hadoop.hbase.backup; -import static org.junit.Assert.assertTrue; - import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hbase.testclassification.LargeTests; @@ -35,11 +33,9 @@ public class TestRemoteBackup extends TestBackupBase { LOG.info("test remote full backup on a single table"); - String backupId = - getBackupClient().create(BackupType.FULL, - Lists.newArrayList(table1), BACKUP_REMOTE_ROOT_DIR); - LOG.info("backup complete"); - assertTrue(checkSucceeded(backupId)); + String backupId = backupTables(BackupType.FULL, + Lists.newArrayList(table1), BACKUP_REMOTE_ROOT_DIR); + LOG.info("backup complete " + backupId); } } \ No newline at end 
of file diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestRemoteRestore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestRemoteRestore.java index 6a66a0d6e44c8e2a4e4999e0b1799d7019b411df..cd597e11cd2045780753f1aeb39d08afff3cde1b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestRemoteRestore.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestRemoteRestore.java @@ -35,15 +35,12 @@ public class TestRemoteRestore extends TestBackupBase { public void testFullRestoreRemote() throws Exception { LOG.info("test remote full backup on a single table"); - String backupId = - getBackupClient().create(BackupType.FULL, toList(table1.getNameAsString()), - BACKUP_REMOTE_ROOT_DIR); + String backupId = backupTables(BackupType.FULL, toList(table1.getNameAsString()), + BACKUP_REMOTE_ROOT_DIR); LOG.info("backup complete"); - assertTrue(checkSucceeded(backupId)); TableName[] tableset = new TableName[] { table1 }; TableName[] tablemap = new TableName[] { table1_restore }; Path path = new Path(BACKUP_REMOTE_ROOT_DIR); - HBackupFileSystem hbfs = new HBackupFileSystem(conf1, path, backupId); getRestoreClient().restore(BACKUP_REMOTE_ROOT_DIR, backupId, false, false, tableset, tablemap, false); HBaseAdmin hba = TEST_UTIL.getHBaseAdmin(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestRestoreBoundaryTests.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestRestoreBoundaryTests.java index 6f9b3c77f890740d97d8ab81f64fc9d659606c23..acded775f5c6b533ca59b83b3fe33b3229376dc0 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestRestoreBoundaryTests.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestRestoreBoundaryTests.java @@ -43,15 +43,10 @@ public class TestRestoreBoundaryTests extends TestBackupBase { @Test public void testFullRestoreSingleEmpty() throws Exception { LOG.info("test full restore on a single table empty table"); - String backupId = - getBackupClient().create(BackupType.FULL, toList(table1.getNameAsString()), - BACKUP_ROOT_DIR); + String backupId = fullTableBackup(toList(table1.getNameAsString())); LOG.info("backup complete"); - assertTrue(checkSucceeded(backupId)); TableName[] tableset = new TableName[] { table1 }; TableName[] tablemap = new TableName[] { table1_restore }; - Path path = new Path(BACKUP_ROOT_DIR); - HBackupFileSystem hbfs = new HBackupFileSystem(conf1, path, backupId); getRestoreClient().restore(BACKUP_ROOT_DIR, backupId, false, false, tableset, tablemap, false); HBaseAdmin hba = TEST_UTIL.getHBaseAdmin(); @@ -68,12 +63,9 @@ public class TestRestoreBoundaryTests extends TestBackupBase { LOG.info("create full backup image on multiple tables"); List tables = toList(table2.getNameAsString(), table3.getNameAsString()); - String backupId = getBackupClient().create(BackupType.FULL, tables,BACKUP_ROOT_DIR); - assertTrue(checkSucceeded(backupId)); + String backupId = fullTableBackup(tables); TableName[] restore_tableset = new TableName[] { table2, table3}; TableName[] tablemap = new TableName[] { table2_restore, table3_restore }; - Path path = new Path(BACKUP_ROOT_DIR); - HBackupFileSystem hbfs = new HBackupFileSystem(conf1, path, backupId); getRestoreClient().restore(BACKUP_ROOT_DIR, backupId, false, false, restore_tableset, tablemap, false); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java 
index 123e8b563a8a6fab090cc815292e467074cfff56..e7ad9b84619ac09786eb340dddc063e09ee21820 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java @@ -53,6 +53,7 @@ import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableDescriptor; import org.apache.hadoop.hbase.TableDescriptors; import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.backup.BackupType; import org.apache.hadoop.hbase.client.ClusterConnection; import org.apache.hadoop.hbase.client.HConnectionTestingUtility; import org.apache.hadoop.hbase.client.Result; @@ -83,6 +84,7 @@ import org.apache.hadoop.hbase.testclassification.SmallTests; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.FSUtils; import org.apache.hadoop.hbase.util.HFileArchiveUtil; +import org.apache.hadoop.hbase.util.Pair; import org.apache.hadoop.hbase.util.Triple; import org.apache.hadoop.hbase.zookeeper.MetaTableLocator; import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; @@ -413,11 +415,27 @@ public class TestCatalogJanitor { } @Override + public boolean abortBackup(final String backupId, final boolean mayInterruptIfRunning) + throws IOException { + return false; + } + + @Override public List listProcedures() throws IOException { return null; //To change body of implemented methods use File | Settings | File Templates. } @Override + public Pair backupTables( + final BackupType type, + final List tableList, + final String targetRootDir, final int workers, + final long bandwidth) throws IOException { + return null; + } + + + @Override public List listTableDescriptorsByNamespace(String name) throws IOException { return null; //To change body of implemented methods use File | Settings | File Templates. } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/FullTableBackupProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/FullTableBackupProcedure.java new file mode 100644 index 0000000000000000000000000000000000000000..a7b6719b062f7f6b4d12df41c735d8494ec82e3d --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/FullTableBackupProcedure.java @@ -0,0 +1,703 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hbase.backup.impl; + +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map.Entry; +import java.util.concurrent.atomic.AtomicBoolean; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.backup.BackupClientUtil; +import org.apache.hadoop.hbase.backup.BackupRestoreFactory; +import org.apache.hadoop.hbase.backup.BackupType; +import org.apache.hadoop.hbase.backup.HBackupFileSystem; +import org.apache.hadoop.hbase.backup.impl.BackupHandler.BackupPhase; +import org.apache.hadoop.hbase.backup.impl.BackupHandler.BackupState; +import org.apache.hadoop.hbase.backup.impl.BackupManifest.BackupImage; +import org.apache.hadoop.hbase.backup.master.LogRollMasterProcedureManager; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.client.Admin; +import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.client.ConnectionFactory; +import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv; +import org.apache.hadoop.hbase.master.procedure.TableProcedureInterface; +import org.apache.hadoop.hbase.procedure2.StateMachineProcedure; +import org.apache.hadoop.hbase.protobuf.generated.BackupProtos; +import org.apache.hadoop.hbase.protobuf.generated.BackupProtos.FullTableBackupState; +import org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp; +import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; +import org.apache.hadoop.hbase.util.FSUtils; + +@InterfaceAudience.Private +public class FullTableBackupProcedure + extends StateMachineProcedure<MasterProcedureEnv, FullTableBackupState> + implements TableProcedureInterface { + private static final Log LOG = LogFactory.getLog(FullTableBackupProcedure.class); + + private final AtomicBoolean aborted = new AtomicBoolean(false); + private Configuration conf; + private String backupId; + private List<TableName> tableList; + private String targetRootDir; + HashMap<String, Long> newTimestamps = null; + + private BackupManager backupManager; + private BackupContext backupContext; + + public FullTableBackupProcedure() { + // Required by the Procedure framework to create the procedure on replay + } + + public FullTableBackupProcedure(final MasterProcedureEnv env, + final String backupId, List<TableName> tableList, String targetRootDir, final int workers, + final long bandwidth) throws IOException { + backupManager = new BackupManager(env.getMasterConfiguration()); + this.backupId = backupId; + this.tableList = tableList; + this.targetRootDir = targetRootDir; + backupContext = + backupManager.createBackupContext(backupId, BackupType.FULL, tableList, targetRootDir); + } + + @Override + public byte[] getResult() { + return backupId.getBytes(); + } + + /** + * Begin the overall backup. 
+ * @param backupContext backup context + * @throws IOException exception + */ + static void beginBackup(BackupManager backupManager, BackupContext backupContext) + throws IOException { + backupManager.setBackupContext(backupContext); + // set the start timestamp of the overall backup + long startTs = EnvironmentEdgeManager.currentTime(); + backupContext.setStartTs(startTs); + // set overall backup status: ongoing + backupContext.setState(BackupState.RUNNING); + LOG.info("Backup " + backupContext.getBackupId() + " started at " + startTs + "."); + + backupManager.updateBackupStatus(backupContext); + if (LOG.isDebugEnabled()) { + LOG.debug("Backup session " + backupContext.getBackupId() + " has been started."); + } + } + + private static String getMessage(Exception e) { + String msg = e.getMessage(); + if (msg == null || msg.equals("")) { + msg = e.getClass().getName(); + } + return msg; + } + + /** + * Delete HBase snapshot for backup. + * @param backupCtx backup context + * @throws Exception exception + */ + private static void deleteSnapshot(BackupContext backupCtx, Configuration conf) + throws IOException { + LOG.debug("Trying to delete snapshot for full backup."); + Connection conn = null; + Admin admin = null; + try { + conn = ConnectionFactory.createConnection(conf); + admin = conn.getAdmin(); + for (String snapshotName : backupCtx.getSnapshotNames()) { + if (snapshotName == null) { + continue; + } + LOG.debug("Trying to delete snapshot: " + snapshotName); + admin.deleteSnapshot(snapshotName); + LOG.debug("Deleting the snapshot " + snapshotName + " for backup " + + backupCtx.getBackupId() + " succeeded."); + } + } finally { + if (admin != null) { + admin.close(); + } + if (conn != null) { + conn.close(); + } + } + } + + /** + * Clean up directories with prefix "exportSnapshot-", which are generated when exporting + * snapshots. + * @throws IOException exception + */ + private static void cleanupExportSnapshotLog(Configuration conf) throws IOException { + FileSystem fs = FSUtils.getCurrentFileSystem(conf); + Path stagingDir = + new Path(conf.get(BackupRestoreConstants.CONF_STAGING_ROOT, fs.getWorkingDirectory() + .toString())); + FileStatus[] files = FSUtils.listStatus(fs, stagingDir); + if (files == null) { + return; + } + for (FileStatus file : files) { + if (file.getPath().getName().startsWith("exportSnapshot-")) { + LOG.debug("Delete log files of exporting snapshot: " + file.getPath().getName()); + if (FSUtils.delete(fs, file.getPath(), true) == false) { + LOG.warn("Can not delete " + file.getPath()); + } + } + } + } + + /** + * Clean up the uncompleted data at target directory if the ongoing backup has already entered the + * copy phase. + */ + static void cleanupTargetDir(BackupContext backupContext, Configuration conf) { + try { + // clean up the uncompleted data at target directory if the ongoing backup has already entered + // the copy phase + LOG.debug("Trying to cleanup up target dir. 
Current backup phase: " + + backupContext.getPhase()); + if (backupContext.getPhase().equals(BackupPhase.SNAPSHOTCOPY) + || backupContext.getPhase().equals(BackupPhase.INCREMENTAL_COPY) + || backupContext.getPhase().equals(BackupPhase.STORE_MANIFEST)) { + FileSystem outputFs = + FileSystem.get(new Path(backupContext.getTargetRootDir()).toUri(), conf); + + // now treat one backup as a transaction, clean up data that has been partially copied at + // table level + for (TableName table : backupContext.getTables()) { + Path targetDirPath = + new Path(HBackupFileSystem.getTableBackupDir(backupContext.getTargetRootDir(), + backupContext.getBackupId(), table)); + if (outputFs.delete(targetDirPath, true)) { + LOG.info("Cleaning up uncompleted backup data at " + targetDirPath.toString() + + " done."); + } else { + LOG.info("No data has been copied to " + targetDirPath.toString() + "."); + } + + Path tableDir = targetDirPath.getParent(); + FileStatus[] backups = FSUtils.listStatus(outputFs, tableDir); + if (backups == null || backups.length == 0) { + outputFs.delete(tableDir, true); + LOG.debug(tableDir.toString() + " is empty, remove it."); + } + } + } + + } catch (IOException e1) { + LOG.error("Cleaning up uncompleted backup data of " + backupContext.getBackupId() + " at " + + backupContext.getTargetRootDir() + " failed due to " + e1.getMessage() + "."); + } + } + + /** + * Fail the overall backup. + * @param backupContext backup context + * @param e exception + * @throws Exception exception + */ + static void failBackup(BackupContext backupContext, BackupManager backupManager, Exception e, + String msg, BackupType type, Configuration conf) throws IOException { + LOG.error(msg + getMessage(e)); + // If this is a cancel exception, then we've already cleaned. + + if (backupContext.getState().equals(BackupState.CANCELLED)) { + return; + } + + // set the failure timestamp of the overall backup + backupContext.setEndTs(EnvironmentEdgeManager.currentTime()); + + // set failure message + backupContext.setFailedMsg(e.getMessage()); + + // set overall backup status: failed + backupContext.setState(BackupState.FAILED); + + // compose the backup failed data + String backupFailedData = + "BackupId=" + backupContext.getBackupId() + ",startts=" + backupContext.getStartTs() + + ",failedts=" + backupContext.getEndTs() + ",failedphase=" + backupContext.getPhase() + + ",failedmessage=" + backupContext.getFailedMsg(); + LOG.error(backupFailedData); + + backupManager.updateBackupStatus(backupContext); + + // if full backup, then delete HBase snapshots if there already are snapshots taken + // and also clean up export snapshot log files if exist + if (type == BackupType.FULL) { + deleteSnapshot(backupContext, conf); + cleanupExportSnapshotLog(conf); + } + + // clean up the uncompleted data at target directory if the ongoing backup has already entered + // the copy phase + // For incremental backup, DistCp logs will be cleaned with the targetDir. + cleanupTargetDir(backupContext, conf); + + LOG.info("Backup " + backupContext.getBackupId() + " failed."); + } + + /** + * Do snapshot copy. 
+   * @param backupContext backup context
+   * @throws Exception exception
+   */
+  private void snapshotCopy(BackupContext backupContext) throws Exception {
+    LOG.info("Snapshot copy is starting.");
+
+    // set overall backup phase: snapshot_copy
+    backupContext.setPhase(BackupPhase.SNAPSHOTCOPY);
+
+    // avoid action if the backup has been cancelled
+    if (backupContext.isCancelled()) {
+      return;
+    }
+
+    // call ExportSnapshot to copy files based on the HBase snapshot for backup
+    // ExportSnapshot only supports a single snapshot export, so loop for the multiple-table case
+    BackupCopyService copyService = BackupRestoreFactory.getBackupCopyService(conf);
+
+    // number of snapshots matches number of tables
+    float numOfSnapshots = backupContext.getSnapshotNames().size();
+
+    LOG.debug("There are " + (int) numOfSnapshots + " snapshots to be copied.");
+
+    for (TableName table : backupContext.getTables()) {
+      // Currently we simply set the sub copy tasks by counting the table snapshot number, we can
+      // calculate the real files' size for the percentage in the future.
+      // backupCopier.setSubTaskPercntgInWholeTask(1f / numOfSnapshots);
+      int res = 0;
+      String[] args = new String[4];
+      args[0] = "-snapshot";
+      args[1] = backupContext.getSnapshotName(table);
+      args[2] = "-copy-to";
+      args[3] = backupContext.getBackupStatus(table).getTargetDir();
+
+      LOG.debug("Copy snapshot " + args[1] + " to " + args[3]);
+      res = copyService.copy(backupContext, backupManager, conf, BackupCopyService.Type.FULL, args);
+      // if one snapshot export fails, do not continue with the remaining snapshots
+      if (res != 0) {
+        LOG.error("Exporting Snapshot " + args[1] + " failed with return code: " + res + ".");
+
+        throw new IOException("Failed to export snapshot " + args[1] + " to " + args[3]
+            + " with return code " + res);
+      }
+
+      LOG.info("Snapshot copy " + args[1] + " finished.");
+    }
+  }
+
+  /**
+   * Add manifest for the current backup. The manifest is stored
+   * within the table backup directory.
+   * @param backupContext The current backup context
+   * @throws IOException exception
+   * @throws BackupException exception
+   */
+  private static void addManifest(BackupContext backupContext, BackupManager backupManager,
+      BackupType type, Configuration conf) throws IOException, BackupException {
+    // set the overall backup phase: store manifest
+    backupContext.setPhase(BackupPhase.STORE_MANIFEST);
+
+    // avoid action if the backup has been cancelled
+    if (backupContext.isCancelled()) {
+      return;
+    }
+
+    BackupManifest manifest;
+
+    // Since we have each table's backup in its own directory structure,
+    // we'll store its manifest with the table directory.
+    for (TableName table : backupContext.getTables()) {
+      manifest = new BackupManifest(backupContext, table);
+      ArrayList<BackupImage> ancestors = backupManager.getAncestors(backupContext, table);
+      for (BackupImage image : ancestors) {
+        manifest.addDependentImage(image);
+      }
+
+      if (type == BackupType.INCREMENTAL) {
+        // We'll store the log timestamps for this table only in its manifest.
+        HashMap<TableName, HashMap<String, Long>> tableTimestampMap =
+            new HashMap<TableName, HashMap<String, Long>>();
+        tableTimestampMap.put(table, backupContext.getIncrTimestampMap().get(table));
+        manifest.setIncrTimestampMap(tableTimestampMap);
+        ArrayList<BackupImage> ancestorss = backupManager.getAncestors(backupContext);
+        for (BackupImage image : ancestorss) {
+          manifest.addDependentImage(image);
+        }
+      }
+      // TODO
+      // manifest.setRelativeWALReferences(backupContext.getRelWALRefs());
+      manifest.store(conf);
+    }
+
+    // For incremental backup, we store an overall manifest in
+    // <backup-root-dir>/WALs/<backup-id>.
+    // This is used when creating the next incremental backup.
+    if (type == BackupType.INCREMENTAL) {
+      manifest = new BackupManifest(backupContext);
+      // set the table region server start and end timestamps for incremental backup
+      manifest.setIncrTimestampMap(backupContext.getIncrTimestampMap());
+      ArrayList<BackupImage> ancestors = backupManager.getAncestors(backupContext);
+      for (BackupImage image : ancestors) {
+        manifest.addDependentImage(image);
+      }
+      // TODO
+      // manifest.setRelativeWALReferences(backupContext.getRelWALRefs());
+      manifest.store(conf);
+    }
+  }
+
+  /**
+   * Get backup request meta data dir as string.
+   * @param backupContext backup context
+   * @return meta data dir
+   */
+  private static String obtainBackupMetaDataStr(BackupContext backupContext) {
+    StringBuilder sb = new StringBuilder();
+    sb.append("type=" + backupContext.getType() + ",tablelist=");
+    for (TableName table : backupContext.getTables()) {
+      sb.append(table + ";");
+    }
+    if (sb.lastIndexOf(";") > 0) {
+      sb.delete(sb.lastIndexOf(";"), sb.lastIndexOf(";") + 1);
+    }
+    sb.append(",targetRootDir=" + backupContext.getTargetRootDir());
+
+    return sb.toString();
+  }
+
+  /**
+   * Clean up directories with prefix "_distcp_logs-", which are generated when DistCp copies
+   * WALs.
+   * @throws IOException exception
+   */
+  private static void cleanupDistCpLog(BackupContext backupContext, Configuration conf)
+      throws IOException {
+    Path rootPath = new Path(backupContext.getHLogTargetDir()).getParent();
+    FileSystem fs = FileSystem.get(rootPath.toUri(), conf);
+    FileStatus[] files = FSUtils.listStatus(fs, rootPath);
+    if (files == null) {
+      return;
+    }
+    for (FileStatus file : files) {
+      if (file.getPath().getName().startsWith("_distcp_logs")) {
+        LOG.debug("Delete log files of DistCp: " + file.getPath().getName());
+        FSUtils.delete(fs, file.getPath(), true);
+      }
+    }
+  }
+
+  /**
+   * Complete the overall backup.
+ * @param backupContext backup context + * @throws Exception exception + */ + static void completeBackup(BackupContext backupContext, BackupManager backupManager, + BackupType type, Configuration conf) throws IOException { + // set the complete timestamp of the overall backup + backupContext.setEndTs(EnvironmentEdgeManager.currentTime()); + // set overall backup status: complete + backupContext.setState(BackupState.COMPLETE); + // add and store the manifest for the backup + addManifest(backupContext, backupManager, type, conf); + + // after major steps done and manifest persisted, do convert if needed for incremental backup + /* in-fly convert code here, provided by future jira */ + LOG.debug("in-fly convert code here, provided by future jira"); + + // compose the backup complete data + String backupCompleteData = + obtainBackupMetaDataStr(backupContext) + ",startts=" + backupContext.getStartTs() + + ",completets=" + backupContext.getEndTs() + ",bytescopied=" + + backupContext.getTotalBytesCopied(); + if (LOG.isDebugEnabled()) { + LOG.debug("Backup " + backupContext.getBackupId() + " finished: " + backupCompleteData); + } + backupManager.updateBackupStatus(backupContext); + + // when full backup is done: + // - delete HBase snapshot + // - clean up directories with prefix "exportSnapshot-", which are generated when exporting + // snapshots + if (type == BackupType.FULL) { + deleteSnapshot(backupContext, conf); + cleanupExportSnapshotLog(conf); + } else if (type == BackupType.INCREMENTAL) { + cleanupDistCpLog(backupContext, conf); + } + + LOG.info("Backup " + backupContext.getBackupId() + " completed."); + } + + @Override + protected Flow executeFromState(final MasterProcedureEnv env, final FullTableBackupState state) + throws InterruptedException { + if (conf == null) { + conf = env.getMasterConfiguration(); + } + if (backupManager == null) { + try { + backupManager = new BackupManager(env.getMasterConfiguration()); + } catch (IOException ioe) { + setFailure("full backup", ioe); + } + } + if (LOG.isTraceEnabled()) { + LOG.trace(this + " execute state=" + state); + } + try (Connection conn = ConnectionFactory.createConnection(env.getMasterConfiguration()); + Admin admin = conn.getAdmin()) { + switch (state) { + case PRE_SNAPSHOT_TABLE: + beginBackup(backupManager, backupContext); + String savedStartCode = null; + boolean firstBackup = false; + // do snapshot for full table backup + + try { + savedStartCode = backupManager.readBackupStartCode(); + firstBackup = savedStartCode == null; + if (firstBackup) { + // This is our first backup. Let's put some marker on ZK so that we can hold the logs + // while we do the backup. + backupManager.writeBackupStartCode(0L); + } + // We roll log here before we do the snapshot. It is possible there is duplicate data + // in the log that is already in the snapshot. But if we do it after the snapshot, we + // could have data loss. + // A better approach is to do the roll log on each RS in the same global procedure as + // the snapshot. 
+ LOG.info("Execute roll log procedure for full backup ..."); + admin.execProcedure(LogRollMasterProcedureManager.ROLLLOG_PROCEDURE_SIGNATURE, + LogRollMasterProcedureManager.ROLLLOG_PROCEDURE_NAME, new HashMap()); + newTimestamps = backupManager.readRegionServerLastLogRollResult(); + if (firstBackup) { + // Updates registered log files + // We record ALL old WAL files as registered, because + // this is a first full backup in the system and these + // files are not needed for next incremental backup + List logFiles = BackupUtil.getWALFilesOlderThan(conf, newTimestamps); + backupManager.recordWALFiles(logFiles); + } + } catch (BackupException e) { + // fail the overall backup and return + failBackup(backupContext, backupManager, e, "Unexpected BackupException : ", + BackupType.FULL, conf); + return null; + } + setNextState(FullTableBackupState.SNAPSHOT_TABLES); + break; + case SNAPSHOT_TABLES: + for (TableName tableName : tableList) { + String snapshotName = "snapshot_" + Long.toString(EnvironmentEdgeManager.currentTime()) + + "_" + tableName.getNamespaceAsString() + "_" + tableName.getQualifierAsString(); + /* + addChildProcedure(new SnapshotTableProcedure(env, tableName, + "snapshot_" + Long.toString(EnvironmentEdgeManager.currentTime()) + "_" + + tableName.getNamespaceAsString() + "_" + tableName.getQualifierAsString())); + */ + HBaseProtos.SnapshotDescription backupSnapshot; + + // wrap a SnapshotDescription for offline/online snapshot + backupSnapshot = SnapshotTableProcedure.wrapSnapshotDescription(tableName,snapshotName); + try { + admin.deleteSnapshot(snapshotName); + } catch (IOException e) { + LOG.debug("Unable to delete " + snapshotName, e); + } + // Kick off snapshot for backup + admin.snapshot(backupSnapshot); + // set the snapshot name in BackupStatus of this table, only after snapshot success. + backupContext.setSnapshotName(tableName, backupSnapshot.getName()); + } + setNextState(FullTableBackupState.SNAPSHOT_COPY); + break; + case SNAPSHOT_COPY: + // do snapshot copy + LOG.debug("snapshot copy for " + backupId); + try { + this.snapshotCopy(backupContext); + } catch (Exception e) { + // fail the overall backup and return + failBackup(backupContext, backupManager, e, "Unexpected BackupException : ", + BackupType.FULL, conf); + return null; + } + // Updates incremental backup table set + backupManager.addIncrementalBackupTableSet(backupContext.getTables()); + setNextState(FullTableBackupState.BACKUP_COMPLETE); + break; + + case BACKUP_COMPLETE: + // set overall backup status: complete. Here we make sure to complete the backup. + // After this checkpoint, even if entering cancel process, will let the backup finished + backupContext.setState(BackupState.COMPLETE); + // The table list in backupContext is good for both full backup and incremental backup. + // For incremental backup, it contains the incremental backup table set. 
+ backupManager.writeRegionServerLogTimestamp(backupContext.getTables(), newTimestamps); + + HashMap> newTableSetTimestampMap = + backupManager.readLogTimestampMap(); + + Long newStartCode = + BackupClientUtil.getMinValue(BackupUtil.getRSLogTimestampMins(newTableSetTimestampMap)); + backupManager.writeBackupStartCode(newStartCode); + + // backup complete + completeBackup(backupContext, backupManager, BackupType.FULL, conf); + return Flow.NO_MORE_STATE; + + default: + throw new UnsupportedOperationException("unhandled state=" + state); + } + } catch (IOException e) { + setFailure("snapshot-table", e); + } + return Flow.HAS_MORE_STATE; + } + + @Override + protected void rollbackState(final MasterProcedureEnv env, final FullTableBackupState state) + throws IOException { + deleteSnapshot(backupContext, conf); + cleanupExportSnapshotLog(conf); + + // clean up the uncompleted data at target directory if the ongoing backup has already entered + // the copy phase + // For incremental backup, DistCp logs will be cleaned with the targetDir. + cleanupTargetDir(backupContext, conf); + } + + @Override + protected FullTableBackupState getState(final int stateId) { + return FullTableBackupState.valueOf(stateId); + } + + @Override + protected int getStateId(final FullTableBackupState state) { + return state.getNumber(); + } + + @Override + protected FullTableBackupState getInitialState() { + return FullTableBackupState.PRE_SNAPSHOT_TABLE; + } + + @Override + protected void setNextState(final FullTableBackupState state) { + if (aborted.get()) { + setAbortFailure("snapshot-table", "abort requested"); + } else { + super.setNextState(state); + } + } + + @Override + public boolean abort(final MasterProcedureEnv env) { + aborted.set(true); + return true; + } + + @Override + public void toStringClassDetails(StringBuilder sb) { + sb.append(getClass().getSimpleName()); + sb.append(" (targetRootDir="); + sb.append(targetRootDir); + sb.append(")"); + } + + BackupProtos.BackupProcContext toBackupContext() { + BackupProtos.BackupProcContext.Builder ctxBuilder = BackupProtos.BackupProcContext.newBuilder(); + ctxBuilder.setCtx(backupContext.toBackupContext()); + if (newTimestamps != null && !newTimestamps.isEmpty()) { + BackupProtos.ServerTimestamp.Builder tsBuilder = ServerTimestamp.newBuilder(); + for (Entry entry : newTimestamps.entrySet()) { + tsBuilder.clear().setServer(entry.getKey()).setTimestamp(entry.getValue()); + ctxBuilder.addServerTimestamp(tsBuilder.build()); + } + } + return ctxBuilder.build(); + } + + @Override + public void serializeStateData(final OutputStream stream) throws IOException { + super.serializeStateData(stream); + + BackupProtos.BackupProcContext backupProcCtx = toBackupContext(); + backupProcCtx.writeDelimitedTo(stream); + } + + @Override + public void deserializeStateData(final InputStream stream) throws IOException { + super.deserializeStateData(stream); + + BackupProtos.BackupProcContext proto =BackupProtos.BackupProcContext.parseDelimitedFrom(stream); + backupContext = BackupContext.fromProto(proto.getCtx()); + backupId = backupContext.getBackupId(); + targetRootDir = backupContext.getTargetRootDir(); + tableList = backupContext.getTableNames(); + List svrTimestamps = proto.getServerTimestampList(); + if (svrTimestamps != null && !svrTimestamps.isEmpty()) { + newTimestamps = new HashMap<>(); + for (ServerTimestamp ts : svrTimestamps) { + newTimestamps.put(ts.getServer(), ts.getTimestamp()); + } + } + } + + @Override + public TableName getTableName() { + return 
TableName.BACKUP_TABLE_NAME; + } + + @Override + public TableOperationType getTableOperationType() { + return TableOperationType.BACKUP; + } + + @Override + protected boolean acquireLock(final MasterProcedureEnv env) { + if (env.waitInitialized(this)) { + return false; + } + return env.getProcedureQueue().tryAcquireTableExclusiveLock(this, TableName.BACKUP_TABLE_NAME); + } + + @Override + protected void releaseLock(final MasterProcedureEnv env) { + env.getProcedureQueue().releaseTableExclusiveLock(this, TableName.BACKUP_TABLE_NAME); + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalTableBackupProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalTableBackupProcedure.java new file mode 100644 index 0000000000000000000000000000000000000000..3a453eb634d88b3fee9c0e7f6d072accc2268977 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalTableBackupProcedure.java @@ -0,0 +1,325 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package org.apache.hadoop.hbase.backup.impl;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map.Entry;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.backup.BackupClientUtil;
+import org.apache.hadoop.hbase.backup.BackupRestoreFactory;
+import org.apache.hadoop.hbase.backup.BackupType;
+import org.apache.hadoop.hbase.backup.impl.BackupHandler.BackupPhase;
+import org.apache.hadoop.hbase.backup.impl.BackupHandler.BackupState;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
+import org.apache.hadoop.hbase.master.procedure.TableProcedureInterface;
+import org.apache.hadoop.hbase.procedure2.StateMachineProcedure;
+import org.apache.hadoop.hbase.protobuf.generated.BackupProtos;
+import org.apache.hadoop.hbase.protobuf.generated.BackupProtos.IncrementalTableBackupState;
+import org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp;
+
+@InterfaceAudience.Private
+public class IncrementalTableBackupProcedure
+    extends StateMachineProcedure<MasterProcedureEnv, IncrementalTableBackupState>
+    implements TableProcedureInterface {
+  private static final Log LOG = LogFactory.getLog(IncrementalTableBackupProcedure.class);
+
+  private final AtomicBoolean aborted = new AtomicBoolean(false);
+  private Configuration conf;
+  private String backupId;
+  private List<TableName> tableList;
+  private String targetRootDir;
+  HashMap<String, Long> newTimestamps = null;
+
+  private BackupManager backupManager;
+  private BackupContext backupContext;
+
+  public IncrementalTableBackupProcedure() {
+    // Required by the Procedure framework to create the procedure on replay
+  }
+
+  public IncrementalTableBackupProcedure(final MasterProcedureEnv env,
+      final String backupId,
+      List<TableName> tableList, String targetRootDir, final int workers,
+      final long bandwidth) throws IOException {
+    backupManager = new BackupManager(env.getMasterConfiguration());
+    this.backupId = backupId;
+    this.tableList = tableList;
+    this.targetRootDir = targetRootDir;
+    backupContext = backupManager.createBackupContext(backupId, BackupType.INCREMENTAL, tableList,
+      targetRootDir);
+  }
+
+  @Override
+  public byte[] getResult() {
+    return backupId.getBytes();
+  }
+
+  private List<String> filterMissingFiles(List<String> incrBackupFileList) throws IOException {
+    FileSystem fs = FileSystem.get(conf);
+    List<String> list = new ArrayList<String>();
+    for (String file : incrBackupFileList) {
+      if (fs.exists(new Path(file))) {
+        list.add(file);
+      } else {
+        LOG.warn("Can't find file: " + file);
+      }
+    }
+    return list;
+  }
+
+  /**
+   * Do incremental copy.
+   * @param backupContext backup context
+   */
+  private void incrementalCopy(BackupContext backupContext) throws Exception {
+
+    LOG.info("Incremental copy is starting.");
+
+    // set overall backup phase: incremental_copy
+    backupContext.setPhase(BackupPhase.INCREMENTAL_COPY);
+
+    // avoid action if the backup has been cancelled
+    if (backupContext.isCancelled()) {
+      return;
+    }
+
+    // get incremental backup file list and prepare params for DistCp
+    List<String> incrBackupFileList = backupContext.getIncrBackupFileList();
+    // filter missing files out (they have been copied by previous backups)
+    incrBackupFileList = filterMissingFiles(incrBackupFileList);
+    String[] strArr = incrBackupFileList.toArray(new String[incrBackupFileList.size() + 1]);
+    strArr[strArr.length - 1] = backupContext.getHLogTargetDir();
+
+    BackupCopyService copyService = BackupRestoreFactory.getBackupCopyService(conf);
+    int res = copyService.copy(backupContext, backupManager, conf,
+      BackupCopyService.Type.INCREMENTAL, strArr);
+
+    if (res != 0) {
+      LOG.error("Copy incremental log files failed with return code: " + res + ".");
+      throw new IOException("Failed to copy incremental log files from " + incrBackupFileList
+          + " to " + backupContext.getHLogTargetDir());
+    }
+    LOG.info("Incremental copy from " + incrBackupFileList + " to "
+        + backupContext.getHLogTargetDir() + " finished.");
+  }
+
+  @Override
+  protected Flow executeFromState(final MasterProcedureEnv env,
+      final IncrementalTableBackupState state)
+      throws InterruptedException {
+    if (conf == null) {
+      conf = env.getMasterConfiguration();
+    }
+    if (backupManager == null) {
+      try {
+        backupManager = new BackupManager(env.getMasterConfiguration());
+      } catch (IOException ioe) {
+        setFailure("incremental backup", ioe);
+      }
+    }
+    if (LOG.isTraceEnabled()) {
+      LOG.trace(this + " execute state=" + state);
+    }
+    try {
+      switch (state) {
+        case PREPARE_INCREMENTAL:
+          FullTableBackupProcedure.beginBackup(backupManager, backupContext);
+          LOG.debug("For incremental backup, current table set is "
+              + backupManager.getIncrementalBackupTableSet());
+          try {
+            IncrementalBackupManager incrBackupManager = new IncrementalBackupManager(backupManager);
+
+            newTimestamps = incrBackupManager.getIncrBackupLogFileList(backupContext);
+          } catch (Exception e) {
+            // fail the overall backup and return
+            FullTableBackupProcedure.failBackup(backupContext, backupManager, e,
+              "Unexpected Exception : ", BackupType.INCREMENTAL, conf);
+          }
+
+          setNextState(IncrementalTableBackupState.INCREMENTAL_COPY);
+          break;
+        case INCREMENTAL_COPY:
+          try {
+            // copy out the table and region info files for each table
+            BackupUtil.copyTableRegionInfo(backupContext, conf);
+            incrementalCopy(backupContext);
+            // Save list of WAL files copied
+            backupManager.recordWALFiles(backupContext.getIncrBackupFileList());
+          } catch (Exception e) {
+            // fail the overall backup and return
+            FullTableBackupProcedure.failBackup(backupContext, backupManager, e,
+              "Unexpected exception doing incremental copy : ", BackupType.INCREMENTAL, conf);
+          }
+          setNextState(IncrementalTableBackupState.INCR_BACKUP_COMPLETE);
+          break;
+        case INCR_BACKUP_COMPLETE:
+          // set overall backup status: complete. Here we make sure to complete the backup.
+          // After this checkpoint, even if a cancel request comes in, the backup will be
+          // allowed to finish.
+          backupContext.setState(BackupState.COMPLETE);
+          // Set the previousTimestampMap which is before this current log roll to the manifest.
+ HashMap> previousTimestampMap = + backupManager.readLogTimestampMap(); + backupContext.setIncrTimestampMap(previousTimestampMap); + + // The table list in backupContext is good for both full backup and incremental backup. + // For incremental backup, it contains the incremental backup table set. + backupManager.writeRegionServerLogTimestamp(backupContext.getTables(), newTimestamps); + + HashMap> newTableSetTimestampMap = + backupManager.readLogTimestampMap(); + + Long newStartCode = BackupClientUtil + .getMinValue(BackupUtil.getRSLogTimestampMins(newTableSetTimestampMap)); + backupManager.writeBackupStartCode(newStartCode); + // backup complete + FullTableBackupProcedure.completeBackup(backupContext, backupManager, + BackupType.INCREMENTAL, conf); + return Flow.NO_MORE_STATE; + + default: + throw new UnsupportedOperationException("unhandled state=" + state); + } + } catch (IOException e) { + setFailure("snapshot-table", e); + } + return Flow.HAS_MORE_STATE; + } + + @Override + protected void rollbackState(final MasterProcedureEnv env, + final IncrementalTableBackupState state) throws IOException { + // clean up the uncompleted data at target directory if the ongoing backup has already entered + // the copy phase + // For incremental backup, DistCp logs will be cleaned with the targetDir. + FullTableBackupProcedure.cleanupTargetDir(backupContext, conf); + } + + @Override + protected IncrementalTableBackupState getState(final int stateId) { + return IncrementalTableBackupState.valueOf(stateId); + } + + @Override + protected int getStateId(final IncrementalTableBackupState state) { + return state.getNumber(); + } + + @Override + protected IncrementalTableBackupState getInitialState() { + return IncrementalTableBackupState.PREPARE_INCREMENTAL; + } + + @Override + protected void setNextState(final IncrementalTableBackupState state) { + if (aborted.get()) { + setAbortFailure("snapshot-table", "abort requested"); + } else { + super.setNextState(state); + } + } + + @Override + public boolean abort(final MasterProcedureEnv env) { + aborted.set(true); + return true; + } + + @Override + public void toStringClassDetails(StringBuilder sb) { + sb.append(getClass().getSimpleName()); + sb.append(" (targetRootDir="); + sb.append(targetRootDir); + sb.append(")"); + } + + BackupProtos.BackupProcContext toBackupContext() { + BackupProtos.BackupProcContext.Builder ctxBuilder = BackupProtos.BackupProcContext.newBuilder(); + ctxBuilder.setCtx(backupContext.toBackupContext()); + if (newTimestamps != null && !newTimestamps.isEmpty()) { + BackupProtos.ServerTimestamp.Builder tsBuilder = ServerTimestamp.newBuilder(); + for (Entry entry : newTimestamps.entrySet()) { + tsBuilder.clear().setServer(entry.getKey()).setTimestamp(entry.getValue()); + ctxBuilder.addServerTimestamp(tsBuilder.build()); + } + } + return ctxBuilder.build(); + } + + @Override + public void serializeStateData(final OutputStream stream) throws IOException { + super.serializeStateData(stream); + + BackupProtos.BackupProcContext backupProcCtx = toBackupContext(); + backupProcCtx.writeDelimitedTo(stream); + } + + @Override + public void deserializeStateData(final InputStream stream) throws IOException { + super.deserializeStateData(stream); + + BackupProtos.BackupProcContext proto =BackupProtos.BackupProcContext.parseDelimitedFrom(stream); + backupContext = BackupContext.fromProto(proto.getCtx()); + backupId = backupContext.getBackupId(); + targetRootDir = backupContext.getTargetRootDir(); + tableList = backupContext.getTableNames(); + List 
svrTimestamps = proto.getServerTimestampList(); + if (svrTimestamps != null && !svrTimestamps.isEmpty()) { + newTimestamps = new HashMap<>(); + for (ServerTimestamp ts : svrTimestamps) { + newTimestamps.put(ts.getServer(), ts.getTimestamp()); + } + } + } + + @Override + public TableName getTableName() { + return TableName.BACKUP_TABLE_NAME; + } + + @Override + public TableOperationType getTableOperationType() { + return TableOperationType.BACKUP; + } + + @Override + protected boolean acquireLock(final MasterProcedureEnv env) { + if (env.waitInitialized(this)) { + return false; + } + return env.getProcedureQueue().tryAcquireTableExclusiveLock(this, TableName.BACKUP_TABLE_NAME); + } + + @Override + protected void releaseLock(final MasterProcedureEnv env) { + env.getProcedureQueue().releaseTableExclusiveLock(this, TableName.BACKUP_TABLE_NAME); + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/SnapshotTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/SnapshotTableProcedure.java new file mode 100644 index 0000000000000000000000000000000000000000..90c1fae4e93313b1688954e63937431d565d8104 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/SnapshotTableProcedure.java @@ -0,0 +1,202 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hbase.backup.impl; + +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.util.concurrent.atomic.AtomicBoolean; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.client.Admin; +import org.apache.hadoop.hbase.client.ConnectionFactory; +import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv; +import org.apache.hadoop.hbase.master.procedure.TableProcedureInterface; +import org.apache.hadoop.hbase.master.procedure.TableProcedureInterface.TableOperationType; +import org.apache.hadoop.hbase.procedure2.StateMachineProcedure; +import org.apache.hadoop.hbase.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.protobuf.generated.BackupProtos; +import org.apache.hadoop.hbase.protobuf.generated.BackupProtos.SnapshotTableState; +import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos; +import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription; + +@InterfaceAudience.Private +public class SnapshotTableProcedure + extends StateMachineProcedure + implements TableProcedureInterface { + private static final Log LOG = LogFactory.getLog(SnapshotTableProcedure.class); + + private final AtomicBoolean aborted = new AtomicBoolean(false); + private TableName tableName; + private String snapshotName; + + public SnapshotTableProcedure() { + // Required by the Procedure framework to create the procedure on replay + } + + public SnapshotTableProcedure(final MasterProcedureEnv env, + final TableName tableName, final String snapshotName) + throws IOException { + this.snapshotName = snapshotName; + this.tableName = tableName; + } + + @Override + protected Flow executeFromState(final MasterProcedureEnv env, final SnapshotTableState state) + throws InterruptedException { + if (LOG.isTraceEnabled()) { + LOG.trace(this + " execute state=" + state); + } + try { + switch (state) { + case SNAPSHOT_TABLE: + HBaseProtos.SnapshotDescription backupSnapshot; + + // wrap a SnapshotDescription for offline/online snapshot + backupSnapshot = this.wrapSnapshotDescription(tableName, snapshotName); + + Admin admin = ConnectionFactory.createConnection(env.getMasterConfiguration()).getAdmin(); + // try to remove existing snapshot + try { + admin.deleteSnapshot(snapshotName); + } catch (IOException e) { + LOG.debug("Unable to delete " + snapshotName, e); + } + // Kick off snapshot for backup + admin.snapshot(backupSnapshot); + // set the snapshot name in BackupStatus of this table, only after snapshot success. + return Flow.NO_MORE_STATE; + default: + throw new UnsupportedOperationException("unhandled state=" + state); + } + } catch (IOException e) { + setFailure("snapshot-table", e); + } + return Flow.HAS_MORE_STATE; + } + + /** + * Wrap a SnapshotDescription for a target table. + * @param table table + * @return a SnapshotDescription especially for backup. 
+ */ + static SnapshotDescription wrapSnapshotDescription(TableName tableName, String snapshotName) { + // Mock a SnapshotDescription from backupContext to call SnapshotManager function, + // Name it in the format "snapshot__" + HBaseProtos.SnapshotDescription.Builder builder = HBaseProtos.SnapshotDescription.newBuilder(); + builder.setTable(tableName.getNameAsString()); + builder.setName(snapshotName); + HBaseProtos.SnapshotDescription backupSnapshot = builder.build(); + + LOG.debug("Wrapped a SnapshotDescription " + backupSnapshot.getName() + + " from backupContext to request snapshot for backup."); + + return backupSnapshot; + } + + @Override + protected void rollbackState(final MasterProcedureEnv env, final SnapshotTableState state) + throws IOException { + } + + @Override + protected SnapshotTableState getState(final int stateId) { + return SnapshotTableState.valueOf(stateId); + } + + @Override + protected int getStateId(final SnapshotTableState state) { + return state.getNumber(); + } + + @Override + protected SnapshotTableState getInitialState() { + return SnapshotTableState.SNAPSHOT_TABLE; + } + + @Override + protected void setNextState(final SnapshotTableState state) { + if (aborted.get()) { + setAbortFailure("snapshot-table", "abort requested"); + } else { + super.setNextState(state); + } + } + + @Override + public boolean abort(final MasterProcedureEnv env) { + aborted.set(true); + return true; + } + + @Override + public void toStringClassDetails(StringBuilder sb) { + sb.append(getClass().getSimpleName()); + sb.append(" (table="); + sb.append(tableName); + sb.append(")"); + } + + @Override + public void serializeStateData(final OutputStream stream) throws IOException { + super.serializeStateData(stream); + + BackupProtos.SnapshotTableStateData.Builder state = + BackupProtos.SnapshotTableStateData.newBuilder() + .setTable(ProtobufUtil.toProtoTableName(tableName)) + .setSnapshotName(snapshotName); + state.build().writeDelimitedTo(stream); + } + + @Override + public void deserializeStateData(final InputStream stream) throws IOException { + super.deserializeStateData(stream); + + BackupProtos.SnapshotTableStateData state = + BackupProtos.SnapshotTableStateData.parseDelimitedFrom(stream); + tableName = ProtobufUtil.toTableName(state.getTable()); + snapshotName = state.getSnapshotName(); + } + + @Override + public TableName getTableName() { + return tableName; + } + + @Override + public TableOperationType getTableOperationType() { + return TableOperationType.BACKUP; + } + + @Override + protected boolean acquireLock(final MasterProcedureEnv env) { + if (!tableName.isSystemTable() && env.waitInitialized(this)) { + return false; + } + return env.getProcedureQueue().tryAcquireTableExclusiveLock(this, tableName); + } + + @Override + protected void releaseLock(final MasterProcedureEnv env) { + env.getProcedureQueue().releaseTableExclusiveLock(this, tableName); + } +}
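
For reviewers, here is a minimal usage sketch (not part of this patch) of how master-side code might drive the new incremental backup procedure. It assumes the standard ProcedureExecutor/ProcedureSyncWait master-procedure helpers and a MasterServices handle named "master"; the backup id, table names, and target directory below are made-up illustrations.

import java.io.IOException;
import java.util.Arrays;
import java.util.List;

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.backup.impl.IncrementalTableBackupProcedure;
import org.apache.hadoop.hbase.master.MasterServices;
import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
import org.apache.hadoop.hbase.master.procedure.ProcedureSyncWait;
import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;

public class IncrementalBackupDriverSketch {
  /** Submits an incremental backup procedure and blocks until it completes. */
  public static String runIncrementalBackup(MasterServices master) throws IOException {
    ProcedureExecutor<MasterProcedureEnv> procExec = master.getMasterProcedureExecutor();
    // Hypothetical backup id and table list, purely for illustration.
    String backupId = "backup_" + System.currentTimeMillis();
    List<TableName> tables = Arrays.asList(TableName.valueOf("t1"), TableName.valueOf("t2"));
    IncrementalTableBackupProcedure proc = new IncrementalTableBackupProcedure(
        procExec.getEnvironment(), backupId, tables,
        "hdfs://backup-root", /* workers */ -1, /* bandwidth */ -1L);
    // The procedure runs the PREPARE_INCREMENTAL -> INCREMENTAL_COPY -> INCR_BACKUP_COMPLETE
    // state machine above; waiting here mirrors what a synchronous admin call would do.
    long procId = procExec.submitProcedure(proc);
    ProcedureSyncWait.waitForProcedureToComplete(procExec, procId);
    // getResult() carries the backup id bytes once the procedure finishes.
    return new String(proc.getResult());
  }
}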