diff --git a/bin/hbase b/bin/hbase
index 1653c5a..88ab249 100755
--- a/bin/hbase
+++ b/bin/hbase
@@ -103,6 +103,8 @@ if [ $# = 0 ]; then
   echo "  ltt             Run LoadTestTool"
   echo "  canary          Run the Canary tool"
   echo "  version         Print the version"
+  echo "  backup          Backup tables for recovery"
+  echo "  restore         Restore tables from existing backup image"
   echo "  CLASSNAME       Run the class named CLASSNAME"
   exit 1
 fi
@@ -315,6 +317,10 @@ elif [ "$COMMAND" = "hfile" ] ; then
   CLASS='org.apache.hadoop.hbase.io.hfile.HFilePrettyPrinter'
 elif [ "$COMMAND" = "zkcli" ] ; then
   CLASS="org.apache.hadoop.hbase.zookeeper.ZooKeeperMainServer"
+elif [ "$COMMAND" = "backup" ] ; then
+  CLASS='org.apache.hadoop.hbase.backup.BackupDriver'
+elif [ "$COMMAND" = "restore" ] ; then
+  CLASS='org.apache.hadoop.hbase.backup.RestoreDriver'
 elif [ "$COMMAND" = "upgrade" ] ; then
   echo "This command was used to upgrade to HBase 0.96, it was removed in HBase 2.0.0."
   echo "Please follow the documentation at http://hbase.apache.org/book.html#upgrading."
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
index 34e0a89..49a7223 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
@@ -37,10 +37,13 @@ import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableExistsException;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.TableNotFoundException;
+import org.apache.hadoop.hbase.backup.BackupRequest;
+import org.apache.hadoop.hbase.backup.BackupType;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.classification.InterfaceStability;
 import org.apache.hadoop.hbase.client.security.SecurityCapability;
 import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SnapshotResponse;
 import org.apache.hadoop.hbase.quotas.QuotaFilter;
 import org.apache.hadoop.hbase.quotas.QuotaRetriever;
 import org.apache.hadoop.hbase.quotas.QuotaSettings;
@@ -926,6 +929,7 @@ public interface Admin extends Abortable, Closeable {
   void splitRegion(final byte[] regionName, final byte[] splitPoint)
     throws IOException;
 
+
   /**
    * Modify an existing table, more IRB friendly version. Asynchronous operation. This means that
    * it may be a while before your schema change is updated across all of the table.
@@ -1754,4 +1758,11 @@ public interface Admin extends Abortable, Closeable { * and rollback the switch state to be original state before you change switch * */ void releaseSplitOrMergeLockAndRollback() throws IOException; + + /** + * Get Backup Admin interface + * @return backup admin object + * @throws IOException exception + */ + BackupAdmin getBackupAdmin() throws IOException; } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java index 1fe29c8..2e40481 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java @@ -1316,6 +1316,13 @@ class ConnectionImplementation implements ClusterConnection, Closeable { } @Override + public MasterProtos.BackupTablesResponse backupTables( + RpcController controller, + MasterProtos.BackupTablesRequest request) throws ServiceException { + return stub.backupTables(controller, request); + } + + @Override public MasterProtos.AddColumnResponse addColumn( RpcController controller, MasterProtos.AddColumnRequest request) throws ServiceException { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java index ac98bdb..67afa12 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java @@ -66,6 +66,8 @@ import org.apache.hadoop.hbase.TableNotDisabledException; import org.apache.hadoop.hbase.TableNotFoundException; import org.apache.hadoop.hbase.UnknownRegionException; import org.apache.hadoop.hbase.ZooKeeperConnectionException; +import org.apache.hadoop.hbase.backup.BackupRequest; +import org.apache.hadoop.hbase.backup.util.BackupClientUtil; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.classification.InterfaceStability; import org.apache.hadoop.hbase.client.security.SecurityCapability; @@ -90,6 +92,8 @@ import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RollWALWriterReque import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RollWALWriterResponse; import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.StopServerRequest; import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateConfigurationRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription; @@ -214,6 +218,7 @@ public class HBaseAdmin implements Admin { // want to wait a long time. 
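For context, a minimal client-side sketch of how the new entry point is meant to be used: callers obtain a BackupAdmin from Admin#getBackupAdmin() rather than referencing HBaseAdmin directly. Only getBackupAdmin() itself is defined in this diff; the BackupRequest assembly and the BackupAdmin#backupTables call below are assumptions inferred from the getters used later in the patch (getBackupType, getTableList, getTargetRootDir, getWorkers, getBandwidth), and BackupAdmin is assumed to live in org.apache.hadoop.hbase.client since the Admin.java hunk adds no import for it.

import java.io.IOException;
import java.util.Arrays;
import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.backup.BackupRequest;
import org.apache.hadoop.hbase.backup.BackupType;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.BackupAdmin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FullBackupSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // New in this patch: backup operations hang off the regular Admin.
      BackupAdmin backupAdmin = admin.getBackupAdmin();

      // Hypothetical request assembly: this diff only shows the BackupRequest
      // getters consumed by HBaseAdmin.backupTablesAsync(), so the setters
      // below are assumed to mirror them.
      List<TableName> tables = Arrays.asList(TableName.valueOf("t1"));
      BackupRequest request = new BackupRequest();
      request.setBackupType(BackupType.FULL);
      request.setTableList(tables);
      request.setTargetRootDir("hdfs://namenode:8020/backups");

      // Hypothetical: assumes BackupAdmin exposes a synchronous backupTables(..)
      // mirroring the package-private HBaseAdmin.backupTables(BackupRequest)
      // added further down, returning the backup id.
      String backupId = backupAdmin.backupTables(request);
      System.out.println("Backup completed: " + backupId);
    }
  }
}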
private final int retryLongerMultiplier; private final int syncWaitTimeout; + private final long backupWaitTimeout; private boolean aborted; private int operationTimeout; private int rpcTimeout; @@ -245,6 +250,8 @@ public class HBaseAdmin implements Admin { HConstants.DEFAULT_HBASE_RPC_TIMEOUT); this.syncWaitTimeout = this.conf.getInt( "hbase.client.sync.wait.timeout.msec", 10 * 60000); // 10min + this.backupWaitTimeout = this.conf.getInt( + "hbase.client.backup.wait.timeout.sec", 24 * 3600); // 24 h this.rpcCallerFactory = connection.getRpcRetryingCallerFactory(); this.rpcControllerFactory = connection.getRpcControllerFactory(); @@ -435,14 +442,6 @@ public class HBaseAdmin implements Admin { throw new TableNotFoundException(tableName.getNameAsString()); } - private long getPauseTime(int tries) { - int triesCount = tries; - if (triesCount >= HConstants.RETRY_BACKOFF.length) { - triesCount = HConstants.RETRY_BACKOFF.length - 1; - } - return this.pause * HConstants.RETRY_BACKOFF[triesCount]; - } - @Override public void createTable(HTableDescriptor desc) throws IOException { @@ -1737,6 +1736,71 @@ public class HBaseAdmin implements Admin { ProtobufUtil.split(controller, admin, hri, splitPoint); } + + Future backupTablesAsync(final BackupRequest userRequest) throws IOException { + BackupClientUtil.checkTargetDir(userRequest.getTargetRootDir(), conf); + if (userRequest.getTableList() != null) { + for (TableName table : userRequest.getTableList()) { + if (!tableExists(table)) { + throw new DoNotRetryIOException(table + "does not exist"); + } + } + } + BackupTablesResponse response = executeCallable( + new MasterCallable(getConnection()) { + @Override + public BackupTablesResponse call(int callTimeout) throws ServiceException { + BackupTablesRequest request = RequestConverter.buildBackupTablesRequest( + userRequest.getBackupType(), userRequest.getTableList(), userRequest.getTargetRootDir(), + userRequest.getWorkers(), userRequest.getBandwidth()); + return master.backupTables(null, request); + } + }, (int) backupWaitTimeout); + return new TableBackupFuture(this, TableName.BACKUP_TABLE_NAME, response); + } + + String backupTables(final BackupRequest userRequest) throws IOException { + return get( + backupTablesAsync(userRequest), + backupWaitTimeout, + TimeUnit.SECONDS); + } + + public static class TableBackupFuture extends TableFuture { + String backupId; + public TableBackupFuture(final HBaseAdmin admin, final TableName tableName, + final BackupTablesResponse response) { + super(admin, tableName, + (response != null && response.hasProcId()) ? response.getProcId() : null); + backupId = response != null ? 
response.getBackupId() : "-1"; + } + + String getBackupId() { + return backupId; + } + + @Override + public String getOperationType() { + return "BACKUP"; + } + + @Override + protected String convertResult(final GetProcedureResultResponse response) throws IOException { + if (response.hasException()) { + throw ForeignExceptionUtil.toIOException(response.getException()); + } + ByteString result = response.getResult(); + if (result == null) return null; + return Bytes.toStringBinary(result.toByteArray()); + } + + @Override + protected String postOperationResult(final String result, + final long deadlineTs) throws IOException, TimeoutException { + return result; + } + } + @Override public Future modifyTable(final TableName tableName, final HTableDescriptor htd) throws IOException { @@ -2381,18 +2445,14 @@ public class HBaseAdmin implements Admin { snapshot(new SnapshotDescription(snapshotName, tableName.getNameAsString(), type)); } - @Override - public void snapshot(SnapshotDescription snapshotDesc) - throws IOException, SnapshotCreationException, IllegalArgumentException { - // actually take the snapshot + public void waitForSnapshot(SnapshotDescription snapshotDesc, long max, + Connection conn) throws IOException { HBaseProtos.SnapshotDescription snapshot = createHBaseProtosSnapshotDesc(snapshotDesc); - SnapshotResponse response = asyncSnapshot(snapshot); final IsSnapshotDoneRequest request = IsSnapshotDoneRequest.newBuilder().setSnapshot(snapshot).build(); IsSnapshotDoneResponse done = null; long start = EnvironmentEdgeManager.currentTime(); - long max = response.getExpectedTimeout(); - long maxPauseTime = max / this.numRetries; + long maxPauseTime = max / numRetries; int tries = 0; LOG.debug("Waiting a max of " + max + " ms for snapshot '" + ClientSnapshotDescriptionUtils.toString(snapshot) + "'' to complete. (max " + @@ -2401,7 +2461,7 @@ public class HBaseAdmin implements Admin { || ((EnvironmentEdgeManager.currentTime() - start) < max && !done.getDone())) { try { // sleep a backoff <= pauseTime amount - long sleep = getPauseTime(tries++); + long sleep = getPauseTime(tries++, pause); sleep = sleep > maxPauseTime ? maxPauseTime : sleep; LOG.debug("(#" + tries + ") Sleeping: " + sleep + "ms while waiting for snapshot completion."); @@ -2410,7 +2470,7 @@ public class HBaseAdmin implements Admin { throw (InterruptedIOException)new InterruptedIOException("Interrupted").initCause(e); } LOG.debug("Getting current status of snapshot from master..."); - done = executeCallable(new MasterCallable(getConnection()) { + done = executeCallable(new MasterCallable(conn) { @Override public IsSnapshotDoneResponse call(int callTimeout) throws ServiceException { PayloadCarryingRpcController controller = rpcControllerFactory.newController(); @@ -2432,6 +2492,14 @@ public class HBaseAdmin implements Admin { asyncSnapshot(snapshot); } + @Override + public void snapshot(SnapshotDescription snapshot) throws IOException, SnapshotCreationException, + IllegalArgumentException { + // actually take the snapshot + SnapshotResponse response = asyncSnapshot(createHBaseProtosSnapshotDesc(snapshot)); + waitForSnapshot(snapshot, response.getExpectedTimeout(), getConnection()); + } + private HBaseProtos.SnapshotDescription createHBaseProtosSnapshotDesc(SnapshotDescription snapshotDesc) { HBaseProtos.SnapshotDescription.Builder builder = HBaseProtos.SnapshotDescription.newBuilder(); @@ -2671,6 +2739,14 @@ public class HBaseAdmin implements Admin { return response.hasReturnData() ? 
response.getReturnData().toByteArray() : null; } + public static long getPauseTime(int tries, long pause) { + int triesCount = tries; + if (triesCount >= HConstants.RETRY_BACKOFF.length) { + triesCount = HConstants.RETRY_BACKOFF.length - 1; + } + return pause * HConstants.RETRY_BACKOFF[triesCount]; + } + @Override public void execProcedure(String signature, String instance, Map props) throws IOException { @@ -2706,7 +2782,7 @@ public class HBaseAdmin implements Admin { || ((EnvironmentEdgeManager.currentTime() - start) < max && !done)) { try { // sleep a backoff <= pauseTime amount - long sleep = getPauseTime(tries++); + long sleep = getPauseTime(tries++, pause); sleep = sleep > maxPauseTime ? maxPauseTime : sleep; LOG.debug("(#" + tries + ") Sleeping: " + sleep + "ms while waiting for procedure completion."); @@ -2973,6 +3049,12 @@ public class HBaseAdmin implements Admin { return executeCallable(callable, rpcCallerFactory, operationTimeout, rpcTimeout); } + private & Closeable, V> V executeCallable(C callable, + int operationTimeout) + throws IOException { + return executeCallable(callable, rpcCallerFactory, operationTimeout, rpcTimeout); + } + static private & Closeable, V> V executeCallable(C callable, RpcRetryingCallerFactory rpcCallerFactory, int operationTimeout, int rpcTimeout) throws IOException { @@ -3363,7 +3445,7 @@ public class HBaseAdmin implements Admin { } try { - Thread.sleep(getAdmin().getPauseTime(tries++)); + Thread.sleep(getPauseTime(tries++, getAdmin().pause)); } catch (InterruptedException e) { throw new InterruptedException( "Interrupted while waiting for the result of proc " + procId); @@ -3465,7 +3547,7 @@ public class HBaseAdmin implements Admin { serverEx = e; } try { - Thread.sleep(getAdmin().getPauseTime(tries++)); + Thread.sleep(getPauseTime(tries++, getAdmin().pause)); } catch (InterruptedException e) { callable.throwInterruptedException(); } @@ -3642,7 +3724,7 @@ public class HBaseAdmin implements Admin { } try { - Thread.sleep(getAdmin().getPauseTime(tries++)); + Thread.sleep(getPauseTime(tries++, getAdmin().pause)); } catch (InterruptedException e) { throw new InterruptedIOException("Interrupted when opening" + " regions; " + actualRegCount.get() + " of " + numRegs + " regions processed so far"); @@ -3744,6 +3826,11 @@ public class HBaseAdmin implements Admin { }); } + @Override + public BackupAdmin getBackupAdmin() throws IOException { + return new HBaseBackupAdmin(this); + } + private HRegionInfo getMobRegionInfo(TableName tableName) { return new HRegionInfo(tableName, Bytes.toBytes(".mob"), HConstants.EMPTY_END_ROW, false, 0); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java index b3bf041..4aba462 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java @@ -64,6 +64,7 @@ import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.Tag; import org.apache.hadoop.hbase.TagUtil; +import org.apache.hadoop.hbase.backup.BackupType; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.client.Append; import org.apache.hadoop.hbase.client.CompactionState; @@ -106,6 +107,7 @@ import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ServerInfo; import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.SplitRegionRequest; 
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WarmupRegionRequest; import org.apache.hadoop.hbase.protobuf.generated.AuthenticationProtos; +import org.apache.hadoop.hbase.protobuf.generated.BackupProtos; import org.apache.hadoop.hbase.protobuf.generated.CellProtos; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ClientService; @@ -2779,6 +2781,10 @@ public final class ProtobufUtil { return tableNames; } + public static BackupProtos.BackupType toProtoBackupType(BackupType type) { + return BackupProtos.BackupType.valueOf(type.name()); + } + /** * Convert a protocol buffer CellVisibility to a client CellVisibility * diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/RequestConverter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/RequestConverter.java index 5fe2016..7ddf2a9 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/RequestConverter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/RequestConverter.java @@ -30,6 +30,7 @@ import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.backup.BackupType; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.client.Action; import org.apache.hadoop.hbase.client.Append; @@ -63,6 +64,7 @@ import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.StopServerRequest; import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest; import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest.RegionUpdateInfo; import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WarmupRegionRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BulkLoadHFileRequest; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Condition; @@ -1297,6 +1299,22 @@ public final class RequestConverter { return builder.build(); } + public static BackupTablesRequest buildBackupTablesRequest( + final BackupType type, List tableList, String targetRootDir, final int workers, + final long bandwidth) { + BackupTablesRequest.Builder builder = BackupTablesRequest.newBuilder(); + builder.setType(ProtobufUtil.toProtoBackupType(type)); + builder.setTargetRootDir(targetRootDir); + builder.setWorkers(workers); + builder.setBandwidth(bandwidth); + if (tableList != null) { + for (TableName table : tableList) { + builder.addTables(ProtobufUtil.toProtoTableName(table)); + } + } + return builder.build(); + } + /** * Creates a protocol buffer GetSchemaAlterStatusRequest * diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java index 256c374..1423c4d 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java @@ -1267,6 +1267,14 @@ public final class HConstants { public static final String DEFAULT_TEMPORARY_HDFS_DIRECTORY = "/user/" + System.getProperty("user.name") + "/hbase-staging"; + /** + * Backup/Restore constants + */ + public final static String BACKUP_ENABLE_KEY = "hbase.backup.enable"; + public final static 
boolean BACKUP_ENABLE_DEFAULT = true; + public final static String BACKUP_SYSTEM_TTL_KEY = "hbase.backup.system.ttl"; + public final static int BACKUP_SYSTEM_TTL_DEFAULT = FOREVER; + private HConstants() { // Can't be instantiated with this ctor. } diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/TableName.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/TableName.java index 63066b3..54d5f71 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/TableName.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/TableName.java @@ -86,6 +86,10 @@ public final class TableName implements Comparable { public static final TableName NAMESPACE_TABLE_NAME = valueOf(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR, "namespace"); + /** The backup table's name. */ + public static final TableName BACKUP_TABLE_NAME = + valueOf(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR, "backup"); + public static final String OLD_META_STR = ".META."; public static final String OLD_ROOT_STR = "-ROOT-"; diff --git a/hbase-protocol/pom.xml b/hbase-protocol/pom.xml index b7846ca..cae55a7 100644 --- a/hbase-protocol/pom.xml +++ b/hbase-protocol/pom.xml @@ -173,6 +173,7 @@ Admin.proto Aggregate.proto Authentication.proto + Backup.proto Cell.proto Client.proto ClusterId.proto diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java index 4a92e14..d56bc60 100644 --- a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java +++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java @@ -54206,13 +54206,13 @@ public final class MasterProtos { */ long getProcId(); - // optional bool mayInterruptIfRunning = 2 [default = true]; + // optional bool may_interrupt_if_running = 2 [default = true]; /** - * optional bool mayInterruptIfRunning = 2 [default = true]; + * optional bool may_interrupt_if_running = 2 [default = true]; */ boolean hasMayInterruptIfRunning(); /** - * optional bool mayInterruptIfRunning = 2 [default = true]; + * optional bool may_interrupt_if_running = 2 [default = true]; */ boolean getMayInterruptIfRunning(); } @@ -54333,17 +54333,17 @@ public final class MasterProtos { return procId_; } - // optional bool mayInterruptIfRunning = 2 [default = true]; - public static final int MAYINTERRUPTIFRUNNING_FIELD_NUMBER = 2; + // optional bool may_interrupt_if_running = 2 [default = true]; + public static final int MAY_INTERRUPT_IF_RUNNING_FIELD_NUMBER = 2; private boolean mayInterruptIfRunning_; /** - * optional bool mayInterruptIfRunning = 2 [default = true]; + * optional bool may_interrupt_if_running = 2 [default = true]; */ public boolean hasMayInterruptIfRunning() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** - * optional bool mayInterruptIfRunning = 2 [default = true]; + * optional bool may_interrupt_if_running = 2 [default = true]; */ public boolean getMayInterruptIfRunning() { return mayInterruptIfRunning_; @@ -54443,7 +54443,7 @@ public final class MasterProtos { hash = (53 * hash) + hashLong(getProcId()); } if (hasMayInterruptIfRunning()) { - hash = (37 * hash) + MAYINTERRUPTIFRUNNING_FIELD_NUMBER; + hash = (37 * hash) + MAY_INTERRUPT_IF_RUNNING_FIELD_NUMBER; hash = (53 * hash) + hashBoolean(getMayInterruptIfRunning()); } hash = (29 * hash) + getUnknownFields().hashCode(); @@ -54681,22 +54681,22 @@ public final class MasterProtos { return this; } - // optional 
bool mayInterruptIfRunning = 2 [default = true]; + // optional bool may_interrupt_if_running = 2 [default = true]; private boolean mayInterruptIfRunning_ = true; /** - * optional bool mayInterruptIfRunning = 2 [default = true]; + * optional bool may_interrupt_if_running = 2 [default = true]; */ public boolean hasMayInterruptIfRunning() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** - * optional bool mayInterruptIfRunning = 2 [default = true]; + * optional bool may_interrupt_if_running = 2 [default = true]; */ public boolean getMayInterruptIfRunning() { return mayInterruptIfRunning_; } /** - * optional bool mayInterruptIfRunning = 2 [default = true]; + * optional bool may_interrupt_if_running = 2 [default = true]; */ public Builder setMayInterruptIfRunning(boolean value) { bitField0_ |= 0x00000002; @@ -54705,7 +54705,7 @@ public final class MasterProtos { return this; } /** - * optional bool mayInterruptIfRunning = 2 [default = true]; + * optional bool may_interrupt_if_running = 2 [default = true]; */ public Builder clearMayInterruptIfRunning() { bitField0_ = (bitField0_ & ~0x00000002); @@ -60541,6 +60541,1789 @@ public final class MasterProtos { // @@protoc_insertion_point(class_scope:hbase.pb.SecurityCapabilitiesResponse) } + public interface BackupTablesRequestOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required .hbase.pb.BackupType type = 1; + /** + * required .hbase.pb.BackupType type = 1; + */ + boolean hasType(); + /** + * required .hbase.pb.BackupType type = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType getType(); + + // repeated .hbase.pb.TableName tables = 2; + /** + * repeated .hbase.pb.TableName tables = 2; + */ + java.util.List + getTablesList(); + /** + * repeated .hbase.pb.TableName tables = 2; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTables(int index); + /** + * repeated .hbase.pb.TableName tables = 2; + */ + int getTablesCount(); + /** + * repeated .hbase.pb.TableName tables = 2; + */ + java.util.List + getTablesOrBuilderList(); + /** + * repeated .hbase.pb.TableName tables = 2; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTablesOrBuilder( + int index); + + // required string target_root_dir = 3; + /** + * required string target_root_dir = 3; + */ + boolean hasTargetRootDir(); + /** + * required string target_root_dir = 3; + */ + java.lang.String getTargetRootDir(); + /** + * required string target_root_dir = 3; + */ + com.google.protobuf.ByteString + getTargetRootDirBytes(); + + // optional int64 workers = 4; + /** + * optional int64 workers = 4; + */ + boolean hasWorkers(); + /** + * optional int64 workers = 4; + */ + long getWorkers(); + + // optional int64 bandwidth = 5; + /** + * optional int64 bandwidth = 5; + */ + boolean hasBandwidth(); + /** + * optional int64 bandwidth = 5; + */ + long getBandwidth(); + } + /** + * Protobuf type {@code hbase.pb.BackupTablesRequest} + */ + public static final class BackupTablesRequest extends + com.google.protobuf.GeneratedMessage + implements BackupTablesRequestOrBuilder { + // Use BackupTablesRequest.newBuilder() to construct. 
+ private BackupTablesRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private BackupTablesRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final BackupTablesRequest defaultInstance; + public static BackupTablesRequest getDefaultInstance() { + return defaultInstance; + } + + public BackupTablesRequest getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private BackupTablesRequest( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 8: { + int rawValue = input.readEnum(); + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType value = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType.valueOf(rawValue); + if (value == null) { + unknownFields.mergeVarintField(1, rawValue); + } else { + bitField0_ |= 0x00000001; + type_ = value; + } + break; + } + case 18: { + if (!((mutable_bitField0_ & 0x00000002) == 0x00000002)) { + tables_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000002; + } + tables_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.PARSER, extensionRegistry)); + break; + } + case 26: { + bitField0_ |= 0x00000002; + targetRootDir_ = input.readBytes(); + break; + } + case 32: { + bitField0_ |= 0x00000004; + workers_ = input.readInt64(); + break; + } + case 40: { + bitField0_ |= 0x00000008; + bandwidth_ = input.readInt64(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000002) == 0x00000002)) { + tables_ = java.util.Collections.unmodifiableList(tables_); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_BackupTablesRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_BackupTablesRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public BackupTablesRequest parsePartialFrom( + 
com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new BackupTablesRequest(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required .hbase.pb.BackupType type = 1; + public static final int TYPE_FIELD_NUMBER = 1; + private org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType type_; + /** + * required .hbase.pb.BackupType type = 1; + */ + public boolean hasType() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .hbase.pb.BackupType type = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType getType() { + return type_; + } + + // repeated .hbase.pb.TableName tables = 2; + public static final int TABLES_FIELD_NUMBER = 2; + private java.util.List tables_; + /** + * repeated .hbase.pb.TableName tables = 2; + */ + public java.util.List getTablesList() { + return tables_; + } + /** + * repeated .hbase.pb.TableName tables = 2; + */ + public java.util.List + getTablesOrBuilderList() { + return tables_; + } + /** + * repeated .hbase.pb.TableName tables = 2; + */ + public int getTablesCount() { + return tables_.size(); + } + /** + * repeated .hbase.pb.TableName tables = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTables(int index) { + return tables_.get(index); + } + /** + * repeated .hbase.pb.TableName tables = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTablesOrBuilder( + int index) { + return tables_.get(index); + } + + // required string target_root_dir = 3; + public static final int TARGET_ROOT_DIR_FIELD_NUMBER = 3; + private java.lang.Object targetRootDir_; + /** + * required string target_root_dir = 3; + */ + public boolean hasTargetRootDir() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required string target_root_dir = 3; + */ + public java.lang.String getTargetRootDir() { + java.lang.Object ref = targetRootDir_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + targetRootDir_ = s; + } + return s; + } + } + /** + * required string target_root_dir = 3; + */ + public com.google.protobuf.ByteString + getTargetRootDirBytes() { + java.lang.Object ref = targetRootDir_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + targetRootDir_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + // optional int64 workers = 4; + public static final int WORKERS_FIELD_NUMBER = 4; + private long workers_; + /** + * optional int64 workers = 4; + */ + public boolean hasWorkers() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * optional int64 workers = 4; + */ + public long getWorkers() { + return workers_; + } + + // optional int64 bandwidth = 5; + public static final int BANDWIDTH_FIELD_NUMBER = 5; + private long bandwidth_; + /** + * optional int64 bandwidth = 5; + */ + public boolean hasBandwidth() { + return ((bitField0_ & 0x00000008) == 0x00000008); + } + /** + * optional int64 bandwidth = 5; + */ + public long getBandwidth() { + return bandwidth_; 
+ } + + private void initFields() { + type_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType.FULL; + tables_ = java.util.Collections.emptyList(); + targetRootDir_ = ""; + workers_ = 0L; + bandwidth_ = 0L; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasType()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasTargetRootDir()) { + memoizedIsInitialized = 0; + return false; + } + for (int i = 0; i < getTablesCount(); i++) { + if (!getTables(i).isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeEnum(1, type_.getNumber()); + } + for (int i = 0; i < tables_.size(); i++) { + output.writeMessage(2, tables_.get(i)); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeBytes(3, getTargetRootDirBytes()); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + output.writeInt64(4, workers_); + } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + output.writeInt64(5, bandwidth_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeEnumSize(1, type_.getNumber()); + } + for (int i = 0; i < tables_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(2, tables_.get(i)); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(3, getTargetRootDirBytes()); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + size += com.google.protobuf.CodedOutputStream + .computeInt64Size(4, workers_); + } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + size += com.google.protobuf.CodedOutputStream + .computeInt64Size(5, bandwidth_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest) obj; + + boolean result = true; + result = result && (hasType() == other.hasType()); + if (hasType()) { + result = result && + (getType() == other.getType()); + } + result = result && getTablesList() + .equals(other.getTablesList()); + result = result && (hasTargetRootDir() == other.hasTargetRootDir()); + if (hasTargetRootDir()) { + result = result && getTargetRootDir() + .equals(other.getTargetRootDir()); + } + result = result && (hasWorkers() == other.hasWorkers()); + if (hasWorkers()) { + result = result && (getWorkers() + == other.getWorkers()); + } + result = result 
&& (hasBandwidth() == other.hasBandwidth()); + if (hasBandwidth()) { + result = result && (getBandwidth() + == other.getBandwidth()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasType()) { + hash = (37 * hash) + TYPE_FIELD_NUMBER; + hash = (53 * hash) + hashEnum(getType()); + } + if (getTablesCount() > 0) { + hash = (37 * hash) + TABLES_FIELD_NUMBER; + hash = (53 * hash) + getTablesList().hashCode(); + } + if (hasTargetRootDir()) { + hash = (37 * hash) + TARGET_ROOT_DIR_FIELD_NUMBER; + hash = (53 * hash) + getTargetRootDir().hashCode(); + } + if (hasWorkers()) { + hash = (37 * hash) + WORKERS_FIELD_NUMBER; + hash = (53 * hash) + hashLong(getWorkers()); + } + if (hasBandwidth()) { + hash = (37 * hash) + BANDWIDTH_FIELD_NUMBER; + hash = (53 * hash) + hashLong(getBandwidth()); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code hbase.pb.BackupTablesRequest} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_BackupTablesRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_BackupTablesRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getTablesFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + type_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType.FULL; + bitField0_ = (bitField0_ & ~0x00000001); + if (tablesBuilder_ == null) { + tables_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000002); + } else { + tablesBuilder_.clear(); + } + targetRootDir_ = ""; + bitField0_ = (bitField0_ & ~0x00000004); + workers_ = 0L; + bitField0_ = (bitField0_ & ~0x00000008); + bandwidth_ = 0L; + bitField0_ = (bitField0_ & ~0x00000010); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_BackupTablesRequest_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest build() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest result = buildPartial(); + if 
(!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.type_ = type_; + if (tablesBuilder_ == null) { + if (((bitField0_ & 0x00000002) == 0x00000002)) { + tables_ = java.util.Collections.unmodifiableList(tables_); + bitField0_ = (bitField0_ & ~0x00000002); + } + result.tables_ = tables_; + } else { + result.tables_ = tablesBuilder_.build(); + } + if (((from_bitField0_ & 0x00000004) == 0x00000004)) { + to_bitField0_ |= 0x00000002; + } + result.targetRootDir_ = targetRootDir_; + if (((from_bitField0_ & 0x00000008) == 0x00000008)) { + to_bitField0_ |= 0x00000004; + } + result.workers_ = workers_; + if (((from_bitField0_ & 0x00000010) == 0x00000010)) { + to_bitField0_ |= 0x00000008; + } + result.bandwidth_ = bandwidth_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest.getDefaultInstance()) return this; + if (other.hasType()) { + setType(other.getType()); + } + if (tablesBuilder_ == null) { + if (!other.tables_.isEmpty()) { + if (tables_.isEmpty()) { + tables_ = other.tables_; + bitField0_ = (bitField0_ & ~0x00000002); + } else { + ensureTablesIsMutable(); + tables_.addAll(other.tables_); + } + onChanged(); + } + } else { + if (!other.tables_.isEmpty()) { + if (tablesBuilder_.isEmpty()) { + tablesBuilder_.dispose(); + tablesBuilder_ = null; + tables_ = other.tables_; + bitField0_ = (bitField0_ & ~0x00000002); + tablesBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? 
+ getTablesFieldBuilder() : null; + } else { + tablesBuilder_.addAllMessages(other.tables_); + } + } + } + if (other.hasTargetRootDir()) { + bitField0_ |= 0x00000004; + targetRootDir_ = other.targetRootDir_; + onChanged(); + } + if (other.hasWorkers()) { + setWorkers(other.getWorkers()); + } + if (other.hasBandwidth()) { + setBandwidth(other.getBandwidth()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasType()) { + + return false; + } + if (!hasTargetRootDir()) { + + return false; + } + for (int i = 0; i < getTablesCount(); i++) { + if (!getTables(i).isInitialized()) { + + return false; + } + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required .hbase.pb.BackupType type = 1; + private org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType type_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType.FULL; + /** + * required .hbase.pb.BackupType type = 1; + */ + public boolean hasType() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .hbase.pb.BackupType type = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType getType() { + return type_; + } + /** + * required .hbase.pb.BackupType type = 1; + */ + public Builder setType(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + type_ = value; + onChanged(); + return this; + } + /** + * required .hbase.pb.BackupType type = 1; + */ + public Builder clearType() { + bitField0_ = (bitField0_ & ~0x00000001); + type_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType.FULL; + onChanged(); + return this; + } + + // repeated .hbase.pb.TableName tables = 2; + private java.util.List tables_ = + java.util.Collections.emptyList(); + private void ensureTablesIsMutable() { + if (!((bitField0_ & 0x00000002) == 0x00000002)) { + tables_ = new java.util.ArrayList(tables_); + bitField0_ |= 0x00000002; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> tablesBuilder_; + + /** + * repeated .hbase.pb.TableName tables = 2; + */ + public java.util.List getTablesList() { + if (tablesBuilder_ == null) { + return java.util.Collections.unmodifiableList(tables_); + } else { + return tablesBuilder_.getMessageList(); + } + } + /** + * repeated .hbase.pb.TableName tables = 2; + */ + public int getTablesCount() { + if (tablesBuilder_ == null) { + return tables_.size(); + } else { + return tablesBuilder_.getCount(); + } + } + /** + * repeated .hbase.pb.TableName tables = 2; + */ + public 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTables(int index) { + if (tablesBuilder_ == null) { + return tables_.get(index); + } else { + return tablesBuilder_.getMessage(index); + } + } + /** + * repeated .hbase.pb.TableName tables = 2; + */ + public Builder setTables( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { + if (tablesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureTablesIsMutable(); + tables_.set(index, value); + onChanged(); + } else { + tablesBuilder_.setMessage(index, value); + } + return this; + } + /** + * repeated .hbase.pb.TableName tables = 2; + */ + public Builder setTables( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) { + if (tablesBuilder_ == null) { + ensureTablesIsMutable(); + tables_.set(index, builderForValue.build()); + onChanged(); + } else { + tablesBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.TableName tables = 2; + */ + public Builder addTables(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { + if (tablesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureTablesIsMutable(); + tables_.add(value); + onChanged(); + } else { + tablesBuilder_.addMessage(value); + } + return this; + } + /** + * repeated .hbase.pb.TableName tables = 2; + */ + public Builder addTables( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { + if (tablesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureTablesIsMutable(); + tables_.add(index, value); + onChanged(); + } else { + tablesBuilder_.addMessage(index, value); + } + return this; + } + /** + * repeated .hbase.pb.TableName tables = 2; + */ + public Builder addTables( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) { + if (tablesBuilder_ == null) { + ensureTablesIsMutable(); + tables_.add(builderForValue.build()); + onChanged(); + } else { + tablesBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.TableName tables = 2; + */ + public Builder addTables( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) { + if (tablesBuilder_ == null) { + ensureTablesIsMutable(); + tables_.add(index, builderForValue.build()); + onChanged(); + } else { + tablesBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.TableName tables = 2; + */ + public Builder addAllTables( + java.lang.Iterable values) { + if (tablesBuilder_ == null) { + ensureTablesIsMutable(); + super.addAll(values, tables_); + onChanged(); + } else { + tablesBuilder_.addAllMessages(values); + } + return this; + } + /** + * repeated .hbase.pb.TableName tables = 2; + */ + public Builder clearTables() { + if (tablesBuilder_ == null) { + tables_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + } else { + tablesBuilder_.clear(); + } + return this; + } + /** + * repeated .hbase.pb.TableName tables = 2; + */ + public Builder removeTables(int index) { + if (tablesBuilder_ == null) { + ensureTablesIsMutable(); + tables_.remove(index); + onChanged(); + } else { + tablesBuilder_.remove(index); + } + return this; + } + /** + * repeated .hbase.pb.TableName tables = 2; + */ + public 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder getTablesBuilder( + int index) { + return getTablesFieldBuilder().getBuilder(index); + } + /** + * repeated .hbase.pb.TableName tables = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTablesOrBuilder( + int index) { + if (tablesBuilder_ == null) { + return tables_.get(index); } else { + return tablesBuilder_.getMessageOrBuilder(index); + } + } + /** + * repeated .hbase.pb.TableName tables = 2; + */ + public java.util.List + getTablesOrBuilderList() { + if (tablesBuilder_ != null) { + return tablesBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(tables_); + } + } + /** + * repeated .hbase.pb.TableName tables = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder addTablesBuilder() { + return getTablesFieldBuilder().addBuilder( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance()); + } + /** + * repeated .hbase.pb.TableName tables = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder addTablesBuilder( + int index) { + return getTablesFieldBuilder().addBuilder( + index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance()); + } + /** + * repeated .hbase.pb.TableName tables = 2; + */ + public java.util.List + getTablesBuilderList() { + return getTablesFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> + getTablesFieldBuilder() { + if (tablesBuilder_ == null) { + tablesBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder>( + tables_, + ((bitField0_ & 0x00000002) == 0x00000002), + getParentForChildren(), + isClean()); + tables_ = null; + } + return tablesBuilder_; + } + + // required string target_root_dir = 3; + private java.lang.Object targetRootDir_ = ""; + /** + * required string target_root_dir = 3; + */ + public boolean hasTargetRootDir() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * required string target_root_dir = 3; + */ + public java.lang.String getTargetRootDir() { + java.lang.Object ref = targetRootDir_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + targetRootDir_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * required string target_root_dir = 3; + */ + public com.google.protobuf.ByteString + getTargetRootDirBytes() { + java.lang.Object ref = targetRootDir_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + targetRootDir_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * required string target_root_dir = 3; + */ + public Builder setTargetRootDir( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000004; + targetRootDir_ = value; + onChanged(); + return this; + } + /** + * required string 
target_root_dir = 3; + */ + public Builder clearTargetRootDir() { + bitField0_ = (bitField0_ & ~0x00000004); + targetRootDir_ = getDefaultInstance().getTargetRootDir(); + onChanged(); + return this; + } + /** + * required string target_root_dir = 3; + */ + public Builder setTargetRootDirBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000004; + targetRootDir_ = value; + onChanged(); + return this; + } + + // optional int64 workers = 4; + private long workers_ ; + /** + * optional int64 workers = 4; + */ + public boolean hasWorkers() { + return ((bitField0_ & 0x00000008) == 0x00000008); + } + /** + * optional int64 workers = 4; + */ + public long getWorkers() { + return workers_; + } + /** + * optional int64 workers = 4; + */ + public Builder setWorkers(long value) { + bitField0_ |= 0x00000008; + workers_ = value; + onChanged(); + return this; + } + /** + * optional int64 workers = 4; + */ + public Builder clearWorkers() { + bitField0_ = (bitField0_ & ~0x00000008); + workers_ = 0L; + onChanged(); + return this; + } + + // optional int64 bandwidth = 5; + private long bandwidth_ ; + /** + * optional int64 bandwidth = 5; + */ + public boolean hasBandwidth() { + return ((bitField0_ & 0x00000010) == 0x00000010); + } + /** + * optional int64 bandwidth = 5; + */ + public long getBandwidth() { + return bandwidth_; + } + /** + * optional int64 bandwidth = 5; + */ + public Builder setBandwidth(long value) { + bitField0_ |= 0x00000010; + bandwidth_ = value; + onChanged(); + return this; + } + /** + * optional int64 bandwidth = 5; + */ + public Builder clearBandwidth() { + bitField0_ = (bitField0_ & ~0x00000010); + bandwidth_ = 0L; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:hbase.pb.BackupTablesRequest) + } + + static { + defaultInstance = new BackupTablesRequest(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:hbase.pb.BackupTablesRequest) + } + + public interface BackupTablesResponseOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // optional uint64 proc_id = 1; + /** + * optional uint64 proc_id = 1; + */ + boolean hasProcId(); + /** + * optional uint64 proc_id = 1; + */ + long getProcId(); + + // optional string backup_id = 2; + /** + * optional string backup_id = 2; + */ + boolean hasBackupId(); + /** + * optional string backup_id = 2; + */ + java.lang.String getBackupId(); + /** + * optional string backup_id = 2; + */ + com.google.protobuf.ByteString + getBackupIdBytes(); + } + /** + * Protobuf type {@code hbase.pb.BackupTablesResponse} + */ + public static final class BackupTablesResponse extends + com.google.protobuf.GeneratedMessage + implements BackupTablesResponseOrBuilder { + // Use BackupTablesResponse.newBuilder() to construct. 
+ private BackupTablesResponse(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private BackupTablesResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final BackupTablesResponse defaultInstance; + public static BackupTablesResponse getDefaultInstance() { + return defaultInstance; + } + + public BackupTablesResponse getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private BackupTablesResponse( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 8: { + bitField0_ |= 0x00000001; + procId_ = input.readUInt64(); + break; + } + case 18: { + bitField0_ |= 0x00000002; + backupId_ = input.readBytes(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_BackupTablesResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_BackupTablesResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public BackupTablesResponse parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new BackupTablesResponse(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // optional uint64 proc_id = 1; + public static final int PROC_ID_FIELD_NUMBER = 1; + private long procId_; + /** + * optional uint64 proc_id = 1; + */ + public boolean hasProcId() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional uint64 proc_id = 1; + */ + public long getProcId() { + return procId_; + } + + // optional string backup_id = 2; + public static final int BACKUP_ID_FIELD_NUMBER = 2; + private java.lang.Object backupId_; + /** + * optional string 
backup_id = 2; + */ + public boolean hasBackupId() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * optional string backup_id = 2; + */ + public java.lang.String getBackupId() { + java.lang.Object ref = backupId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + backupId_ = s; + } + return s; + } + } + /** + * optional string backup_id = 2; + */ + public com.google.protobuf.ByteString + getBackupIdBytes() { + java.lang.Object ref = backupId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + backupId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private void initFields() { + procId_ = 0L; + backupId_ = ""; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeUInt64(1, procId_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeBytes(2, getBackupIdBytes()); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt64Size(1, procId_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(2, getBackupIdBytes()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse) obj; + + boolean result = true; + result = result && (hasProcId() == other.hasProcId()); + if (hasProcId()) { + result = result && (getProcId() + == other.getProcId()); + } + result = result && (hasBackupId() == other.hasBackupId()); + if (hasBackupId()) { + result = result && getBackupId() + .equals(other.getBackupId()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasProcId()) { + hash = (37 * hash) + PROC_ID_FIELD_NUMBER; + hash = (53 * hash) + hashLong(getProcId()); + } + if (hasBackupId()) { + hash = (37 * hash) + BACKUP_ID_FIELD_NUMBER; + 
hash = (53 * hash) + getBackupId().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code hbase.pb.BackupTablesResponse} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_BackupTablesResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_BackupTablesResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + procId_ = 0L; + bitField0_ = (bitField0_ & ~0x00000001); + backupId_ = ""; + bitField0_ = (bitField0_ & ~0x00000002); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_BackupTablesResponse_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse build() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.procId_ = procId_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.backupId_ = backupId_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse.getDefaultInstance()) return this; + if (other.hasProcId()) { + 
setProcId(other.getProcId()); + } + if (other.hasBackupId()) { + bitField0_ |= 0x00000002; + backupId_ = other.backupId_; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // optional uint64 proc_id = 1; + private long procId_ ; + /** + * optional uint64 proc_id = 1; + */ + public boolean hasProcId() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional uint64 proc_id = 1; + */ + public long getProcId() { + return procId_; + } + /** + * optional uint64 proc_id = 1; + */ + public Builder setProcId(long value) { + bitField0_ |= 0x00000001; + procId_ = value; + onChanged(); + return this; + } + /** + * optional uint64 proc_id = 1; + */ + public Builder clearProcId() { + bitField0_ = (bitField0_ & ~0x00000001); + procId_ = 0L; + onChanged(); + return this; + } + + // optional string backup_id = 2; + private java.lang.Object backupId_ = ""; + /** + * optional string backup_id = 2; + */ + public boolean hasBackupId() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * optional string backup_id = 2; + */ + public java.lang.String getBackupId() { + java.lang.Object ref = backupId_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + backupId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * optional string backup_id = 2; + */ + public com.google.protobuf.ByteString + getBackupIdBytes() { + java.lang.Object ref = backupId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + backupId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * optional string backup_id = 2; + */ + public Builder setBackupId( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000002; + backupId_ = value; + onChanged(); + return this; + } + /** + * optional string backup_id = 2; + */ + public Builder clearBackupId() { + bitField0_ = (bitField0_ & ~0x00000002); + backupId_ = getDefaultInstance().getBackupId(); + onChanged(); + return this; + } + /** + * optional string backup_id = 2; + */ + public Builder setBackupIdBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000002; + backupId_ = value; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:hbase.pb.BackupTablesResponse) + } + + static { + defaultInstance = new BackupTablesResponse(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:hbase.pb.BackupTablesResponse) + } + /** * Protobuf service {@code hbase.pb.MasterService} */ @@ 
-61262,6 +63045,18 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest request, com.google.protobuf.RpcCallback done); + /** + * rpc backupTables(.hbase.pb.BackupTablesRequest) returns (.hbase.pb.BackupTablesResponse); + * + *
+       * <pre>
+       ** backup table set
+       * </pre>
+ */ + public abstract void backupTables( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest request, + com.google.protobuf.RpcCallback done); + } public static com.google.protobuf.Service newReflectiveService( @@ -61723,6 +63518,14 @@ public final class MasterProtos { impl.listProcedures(controller, request, done); } + @java.lang.Override + public void backupTables( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest request, + com.google.protobuf.RpcCallback done) { + impl.backupTables(controller, request, done); + } + }; } @@ -61859,6 +63662,8 @@ public final class MasterProtos { return impl.abortProcedure(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest)request); case 56: return impl.listProcedures(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest)request); + case 57: + return impl.backupTables(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest)request); default: throw new java.lang.AssertionError("Can't get here."); } @@ -61987,6 +63792,8 @@ public final class MasterProtos { return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest.getDefaultInstance(); case 56: return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest.getDefaultInstance(); + case 57: + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); } @@ -62115,6 +63922,8 @@ public final class MasterProtos { return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse.getDefaultInstance(); case 56: return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse.getDefaultInstance(); + case 57: + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); } @@ -62836,6 +64645,18 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest request, com.google.protobuf.RpcCallback done); + /** + * rpc backupTables(.hbase.pb.BackupTablesRequest) returns (.hbase.pb.BackupTablesResponse); + * + *
+     * <pre>
+     ** backup table set
+     * </pre>
+ */ + public abstract void backupTables( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest request, + com.google.protobuf.RpcCallback done); + public static final com.google.protobuf.Descriptors.ServiceDescriptor getDescriptor() { @@ -63143,6 +64964,11 @@ public final class MasterProtos { com.google.protobuf.RpcUtil.specializeCallback( done)); return; + case 57: + this.backupTables(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest)request, + com.google.protobuf.RpcUtil.specializeCallback( + done)); + return; default: throw new java.lang.AssertionError("Can't get here."); } @@ -63271,6 +65097,8 @@ public final class MasterProtos { return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest.getDefaultInstance(); case 56: return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest.getDefaultInstance(); + case 57: + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); } @@ -63399,6 +65227,8 @@ public final class MasterProtos { return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse.getDefaultInstance(); case 56: return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse.getDefaultInstance(); + case 57: + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); } @@ -64274,6 +66104,21 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse.getDefaultInstance())); } + + public void backupTables( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest request, + com.google.protobuf.RpcCallback done) { + channel.callMethod( + getDescriptor().getMethods().get(57), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse.getDefaultInstance(), + com.google.protobuf.RpcUtil.generalizeCallback( + done, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse.class, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse.getDefaultInstance())); + } } public static BlockingInterface newBlockingStub( @@ -64566,6 +66411,11 @@ public final class MasterProtos { com.google.protobuf.RpcController controller, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest request) throws com.google.protobuf.ServiceException; + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse backupTables( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest request) + throws com.google.protobuf.ServiceException; } private static final class BlockingStub implements BlockingInterface { @@ -65258,6 +67108,18 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse.getDefaultInstance()); } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse backupTables( + com.google.protobuf.RpcController controller, + 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest request) + throws com.google.protobuf.ServiceException { + return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse) channel.callBlockingMethod( + getDescriptor().getMethods().get(57), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse.getDefaultInstance()); + } + } // @@protoc_insertion_point(class_scope:hbase.pb.MasterService) @@ -65818,6 +67680,16 @@ public final class MasterProtos { private static com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hbase_pb_SecurityCapabilitiesResponse_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_hbase_pb_BackupTablesRequest_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_hbase_pb_BackupTablesRequest_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_hbase_pb_BackupTablesResponse_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_hbase_pb_BackupTablesResponse_fieldAccessorTable; public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { @@ -65827,337 +67699,345 @@ public final class MasterProtos { descriptor; static { java.lang.String[] descriptorData = { - "\n\014Master.proto\022\010hbase.pb\032\013HBase.proto\032\014C" + - "lient.proto\032\023ClusterStatus.proto\032\023ErrorH" + - "andling.proto\032\017Procedure.proto\032\013Quota.pr" + - "oto\"\234\001\n\020AddColumnRequest\022\'\n\ntable_name\030\001" + - " \002(\0132\023.hbase.pb.TableName\0225\n\017column_fami" + - "lies\030\002 \002(\0132\034.hbase.pb.ColumnFamilySchema" + - "\022\026\n\013nonce_group\030\003 \001(\004:\0010\022\020\n\005nonce\030\004 \001(\004:" + - "\0010\"$\n\021AddColumnResponse\022\017\n\007proc_id\030\001 \001(\004" + - "\"}\n\023DeleteColumnRequest\022\'\n\ntable_name\030\001 " + - "\002(\0132\023.hbase.pb.TableName\022\023\n\013column_name\030", - "\002 \002(\014\022\026\n\013nonce_group\030\003 \001(\004:\0010\022\020\n\005nonce\030\004" + - " \001(\004:\0010\"\'\n\024DeleteColumnResponse\022\017\n\007proc_" + - "id\030\001 \001(\004\"\237\001\n\023ModifyColumnRequest\022\'\n\ntabl" + - "e_name\030\001 \002(\0132\023.hbase.pb.TableName\0225\n\017col" + - "umn_families\030\002 \002(\0132\034.hbase.pb.ColumnFami" + - "lySchema\022\026\n\013nonce_group\030\003 \001(\004:\0010\022\020\n\005nonc" + - "e\030\004 \001(\004:\0010\"\'\n\024ModifyColumnResponse\022\017\n\007pr" + - "oc_id\030\001 \001(\004\"n\n\021MoveRegionRequest\022)\n\006regi" + - "on\030\001 \002(\0132\031.hbase.pb.RegionSpecifier\022.\n\020d" + - "est_server_name\030\002 \001(\0132\024.hbase.pb.ServerN", - "ame\"\024\n\022MoveRegionResponse\"\274\001\n\035DispatchMe" + - "rgingRegionsRequest\022+\n\010region_a\030\001 \002(\0132\031." 
+ - "hbase.pb.RegionSpecifier\022+\n\010region_b\030\002 \002" + - "(\0132\031.hbase.pb.RegionSpecifier\022\027\n\010forcibl" + - "e\030\003 \001(\010:\005false\022\026\n\013nonce_group\030\004 \001(\004:\0010\022\020" + - "\n\005nonce\030\005 \001(\004:\0010\"1\n\036DispatchMergingRegio" + - "nsResponse\022\017\n\007proc_id\030\001 \001(\004\"@\n\023AssignReg" + - "ionRequest\022)\n\006region\030\001 \002(\0132\031.hbase.pb.Re" + - "gionSpecifier\"\026\n\024AssignRegionResponse\"X\n" + - "\025UnassignRegionRequest\022)\n\006region\030\001 \002(\0132\031", - ".hbase.pb.RegionSpecifier\022\024\n\005force\030\002 \001(\010" + - ":\005false\"\030\n\026UnassignRegionResponse\"A\n\024Off" + - "lineRegionRequest\022)\n\006region\030\001 \002(\0132\031.hbas" + - "e.pb.RegionSpecifier\"\027\n\025OfflineRegionRes" + - "ponse\"\177\n\022CreateTableRequest\022+\n\014table_sch" + - "ema\030\001 \002(\0132\025.hbase.pb.TableSchema\022\022\n\nspli" + - "t_keys\030\002 \003(\014\022\026\n\013nonce_group\030\003 \001(\004:\0010\022\020\n\005" + - "nonce\030\004 \001(\004:\0010\"&\n\023CreateTableResponse\022\017\n" + - "\007proc_id\030\001 \001(\004\"g\n\022DeleteTableRequest\022\'\n\n" + - "table_name\030\001 \002(\0132\023.hbase.pb.TableName\022\026\n", - "\013nonce_group\030\002 \001(\004:\0010\022\020\n\005nonce\030\003 \001(\004:\0010\"" + - "&\n\023DeleteTableResponse\022\017\n\007proc_id\030\001 \001(\004\"" + - "\207\001\n\024TruncateTableRequest\022&\n\ttableName\030\001 " + - "\002(\0132\023.hbase.pb.TableName\022\035\n\016preserveSpli" + - "ts\030\002 \001(\010:\005false\022\026\n\013nonce_group\030\003 \001(\004:\0010\022" + - "\020\n\005nonce\030\004 \001(\004:\0010\"(\n\025TruncateTableRespon" + - "se\022\017\n\007proc_id\030\001 \001(\004\"g\n\022EnableTableReques" + + "\n\014Master.proto\022\010hbase.pb\032\013HBase.proto\032\014B" + + "ackup.proto\032\014Client.proto\032\023ClusterStatus" + + ".proto\032\023ErrorHandling.proto\032\017Procedure.p" + + "roto\032\013Quota.proto\"\234\001\n\020AddColumnRequest\022\'" + + "\n\ntable_name\030\001 \002(\0132\023.hbase.pb.TableName\022" + + "5\n\017column_families\030\002 \002(\0132\034.hbase.pb.Colu" + + "mnFamilySchema\022\026\n\013nonce_group\030\003 \001(\004:\0010\022\020" + + "\n\005nonce\030\004 \001(\004:\0010\"$\n\021AddColumnResponse\022\017\n" + + "\007proc_id\030\001 \001(\004\"}\n\023DeleteColumnRequest\022\'\n" + + "\ntable_name\030\001 \002(\0132\023.hbase.pb.TableName\022\023", + "\n\013column_name\030\002 \002(\014\022\026\n\013nonce_group\030\003 \001(\004" + + ":\0010\022\020\n\005nonce\030\004 \001(\004:\0010\"\'\n\024DeleteColumnRes" + + "ponse\022\017\n\007proc_id\030\001 \001(\004\"\237\001\n\023ModifyColumnR" + + "equest\022\'\n\ntable_name\030\001 \002(\0132\023.hbase.pb.Ta" + + "bleName\0225\n\017column_families\030\002 \002(\0132\034.hbase" + + ".pb.ColumnFamilySchema\022\026\n\013nonce_group\030\003 " + + "\001(\004:\0010\022\020\n\005nonce\030\004 \001(\004:\0010\"\'\n\024ModifyColumn" + + "Response\022\017\n\007proc_id\030\001 \001(\004\"n\n\021MoveRegionR" + + "equest\022)\n\006region\030\001 \002(\0132\031.hbase.pb.Region" + + "Specifier\022.\n\020dest_server_name\030\002 \001(\0132\024.hb", + "ase.pb.ServerName\"\024\n\022MoveRegionResponse\"" + + "\274\001\n\035DispatchMergingRegionsRequest\022+\n\010reg" + + "ion_a\030\001 \002(\0132\031.hbase.pb.RegionSpecifier\022+" + + "\n\010region_b\030\002 \002(\0132\031.hbase.pb.RegionSpecif" + + "ier\022\027\n\010forcible\030\003 
\001(\010:\005false\022\026\n\013nonce_gr" + + "oup\030\004 \001(\004:\0010\022\020\n\005nonce\030\005 \001(\004:\0010\"1\n\036Dispat" + + "chMergingRegionsResponse\022\017\n\007proc_id\030\001 \001(" + + "\004\"@\n\023AssignRegionRequest\022)\n\006region\030\001 \002(\013" + + "2\031.hbase.pb.RegionSpecifier\"\026\n\024AssignReg" + + "ionResponse\"X\n\025UnassignRegionRequest\022)\n\006", + "region\030\001 \002(\0132\031.hbase.pb.RegionSpecifier\022" + + "\024\n\005force\030\002 \001(\010:\005false\"\030\n\026UnassignRegionR" + + "esponse\"A\n\024OfflineRegionRequest\022)\n\006regio" + + "n\030\001 \002(\0132\031.hbase.pb.RegionSpecifier\"\027\n\025Of" + + "flineRegionResponse\"\177\n\022CreateTableReques" + + "t\022+\n\014table_schema\030\001 \002(\0132\025.hbase.pb.Table" + + "Schema\022\022\n\nsplit_keys\030\002 \003(\014\022\026\n\013nonce_grou" + + "p\030\003 \001(\004:\0010\022\020\n\005nonce\030\004 \001(\004:\0010\"&\n\023CreateTa" + + "bleResponse\022\017\n\007proc_id\030\001 \001(\004\"g\n\022DeleteTa" + + "bleRequest\022\'\n\ntable_name\030\001 \002(\0132\023.hbase.p", + "b.TableName\022\026\n\013nonce_group\030\002 \001(\004:\0010\022\020\n\005n" + + "once\030\003 \001(\004:\0010\"&\n\023DeleteTableResponse\022\017\n\007" + + "proc_id\030\001 \001(\004\"\207\001\n\024TruncateTableRequest\022&" + + "\n\ttableName\030\001 \002(\0132\023.hbase.pb.TableName\022\035" + + "\n\016preserveSplits\030\002 \001(\010:\005false\022\026\n\013nonce_g" + + "roup\030\003 \001(\004:\0010\022\020\n\005nonce\030\004 \001(\004:\0010\"(\n\025Trunc" + + "ateTableResponse\022\017\n\007proc_id\030\001 \001(\004\"g\n\022Ena" + + "bleTableRequest\022\'\n\ntable_name\030\001 \002(\0132\023.hb" + + "ase.pb.TableName\022\026\n\013nonce_group\030\002 \001(\004:\0010" + + "\022\020\n\005nonce\030\003 \001(\004:\0010\"&\n\023EnableTableRespons", + "e\022\017\n\007proc_id\030\001 \001(\004\"h\n\023DisableTableReques" + "t\022\'\n\ntable_name\030\001 \002(\0132\023.hbase.pb.TableNa" + "me\022\026\n\013nonce_group\030\002 \001(\004:\0010\022\020\n\005nonce\030\003 \001(" + - "\004:\0010\"&\n\023EnableTableResponse\022\017\n\007proc_id\030\001", - " \001(\004\"h\n\023DisableTableRequest\022\'\n\ntable_nam" + - "e\030\001 \002(\0132\023.hbase.pb.TableName\022\026\n\013nonce_gr" + - "oup\030\002 \001(\004:\0010\022\020\n\005nonce\030\003 \001(\004:\0010\"\'\n\024Disabl" + - "eTableResponse\022\017\n\007proc_id\030\001 \001(\004\"\224\001\n\022Modi" + - "fyTableRequest\022\'\n\ntable_name\030\001 \002(\0132\023.hba" + - "se.pb.TableName\022+\n\014table_schema\030\002 \002(\0132\025." 
+ - "hbase.pb.TableSchema\022\026\n\013nonce_group\030\003 \001(" + - "\004:\0010\022\020\n\005nonce\030\004 \001(\004:\0010\"&\n\023ModifyTableRes" + - "ponse\022\017\n\007proc_id\030\001 \001(\004\"~\n\026CreateNamespac" + - "eRequest\022:\n\023namespaceDescriptor\030\001 \002(\0132\035.", - "hbase.pb.NamespaceDescriptor\022\026\n\013nonce_gr" + - "oup\030\002 \001(\004:\0010\022\020\n\005nonce\030\003 \001(\004:\0010\"*\n\027Create" + - "NamespaceResponse\022\017\n\007proc_id\030\001 \001(\004\"Y\n\026De" + - "leteNamespaceRequest\022\025\n\rnamespaceName\030\001 " + - "\002(\t\022\026\n\013nonce_group\030\002 \001(\004:\0010\022\020\n\005nonce\030\003 \001" + - "(\004:\0010\"*\n\027DeleteNamespaceResponse\022\017\n\007proc" + - "_id\030\001 \001(\004\"~\n\026ModifyNamespaceRequest\022:\n\023n" + - "amespaceDescriptor\030\001 \002(\0132\035.hbase.pb.Name" + - "spaceDescriptor\022\026\n\013nonce_group\030\002 \001(\004:\0010\022" + - "\020\n\005nonce\030\003 \001(\004:\0010\"*\n\027ModifyNamespaceResp", - "onse\022\017\n\007proc_id\030\001 \001(\004\"6\n\035GetNamespaceDes" + - "criptorRequest\022\025\n\rnamespaceName\030\001 \002(\t\"\\\n" + - "\036GetNamespaceDescriptorResponse\022:\n\023names" + - "paceDescriptor\030\001 \002(\0132\035.hbase.pb.Namespac" + - "eDescriptor\"!\n\037ListNamespaceDescriptorsR" + - "equest\"^\n ListNamespaceDescriptorsRespon" + - "se\022:\n\023namespaceDescriptor\030\001 \003(\0132\035.hbase." + - "pb.NamespaceDescriptor\"?\n&ListTableDescr" + - "iptorsByNamespaceRequest\022\025\n\rnamespaceNam" + - "e\030\001 \002(\t\"U\n\'ListTableDescriptorsByNamespa", - "ceResponse\022*\n\013tableSchema\030\001 \003(\0132\025.hbase." + - "pb.TableSchema\"9\n ListTableNamesByNamesp" + - "aceRequest\022\025\n\rnamespaceName\030\001 \002(\t\"K\n!Lis" + - "tTableNamesByNamespaceResponse\022&\n\ttableN" + - "ame\030\001 \003(\0132\023.hbase.pb.TableName\"\021\n\017Shutdo" + - "wnRequest\"\022\n\020ShutdownResponse\"\023\n\021StopMas" + - "terRequest\"\024\n\022StopMasterResponse\"\037\n\016Bala" + - "nceRequest\022\r\n\005force\030\001 \001(\010\"\'\n\017BalanceResp" + - "onse\022\024\n\014balancer_ran\030\001 \002(\010\"<\n\031SetBalance" + - "rRunningRequest\022\n\n\002on\030\001 \002(\010\022\023\n\013synchrono", - "us\030\002 \001(\010\"8\n\032SetBalancerRunningResponse\022\032" + - "\n\022prev_balance_value\030\001 \001(\010\"\032\n\030IsBalancer" + - "EnabledRequest\",\n\031IsBalancerEnabledRespo" + - "nse\022\017\n\007enabled\030\001 \002(\010\"\212\001\n\035SetSplitOrMerge" + - "EnabledRequest\022\017\n\007enabled\030\001 \002(\010\022\023\n\013synch" + - "ronous\030\002 \001(\010\0220\n\014switch_types\030\003 \003(\0162\032.hba" + - "se.pb.MasterSwitchType\022\021\n\tskip_lock\030\004 \001(" + - "\010\"4\n\036SetSplitOrMergeEnabledResponse\022\022\n\np" + - "rev_value\030\001 \003(\010\"O\n\034IsSplitOrMergeEnabled" + - "Request\022/\n\013switch_type\030\001 \002(\0162\032.hbase.pb.", - "MasterSwitchType\"0\n\035IsSplitOrMergeEnable" + - "dResponse\022\017\n\007enabled\030\001 \002(\010\"+\n)ReleaseSpl" + - "itOrMergeLockAndRollbackRequest\",\n*Relea" + - "seSplitOrMergeLockAndRollbackResponse\"\022\n" + - "\020NormalizeRequest\"+\n\021NormalizeResponse\022\026" + - "\n\016normalizer_ran\030\001 \002(\010\")\n\033SetNormalizerR" + - "unningRequest\022\n\n\002on\030\001 \002(\010\"=\n\034SetNormaliz" + - "erRunningResponse\022\035\n\025prev_normalizer_val" + - "ue\030\001 \001(\010\"\034\n\032IsNormalizerEnabledRequest\"." 
+ - "\n\033IsNormalizerEnabledResponse\022\017\n\007enabled", - "\030\001 \002(\010\"\027\n\025RunCatalogScanRequest\"-\n\026RunCa" + - "talogScanResponse\022\023\n\013scan_result\030\001 \001(\005\"-" + - "\n\033EnableCatalogJanitorRequest\022\016\n\006enable\030" + - "\001 \002(\010\"2\n\034EnableCatalogJanitorResponse\022\022\n" + - "\nprev_value\030\001 \001(\010\" \n\036IsCatalogJanitorEna" + - "bledRequest\"0\n\037IsCatalogJanitorEnabledRe" + - "sponse\022\r\n\005value\030\001 \002(\010\"B\n\017SnapshotRequest" + - "\022/\n\010snapshot\030\001 \002(\0132\035.hbase.pb.SnapshotDe" + - "scription\",\n\020SnapshotResponse\022\030\n\020expecte" + - "d_timeout\030\001 \002(\003\"\036\n\034GetCompletedSnapshots", - "Request\"Q\n\035GetCompletedSnapshotsResponse" + - "\0220\n\tsnapshots\030\001 \003(\0132\035.hbase.pb.SnapshotD" + - "escription\"H\n\025DeleteSnapshotRequest\022/\n\010s" + - "napshot\030\001 \002(\0132\035.hbase.pb.SnapshotDescrip" + - "tion\"\030\n\026DeleteSnapshotResponse\"s\n\026Restor" + - "eSnapshotRequest\022/\n\010snapshot\030\001 \002(\0132\035.hba" + - "se.pb.SnapshotDescription\022\026\n\013nonce_group" + - "\030\002 \001(\004:\0010\022\020\n\005nonce\030\003 \001(\004:\0010\"*\n\027RestoreSn" + - "apshotResponse\022\017\n\007proc_id\030\001 \002(\004\"H\n\025IsSna" + - "pshotDoneRequest\022/\n\010snapshot\030\001 \001(\0132\035.hba", - "se.pb.SnapshotDescription\"^\n\026IsSnapshotD" + - "oneResponse\022\023\n\004done\030\001 \001(\010:\005false\022/\n\010snap" + - "shot\030\002 \001(\0132\035.hbase.pb.SnapshotDescriptio" + - "n\"O\n\034IsRestoreSnapshotDoneRequest\022/\n\010sna" + - "pshot\030\001 \001(\0132\035.hbase.pb.SnapshotDescripti" + - "on\"4\n\035IsRestoreSnapshotDoneResponse\022\023\n\004d" + - "one\030\001 \001(\010:\005false\"F\n\033GetSchemaAlterStatus" + - "Request\022\'\n\ntable_name\030\001 \002(\0132\023.hbase.pb.T" + - "ableName\"T\n\034GetSchemaAlterStatusResponse" + - "\022\035\n\025yet_to_update_regions\030\001 \001(\r\022\025\n\rtotal", - "_regions\030\002 \001(\r\"\213\001\n\032GetTableDescriptorsRe" + - "quest\022(\n\013table_names\030\001 \003(\0132\023.hbase.pb.Ta" + - "bleName\022\r\n\005regex\030\002 \001(\t\022!\n\022include_sys_ta" + - "bles\030\003 \001(\010:\005false\022\021\n\tnamespace\030\004 \001(\t\"J\n\033" + - "GetTableDescriptorsResponse\022+\n\014table_sch" + - "ema\030\001 \003(\0132\025.hbase.pb.TableSchema\"[\n\024GetT" + - "ableNamesRequest\022\r\n\005regex\030\001 \001(\t\022!\n\022inclu" + - "de_sys_tables\030\002 \001(\010:\005false\022\021\n\tnamespace\030" + - "\003 \001(\t\"A\n\025GetTableNamesResponse\022(\n\013table_" + - "names\030\001 \003(\0132\023.hbase.pb.TableName\"?\n\024GetT", - "ableStateRequest\022\'\n\ntable_name\030\001 \002(\0132\023.h" + - "base.pb.TableName\"B\n\025GetTableStateRespon" + - "se\022)\n\013table_state\030\001 \002(\0132\024.hbase.pb.Table" + - "State\"\031\n\027GetClusterStatusRequest\"K\n\030GetC" + - "lusterStatusResponse\022/\n\016cluster_status\030\001" + - " \002(\0132\027.hbase.pb.ClusterStatus\"\030\n\026IsMaste" + - "rRunningRequest\"4\n\027IsMasterRunningRespon" + - "se\022\031\n\021is_master_running\030\001 \002(\010\"I\n\024ExecPro" + - "cedureRequest\0221\n\tprocedure\030\001 \002(\0132\036.hbase" + - ".pb.ProcedureDescription\"F\n\025ExecProcedur", - "eResponse\022\030\n\020expected_timeout\030\001 \001(\003\022\023\n\013r" + - "eturn_data\030\002 \001(\014\"K\n\026IsProcedureDoneReque" + - "st\0221\n\tprocedure\030\001 \001(\0132\036.hbase.pb.Procedu" + - 
"reDescription\"`\n\027IsProcedureDoneResponse" + - "\022\023\n\004done\030\001 \001(\010:\005false\0220\n\010snapshot\030\002 \001(\0132" + - "\036.hbase.pb.ProcedureDescription\",\n\031GetPr" + - "ocedureResultRequest\022\017\n\007proc_id\030\001 \002(\004\"\371\001" + - "\n\032GetProcedureResultResponse\0229\n\005state\030\001 " + - "\002(\0162*.hbase.pb.GetProcedureResultRespons" + - "e.State\022\022\n\nstart_time\030\002 \001(\004\022\023\n\013last_upda", - "te\030\003 \001(\004\022\016\n\006result\030\004 \001(\014\0224\n\texception\030\005 " + - "\001(\0132!.hbase.pb.ForeignExceptionMessage\"1" + - "\n\005State\022\r\n\tNOT_FOUND\020\000\022\013\n\007RUNNING\020\001\022\014\n\010F" + - "INISHED\020\002\"M\n\025AbortProcedureRequest\022\017\n\007pr" + - "oc_id\030\001 \002(\004\022#\n\025mayInterruptIfRunning\030\002 \001" + - "(\010:\004true\"6\n\026AbortProcedureResponse\022\034\n\024is" + - "_procedure_aborted\030\001 \002(\010\"\027\n\025ListProcedur" + - "esRequest\"@\n\026ListProceduresResponse\022&\n\tp" + - "rocedure\030\001 \003(\0132\023.hbase.pb.Procedure\"\315\001\n\017" + - "SetQuotaRequest\022\021\n\tuser_name\030\001 \001(\t\022\022\n\nus", - "er_group\030\002 \001(\t\022\021\n\tnamespace\030\003 \001(\t\022\'\n\ntab" + - "le_name\030\004 \001(\0132\023.hbase.pb.TableName\022\022\n\nre" + - "move_all\030\005 \001(\010\022\026\n\016bypass_globals\030\006 \001(\010\022+" + - "\n\010throttle\030\007 \001(\0132\031.hbase.pb.ThrottleRequ" + - "est\"\022\n\020SetQuotaResponse\"J\n\037MajorCompacti" + - "onTimestampRequest\022\'\n\ntable_name\030\001 \002(\0132\023" + - ".hbase.pb.TableName\"U\n(MajorCompactionTi" + - "mestampForRegionRequest\022)\n\006region\030\001 \002(\0132" + - "\031.hbase.pb.RegionSpecifier\"@\n MajorCompa" + - "ctionTimestampResponse\022\034\n\024compaction_tim", - "estamp\030\001 \002(\003\"\035\n\033SecurityCapabilitiesRequ" + - "est\"\354\001\n\034SecurityCapabilitiesResponse\022G\n\014" + - "capabilities\030\001 \003(\01621.hbase.pb.SecurityCa" + - "pabilitiesResponse.Capability\"\202\001\n\nCapabi" + - "lity\022\031\n\025SIMPLE_AUTHENTICATION\020\000\022\031\n\025SECUR" + - "E_AUTHENTICATION\020\001\022\021\n\rAUTHORIZATION\020\002\022\026\n" + - "\022CELL_AUTHORIZATION\020\003\022\023\n\017CELL_VISIBILITY" + - "\020\004*(\n\020MasterSwitchType\022\t\n\005SPLIT\020\000\022\t\n\005MER" + - "GE\020\0012\373(\n\rMasterService\022e\n\024GetSchemaAlter" + - "Status\022%.hbase.pb.GetSchemaAlterStatusRe", - "quest\032&.hbase.pb.GetSchemaAlterStatusRes" + - "ponse\022b\n\023GetTableDescriptors\022$.hbase.pb." + - "GetTableDescriptorsRequest\032%.hbase.pb.Ge" + - "tTableDescriptorsResponse\022P\n\rGetTableNam" + - "es\022\036.hbase.pb.GetTableNamesRequest\032\037.hba" + - "se.pb.GetTableNamesResponse\022Y\n\020GetCluste" + - "rStatus\022!.hbase.pb.GetClusterStatusReque" + - "st\032\".hbase.pb.GetClusterStatusResponse\022V" + - "\n\017IsMasterRunning\022 .hbase.pb.IsMasterRun" + - "ningRequest\032!.hbase.pb.IsMasterRunningRe", - "sponse\022D\n\tAddColumn\022\032.hbase.pb.AddColumn" + - "Request\032\033.hbase.pb.AddColumnResponse\022M\n\014" + - "DeleteColumn\022\035.hbase.pb.DeleteColumnRequ" + - "est\032\036.hbase.pb.DeleteColumnResponse\022M\n\014M" + - "odifyColumn\022\035.hbase.pb.ModifyColumnReque" + - "st\032\036.hbase.pb.ModifyColumnResponse\022G\n\nMo" + - "veRegion\022\033.hbase.pb.MoveRegionRequest\032\034." 
+ - "hbase.pb.MoveRegionResponse\022k\n\026DispatchM" + - "ergingRegions\022\'.hbase.pb.DispatchMerging" + - "RegionsRequest\032(.hbase.pb.DispatchMergin", - "gRegionsResponse\022M\n\014AssignRegion\022\035.hbase" + - ".pb.AssignRegionRequest\032\036.hbase.pb.Assig" + - "nRegionResponse\022S\n\016UnassignRegion\022\037.hbas" + - "e.pb.UnassignRegionRequest\032 .hbase.pb.Un" + - "assignRegionResponse\022P\n\rOfflineRegion\022\036." + - "hbase.pb.OfflineRegionRequest\032\037.hbase.pb" + - ".OfflineRegionResponse\022J\n\013DeleteTable\022\034." + - "hbase.pb.DeleteTableRequest\032\035.hbase.pb.D" + - "eleteTableResponse\022P\n\rtruncateTable\022\036.hb" + - "ase.pb.TruncateTableRequest\032\037.hbase.pb.T", - "runcateTableResponse\022J\n\013EnableTable\022\034.hb" + - "ase.pb.EnableTableRequest\032\035.hbase.pb.Ena" + - "bleTableResponse\022M\n\014DisableTable\022\035.hbase" + - ".pb.DisableTableRequest\032\036.hbase.pb.Disab" + - "leTableResponse\022J\n\013ModifyTable\022\034.hbase.p" + - "b.ModifyTableRequest\032\035.hbase.pb.ModifyTa" + - "bleResponse\022J\n\013CreateTable\022\034.hbase.pb.Cr" + - "eateTableRequest\032\035.hbase.pb.CreateTableR" + - "esponse\022A\n\010Shutdown\022\031.hbase.pb.ShutdownR" + - "equest\032\032.hbase.pb.ShutdownResponse\022G\n\nSt", - "opMaster\022\033.hbase.pb.StopMasterRequest\032\034." + - "hbase.pb.StopMasterResponse\022>\n\007Balance\022\030" + - ".hbase.pb.BalanceRequest\032\031.hbase.pb.Bala" + - "nceResponse\022_\n\022SetBalancerRunning\022#.hbas" + - "e.pb.SetBalancerRunningRequest\032$.hbase.p" + - "b.SetBalancerRunningResponse\022\\\n\021IsBalanc" + - "erEnabled\022\".hbase.pb.IsBalancerEnabledRe" + - "quest\032#.hbase.pb.IsBalancerEnabledRespon" + - "se\022k\n\026SetSplitOrMergeEnabled\022\'.hbase.pb." + - "SetSplitOrMergeEnabledRequest\032(.hbase.pb", - ".SetSplitOrMergeEnabledResponse\022h\n\025IsSpl" + - "itOrMergeEnabled\022&.hbase.pb.IsSplitOrMer" + - "geEnabledRequest\032\'.hbase.pb.IsSplitOrMer" + - "geEnabledResponse\022\217\001\n\"ReleaseSplitOrMerg" + - "eLockAndRollback\0223.hbase.pb.ReleaseSplit" + - "OrMergeLockAndRollbackRequest\0324.hbase.pb" + - ".ReleaseSplitOrMergeLockAndRollbackRespo" + - "nse\022D\n\tNormalize\022\032.hbase.pb.NormalizeReq" + - "uest\032\033.hbase.pb.NormalizeResponse\022e\n\024Set" + - "NormalizerRunning\022%.hbase.pb.SetNormaliz", - "erRunningRequest\032&.hbase.pb.SetNormalize" + - "rRunningResponse\022b\n\023IsNormalizerEnabled\022" + - "$.hbase.pb.IsNormalizerEnabledRequest\032%." + - "hbase.pb.IsNormalizerEnabledResponse\022S\n\016" + - "RunCatalogScan\022\037.hbase.pb.RunCatalogScan" + - "Request\032 .hbase.pb.RunCatalogScanRespons" + - "e\022e\n\024EnableCatalogJanitor\022%.hbase.pb.Ena" + - "bleCatalogJanitorRequest\032&.hbase.pb.Enab" + - "leCatalogJanitorResponse\022n\n\027IsCatalogJan" + - "itorEnabled\022(.hbase.pb.IsCatalogJanitorE", - "nabledRequest\032).hbase.pb.IsCatalogJanito" + - "rEnabledResponse\022^\n\021ExecMasterService\022#." + - "hbase.pb.CoprocessorServiceRequest\032$.hba" + - "se.pb.CoprocessorServiceResponse\022A\n\010Snap" + - "shot\022\031.hbase.pb.SnapshotRequest\032\032.hbase." + - "pb.SnapshotResponse\022h\n\025GetCompletedSnaps" + - "hots\022&.hbase.pb.GetCompletedSnapshotsReq" + - "uest\032\'.hbase.pb.GetCompletedSnapshotsRes" + - "ponse\022S\n\016DeleteSnapshot\022\037.hbase.pb.Delet" + - "eSnapshotRequest\032 .hbase.pb.DeleteSnapsh", - "otResponse\022S\n\016IsSnapshotDone\022\037.hbase.pb." 
+ - "IsSnapshotDoneRequest\032 .hbase.pb.IsSnaps" + - "hotDoneResponse\022V\n\017RestoreSnapshot\022 .hba" + - "se.pb.RestoreSnapshotRequest\032!.hbase.pb." + - "RestoreSnapshotResponse\022P\n\rExecProcedure" + - "\022\036.hbase.pb.ExecProcedureRequest\032\037.hbase" + - ".pb.ExecProcedureResponse\022W\n\024ExecProcedu" + - "reWithRet\022\036.hbase.pb.ExecProcedureReques" + - "t\032\037.hbase.pb.ExecProcedureResponse\022V\n\017Is" + - "ProcedureDone\022 .hbase.pb.IsProcedureDone", - "Request\032!.hbase.pb.IsProcedureDoneRespon" + - "se\022V\n\017ModifyNamespace\022 .hbase.pb.ModifyN" + - "amespaceRequest\032!.hbase.pb.ModifyNamespa" + - "ceResponse\022V\n\017CreateNamespace\022 .hbase.pb" + - ".CreateNamespaceRequest\032!.hbase.pb.Creat" + - "eNamespaceResponse\022V\n\017DeleteNamespace\022 ." + - "hbase.pb.DeleteNamespaceRequest\032!.hbase." + - "pb.DeleteNamespaceResponse\022k\n\026GetNamespa" + - "ceDescriptor\022\'.hbase.pb.GetNamespaceDesc" + - "riptorRequest\032(.hbase.pb.GetNamespaceDes", - "criptorResponse\022q\n\030ListNamespaceDescript" + - "ors\022).hbase.pb.ListNamespaceDescriptorsR" + - "equest\032*.hbase.pb.ListNamespaceDescripto" + - "rsResponse\022\206\001\n\037ListTableDescriptorsByNam" + - "espace\0220.hbase.pb.ListTableDescriptorsBy" + - "NamespaceRequest\0321.hbase.pb.ListTableDes" + - "criptorsByNamespaceResponse\022t\n\031ListTable" + - "NamesByNamespace\022*.hbase.pb.ListTableNam" + - "esByNamespaceRequest\032+.hbase.pb.ListTabl" + - "eNamesByNamespaceResponse\022P\n\rGetTableSta", - "te\022\036.hbase.pb.GetTableStateRequest\032\037.hba" + - "se.pb.GetTableStateResponse\022A\n\010SetQuota\022" + - "\031.hbase.pb.SetQuotaRequest\032\032.hbase.pb.Se" + - "tQuotaResponse\022x\n\037getLastMajorCompaction" + - "Timestamp\022).hbase.pb.MajorCompactionTime" + - "stampRequest\032*.hbase.pb.MajorCompactionT" + - "imestampResponse\022\212\001\n(getLastMajorCompact" + - "ionTimestampForRegion\0222.hbase.pb.MajorCo" + - "mpactionTimestampForRegionRequest\032*.hbas" + - "e.pb.MajorCompactionTimestampResponse\022_\n", - "\022getProcedureResult\022#.hbase.pb.GetProced" + - "ureResultRequest\032$.hbase.pb.GetProcedure" + - "ResultResponse\022h\n\027getSecurityCapabilitie" + - "s\022%.hbase.pb.SecurityCapabilitiesRequest" + - "\032&.hbase.pb.SecurityCapabilitiesResponse" + - "\022S\n\016AbortProcedure\022\037.hbase.pb.AbortProce" + - "dureRequest\032 .hbase.pb.AbortProcedureRes" + - "ponse\022S\n\016ListProcedures\022\037.hbase.pb.ListP" + - "roceduresRequest\032 .hbase.pb.ListProcedur" + - "esResponseBB\n*org.apache.hadoop.hbase.pr", - "otobuf.generatedB\014MasterProtosH\001\210\001\001\240\001\001" + "\004:\0010\"\'\n\024DisableTableResponse\022\017\n\007proc_id\030" + + "\001 \001(\004\"\224\001\n\022ModifyTableRequest\022\'\n\ntable_na" + + "me\030\001 \002(\0132\023.hbase.pb.TableName\022+\n\014table_s" + + "chema\030\002 \002(\0132\025.hbase.pb.TableSchema\022\026\n\013no" + + "nce_group\030\003 \001(\004:\0010\022\020\n\005nonce\030\004 \001(\004:\0010\"&\n\023" + + "ModifyTableResponse\022\017\n\007proc_id\030\001 \001(\004\"~\n\026" + + "CreateNamespaceRequest\022:\n\023namespaceDescr", + "iptor\030\001 \002(\0132\035.hbase.pb.NamespaceDescript" + + "or\022\026\n\013nonce_group\030\002 \001(\004:\0010\022\020\n\005nonce\030\003 \001(" + + "\004:\0010\"*\n\027CreateNamespaceResponse\022\017\n\007proc_" + + "id\030\001 \001(\004\"Y\n\026DeleteNamespaceRequest\022\025\n\rna" + + "mespaceName\030\001 \002(\t\022\026\n\013nonce_group\030\002 \001(\004:\001" + + 
"0\022\020\n\005nonce\030\003 \001(\004:\0010\"*\n\027DeleteNamespaceRe" + + "sponse\022\017\n\007proc_id\030\001 \001(\004\"~\n\026ModifyNamespa" + + "ceRequest\022:\n\023namespaceDescriptor\030\001 \002(\0132\035" + + ".hbase.pb.NamespaceDescriptor\022\026\n\013nonce_g" + + "roup\030\002 \001(\004:\0010\022\020\n\005nonce\030\003 \001(\004:\0010\"*\n\027Modif", + "yNamespaceResponse\022\017\n\007proc_id\030\001 \001(\004\"6\n\035G" + + "etNamespaceDescriptorRequest\022\025\n\rnamespac" + + "eName\030\001 \002(\t\"\\\n\036GetNamespaceDescriptorRes" + + "ponse\022:\n\023namespaceDescriptor\030\001 \002(\0132\035.hba" + + "se.pb.NamespaceDescriptor\"!\n\037ListNamespa" + + "ceDescriptorsRequest\"^\n ListNamespaceDes" + + "criptorsResponse\022:\n\023namespaceDescriptor\030" + + "\001 \003(\0132\035.hbase.pb.NamespaceDescriptor\"?\n&" + + "ListTableDescriptorsByNamespaceRequest\022\025" + + "\n\rnamespaceName\030\001 \002(\t\"U\n\'ListTableDescri", + "ptorsByNamespaceResponse\022*\n\013tableSchema\030" + + "\001 \003(\0132\025.hbase.pb.TableSchema\"9\n ListTabl" + + "eNamesByNamespaceRequest\022\025\n\rnamespaceNam" + + "e\030\001 \002(\t\"K\n!ListTableNamesByNamespaceResp" + + "onse\022&\n\ttableName\030\001 \003(\0132\023.hbase.pb.Table" + + "Name\"\021\n\017ShutdownRequest\"\022\n\020ShutdownRespo" + + "nse\"\023\n\021StopMasterRequest\"\024\n\022StopMasterRe" + + "sponse\"\037\n\016BalanceRequest\022\r\n\005force\030\001 \001(\010\"" + + "\'\n\017BalanceResponse\022\024\n\014balancer_ran\030\001 \002(\010" + + "\"<\n\031SetBalancerRunningRequest\022\n\n\002on\030\001 \002(", + "\010\022\023\n\013synchronous\030\002 \001(\010\"8\n\032SetBalancerRun" + + "ningResponse\022\032\n\022prev_balance_value\030\001 \001(\010" + + "\"\032\n\030IsBalancerEnabledRequest\",\n\031IsBalanc" + + "erEnabledResponse\022\017\n\007enabled\030\001 \002(\010\"\212\001\n\035S" + + "etSplitOrMergeEnabledRequest\022\017\n\007enabled\030" + + "\001 \002(\010\022\023\n\013synchronous\030\002 \001(\010\0220\n\014switch_typ" + + "es\030\003 \003(\0162\032.hbase.pb.MasterSwitchType\022\021\n\t" + + "skip_lock\030\004 \001(\010\"4\n\036SetSplitOrMergeEnable" + + "dResponse\022\022\n\nprev_value\030\001 \003(\010\"O\n\034IsSplit" + + "OrMergeEnabledRequest\022/\n\013switch_type\030\001 \002", + "(\0162\032.hbase.pb.MasterSwitchType\"0\n\035IsSpli" + + "tOrMergeEnabledResponse\022\017\n\007enabled\030\001 \002(\010" + + "\"+\n)ReleaseSplitOrMergeLockAndRollbackRe" + + "quest\",\n*ReleaseSplitOrMergeLockAndRollb" + + "ackResponse\"\022\n\020NormalizeRequest\"+\n\021Norma" + + "lizeResponse\022\026\n\016normalizer_ran\030\001 \002(\010\")\n\033" + + "SetNormalizerRunningRequest\022\n\n\002on\030\001 \002(\010\"" + + "=\n\034SetNormalizerRunningResponse\022\035\n\025prev_" + + "normalizer_value\030\001 \001(\010\"\034\n\032IsNormalizerEn" + + "abledRequest\".\n\033IsNormalizerEnabledRespo", + "nse\022\017\n\007enabled\030\001 \002(\010\"\027\n\025RunCatalogScanRe" + + "quest\"-\n\026RunCatalogScanResponse\022\023\n\013scan_" + + "result\030\001 \001(\005\"-\n\033EnableCatalogJanitorRequ" + + "est\022\016\n\006enable\030\001 \002(\010\"2\n\034EnableCatalogJani" + + "torResponse\022\022\n\nprev_value\030\001 \001(\010\" \n\036IsCat" + + "alogJanitorEnabledRequest\"0\n\037IsCatalogJa" + + "nitorEnabledResponse\022\r\n\005value\030\001 \002(\010\"B\n\017S" + + "napshotRequest\022/\n\010snapshot\030\001 \002(\0132\035.hbase" + + ".pb.SnapshotDescription\",\n\020SnapshotRespo" + + "nse\022\030\n\020expected_timeout\030\001 
\002(\003\"\036\n\034GetComp", + "letedSnapshotsRequest\"Q\n\035GetCompletedSna" + + "pshotsResponse\0220\n\tsnapshots\030\001 \003(\0132\035.hbas" + + "e.pb.SnapshotDescription\"H\n\025DeleteSnapsh" + + "otRequest\022/\n\010snapshot\030\001 \002(\0132\035.hbase.pb.S" + + "napshotDescription\"\030\n\026DeleteSnapshotResp" + + "onse\"s\n\026RestoreSnapshotRequest\022/\n\010snapsh" + + "ot\030\001 \002(\0132\035.hbase.pb.SnapshotDescription\022" + + "\026\n\013nonce_group\030\002 \001(\004:\0010\022\020\n\005nonce\030\003 \001(\004:\001" + + "0\"*\n\027RestoreSnapshotResponse\022\017\n\007proc_id\030" + + "\001 \002(\004\"H\n\025IsSnapshotDoneRequest\022/\n\010snapsh", + "ot\030\001 \001(\0132\035.hbase.pb.SnapshotDescription\"" + + "^\n\026IsSnapshotDoneResponse\022\023\n\004done\030\001 \001(\010:" + + "\005false\022/\n\010snapshot\030\002 \001(\0132\035.hbase.pb.Snap" + + "shotDescription\"O\n\034IsRestoreSnapshotDone" + + "Request\022/\n\010snapshot\030\001 \001(\0132\035.hbase.pb.Sna" + + "pshotDescription\"4\n\035IsRestoreSnapshotDon" + + "eResponse\022\023\n\004done\030\001 \001(\010:\005false\"F\n\033GetSch" + + "emaAlterStatusRequest\022\'\n\ntable_name\030\001 \002(" + + "\0132\023.hbase.pb.TableName\"T\n\034GetSchemaAlter" + + "StatusResponse\022\035\n\025yet_to_update_regions\030", + "\001 \001(\r\022\025\n\rtotal_regions\030\002 \001(\r\"\213\001\n\032GetTabl" + + "eDescriptorsRequest\022(\n\013table_names\030\001 \003(\013" + + "2\023.hbase.pb.TableName\022\r\n\005regex\030\002 \001(\t\022!\n\022" + + "include_sys_tables\030\003 \001(\010:\005false\022\021\n\tnames" + + "pace\030\004 \001(\t\"J\n\033GetTableDescriptorsRespons" + + "e\022+\n\014table_schema\030\001 \003(\0132\025.hbase.pb.Table" + + "Schema\"[\n\024GetTableNamesRequest\022\r\n\005regex\030" + + "\001 \001(\t\022!\n\022include_sys_tables\030\002 \001(\010:\005false" + + "\022\021\n\tnamespace\030\003 \001(\t\"A\n\025GetTableNamesResp" + + "onse\022(\n\013table_names\030\001 \003(\0132\023.hbase.pb.Tab", + "leName\"?\n\024GetTableStateRequest\022\'\n\ntable_" + + "name\030\001 \002(\0132\023.hbase.pb.TableName\"B\n\025GetTa" + + "bleStateResponse\022)\n\013table_state\030\001 \002(\0132\024." 
+ + "hbase.pb.TableState\"\031\n\027GetClusterStatusR" + + "equest\"K\n\030GetClusterStatusResponse\022/\n\016cl" + + "uster_status\030\001 \002(\0132\027.hbase.pb.ClusterSta" + + "tus\"\030\n\026IsMasterRunningRequest\"4\n\027IsMaste" + + "rRunningResponse\022\031\n\021is_master_running\030\001 " + + "\002(\010\"I\n\024ExecProcedureRequest\0221\n\tprocedure" + + "\030\001 \002(\0132\036.hbase.pb.ProcedureDescription\"F", + "\n\025ExecProcedureResponse\022\030\n\020expected_time" + + "out\030\001 \001(\003\022\023\n\013return_data\030\002 \001(\014\"K\n\026IsProc" + + "edureDoneRequest\0221\n\tprocedure\030\001 \001(\0132\036.hb" + + "ase.pb.ProcedureDescription\"`\n\027IsProcedu" + + "reDoneResponse\022\023\n\004done\030\001 \001(\010:\005false\0220\n\010s" + + "napshot\030\002 \001(\0132\036.hbase.pb.ProcedureDescri" + + "ption\",\n\031GetProcedureResultRequest\022\017\n\007pr" + + "oc_id\030\001 \002(\004\"\371\001\n\032GetProcedureResultRespon" + + "se\0229\n\005state\030\001 \002(\0162*.hbase.pb.GetProcedur" + + "eResultResponse.State\022\022\n\nstart_time\030\002 \001(", + "\004\022\023\n\013last_update\030\003 \001(\004\022\016\n\006result\030\004 \001(\014\0224" + + "\n\texception\030\005 \001(\0132!.hbase.pb.ForeignExce" + + "ptionMessage\"1\n\005State\022\r\n\tNOT_FOUND\020\000\022\013\n\007" + + "RUNNING\020\001\022\014\n\010FINISHED\020\002\"P\n\025AbortProcedur" + + "eRequest\022\017\n\007proc_id\030\001 \002(\004\022&\n\030may_interru" + + "pt_if_running\030\002 \001(\010:\004true\"6\n\026AbortProced" + + "ureResponse\022\034\n\024is_procedure_aborted\030\001 \002(" + + "\010\"\027\n\025ListProceduresRequest\"@\n\026ListProced" + + "uresResponse\022&\n\tprocedure\030\001 \003(\0132\023.hbase." + + "pb.Procedure\"\315\001\n\017SetQuotaRequest\022\021\n\tuser", + "_name\030\001 \001(\t\022\022\n\nuser_group\030\002 \001(\t\022\021\n\tnames" + + "pace\030\003 \001(\t\022\'\n\ntable_name\030\004 \001(\0132\023.hbase.p" + + "b.TableName\022\022\n\nremove_all\030\005 \001(\010\022\026\n\016bypas" + + "s_globals\030\006 \001(\010\022+\n\010throttle\030\007 \001(\0132\031.hbas" + + "e.pb.ThrottleRequest\"\022\n\020SetQuotaResponse" + + "\"J\n\037MajorCompactionTimestampRequest\022\'\n\nt" + + "able_name\030\001 \002(\0132\023.hbase.pb.TableName\"U\n(" + + "MajorCompactionTimestampForRegionRequest" + + "\022)\n\006region\030\001 \002(\0132\031.hbase.pb.RegionSpecif" + + "ier\"@\n MajorCompactionTimestampResponse\022", + "\034\n\024compaction_timestamp\030\001 \002(\003\"\035\n\033Securit" + + "yCapabilitiesRequest\"\354\001\n\034SecurityCapabil" + + "itiesResponse\022G\n\014capabilities\030\001 \003(\01621.hb" + + "ase.pb.SecurityCapabilitiesResponse.Capa" + + "bility\"\202\001\n\nCapability\022\031\n\025SIMPLE_AUTHENTI" + + "CATION\020\000\022\031\n\025SECURE_AUTHENTICATION\020\001\022\021\n\rA" + + "UTHORIZATION\020\002\022\026\n\022CELL_AUTHORIZATION\020\003\022\023" + + "\n\017CELL_VISIBILITY\020\004\"\233\001\n\023BackupTablesRequ" + + "est\022\"\n\004type\030\001 \002(\0162\024.hbase.pb.BackupType\022" + + "#\n\006tables\030\002 \003(\0132\023.hbase.pb.TableName\022\027\n\017", + "target_root_dir\030\003 \002(\t\022\017\n\007workers\030\004 \001(\003\022\021" + + "\n\tbandwidth\030\005 \001(\003\":\n\024BackupTablesRespons" + + "e\022\017\n\007proc_id\030\001 \001(\004\022\021\n\tbackup_id\030\002 \001(\t*(\n" + + "\020MasterSwitchType\022\t\n\005SPLIT\020\000\022\t\n\005MERGE\020\0012" + + "\312)\n\rMasterService\022e\n\024GetSchemaAlterStatu" + + 
"s\022%.hbase.pb.GetSchemaAlterStatusRequest" + + "\032&.hbase.pb.GetSchemaAlterStatusResponse" + + "\022b\n\023GetTableDescriptors\022$.hbase.pb.GetTa" + + "bleDescriptorsRequest\032%.hbase.pb.GetTabl" + + "eDescriptorsResponse\022P\n\rGetTableNames\022\036.", + "hbase.pb.GetTableNamesRequest\032\037.hbase.pb" + + ".GetTableNamesResponse\022Y\n\020GetClusterStat" + + "us\022!.hbase.pb.GetClusterStatusRequest\032\"." + + "hbase.pb.GetClusterStatusResponse\022V\n\017IsM" + + "asterRunning\022 .hbase.pb.IsMasterRunningR" + + "equest\032!.hbase.pb.IsMasterRunningRespons" + + "e\022D\n\tAddColumn\022\032.hbase.pb.AddColumnReque" + + "st\032\033.hbase.pb.AddColumnResponse\022M\n\014Delet" + + "eColumn\022\035.hbase.pb.DeleteColumnRequest\032\036" + + ".hbase.pb.DeleteColumnResponse\022M\n\014Modify", + "Column\022\035.hbase.pb.ModifyColumnRequest\032\036." + + "hbase.pb.ModifyColumnResponse\022G\n\nMoveReg" + + "ion\022\033.hbase.pb.MoveRegionRequest\032\034.hbase" + + ".pb.MoveRegionResponse\022k\n\026DispatchMergin" + + "gRegions\022\'.hbase.pb.DispatchMergingRegio" + + "nsRequest\032(.hbase.pb.DispatchMergingRegi" + + "onsResponse\022M\n\014AssignRegion\022\035.hbase.pb.A" + + "ssignRegionRequest\032\036.hbase.pb.AssignRegi" + + "onResponse\022S\n\016UnassignRegion\022\037.hbase.pb." + + "UnassignRegionRequest\032 .hbase.pb.Unassig", + "nRegionResponse\022P\n\rOfflineRegion\022\036.hbase" + + ".pb.OfflineRegionRequest\032\037.hbase.pb.Offl" + + "ineRegionResponse\022J\n\013DeleteTable\022\034.hbase" + + ".pb.DeleteTableRequest\032\035.hbase.pb.Delete" + + "TableResponse\022P\n\rtruncateTable\022\036.hbase.p" + + "b.TruncateTableRequest\032\037.hbase.pb.Trunca" + + "teTableResponse\022J\n\013EnableTable\022\034.hbase.p" + + "b.EnableTableRequest\032\035.hbase.pb.EnableTa" + + "bleResponse\022M\n\014DisableTable\022\035.hbase.pb.D" + + "isableTableRequest\032\036.hbase.pb.DisableTab", + "leResponse\022J\n\013ModifyTable\022\034.hbase.pb.Mod" + + "ifyTableRequest\032\035.hbase.pb.ModifyTableRe" + + "sponse\022J\n\013CreateTable\022\034.hbase.pb.CreateT" + + "ableRequest\032\035.hbase.pb.CreateTableRespon" + + "se\022A\n\010Shutdown\022\031.hbase.pb.ShutdownReques" + + "t\032\032.hbase.pb.ShutdownResponse\022G\n\nStopMas" + + "ter\022\033.hbase.pb.StopMasterRequest\032\034.hbase" + + ".pb.StopMasterResponse\022>\n\007Balance\022\030.hbas" + + "e.pb.BalanceRequest\032\031.hbase.pb.BalanceRe" + + "sponse\022_\n\022SetBalancerRunning\022#.hbase.pb.", + "SetBalancerRunningRequest\032$.hbase.pb.Set" + + "BalancerRunningResponse\022\\\n\021IsBalancerEna" + + "bled\022\".hbase.pb.IsBalancerEnabledRequest" + + "\032#.hbase.pb.IsBalancerEnabledResponse\022k\n" + + "\026SetSplitOrMergeEnabled\022\'.hbase.pb.SetSp" + + "litOrMergeEnabledRequest\032(.hbase.pb.SetS" + + "plitOrMergeEnabledResponse\022h\n\025IsSplitOrM" + + "ergeEnabled\022&.hbase.pb.IsSplitOrMergeEna" + + "bledRequest\032\'.hbase.pb.IsSplitOrMergeEna" + + "bledResponse\022\217\001\n\"ReleaseSplitOrMergeLock", + "AndRollback\0223.hbase.pb.ReleaseSplitOrMer" + + "geLockAndRollbackRequest\0324.hbase.pb.Rele" + + "aseSplitOrMergeLockAndRollbackResponse\022D" + + "\n\tNormalize\022\032.hbase.pb.NormalizeRequest\032" + + "\033.hbase.pb.NormalizeResponse\022e\n\024SetNorma" + + "lizerRunning\022%.hbase.pb.SetNormalizerRun" + + "ningRequest\032&.hbase.pb.SetNormalizerRunn" + + "ingResponse\022b\n\023IsNormalizerEnabled\022$.hba" + + "se.pb.IsNormalizerEnabledRequest\032%.hbase" + + ".pb.IsNormalizerEnabledResponse\022S\n\016RunCa", + 
"talogScan\022\037.hbase.pb.RunCatalogScanReque" + + "st\032 .hbase.pb.RunCatalogScanResponse\022e\n\024" + + "EnableCatalogJanitor\022%.hbase.pb.EnableCa" + + "talogJanitorRequest\032&.hbase.pb.EnableCat" + + "alogJanitorResponse\022n\n\027IsCatalogJanitorE" + + "nabled\022(.hbase.pb.IsCatalogJanitorEnable" + + "dRequest\032).hbase.pb.IsCatalogJanitorEnab" + + "ledResponse\022^\n\021ExecMasterService\022#.hbase" + + ".pb.CoprocessorServiceRequest\032$.hbase.pb" + + ".CoprocessorServiceResponse\022A\n\010Snapshot\022", + "\031.hbase.pb.SnapshotRequest\032\032.hbase.pb.Sn" + + "apshotResponse\022h\n\025GetCompletedSnapshots\022" + + "&.hbase.pb.GetCompletedSnapshotsRequest\032" + + "\'.hbase.pb.GetCompletedSnapshotsResponse" + + "\022S\n\016DeleteSnapshot\022\037.hbase.pb.DeleteSnap" + + "shotRequest\032 .hbase.pb.DeleteSnapshotRes" + + "ponse\022S\n\016IsSnapshotDone\022\037.hbase.pb.IsSna" + + "pshotDoneRequest\032 .hbase.pb.IsSnapshotDo" + + "neResponse\022V\n\017RestoreSnapshot\022 .hbase.pb" + + ".RestoreSnapshotRequest\032!.hbase.pb.Resto", + "reSnapshotResponse\022P\n\rExecProcedure\022\036.hb" + + "ase.pb.ExecProcedureRequest\032\037.hbase.pb.E" + + "xecProcedureResponse\022W\n\024ExecProcedureWit" + + "hRet\022\036.hbase.pb.ExecProcedureRequest\032\037.h" + + "base.pb.ExecProcedureResponse\022V\n\017IsProce" + + "dureDone\022 .hbase.pb.IsProcedureDoneReque" + + "st\032!.hbase.pb.IsProcedureDoneResponse\022V\n" + + "\017ModifyNamespace\022 .hbase.pb.ModifyNamesp" + + "aceRequest\032!.hbase.pb.ModifyNamespaceRes" + + "ponse\022V\n\017CreateNamespace\022 .hbase.pb.Crea", + "teNamespaceRequest\032!.hbase.pb.CreateName" + + "spaceResponse\022V\n\017DeleteNamespace\022 .hbase" + + ".pb.DeleteNamespaceRequest\032!.hbase.pb.De" + + "leteNamespaceResponse\022k\n\026GetNamespaceDes" + + "criptor\022\'.hbase.pb.GetNamespaceDescripto" + + "rRequest\032(.hbase.pb.GetNamespaceDescript" + + "orResponse\022q\n\030ListNamespaceDescriptors\022)" + + ".hbase.pb.ListNamespaceDescriptorsReques" + + "t\032*.hbase.pb.ListNamespaceDescriptorsRes" + + "ponse\022\206\001\n\037ListTableDescriptorsByNamespac", + "e\0220.hbase.pb.ListTableDescriptorsByNames" + + "paceRequest\0321.hbase.pb.ListTableDescript" + + "orsByNamespaceResponse\022t\n\031ListTableNames" + + "ByNamespace\022*.hbase.pb.ListTableNamesByN" + + "amespaceRequest\032+.hbase.pb.ListTableName" + + "sByNamespaceResponse\022P\n\rGetTableState\022\036." + + "hbase.pb.GetTableStateRequest\032\037.hbase.pb" + + ".GetTableStateResponse\022A\n\010SetQuota\022\031.hba" + + "se.pb.SetQuotaRequest\032\032.hbase.pb.SetQuot" + + "aResponse\022x\n\037getLastMajorCompactionTimes", + "tamp\022).hbase.pb.MajorCompactionTimestamp" + + "Request\032*.hbase.pb.MajorCompactionTimest" + + "ampResponse\022\212\001\n(getLastMajorCompactionTi" + + "mestampForRegion\0222.hbase.pb.MajorCompact" + + "ionTimestampForRegionRequest\032*.hbase.pb." 
+ + "MajorCompactionTimestampResponse\022_\n\022getP" + + "rocedureResult\022#.hbase.pb.GetProcedureRe" + + "sultRequest\032$.hbase.pb.GetProcedureResul" + + "tResponse\022h\n\027getSecurityCapabilities\022%.h" + + "base.pb.SecurityCapabilitiesRequest\032&.hb", + "ase.pb.SecurityCapabilitiesResponse\022S\n\016A" + + "bortProcedure\022\037.hbase.pb.AbortProcedureR" + + "equest\032 .hbase.pb.AbortProcedureResponse" + + "\022S\n\016ListProcedures\022\037.hbase.pb.ListProced" + + "uresRequest\032 .hbase.pb.ListProceduresRes" + + "ponse\022M\n\014backupTables\022\035.hbase.pb.BackupT" + + "ablesRequest\032\036.hbase.pb.BackupTablesResp" + + "onseBB\n*org.apache.hadoop.hbase.protobuf" + + ".generatedB\014MasterProtosH\001\210\001\001\240\001\001" }; com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { @@ -66830,6 +68710,18 @@ public final class MasterProtos { com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_SecurityCapabilitiesResponse_descriptor, new java.lang.String[] { "Capabilities", }); + internal_static_hbase_pb_BackupTablesRequest_descriptor = + getDescriptor().getMessageTypes().get(111); + internal_static_hbase_pb_BackupTablesRequest_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_hbase_pb_BackupTablesRequest_descriptor, + new java.lang.String[] { "Type", "Tables", "TargetRootDir", "Workers", "Bandwidth", }); + internal_static_hbase_pb_BackupTablesResponse_descriptor = + getDescriptor().getMessageTypes().get(112); + internal_static_hbase_pb_BackupTablesResponse_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_hbase_pb_BackupTablesResponse_descriptor, + new java.lang.String[] { "ProcId", "BackupId", }); return null; } }; @@ -66837,6 +68729,7 @@ public final class MasterProtos { .internalBuildGeneratedFileFrom(descriptorData, new com.google.protobuf.Descriptors.FileDescriptor[] { org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.getDescriptor(), + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.getDescriptor(), org.apache.hadoop.hbase.protobuf.generated.ClientProtos.getDescriptor(), org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.getDescriptor(), org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.getDescriptor(), diff --git a/hbase-protocol/src/main/protobuf/Master.proto b/hbase-protocol/src/main/protobuf/Master.proto index 23bbbf8..2360746 100644 --- a/hbase-protocol/src/main/protobuf/Master.proto +++ b/hbase-protocol/src/main/protobuf/Master.proto @@ -27,6 +27,7 @@ option java_generate_equals_and_hash = true; option optimize_for = SPEED; import "HBase.proto"; +import "Backup.proto"; import "Client.proto"; import "ClusterStatus.proto"; import "ErrorHandling.proto"; @@ -498,7 +499,7 @@ message GetProcedureResultResponse { message AbortProcedureRequest { required uint64 proc_id = 1; - optional bool mayInterruptIfRunning = 2 [default = true]; + optional bool may_interrupt_if_running = 2 [default = true]; } message AbortProcedureResponse { @@ -553,6 +554,19 @@ message SecurityCapabilitiesResponse { repeated Capability capabilities = 1; } +message BackupTablesRequest { + required BackupType type = 1; + repeated TableName tables = 2; + required string target_root_dir = 3; + optional int64 workers = 4; + optional int64 bandwidth = 5; +} + +message BackupTablesResponse { + optional uint64 proc_id = 1; + 
optional string backup_id = 2; +} + service MasterService { /** Used by the client to get the number of regions that have received the updated schema */ rpc GetSchemaAlterStatus(GetSchemaAlterStatusRequest) @@ -828,4 +842,8 @@ service MasterService { /** returns a list of procedures */ rpc ListProcedures(ListProceduresRequest) returns(ListProceduresResponse); + + /** backup table set */ + rpc backupTables(BackupTablesRequest) + returns(BackupTablesResponse); } diff --git a/hbase-server/pom.xml b/hbase-server/pom.xml index a7954ec..d1c678e 100644 --- a/hbase-server/pom.xml +++ b/hbase-server/pom.xml @@ -394,6 +394,11 @@ ${project.version} true + + org.apache.hadoop + hadoop-distcp + ${hadoop-two.version} + org.apache.httpcomponents httpclient @@ -407,6 +412,11 @@ commons-collections + org.apache.hadoop + hadoop-distcp + ${hadoop-two.version} + + org.apache.hbase hbase-hadoop-compat diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/BaseCoordinatedStateManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/BaseCoordinatedStateManager.java index ae36f08..d839f74 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/BaseCoordinatedStateManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/BaseCoordinatedStateManager.java @@ -17,7 +17,11 @@ */ package org.apache.hadoop.hbase.coordination; +import java.io.IOException; + import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.procedure.ProcedureCoordinatorRpcs; +import org.apache.hadoop.hbase.procedure.ProcedureMemberRpcs; import org.apache.hadoop.hbase.CoordinatedStateManager; import org.apache.hadoop.hbase.Server; @@ -51,8 +55,21 @@ public abstract class BaseCoordinatedStateManager implements CoordinatedStateMan * Method to retrieve coordination for split log worker */ public abstract SplitLogWorkerCoordination getSplitLogWorkerCoordination(); + /** * Method to retrieve coordination for split log manager */ public abstract SplitLogManagerCoordination getSplitLogManagerCoordination(); + /** + * Method to retrieve {@link org.apache.hadoop.hbase.procedure.ProcedureCoordinatorRpcs} + */ + public abstract ProcedureCoordinatorRpcs + getProcedureCoordinatorRpcs(String procType, String coordNode) throws IOException; + + /** + * Method to retrieve {@link org.apache.hadoop.hbase.procedure.ProcedureMemberRpcs} + */ + public abstract ProcedureMemberRpcs + getProcedureMemberRpcs(String procType) throws IOException; + } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkCoordinatedStateManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkCoordinatedStateManager.java index 3e89be7..7cf4aab 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkCoordinatedStateManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkCoordinatedStateManager.java @@ -17,9 +17,15 @@ */ package org.apache.hadoop.hbase.coordination; +import java.io.IOException; + import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.Server; +import org.apache.hadoop.hbase.procedure.ProcedureCoordinatorRpcs; +import org.apache.hadoop.hbase.procedure.ProcedureMemberRpcs; +import org.apache.hadoop.hbase.procedure.ZKProcedureCoordinatorRpcs; +import org.apache.hadoop.hbase.procedure.ZKProcedureMemberRpcs; import 
org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; /** @@ -49,9 +55,21 @@ public class ZkCoordinatedStateManager extends BaseCoordinatedStateManager { @Override public SplitLogWorkerCoordination getSplitLogWorkerCoordination() { return splitLogWorkerCoordination; - } + } + @Override public SplitLogManagerCoordination getSplitLogManagerCoordination() { return splitLogManagerCoordination; } + + @Override + public ProcedureCoordinatorRpcs getProcedureCoordinatorRpcs(String procType, String coordNode) + throws IOException { + return new ZKProcedureCoordinatorRpcs(watcher, procType, coordNode); + } + + @Override + public ProcedureMemberRpcs getProcedureMemberRpcs(String procType) throws IOException { + return new ZKProcedureMemberRpcs(watcher, procType); + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/WALInputFormat.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/WALInputFormat.java index 02fcbba..2fa7a70 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/WALInputFormat.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/WALInputFormat.java @@ -27,22 +27,24 @@ import java.util.List; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.regionserver.wal.WALEdit; -import org.apache.hadoop.hbase.wal.WALFactory; -import org.apache.hadoop.hbase.wal.WALKey; +import org.apache.hadoop.hbase.wal.WAL; import org.apache.hadoop.hbase.wal.WAL.Entry; import org.apache.hadoop.hbase.wal.WAL.Reader; +import org.apache.hadoop.hbase.wal.WALFactory; +import org.apache.hadoop.hbase.wal.WALKey; import org.apache.hadoop.io.Writable; import org.apache.hadoop.mapreduce.InputFormat; import org.apache.hadoop.mapreduce.InputSplit; import org.apache.hadoop.mapreduce.JobContext; import org.apache.hadoop.mapreduce.RecordReader; import org.apache.hadoop.mapreduce.TaskAttemptContext; +import org.apache.hadoop.util.StringUtils; /** * Simple {@link InputFormat} for {@link org.apache.hadoop.hbase.wal.WAL} files. 
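
The getProcedureCoordinatorRpcs/getProcedureMemberRpcs factory methods added to the coordination layer above let procedure managers obtain their RPC endpoints without instantiating the ZooKeeper-specific classes themselves, and without catching KeeperException. A minimal sketch of region-server-side usage under stated assumptions: the "backup" signature string is illustrative, and the cast from CoordinatedStateManager to BaseCoordinatedStateManager mirrors how other managers in the code base obtain the concrete type.

import java.io.IOException;

import org.apache.hadoop.hbase.coordination.BaseCoordinatedStateManager;
import org.apache.hadoop.hbase.procedure.ProcedureMemberRpcs;
import org.apache.hadoop.hbase.regionserver.RegionServerServices;

public class ExampleProcedureManagerInit {
  // Hypothetical helper: obtain member RPCs for an illustrative "backup" procedure type.
  static ProcedureMemberRpcs initMemberRpcs(RegionServerServices rss) throws IOException {
    BaseCoordinatedStateManager coord =
        (BaseCoordinatedStateManager) rss.getCoordinatedStateManager();
    // Under the ZK implementation this returns a ZKProcedureMemberRpcs; failures now
    // surface to the caller as IOException rather than KeeperException.
    return coord.getProcedureMemberRpcs("backup");
  }
}
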
@@ -231,21 +233,30 @@ public class WALInputFormat extends InputFormat { List getSplits(final JobContext context, final String startKey, final String endKey) throws IOException, InterruptedException { Configuration conf = context.getConfiguration(); - Path inputDir = new Path(conf.get("mapreduce.input.fileinputformat.inputdir")); + + Path[] inputPaths = getInputPaths(conf); long startTime = conf.getLong(startKey, Long.MIN_VALUE); long endTime = conf.getLong(endKey, Long.MAX_VALUE); - FileSystem fs = inputDir.getFileSystem(conf); - List files = getFiles(fs, inputDir, startTime, endTime); - - List splits = new ArrayList(files.size()); - for (FileStatus file : files) { + List allFiles = new ArrayList(); + for(Path inputPath: inputPaths){ + FileSystem fs = inputPath.getFileSystem(conf); + List files = getFiles(fs, inputPath, startTime, endTime); + allFiles.addAll(files); + } + List splits = new ArrayList(allFiles.size()); + for (FileStatus file : allFiles) { splits.add(new WALSplit(file.getPath().toString(), file.getLen(), startTime, endTime)); } return splits; } + private Path[] getInputPaths(Configuration conf) { + String inpDirs = conf.get("mapreduce.input.fileinputformat.inputdir"); + return StringUtils.stringToPath(inpDirs.split(",")); + } + private List getFiles(FileSystem fs, Path dir, long startTime, long endTime) throws IOException { List result = new ArrayList(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/WALPlayer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/WALPlayer.java index c6fefb2..6207d77 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/WALPlayer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/WALPlayer.java @@ -48,6 +48,8 @@ import org.apache.hadoop.hbase.regionserver.wal.WALCellCodec; import org.apache.hadoop.hbase.regionserver.wal.WALEdit; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.wal.WALKey; +import org.apache.hadoop.mapreduce.Counter; +import org.apache.hadoop.mapreduce.Counters; import org.apache.hadoop.mapreduce.Job; import org.apache.hadoop.mapreduce.Mapper; import org.apache.hadoop.mapreduce.lib.input.FileInputFormat; @@ -70,9 +72,9 @@ import org.apache.hadoop.util.ToolRunner; public class WALPlayer extends Configured implements Tool { private static final Log LOG = LogFactory.getLog(WALPlayer.class); final static String NAME = "WALPlayer"; - final static String BULK_OUTPUT_CONF_KEY = "wal.bulk.output"; - final static String TABLES_KEY = "wal.input.tables"; - final static String TABLE_MAP_KEY = "wal.input.tablesmap"; + public final static String BULK_OUTPUT_CONF_KEY = "wal.bulk.output"; + public final static String TABLES_KEY = "wal.input.tables"; + public final static String TABLE_MAP_KEY = "wal.input.tablesmap"; // This relies on Hadoop Configuration to handle warning about deprecated configs and // to set the correct non-deprecated configs when an old one shows up. 
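
With the getInputPaths() change above, WALInputFormat no longer assumes a single WAL directory: the value of mapreduce.input.fileinputformat.inputdir is split on commas and every listed directory is scanned for files in the configured time range. A hedged configuration sketch follows; the paths and timestamps are illustrative, and the START_TIME_KEY/END_TIME_KEY constants (the same ones WALPlayer prints in its usage output) are assumed to be accessible from outside the package.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.mapreduce.WALInputFormat;
import org.apache.hadoop.mapreduce.Job;

public class WalInputExample {
  public static Job newWalJob() throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Two WAL directories, comma separated; the new getInputPaths() splits this value.
    conf.set("mapreduce.input.fileinputformat.inputdir",
        "hdfs:///backup/wals/incr1,hdfs:///backup/wals/incr2");
    // Optional time-range filtering of the WAL files that get turned into splits.
    conf.setLong(WALInputFormat.START_TIME_KEY, 1457000000000L);
    conf.setLong(WALInputFormat.END_TIME_KEY, 1458000000000L);
    Job job = Job.getInstance(conf, "wal-input-example");
    job.setInputFormatClass(WALInputFormat.class);
    return job;
  }
}
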
@@ -86,6 +88,9 @@ public class WALPlayer extends Configured implements Tool { private final static String JOB_NAME_CONF_KEY = "mapreduce.job.name"; + public WALPlayer(){ + } + protected WALPlayer(final Configuration c) { super(c); } @@ -95,7 +100,7 @@ public class WALPlayer extends Configured implements Tool { * This one can be used together with {@link KeyValueSortReducer} */ static class WALKeyValueMapper - extends Mapper { + extends Mapper { private byte[] table; @Override @@ -107,7 +112,9 @@ public class WALPlayer extends Configured implements Tool { if (Bytes.equals(table, key.getTablename().getName())) { for (Cell cell : value.getCells()) { KeyValue kv = KeyValueUtil.ensureKeyValue(cell); - if (WALEdit.isMetaEditFamily(kv)) continue; + if (WALEdit.isMetaEditFamily(kv)) { + continue; + } context.write(new ImmutableBytesWritable(CellUtil.cloneRow(kv)), kv); } } @@ -133,9 +140,10 @@ public class WALPlayer extends Configured implements Tool { * a running HBase instance. */ protected static class WALMapper - extends Mapper { + extends Mapper { private Map tables = new TreeMap(); - + + @Override public void map(WALKey key, WALEdit value, Context context) throws IOException { @@ -148,9 +156,12 @@ public class WALPlayer extends Configured implements Tool { Put put = null; Delete del = null; Cell lastCell = null; + for (Cell cell : value.getCells()) { // filtering WAL meta entries - if (WALEdit.isMetaEditFamily(cell)) continue; + if (WALEdit.isMetaEditFamily(cell)) { + continue; + } // Allow a subclass filter out this cell. if (filter(context, cell)) { @@ -161,8 +172,12 @@ public class WALPlayer extends Configured implements Tool { if (lastCell == null || lastCell.getTypeByte() != cell.getTypeByte() || !CellUtil.matchingRow(lastCell, cell)) { // row or type changed, write out aggregate KVs. - if (put != null) context.write(tableOut, put); - if (del != null) context.write(tableOut, del); + if (put != null) { + context.write(tableOut, put); + } + if (del != null) { + context.write(tableOut, del); + } if (CellUtil.isDelete(cell)) { del = new Delete(CellUtil.cloneRow(cell)); } else { @@ -178,8 +193,12 @@ public class WALPlayer extends Configured implements Tool { lastCell = cell; } // write residual KVs - if (put != null) context.write(tableOut, put); - if (del != null) context.write(tableOut, del); + if (put != null) { + context.write(tableOut, put); + } + if (del != null) { + context.write(tableOut, del); + } } } catch (InterruptedException e) { e.printStackTrace(); @@ -187,7 +206,8 @@ public class WALPlayer extends Configured implements Tool { } /** - * @param cell + * Filter cell + * @param cell cell * @return Return true if we are to emit this cell. */ protected boolean filter(Context context, final Cell cell) { @@ -195,12 +215,17 @@ public class WALPlayer extends Configured implements Tool { } @Override + protected void + cleanup(Mapper.Context context) + throws IOException, InterruptedException { + super.cleanup(context); + } + + @Override public void setup(Context context) throws IOException { String[] tableMap = context.getConfiguration().getStrings(TABLE_MAP_KEY); String[] tablesToUse = context.getConfiguration().getStrings(TABLES_KEY); - if (tablesToUse == null && tableMap == null) { - // Then user wants all tables. 
- } else if (tablesToUse == null || tableMap == null || tablesToUse.length != tableMap.length) { + if (tablesToUse == null || tableMap == null || tablesToUse.length != tableMap.length) { // this can only happen when WALMapper is used directly by a class other than WALPlayer throw new IOException("No tables or incorrect table mapping specified."); } @@ -216,7 +241,9 @@ public class WALPlayer extends Configured implements Tool { void setupTime(Configuration conf, String option) throws IOException { String val = conf.get(option); - if (null == val) return; + if (null == val) { + return; + } long ms; try { // first try to parse in user friendly form @@ -246,7 +273,7 @@ public class WALPlayer extends Configured implements Tool { Configuration conf = getConf(); setupTime(conf, HLogInputFormat.START_TIME_KEY); setupTime(conf, HLogInputFormat.END_TIME_KEY); - Path inputDir = new Path(args[0]); + String inputDirs = args[0]; String[] tables = args[1].split(","); String[] tableMap; if (args.length > 2) { @@ -260,13 +287,18 @@ public class WALPlayer extends Configured implements Tool { } conf.setStrings(TABLES_KEY, tables); conf.setStrings(TABLE_MAP_KEY, tableMap); - Job job = Job.getInstance(conf, conf.get(JOB_NAME_CONF_KEY, NAME + "_" + inputDir)); + Job job = Job.getInstance(conf, conf.get(JOB_NAME_CONF_KEY, NAME + "_" + System.currentTimeMillis())); job.setJarByClass(WALPlayer.class); - FileInputFormat.setInputPaths(job, inputDir); + + FileInputFormat.addInputPaths(job, inputDirs); + job.setInputFormatClass(WALInputFormat.class); job.setMapOutputKeyClass(ImmutableBytesWritable.class); + String hfileOutPath = conf.get(BULK_OUTPUT_CONF_KEY); if (hfileOutPath != null) { + LOG.debug("add incremental job :"+hfileOutPath); + // the bulk HFile case if (tables.length != 1) { throw new IOException("Exactly one table must be specified for the bulk export option"); @@ -282,6 +314,8 @@ public class WALPlayer extends Configured implements Tool { RegionLocator regionLocator = conn.getRegionLocator(tableName)) { HFileOutputFormat2.configureIncrementalLoad(job, table.getTableDescriptor(), regionLocator); } + LOG.debug("success configuring load incremental job"); + TableMapReduceUtil.addDependencyJars(job.getConfiguration(), com.google.common.base.Preconditions.class); } else { @@ -302,7 +336,9 @@ public class WALPlayer extends Configured implements Tool { return job; } - /* + + /** + * Print usage * @param errorMsg Error message. Can be null. 
*/ private void usage(final String errorMsg) { @@ -312,7 +348,8 @@ public class WALPlayer extends Configured implements Tool { System.err.println("Usage: " + NAME + " [options] []"); System.err.println("Read all WAL entries for ."); System.err.println("If no tables (\"\") are specific, all tables are imported."); - System.err.println("(Careful, even -ROOT- and hbase:meta entries will be imported in that case.)"); + System.err.println("(Careful, even -ROOT- and hbase:meta entries will be imported"+ + " in that case.)"); System.err.println("Otherwise is a comma separated list of tables.\n"); System.err.println("The WAL entries can be mapped to new set of tables via ."); System.err.println(" is a command separated list of targettables."); @@ -325,10 +362,10 @@ public class WALPlayer extends Configured implements Tool { System.err.println(" -D" + WALInputFormat.START_TIME_KEY + "=[date|ms]"); System.err.println(" -D" + WALInputFormat.END_TIME_KEY + "=[date|ms]"); System.err.println(" -D " + JOB_NAME_CONF_KEY - + "=jobName - use the specified mapreduce job name for the wal player"); + + "=jobName - use the specified mapreduce job name for the wal player"); System.err.println("For performance also consider the following options:\n" - + " -Dmapreduce.map.speculative=false\n" - + " -Dmapreduce.reduce.speculative=false"); + + " -Dmapreduce.map.speculative=false\n" + + " -Dmapreduce.reduce.speculative=false"); } /** @@ -349,6 +386,7 @@ public class WALPlayer extends Configured implements Tool { System.exit(-1); } Job job = createSubmittableJob(args); - return job.waitForCompletion(true) ? 0 : 1; + int result =job.waitForCompletion(true) ? 0 : 1; + return result; } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java index 9196368..93a5afd 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java @@ -53,6 +53,7 @@ import javax.servlet.http.HttpServletResponse; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.ClusterStatus; import org.apache.hadoop.hbase.CoordinatedStateException; @@ -78,6 +79,13 @@ import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableNotDisabledException; import org.apache.hadoop.hbase.TableNotFoundException; import org.apache.hadoop.hbase.UnknownRegionException; +import org.apache.hadoop.hbase.backup.BackupType; +import org.apache.hadoop.hbase.backup.HBackupFileSystem; +import org.apache.hadoop.hbase.backup.impl.BackupManager; +import org.apache.hadoop.hbase.backup.impl.BackupRestoreConstants; +import org.apache.hadoop.hbase.backup.impl.BackupSystemTable; +import org.apache.hadoop.hbase.backup.master.FullTableBackupProcedure; +import org.apache.hadoop.hbase.backup.master.IncrementalTableBackupProcedure; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.client.MasterSwitchType; import org.apache.hadoop.hbase.client.Result; @@ -147,6 +155,7 @@ import org.apache.hadoop.hbase.util.Addressing; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.CompressionTest; import org.apache.hadoop.hbase.util.EncryptionTest; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import 
org.apache.hadoop.hbase.util.FSUtils; import org.apache.hadoop.hbase.util.HFileArchiveUtil; import org.apache.hadoop.hbase.util.HasThread; @@ -165,6 +174,7 @@ import org.apache.hadoop.hbase.zookeeper.SplitOrMergeTracker; import org.apache.hadoop.hbase.zookeeper.ZKClusterId; import org.apache.hadoop.hbase.zookeeper.ZKUtil; import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; +import org.apache.hadoop.util.StringUtils; import org.apache.zookeeper.KeeperException; import org.mortbay.jetty.Connector; import org.mortbay.jetty.nio.SelectChannelConnector; @@ -406,6 +416,7 @@ public class HMaster extends HRegionServer implements MasterServices { this.conf.setBoolean(HConstants.USE_META_REPLICAS, false); Replication.decorateMasterConfiguration(this.conf); + BackupManager.decorateMasterConfiguration(this.conf); // Hack! Maps DFSClient => Master for logs. HDFS made this // config param for task trackers, but we can piggyback off of it. @@ -2547,6 +2558,90 @@ public class HMaster extends HRegionServer implements MasterServices { return procInfoList; } + @Override + public Pair backupTables(final BackupType type, + List tableList, final String targetRootDir, final int workers, + final long bandwidth) throws IOException { + long procId; + String backupId = BackupRestoreConstants.BACKUPID_PREFIX + + EnvironmentEdgeManager.currentTime(); + if (type == BackupType.INCREMENTAL) { + Set incrTableSet = null; + try (BackupSystemTable table = new BackupSystemTable(getConnection())) { + incrTableSet = table.getIncrementalBackupTableSet(targetRootDir); + } + + if (incrTableSet.isEmpty()) { + LOG.warn("Incremental backup table set contains no table.\n" + + "Use 'backup create full' or 'backup stop' to \n " + + "change the tables covered by incremental backup."); + throw new DoNotRetryIOException("No table covered by incremental backup."); + } + + tableList.removeAll(incrTableSet); + if (!tableList.isEmpty()) { + String extraTables = StringUtils.join(",", tableList); + LOG.error("Some tables (" + extraTables + ") haven't gone through full backup"); + throw new DoNotRetryIOException("Perform full backup on " + extraTables + " first, " + + "then retry the command"); + } + LOG.info("Incremental backup for the following table set: " + incrTableSet); + tableList = Lists.newArrayList(incrTableSet); + } + if (tableList != null && !tableList.isEmpty()) { + for (TableName table : tableList) { + String targetTableBackupDir = + HBackupFileSystem.getTableBackupDir(targetRootDir, backupId, table); + Path targetTableBackupDirPath = new Path(targetTableBackupDir); + FileSystem outputFs = FileSystem.get(targetTableBackupDirPath.toUri(), conf); + if (outputFs.exists(targetTableBackupDirPath)) { + throw new DoNotRetryIOException("Target backup directory " + targetTableBackupDir + + " exists already."); + } + } + ArrayList nonExistingTableList = null; + for (TableName tableName : tableList) { + if (!MetaTableAccessor.tableExists(getConnection(), tableName)) { + if (nonExistingTableList == null) { + nonExistingTableList = new ArrayList<>(); + } + nonExistingTableList.add(tableName); + } + } + if (nonExistingTableList != null) { + if (type == BackupType.INCREMENTAL ) { + LOG.warn("Incremental backup table set contains non-exising table: " + + nonExistingTableList); + // Update incremental backup set + tableList = excludeNonExistingTables(tableList, nonExistingTableList); + } else { + // Throw exception only in full mode - we try to backup non-existing table + throw new DoNotRetryIOException("Non-existing tables found in the table 
list: " + + nonExistingTableList); + } + } + } + if (type == BackupType.FULL) { + procId = this.procedureExecutor.submitProcedure( + new FullTableBackupProcedure(procedureExecutor.getEnvironment(), backupId, + tableList, targetRootDir, workers, bandwidth)); + } else { + procId = this.procedureExecutor.submitProcedure( + new IncrementalTableBackupProcedure(procedureExecutor.getEnvironment(), backupId, + tableList, targetRootDir, workers, bandwidth)); + } + return new Pair<>(procId, backupId); + } + + private List excludeNonExistingTables(List tableList, + List nonExistingTableList) { + + for(TableName table: nonExistingTableList) { + tableList.remove(table); + } + return tableList; + } + /** * Returns the list of table descriptors that match the specified request * @param namespace the namespace to query, or null if querying for all diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java index 986ff6e..5261f7c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java @@ -49,6 +49,7 @@ import org.apache.hadoop.hbase.ServerLoad; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.UnknownRegionException; +import org.apache.hadoop.hbase.backup.BackupType; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.client.MasterSwitchType; import org.apache.hadoop.hbase.client.TableState; @@ -1003,6 +1004,25 @@ public class MasterRpcServices extends RSRpcServices } @Override + public MasterProtos.BackupTablesResponse backupTables( + RpcController controller, + MasterProtos.BackupTablesRequest request) throws ServiceException { + try { + BackupTablesResponse.Builder response = BackupTablesResponse.newBuilder(); + List tablesList = new ArrayList<>(request.getTablesList().size()); + for (HBaseProtos.TableName table : request.getTablesList()) { + tablesList.add(ProtobufUtil.toTableName(table)); + } + Pair pair = master.backupTables( + BackupType.valueOf(request.getType().name()), tablesList, request.getTargetRootDir(), + (int)request.getWorkers(), request.getBandwidth()); + return response.setProcId(pair.getFirst()).setBackupId(pair.getSecond()).build(); + } catch (IOException e) { + throw new ServiceException(e); + } + } + + @Override public ListTableDescriptorsByNamespaceResponse listTableDescriptorsByNamespace(RpcController c, ListTableDescriptorsByNamespaceRequest request) throws ServiceException { try { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java index 9b91572..cffcfdb 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java @@ -20,6 +20,7 @@ package org.apache.hadoop.hbase.master; import java.io.IOException; import java.util.List; +import java.util.concurrent.Future; import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HRegionInfo; @@ -30,6 +31,7 @@ import org.apache.hadoop.hbase.TableDescriptors; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableNotDisabledException; import org.apache.hadoop.hbase.TableNotFoundException; +import 
org.apache.hadoop.hbase.backup.BackupType; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.executor.ExecutorService; import org.apache.hadoop.hbase.master.normalizer.RegionNormalizer; @@ -38,6 +40,7 @@ import org.apache.hadoop.hbase.master.snapshot.SnapshotManager; import org.apache.hadoop.hbase.procedure.MasterProcedureManagerHost; import org.apache.hadoop.hbase.procedure2.ProcedureExecutor; import org.apache.hadoop.hbase.quotas.MasterQuotaManager; +import org.apache.hadoop.hbase.util.Pair; import com.google.protobuf.Service; @@ -188,6 +191,23 @@ public interface MasterServices extends Server { throws IOException; /** + * Full backup given list of tables + * @param type whether the backup is full or incremental + * @param tableList list of tables to backup + * @param targetRootDir root dir for saving the backup + * @param workers number of paralle workers. -1 - system defined + * @param bandwidth bandwidth per worker in MB per sec. -1 - unlimited + * @return pair of procedure Id and backupId + * @throws IOException + */ + public Pair backupTables( + final BackupType type, + List tableList, + final String targetRootDir, + final int workers, + final long bandwidth) throws IOException; + + /** * Enable an existing table * @param tableName The table name * @param nonceGroup diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureUtil.java index fa0c366..b76e314 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureUtil.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureUtil.java @@ -18,12 +18,23 @@ package org.apache.hadoop.hbase.master.procedure; +import java.io.IOException; +import java.io.InterruptedIOException; +import java.util.Map; +import java.util.Map.Entry; + import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.classification.InterfaceStability; +import org.apache.hadoop.hbase.client.HBaseAdmin; +import org.apache.hadoop.hbase.procedure.MasterProcedureManager; +import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair; +import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription; import org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation; import org.apache.hadoop.hbase.security.User; +import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.security.UserGroupInformation; @InterfaceAudience.Private @@ -54,4 +65,62 @@ public final class MasterProcedureUtil { } return null; } + + public static ProcedureDescription buildProcedure(String signature, String instance, + Map props) { + ProcedureDescription.Builder builder = ProcedureDescription.newBuilder(); + builder.setSignature(signature).setInstance(instance); + for (Entry entry : props.entrySet()) { + NameStringPair pair = NameStringPair.newBuilder().setName(entry.getKey()) + .setValue(entry.getValue()).build(); + builder.addConfiguration(pair); + } + ProcedureDescription desc = builder.build(); + return desc; + } + + public static long execProcedure(MasterProcedureManager mpm, String signature, String instance, + Map props) throws IOException { + if (mpm == null) { + throw new IOException("The 
procedure is not registered: " + signature); + } + ProcedureDescription desc = buildProcedure(signature, instance, props); + mpm.execProcedure(desc); + + // send back the max amount of time the client should wait for the procedure + // to complete + long waitTime = SnapshotDescriptionUtils.DEFAULT_MAX_WAIT_TIME; + return waitTime; + } + + public static void waitForProcedure(MasterProcedureManager mpm, String signature, String instance, + Map props, long max, int numRetries, long pause) throws IOException { + ProcedureDescription desc = buildProcedure(signature, instance, props); + long start = EnvironmentEdgeManager.currentTime(); + long maxPauseTime = max / numRetries; + int tries = 0; + LOG.debug("Waiting a max of " + max + " ms for procedure '" + + signature + " : " + instance + "'' to complete. (max " + maxPauseTime + " ms per retry)"); + boolean done = false; + while (tries == 0 + || ((EnvironmentEdgeManager.currentTime() - start) < max && !done)) { + try { + // sleep a backoff <= pauseTime amount + long sleep = HBaseAdmin.getPauseTime(tries++, pause); + sleep = sleep > maxPauseTime ? maxPauseTime : sleep; + LOG.debug("(#" + tries + ") Sleeping: " + sleep + + "ms while waiting for procedure completion."); + Thread.sleep(sleep); + } catch (InterruptedException e) { + throw (InterruptedIOException) new InterruptedIOException("Interrupted").initCause(e); + } + LOG.debug("Getting current status of procedure from master..."); + done = mpm.isProcedureDone(desc); + } + if (!done) { + throw new IOException("Procedure '" + signature + " : " + instance + + "' wasn't completed in expectedTime:" + max + " ms"); + } + + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TableProcedureInterface.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TableProcedureInterface.java index deaf406..78b3dac 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TableProcedureInterface.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TableProcedureInterface.java @@ -32,6 +32,7 @@ public interface TableProcedureInterface { public enum TableOperationType { CREATE, DELETE, DISABLE, EDIT, ENABLE, READ, SPLIT, MERGE, ASSIGN, UNASSIGN, /* region operations */ + BACKUP }; /** diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/RegionServerProcedureManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/RegionServerProcedureManager.java index 95c3ffe..b6e11ea 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/RegionServerProcedureManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/RegionServerProcedureManager.java @@ -37,7 +37,7 @@ public abstract class RegionServerProcedureManager extends ProcedureManager { * @param rss Region Server service interface * @throws KeeperException */ - public abstract void initialize(RegionServerServices rss) throws KeeperException; + public abstract void initialize(RegionServerServices rss) throws IOException; /** * Start accepting procedure requests. 
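
Putting the master-side pieces above together: a client sends a BackupTablesRequest to the master, MasterRpcServices converts the protobuf fields, and HMaster.backupTables validates the table list and submits either a FullTableBackupProcedure or an IncrementalTableBackupProcedure. A hedged client-side sketch of building the request; the FULL constant is assumed to be the enum generated from Backup.proto (BackupProtos.BackupType), and the table name and target directory are illustrative.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.BackupProtos;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos;

public class BackupRequestExample {
  public static MasterProtos.BackupTablesRequest newFullBackupRequest() {
    return MasterProtos.BackupTablesRequest.newBuilder()
        .setType(BackupProtos.BackupType.FULL)                              // required
        .addTables(ProtobufUtil.toProtoTableName(TableName.valueOf("t1")))  // repeated
        .setTargetRootDir("hdfs:///backup/root")                            // required
        .setWorkers(3)        // optional: number of parallel workers
        .setBandwidth(100)    // optional: per-worker bandwidth limit
        .build();
  }
}

The BackupTablesResponse that comes back carries the proc_id and backup_id fields, matching the pair of procedure id and backup id that HMaster.backupTables returns.
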
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/RegionServerProcedureManagerHost.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/RegionServerProcedureManagerHost.java index 0f4ea64..adb3604 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/RegionServerProcedureManagerHost.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/RegionServerProcedureManagerHost.java @@ -25,7 +25,6 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.procedure.flush.RegionServerFlushTableProcedureManager; import org.apache.hadoop.hbase.regionserver.RegionServerServices; import org.apache.hadoop.hbase.regionserver.snapshot.RegionServerSnapshotManager; -import org.apache.zookeeper.KeeperException; /** * Provides the globally barriered procedure framework and environment @@ -39,7 +38,7 @@ public class RegionServerProcedureManagerHost extends private static final Log LOG = LogFactory .getLog(RegionServerProcedureManagerHost.class); - public void initialize(RegionServerServices rss) throws KeeperException { + public void initialize(RegionServerServices rss) throws IOException { for (RegionServerProcedureManager proc : procedures) { LOG.debug("Procedure " + proc.getProcedureSignature() + " is initializing"); proc.initialize(rss); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ZKProcedureCoordinatorRpcs.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ZKProcedureCoordinatorRpcs.java index 085d642..3865ba9 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ZKProcedureCoordinatorRpcs.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ZKProcedureCoordinatorRpcs.java @@ -54,7 +54,7 @@ public class ZKProcedureCoordinatorRpcs implements ProcedureCoordinatorRpcs { * @throws KeeperException if an unexpected zk error occurs */ public ZKProcedureCoordinatorRpcs(ZooKeeperWatcher watcher, - String procedureClass, String coordName) throws KeeperException { + String procedureClass, String coordName) throws IOException { this.watcher = watcher; this.procedureType = procedureClass; this.coordName = coordName; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ZKProcedureMemberRpcs.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ZKProcedureMemberRpcs.java index 2e03a60..9b491fd 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ZKProcedureMemberRpcs.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ZKProcedureMemberRpcs.java @@ -68,49 +68,53 @@ public class ZKProcedureMemberRpcs implements ProcedureMemberRpcs { * @throws KeeperException if we can't reach zookeeper */ public ZKProcedureMemberRpcs(final ZooKeeperWatcher watcher, final String procType) - throws KeeperException { - this.zkController = new ZKProcedureUtil(watcher, procType) { - @Override - public void nodeCreated(String path) { - if (!isInProcedurePath(path)) { - return; - } + throws IOException { + try { + this.zkController = new ZKProcedureUtil(watcher, procType) { + @Override + public void nodeCreated(String path) { + if (!isInProcedurePath(path)) { + return; + } - LOG.info("Received created event:" + path); - // if it is a simple start/end/abort then we just rewatch the node - if (isAcquiredNode(path)) { - waitForNewProcedures(); - return; - } else if (isAbortNode(path)) { - watchForAbortedProcedures(); - return; + LOG.info("Received created event:" + path); + // if it is a simple 
start/end/abort then we just rewatch the node + if (isAcquiredNode(path)) { + waitForNewProcedures(); + return; + } else if (isAbortNode(path)) { + watchForAbortedProcedures(); + return; + } + String parent = ZKUtil.getParent(path); + // if its the end barrier, the procedure can be completed + if (isReachedNode(parent)) { + receivedReachedGlobalBarrier(path); + return; + } else if (isAbortNode(parent)) { + abort(path); + return; + } else if (isAcquiredNode(parent)) { + startNewSubprocedure(path); + } else { + LOG.debug("Ignoring created notification for node:" + path); + } } - String parent = ZKUtil.getParent(path); - // if its the end barrier, the procedure can be completed - if (isReachedNode(parent)) { - receivedReachedGlobalBarrier(path); - return; - } else if (isAbortNode(parent)) { - abort(path); - return; - } else if (isAcquiredNode(parent)) { - startNewSubprocedure(path); - } else { - LOG.debug("Ignoring created notification for node:" + path); - } - } - @Override - public void nodeChildrenChanged(String path) { - if (path.equals(this.acquiredZnode)) { - LOG.info("Received procedure start children changed event: " + path); - waitForNewProcedures(); - } else if (path.equals(this.abortZnode)) { - LOG.info("Received procedure abort children changed event: " + path); - watchForAbortedProcedures(); + @Override + public void nodeChildrenChanged(String path) { + if (path.equals(this.acquiredZnode)) { + LOG.info("Received procedure start children changed event: " + path); + waitForNewProcedures(); + } else if (path.equals(this.abortZnode)) { + LOG.info("Received procedure abort children changed event: " + path); + watchForAbortedProcedures(); + } } - } - }; + }; + } catch (KeeperException e) { + throw new IOException(e); + } } public ZKProcedureUtil getZkController() { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/flush/RegionServerFlushTableProcedureManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/flush/RegionServerFlushTableProcedureManager.java index 1aa959c..bd65cc7 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/flush/RegionServerFlushTableProcedureManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/flush/RegionServerFlushTableProcedureManager.java @@ -317,7 +317,7 @@ public class RegionServerFlushTableProcedureManager extends RegionServerProcedur * @throws KeeperException if the zookeeper cannot be reached */ @Override - public void initialize(RegionServerServices rss) throws KeeperException { + public void initialize(RegionServerServices rss) throws IOException { this.rss = rss; ZooKeeperWatcher zkw = rss.getZooKeeper(); this.memberRpcs = new ZKProcedureMemberRpcs(zkw, diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java index e03993f..3f4545f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java @@ -88,6 +88,7 @@ import org.apache.hadoop.hbase.TableDescriptors; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.YouAreDeadException; import org.apache.hadoop.hbase.ZNodeClearer; +import org.apache.hadoop.hbase.backup.impl.BackupManager; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.client.ClusterConnection; import 
org.apache.hadoop.hbase.client.Connection; @@ -526,7 +527,7 @@ public class HRegionServer extends HasThread implements FSUtils.setupShortCircuitRead(this.conf); // Disable usage of meta replicas in the regionserver this.conf.setBoolean(HConstants.USE_META_REPLICAS, false); - + BackupManager.decorateRSConfiguration(conf); // Config'ed params this.numRetries = this.conf.getInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER); @@ -836,8 +837,8 @@ public class HRegionServer extends HasThread implements rspmHost = new RegionServerProcedureManagerHost(); rspmHost.loadProcedures(conf); rspmHost.initialize(this); - } catch (KeeperException e) { - this.abort("Failed to reach zk cluster when creating procedure handler.", e); + } catch (IOException e) { + this.abort("Failed to reach coordination cluster when creating procedure handler.", e); } // register watcher for recovering regions this.recoveringRegionWatcher = new RecoveringRegionWatcher(this.zooKeeper, this); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/snapshot/RegionServerSnapshotManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/snapshot/RegionServerSnapshotManager.java index 537329a..e56dd28 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/snapshot/RegionServerSnapshotManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/snapshot/RegionServerSnapshotManager.java @@ -390,7 +390,7 @@ public class RegionServerSnapshotManager extends RegionServerProcedureManager { * @throws KeeperException if the zookeeper cluster cannot be reached */ @Override - public void initialize(RegionServerServices rss) throws KeeperException { + public void initialize(RegionServerServices rss) throws IOException { this.rss = rss; ZooKeeperWatcher zkw = rss.getZooKeeper(); this.memberRpcs = new ZKProcedureMemberRpcs(zkw, diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java index cdf5757..608fc63 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java @@ -68,6 +68,8 @@ import org.apache.htrace.Span; import org.apache.htrace.Trace; import org.apache.htrace.TraceScope; + + /** * The default implementation of FSWAL. 
*/ @@ -776,7 +778,15 @@ public class FSHLog extends AbstractFSWAL { scope.close(); } } - + + /** + * To support old API compatibility + * @return current file number (timestamp) + */ + public long getFilenum() { + return filenum.get(); + } + @Override public void logRollerExited() { } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java index cb78837..823f9f4 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java @@ -60,6 +60,7 @@ import org.apache.hadoop.hbase.Tag; import org.apache.hadoop.hbase.TagRewriteCell; import org.apache.hadoop.hbase.TagUtil; import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.Append; import org.apache.hadoop.hbase.client.Delete; import org.apache.hadoop.hbase.client.Durability; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityController.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityController.java index 3fa3fa3..c0e80b1 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityController.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityController.java @@ -51,6 +51,7 @@ import org.apache.hadoop.hbase.TagRewriteCell; import org.apache.hadoop.hbase.TagType; import org.apache.hadoop.hbase.TagUtil; import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.Append; import org.apache.hadoop.hbase.client.Delete; import org.apache.hadoop.hbase.client.Get; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AbstractFSWALProvider.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AbstractFSWALProvider.java index e495e99..fe19fd4 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AbstractFSWALProvider.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AbstractFSWALProvider.java @@ -209,6 +209,11 @@ public abstract class AbstractFSWALProvider> implemen @VisibleForTesting public static long extractFileNumFromWAL(final WAL wal) { final Path walName = ((AbstractFSWAL) wal).getCurrentFileName(); + return extractFileNumFromWAL(walName); + } + + @VisibleForTesting + public static long extractFileNumFromWAL(final Path walName) { if (walName == null) { throw new IllegalArgumentException("The WAL path couldn't be null"); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java index a6dc59f..dc9945f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java @@ -3231,6 +3231,16 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { } /** + * Waith until all system table's regions get assigned + * @throws IOException + */ + public void waitUntilAllSystemRegionsAssigned() throws IOException { + waitUntilAllRegionsAssigned(TableName.META_TABLE_NAME); + waitUntilAllRegionsAssigned(TableName.NAMESPACE_TABLE_NAME); + waitUntilAllRegionsAssigned(TableName.BACKUP_TABLE_NAME); 
+ } + + /** * Wait until all regions for a table in hbase:meta have a non-empty * info:server, or until timeout. This means all regions have been deployed, * master has been informed and updated hbase:meta with the regions deployed diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestNamespace.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestNamespace.java index d778fa9..3960004 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestNamespace.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestNamespace.java @@ -25,6 +25,7 @@ import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; import java.io.IOException; +import java.util.HashSet; import java.util.Set; import java.util.concurrent.Callable; @@ -111,13 +112,16 @@ public class TestNamespace { //verify existence of system tables Set systemTables = Sets.newHashSet( TableName.META_TABLE_NAME, - TableName.NAMESPACE_TABLE_NAME); + TableName.NAMESPACE_TABLE_NAME, + TableName.BACKUP_TABLE_NAME); HTableDescriptor[] descs = admin.listTableDescriptorsByNamespace(NamespaceDescriptor.SYSTEM_NAMESPACE.getName()); - assertEquals(systemTables.size(), descs.length); + assertTrue(descs.length >= systemTables.size()); + Set tables = new HashSet<>(); for (HTableDescriptor desc : descs) { - assertTrue(systemTables.contains(desc.getTableName())); + tables.add(desc.getTableName()); } + assertTrue(tables.containsAll(systemTables)); //verify system tables aren't listed assertEquals(0, admin.listTables().length); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaWithReplicas.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaWithReplicas.java index 8e87ceb..5ac255b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaWithReplicas.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaWithReplicas.java @@ -83,6 +83,9 @@ public class TestMetaWithReplicas { TEST_UTIL.getConfiguration().setInt( StorefileRefresherChore.REGIONSERVER_STOREFILE_REFRESH_PERIOD, 1000); TEST_UTIL.startMiniCluster(3); + + TEST_UTIL.waitUntilAllSystemRegionsAssigned(); + // disable the balancer LoadBalancerTracker l = new LoadBalancerTracker(TEST_UTIL.getZooKeeperWatcher(), new Abortable() { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestClassLoading.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestClassLoading.java index f5d2a20..762ee36 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestClassLoading.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestClassLoading.java @@ -22,6 +22,7 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.*; +import org.apache.hadoop.hbase.backup.master.BackupController; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.regionserver.Region; import org.apache.hadoop.hbase.regionserver.TestServerCustomProtocol; @@ -69,6 +70,7 @@ public class TestClassLoading { private static Class regionCoprocessor2 = TestServerCustomProtocol.PingHandler.class; private static Class regionServerCoprocessor = SampleRegionWALObserver.class; private static Class masterCoprocessor = BaseMasterObserver.class; + private static Class backupCoprocessor = BackupController.class; private static final String[] regionServerSystemCoprocessors = new String[]{ @@ -532,7 
+534,7 @@ public class TestClassLoading { // to master: verify that the master is reporting the correct set of // loaded coprocessors. final String loadedMasterCoprocessorsVerify = - "[" + masterCoprocessor.getSimpleName() + "]"; + "[" + backupCoprocessor.getSimpleName() + ", " + masterCoprocessor.getSimpleName() + "]"; String loadedMasterCoprocessors = java.util.Arrays.toString( TEST_UTIL.getHBaseCluster().getMaster().getMasterCoprocessors()); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterCoprocessorExceptionWithAbort.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterCoprocessorExceptionWithAbort.java index 061068c..4a10ff9 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterCoprocessorExceptionWithAbort.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterCoprocessorExceptionWithAbort.java @@ -107,6 +107,7 @@ public class TestMasterCoprocessorExceptionWithAbort { HTableDescriptor desc, HRegionInfo[] regions) throws IOException { // cause a NullPointerException and don't catch it: this will cause the // master to abort(). + if (desc.getTableName().isSystemTable()) return; Integer i; i = null; i = i++; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterCoprocessorExceptionWithRemove.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterCoprocessorExceptionWithRemove.java index bbb855c..c966d09 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterCoprocessorExceptionWithRemove.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterCoprocessorExceptionWithRemove.java @@ -85,6 +85,7 @@ public class TestMasterCoprocessorExceptionWithRemove { // Cause a NullPointerException and don't catch it: this should cause the // master to throw an o.apache.hadoop.hbase.DoNotRetryIOException to the // client. 
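+ // The backup system table is created during master startup; skip system tables here so the
+ // injected failure only fires for the table created by the test itself.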
+ if (desc.getTableName().isSystemTable()) return; Integer i; i = null; i = i++; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java index c7a42d9..26f44d0 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java @@ -31,6 +31,7 @@ import org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableDescriptors; import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.backup.BackupType; import org.apache.hadoop.hbase.client.ClusterConnection; import org.apache.hadoop.hbase.executor.ExecutorService; import org.apache.hadoop.hbase.master.normalizer.RegionNormalizer; @@ -40,6 +41,7 @@ import org.apache.hadoop.hbase.procedure.MasterProcedureManagerHost; import org.apache.hadoop.hbase.procedure2.ProcedureExecutor; import org.apache.hadoop.hbase.quotas.MasterQuotaManager; import org.apache.hadoop.hbase.security.User; +import org.apache.hadoop.hbase.util.Pair; import org.apache.hadoop.hbase.zookeeper.MetaTableLocator; import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; import org.mockito.Mockito; @@ -206,6 +208,15 @@ public class MockNoopMasterServices implements MasterServices, Server { } @Override + public Pair backupTables( + final BackupType type, + final List tableList, + final String targetRootDir, final int workers, + final long bandwidth) throws IOException { + return null; + } + + @Override public List listTableDescriptorsByNamespace(String name) throws IOException { return null; //To change body of implemented methods use File | Settings | File Templates. 
} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java index 35a3a79..0aac97d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java @@ -50,6 +50,7 @@ import org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableDescriptors; import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.backup.BackupType; import org.apache.hadoop.hbase.client.ClusterConnection; import org.apache.hadoop.hbase.client.HConnectionTestingUtility; import org.apache.hadoop.hbase.client.Result; @@ -58,6 +59,8 @@ import org.apache.hadoop.hbase.coordination.SplitLogManagerCoordination; import org.apache.hadoop.hbase.coordination.SplitLogManagerCoordination.SplitLogManagerDetails; import org.apache.hadoop.hbase.io.Reference; import org.apache.hadoop.hbase.master.CatalogJanitor.SplitParentFirstComparator; +import org.apache.hadoop.hbase.master.snapshot.SnapshotManager; +import org.apache.hadoop.hbase.procedure.MasterProcedureManagerHost; import org.apache.hadoop.hbase.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.protobuf.generated.AdminProtos; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos; @@ -74,6 +77,7 @@ import org.apache.hadoop.hbase.testclassification.SmallTests; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.FSUtils; import org.apache.hadoop.hbase.util.HFileArchiveUtil; +import org.apache.hadoop.hbase.util.Pair; import org.apache.hadoop.hbase.util.Triple; import org.apache.hadoop.hbase.zookeeper.MetaTableLocator; import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; @@ -220,6 +224,16 @@ public class TestCatalogJanitor { } @Override + public SnapshotManager getSnapshotManager() { + return null; + } + + @Override + public MasterProcedureManagerHost getMasterProcedureManagerHost() { + return null; + } + + @Override public AssignmentManager getAssignmentManager() { return this.asm; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestDistributedLogSplitting.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestDistributedLogSplitting.java index 002438a..0e83495 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestDistributedLogSplitting.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestDistributedLogSplitting.java @@ -1500,14 +1500,14 @@ public class TestDistributedLogSplitting { for (String oregion : regions) LOG.debug("Region still online: " + oregion); } - assertEquals(2 + existingRegions, regions.size()); + assertTrue(2 + existingRegions <= regions.size()); LOG.debug("Enabling table\n"); TEST_UTIL.getHBaseAdmin().enableTable(table); LOG.debug("Waiting for no more RIT\n"); blockUntilNoRIT(zkw, master); LOG.debug("Verifying there are " + numRegions + " assigned on cluster\n"); regions = HBaseTestingUtility.getAllOnlineRegions(cluster); - assertEquals(numRegions + 2 + existingRegions, regions.size()); + assertTrue(numRegions + 2 + existingRegions <= regions.size()); return ht; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterFailover.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterFailover.java index a20327d..ff98292 100644 --- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterFailover.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterFailover.java @@ -238,9 +238,9 @@ public class TestMasterFailover { log("Regions in hbase:meta and namespace have been created"); - // at this point we only expect 3 regions to be assigned out - // (catalogs and namespace, + 1 online region) - assertEquals(3, cluster.countServedRegions()); + // at this point we expect at least 3 regions to be assigned out + // (meta and namespace, + 1 online region) + assertTrue(3 <= cluster.countServedRegions()); HRegionInfo hriOnline = null; try (RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(TableName.valueOf("onlineTable"))) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterOperationsForRegionReplicas.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterOperationsForRegionReplicas.java index 7a4baf3..ce66ee4 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterOperationsForRegionReplicas.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterOperationsForRegionReplicas.java @@ -77,6 +77,7 @@ public class TestMasterOperationsForRegionReplicas { while(ADMIN.getClusterStatus().getServers().size() < numSlaves) { Thread.sleep(100); } + TEST_UTIL.waitUntilAllSystemRegionsAssigned(); } @AfterClass @@ -305,7 +306,7 @@ public class TestMasterOperationsForRegionReplicas { connection); snapshot.initialize(); Map regionToServerMap = snapshot.getRegionToRegionServerMap(); - assert(regionToServerMap.size() == numRegions * numReplica + 1); //'1' for the namespace + assert(regionToServerMap.size() == numRegions * numReplica + 2); //'1' for the namespace Map> serverToRegionMap = snapshot.getRegionServerToRegionMap(); for (Map.Entry> entry : serverToRegionMap.entrySet()) { if (entry.getKey().equals(util.getHBaseCluster().getMaster().getServerName())) { @@ -332,14 +333,14 @@ public class TestMasterOperationsForRegionReplicas { connection); snapshot.initialize(); Map regionToServerMap = snapshot.getRegionToRegionServerMap(); - assertEquals(regionToServerMap.size(), numRegions * numReplica + 1); //'1' for the namespace + assertEquals(regionToServerMap.size(), numRegions * numReplica + 2); //'2' for the ns, backup Map> serverToRegionMap = snapshot.getRegionServerToRegionMap(); assertEquals(serverToRegionMap.keySet().size(), 2); // 1 rs + 1 master for (Map.Entry> entry : serverToRegionMap.entrySet()) { if (entry.getKey().equals(TEST_UTIL.getHBaseCluster().getMaster().getServerName())) { continue; } - assertEquals(entry.getValue().size(), numRegions * numReplica); + assertEquals(entry.getValue().size(), numRegions * numReplica +1); } } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterRestartAfterDisablingTable.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterRestartAfterDisablingTable.java index b62279e..0ec4037 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterRestartAfterDisablingTable.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterRestartAfterDisablingTable.java @@ -79,9 +79,12 @@ public class TestMasterRestartAfterDisablingTable { TEST_UTIL.getHBaseAdmin().disableTable(table); NavigableSet regions = HBaseTestingUtility.getAllOnlineRegions(cluster); - assertEquals( - "The number of regions for the table tableRestart should be 0 and only" - + "the catalog and namespace 
tables should be present.", 2, regions.size()); + for (String region : regions) { + assertTrue( + "The number of regions for the table tableRestart should be 0 and only" + + "the catalog and namespace tables should be present.", + !region.startsWith(table.getNameAsString())); + } List masterThreads = cluster.getMasterThreads(); MasterThread activeMaster = null; @@ -110,7 +113,7 @@ public class TestMasterRestartAfterDisablingTable { regions = HBaseTestingUtility.getAllOnlineRegions(cluster); assertEquals("The assigned regions were not onlined after master" + " switch except for the catalog and namespace tables.", - 6, regions.size()); + 7, regions.size()); assertTrue("The table should be in enabled state", cluster.getMaster().getTableStateManager() .isTableState(TableName.valueOf("tableRestart"), TableState.State.ENABLED)); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRestartCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRestartCluster.java index 7c41c0f..6927072 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRestartCluster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRestartCluster.java @@ -75,7 +75,7 @@ public class TestRestartCluster { } List allRegions = MetaTableAccessor.getAllRegions(UTIL.getConnection(), false); - assertEquals(4, allRegions.size()); + assertTrue(4 <= allRegions.size()); LOG.info("\n\nShutting down cluster"); UTIL.shutdownMiniHBaseCluster(); @@ -90,7 +90,7 @@ public class TestRestartCluster { // Otherwise we're reusing an Connection that has gone stale because // the shutdown of the cluster also called shut of the connection. allRegions = MetaTableAccessor.getAllRegions(UTIL.getConnection(), false); - assertEquals(4, allRegions.size()); + assertTrue(4 <= allRegions.size()); LOG.info("\n\nWaiting for tables to be available"); for(TableName TABLE: TABLES) { try { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRollingRestart.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRollingRestart.java index 251e022..c9de2c4 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRollingRestart.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRollingRestart.java @@ -93,7 +93,7 @@ public class TestRollingRestart { if (regions.size() != 2) { for (String oregion : regions) log("Region still online: " + oregion); } - assertEquals(2, regions.size()); + assertEquals(3, regions.size()); log("Enabling table\n"); TEST_UTIL.getHBaseAdmin().enableTable(table); log("Waiting for no more RIT\n"); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/SimpleRSProcedureManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/SimpleRSProcedureManager.java index 7620bbb..cd2efad 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/SimpleRSProcedureManager.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/SimpleRSProcedureManager.java @@ -49,7 +49,7 @@ public class SimpleRSProcedureManager extends RegionServerProcedureManager { private ProcedureMember member; @Override - public void initialize(RegionServerServices rss) throws KeeperException { + public void initialize(RegionServerServices rss) throws IOException { this.rss = rss; ZooKeeperWatcher zkw = rss.getZooKeeper(); this.memberRpcs = new ZKProcedureMemberRpcs(zkw, getProcedureSignature()); diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionOpen.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionOpen.java index f49bdb1..f6e282a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionOpen.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionOpen.java @@ -52,6 +52,7 @@ public class TestRegionOpen { @BeforeClass public static void before() throws Exception { HTU.startMiniCluster(NB_SERVERS); + HTU.waitUntilAllSystemRegionsAssigned(); } @AfterClass @@ -68,7 +69,7 @@ public class TestRegionOpen { ThreadPoolExecutor exec = getRS().getExecutorService() .getExecutorThreadPool(ExecutorType.RS_OPEN_PRIORITY_REGION); - assertEquals(0, exec.getCompletedTaskCount()); + long taskCount = exec.getCompletedTaskCount(); // System table regions HTableDescriptor htd = new HTableDescriptor(tableName); htd.setPriority(HConstants.HIGH_QOS); @@ -78,6 +79,6 @@ public class TestRegionOpen { admin.createTable(htd); } - assertEquals(1, exec.getCompletedTaskCount()); + assertEquals(taskCount+1, exec.getCompletedTaskCount()); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerMetrics.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerMetrics.java index 89a82a7..340922a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerMetrics.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerMetrics.java @@ -99,6 +99,7 @@ public class TestRegionServerMetrics { while (cluster.getLiveRegionServerThreads().size() < 1) { Threads.sleep(100); } + TEST_UTIL.waitUntilAllSystemRegionsAssigned(); rs = cluster.getRegionServer(0); metricsRegionServer = rs.getRegionServerMetrics(); @@ -198,7 +199,7 @@ public class TestRegionServerMetrics { @Test public void testRegionCount() throws Exception { - metricsHelper.assertGauge("regionCount", 1, serverSource); + metricsHelper.assertGauge("regionCount", 2, serverSource); } @Test @@ -278,7 +279,7 @@ public class TestRegionServerMetrics { TEST_UTIL.getHBaseAdmin().flush(tableName); metricsRegionServer.getRegionServerWrapper().forceRecompute(); - assertGauge("storeCount", 1); + assertGauge("storeCount", 3); assertGauge("storeFileCount", 1); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestMasterReplication.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestMasterReplication.java index 02040e9..363fc93 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestMasterReplication.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestMasterReplication.java @@ -67,6 +67,7 @@ import org.apache.hadoop.hbase.replication.regionserver.TestSourceFSConfiguratio import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.testclassification.ReplicationTests; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.HFileTestUtil; import org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster; import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; @@ -89,7 +90,7 @@ public class TestMasterReplication { private MiniZooKeeperCluster miniZK; private static final long SLEEP_TIME = 500; - private static final int NB_RETRIES = 10; + private static final int NB_RETRIES = 20; private static final TableName tableName = 
TableName.valueOf("test"); private static final byte[] famName = Bytes.toBytes("f"); @@ -173,7 +174,7 @@ public class TestMasterReplication { * It tests the replication scenario involving 0 -> 1 -> 0. It does it by bulk loading a set of * HFiles to a table in each cluster, checking if it's replicated. */ - @Test(timeout = 300000) + @Test(timeout = 400000) public void testHFileCyclicReplication() throws Exception { LOG.info("testHFileCyclicReplication"); int numClusters = 2; @@ -229,7 +230,7 @@ public class TestMasterReplication { * originating from itself and also the edits that it received using replication from a different * cluster. The scenario is explained in HBASE-9158 */ - @Test(timeout = 300000) + @Test(timeout = 400000) public void testCyclicReplication2() throws Exception { LOG.info("testCyclicReplication1"); int numClusters = 3; @@ -339,7 +340,7 @@ public class TestMasterReplication { * families. It does it by bulk loading a set of HFiles belonging to both the CFs of table and set * only one CF data to replicate. */ - @Test(timeout = 300000) + @Test(timeout = 400000) public void testHFileReplicationForConfiguredTableCfs() throws Exception { LOG.info("testHFileReplicationForConfiguredTableCfs"); int numClusters = 2; @@ -393,7 +394,7 @@ public class TestMasterReplication { /** * Tests cyclic replication scenario of 0 -> 1 -> 2 -> 1. */ - @Test(timeout = 300000) + @Test(timeout = 400000) public void testCyclicReplication3() throws Exception { LOG.info("testCyclicReplication2"); int numClusters = 3; @@ -453,6 +454,11 @@ public class TestMasterReplication { configurations[i] = conf; new ZooKeeperWatcher(conf, "cluster" + i, null, true); } + long start = EnvironmentEdgeManager.currentTime(); + for (int i = 0; i < numClusters; i++) { + utilities[i].waitUntilAllSystemRegionsAssigned(); + } + LOG.info("Spent " + (EnvironmentEdgeManager.currentTime()-start) + " ms for system tables"); } private void shutDownMiniClusters() throws Exception { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestTablePermissions.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestTablePermissions.java index f7def51..7dbecdc 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestTablePermissions.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestTablePermissions.java @@ -242,8 +242,8 @@ public class TestTablePermissions { // check full load Map> allPerms = AccessControlLists.loadAll(conf); - assertEquals("Full permission map should have entries for both test tables", - 2, allPerms.size()); + assertTrue("Full permission map should have entries for both test tables", + 2 <= allPerms.size()); userPerms = allPerms.get(TEST_TABLE.getName()).get("hubert"); assertNotNull(userPerms); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsWithACL.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsWithACL.java index f67296d..0a6e422 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsWithACL.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsWithACL.java @@ -91,6 +91,7 @@ public class TestVisibilityLabelsWithACL { TEST_UTIL.waitTableEnabled(AccessControlLists.ACL_TABLE_NAME.getName(), 50000); // Wait for the labels table to become available TEST_UTIL.waitTableEnabled(LABELS_TABLE_NAME.getName(), 50000); + 
TEST_UTIL.waitUntilAllSystemRegionsAssigned(); addLabels(); // Create users for testing diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckOneRS.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckOneRS.java index ecfe521..9b9dcab 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckOneRS.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckOneRS.java @@ -129,8 +129,8 @@ public class TestHBaseFsckOneRS extends BaseTestHBaseFsck { admin = connection.getAdmin(); admin.setBalancerRunning(false, true); - TEST_UTIL.waitUntilAllRegionsAssigned(TableName.META_TABLE_NAME); - TEST_UTIL.waitUntilAllRegionsAssigned(TableName.NAMESPACE_TABLE_NAME); + TEST_UTIL.waitUntilAllSystemRegionsAssigned(); + } @AfterClass diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/hbck/OfflineMetaRebuildTestCore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/hbck/OfflineMetaRebuildTestCore.java index 97b6ddb..79fe16a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/hbck/OfflineMetaRebuildTestCore.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/hbck/OfflineMetaRebuildTestCore.java @@ -103,7 +103,7 @@ public class OfflineMetaRebuildTestCore { tableIdx++; htbl = setupTable(table); populateTable(htbl); - assertEquals(5, scanMeta()); + assertEquals(6, scanMeta()); LOG.info("Table " + table + " has " + tableRowCount(conf, table) + " entries."); assertEquals(16, tableRowCount(conf, table)); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/hbck/TestOfflineMetaRebuildBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/hbck/TestOfflineMetaRebuildBase.java index 17a208f..4e03846 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/hbck/TestOfflineMetaRebuildBase.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/hbck/TestOfflineMetaRebuildBase.java @@ -51,7 +51,7 @@ public class TestOfflineMetaRebuildBase extends OfflineMetaRebuildTestCore { wipeOutMeta(); // is meta really messed up? - assertEquals(1, scanMeta()); + assertEquals(2, scanMeta()); assertErrors(doFsck(conf, false), new ERROR_CODE[] { ERROR_CODE.NOT_IN_META_OR_DEPLOYED, @@ -81,7 +81,7 @@ public class TestOfflineMetaRebuildBase extends OfflineMetaRebuildTestCore { LOG.info("No more RIT in ZK, now doing final test verification"); // everything is good again. - assertEquals(5, scanMeta()); // including table state rows + assertEquals(6, scanMeta()); // including table state rows TableName[] tableNames = TEST_UTIL.getHBaseAdmin().listTableNames(); for (TableName tableName : tableNames) { HTableDescriptor tableDescriptor = TEST_UTIL.getHBaseAdmin().getTableDescriptor(tableName); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/hbck/TestOfflineMetaRebuildHole.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/hbck/TestOfflineMetaRebuildHole.java index b8565e3..783974e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/hbck/TestOfflineMetaRebuildHole.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/hbck/TestOfflineMetaRebuildHole.java @@ -52,7 +52,7 @@ public class TestOfflineMetaRebuildHole extends OfflineMetaRebuildTestCore { wipeOutMeta(); // is meta really messed up? 
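+ // hbase:backup is now bootstrapped as a system table, so one extra row is expected in hbase:meta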
- assertEquals(1, scanMeta()); + assertEquals(2, scanMeta()); assertErrors(doFsck(conf, false), new ERROR_CODE[] { ERROR_CODE.NOT_IN_META_OR_DEPLOYED, ERROR_CODE.NOT_IN_META_OR_DEPLOYED, @@ -77,7 +77,7 @@ public class TestOfflineMetaRebuildHole extends OfflineMetaRebuildTestCore { TEST_UTIL.waitUntilNoRegionsInTransition(60000); // Meta still messed up. - assertEquals(1, scanMeta()); + assertEquals(2, scanMeta()); HTableDescriptor[] htbls = getTables(TEST_UTIL.getConfiguration()); LOG.info("Tables present after restart: " + Arrays.toString(htbls)); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/hbck/TestOfflineMetaRebuildOverlap.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/hbck/TestOfflineMetaRebuildOverlap.java index ae72935..620a86c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/hbck/TestOfflineMetaRebuildOverlap.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/hbck/TestOfflineMetaRebuildOverlap.java @@ -55,7 +55,7 @@ public class TestOfflineMetaRebuildOverlap extends OfflineMetaRebuildTestCore { wipeOutMeta(); // is meta really messed up? - assertEquals(1, scanMeta()); + assertEquals(2, scanMeta()); assertErrors(doFsck(conf, false), new ERROR_CODE[] { ERROR_CODE.NOT_IN_META_OR_DEPLOYED, @@ -86,7 +86,7 @@ public class TestOfflineMetaRebuildOverlap extends OfflineMetaRebuildTestCore { LOG.info("No more RIT in ZK, now doing final test verification"); // Meta still messed up. - assertEquals(1, scanMeta()); + assertEquals(2, scanMeta()); HTableDescriptor[] htbls = getTables(TEST_UTIL.getConfiguration()); LOG.info("Tables present after restart: " + Arrays.toString(htbls)); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/backup/BackupInfo.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/backup/BackupInfo.java new file mode 100644 index 0000000..b4d7427 --- /dev/null +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/backup/BackupInfo.java @@ -0,0 +1,497 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hbase.backup; + +import java.io.IOException; +import java.io.InputStream; +import java.util.ArrayList; +import java.util.Calendar; +import java.util.Date; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Map.Entry; +import java.util.Set; + +import org.apache.commons.lang.StringUtils; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.backup.BackupType; +import org.apache.hadoop.hbase.backup.util.BackupClientUtil; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; +import org.apache.hadoop.hbase.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.protobuf.generated.BackupProtos; +import org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo.Builder; +import org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus; + + +/** + * An object to encapsulate the information for each backup request + */ +@InterfaceAudience.Private +@InterfaceStability.Evolving +public class BackupInfo implements Comparable { + private static final Log LOG = LogFactory.getLog(BackupInfo.class); + // backup status flag + public static enum BackupState { + WAITING, RUNNING, COMPLETE, FAILED, ANY; + } + // backup phase + public static enum BackupPhase { + SNAPSHOTCOPY, INCREMENTAL_COPY, STORE_MANIFEST; + } + + // backup id: a timestamp when we request the backup + private String backupId; + + // backup type, full or incremental + private BackupType type; + + // target root directory for storing the backup files + private String targetRootDir; + + // overall backup state + private BackupState state; + + // overall backup phase + private BackupPhase phase; + + // overall backup failure message + private String failedMsg; + + // backup status map for all tables + private Map backupStatusMap; + + // actual start timestamp of the backup process + private long startTs; + + // actual end timestamp of the backup process, could be fail or complete + private long endTs; + + // the total bytes of incremental logs copied + private long totalBytesCopied; + + // for incremental backup, the location of the backed-up hlogs + private String hlogTargetDir = null; + + // incremental backup file list + transient private List incrBackupFileList; + + // new region server log timestamps for table set after distributed log roll + // key - table name, value - map of RegionServer hostname -> last log rolled timestamp + transient private HashMap> tableSetTimestampMap; + + // backup progress in %% (0-100) + private int progress; + + // distributed job id + private String jobId; + + // Number of parallel workers. -1 - system defined + private int workers = -1; + + // Bandwidth per worker in MB per sec. 
-1 - unlimited + private long bandwidth = -1; + + public BackupInfo() { + } + + public BackupInfo(String backupId, BackupType type, TableName[] tables, String targetRootDir) { + backupStatusMap = new HashMap(); + + this.backupId = backupId; + this.type = type; + this.targetRootDir = targetRootDir; + if(LOG.isDebugEnabled()){ + LOG.debug("CreateBackupContext: " + tables.length+" "+tables[0] ); + } + this.addTables(tables); + + if (type == BackupType.INCREMENTAL) { + setHlogTargetDir(BackupClientUtil.getLogBackupDir(targetRootDir, backupId)); + } + + this.startTs = 0; + this.endTs = 0; + } + + public String getJobId() { + return jobId; + } + + public void setJobId(String jobId) { + this.jobId = jobId; + } + + public int getWorkers() { + return workers; + } + + public void setWorkers(int workers) { + this.workers = workers; + } + + public long getBandwidth() { + return bandwidth; + } + + public void setBandwidth(long bandwidth) { + this.bandwidth = bandwidth; + } + + public void setBackupStatusMap(Map backupStatusMap) { + this.backupStatusMap = backupStatusMap; + } + + public HashMap> getTableSetTimestampMap() { + return tableSetTimestampMap; + } + + public void setTableSetTimestampMap(HashMap> tableSetTimestampMap) { + this.tableSetTimestampMap = tableSetTimestampMap; + } + + public String getHlogTargetDir() { + return hlogTargetDir; + } + + public void setType(BackupType type) { + this.type = type; + } + + public void setTargetRootDir(String targetRootDir) { + this.targetRootDir = targetRootDir; + } + + public void setTotalBytesCopied(long totalBytesCopied) { + this.totalBytesCopied = totalBytesCopied; + } + + /** + * Set progress (0-100%) + * @param p progress value + */ + + public void setProgress(int p) { + this.progress = p; + } + + /** + * Get current progress + */ + public int getProgress() { + return progress; + } + + public String getBackupId() { + return backupId; + } + + public void setBackupId(String backupId) { + this.backupId = backupId; + } + + public BackupStatus getBackupStatus(TableName table) { + return this.backupStatusMap.get(table); + } + + public String getFailedMsg() { + return failedMsg; + } + + public void setFailedMsg(String failedMsg) { + this.failedMsg = failedMsg; + } + + public long getStartTs() { + return startTs; + } + + public void setStartTs(long startTs) { + this.startTs = startTs; + } + + public long getEndTs() { + return endTs; + } + + public void setEndTs(long endTs) { + this.endTs = endTs; + } + + public long getTotalBytesCopied() { + return totalBytesCopied; + } + + public BackupState getState() { + return state; + } + + public void setState(BackupState flag) { + this.state = flag; + } + + public BackupPhase getPhase() { + return phase; + } + + public void setPhase(BackupPhase phase) { + this.phase = phase; + } + + public BackupType getType() { + return type; + } + + public void setSnapshotName(TableName table, String snapshotName) { + this.backupStatusMap.get(table).setSnapshotName(snapshotName); + } + + public String getSnapshotName(TableName table) { + return this.backupStatusMap.get(table).getSnapshotName(); + } + + public List getSnapshotNames() { + List snapshotNames = new ArrayList(); + for (BackupStatus backupStatus : this.backupStatusMap.values()) { + snapshotNames.add(backupStatus.getSnapshotName()); + } + return snapshotNames; + } + + public Set getTables() { + return this.backupStatusMap.keySet(); + } + + public List getTableNames() { + return new ArrayList(backupStatusMap.keySet()); + } + + public void addTables(TableName[] tables) { + 
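// each table gets a BackupStatus that records its backup target directory and, later, its snapshot name +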
for (TableName table : tables) { + BackupStatus backupStatus = new BackupStatus(table, this.targetRootDir, this.backupId); + this.backupStatusMap.put(table, backupStatus); + } + } + + public String getTargetRootDir() { + return targetRootDir; + } + + public void setHlogTargetDir(String hlogTagetDir) { + this.hlogTargetDir = hlogTagetDir; + } + + public String getHLogTargetDir() { + return hlogTargetDir; + } + + public List getIncrBackupFileList() { + return incrBackupFileList; + } + + public void setIncrBackupFileList(List incrBackupFileList) { + this.incrBackupFileList = incrBackupFileList; + } + + /** + * Set the new region server log timestamps after distributed log roll + * @param newTableSetTimestampMap table timestamp map + */ + public void setIncrTimestampMap(HashMap> newTableSetTimestampMap) { + this.tableSetTimestampMap = newTableSetTimestampMap; + } + + /** + * Get new region server log timestamps after distributed log roll + * @return new region server log timestamps + */ + public HashMap> getIncrTimestampMap() { + return this.tableSetTimestampMap; + } + + public TableName getTableBySnapshot(String snapshotName) { + for (Entry entry : this.backupStatusMap.entrySet()) { + if (snapshotName.equals(entry.getValue().getSnapshotName())) { + return entry.getKey(); + } + } + return null; + } + + public BackupProtos.BackupInfo toProtosBackupInfo() { + BackupProtos.BackupInfo.Builder builder = BackupProtos.BackupInfo.newBuilder(); + builder.setBackupId(getBackupId()); + setBackupStatusMap(builder); + builder.setEndTs(getEndTs()); + if (getFailedMsg() != null) { + builder.setFailedMessage(getFailedMsg()); + } + if (getState() != null) { + builder.setState(BackupProtos.BackupInfo.BackupState.valueOf(getState().name())); + } + if (getPhase() != null) { + builder.setPhase(BackupProtos.BackupInfo.BackupPhase.valueOf(getPhase().name())); + } + + builder.setProgress(getProgress()); + builder.setStartTs(getStartTs()); + builder.setTargetRootDir(getTargetRootDir()); + builder.setType(BackupProtos.BackupType.valueOf(getType().name())); + builder.setWorkersNumber(workers); + builder.setBandwidth(bandwidth); + if (jobId != null) { + builder.setJobId(jobId); + } + return builder.build(); + } + + public byte[] toByteArray() throws IOException { + return toProtosBackupInfo().toByteArray(); + } + + private void setBackupStatusMap(Builder builder) { + for (Entry entry: backupStatusMap.entrySet()) { + builder.addTableBackupStatus(entry.getValue().toProto()); + } + } + + public static BackupInfo fromByteArray(byte[] data) throws IOException { + return fromProto(BackupProtos.BackupInfo.parseFrom(data)); + } + + public static BackupInfo fromStream(final InputStream stream) throws IOException { + return fromProto(BackupProtos.BackupInfo.parseDelimitedFrom(stream)); + } + + public static BackupInfo fromProto(BackupProtos.BackupInfo proto) { + BackupInfo context = new BackupInfo(); + context.setBackupId(proto.getBackupId()); + context.setBackupStatusMap(toMap(proto.getTableBackupStatusList())); + context.setEndTs(proto.getEndTs()); + if (proto.hasFailedMessage()) { + context.setFailedMsg(proto.getFailedMessage()); + } + if (proto.hasState()) { + context.setState(BackupInfo.BackupState.valueOf(proto.getState().name())); + } + + context.setHlogTargetDir(BackupClientUtil.getLogBackupDir(proto.getTargetRootDir(), + proto.getBackupId())); + + if (proto.hasPhase()) { + context.setPhase(BackupPhase.valueOf(proto.getPhase().name())); + } + if (proto.hasProgress()) { + context.setProgress(proto.getProgress()); + } + 
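// copy the remaining fields: timestamps, target root dir, type, workers, bandwidth and job id +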
context.setStartTs(proto.getStartTs()); + context.setTargetRootDir(proto.getTargetRootDir()); + context.setType(BackupType.valueOf(proto.getType().name())); + context.setWorkers(proto.getWorkersNumber()); + context.setBandwidth(proto.getBandwidth()); + if (proto.hasJobId()) { + context.setJobId(proto.getJobId()); + } + return context; + } + + private static Map toMap(List list) { + HashMap map = new HashMap<>(); + for (TableBackupStatus tbs : list){ + map.put(ProtobufUtil.toTableName(tbs.getTable()), BackupStatus.convert(tbs)); + } + return map; + } + + public String getShortDescription() { + StringBuilder sb = new StringBuilder(); + sb.append("ID : " + backupId).append("\n"); + sb.append("Type : " + getType()).append("\n"); + sb.append("Tables : " + getTableListAsString()).append("\n"); + sb.append("State : " + getState()).append("\n"); + Date date = null; + Calendar cal = Calendar.getInstance(); + cal.setTimeInMillis(getStartTs()); + date = cal.getTime(); + sb.append("Start time : " + date).append("\n"); + if (state == BackupState.FAILED) { + sb.append("Failed message : " + getFailedMsg()).append("\n"); + } else if (state == BackupState.RUNNING) { + sb.append("Phase : " + getPhase()).append("\n"); + } else if (state == BackupState.COMPLETE) { + cal = Calendar.getInstance(); + cal.setTimeInMillis(getEndTs()); + date = cal.getTime(); + sb.append("End time : " + date).append("\n"); + } + sb.append("Progress : " + getProgress()).append("\n"); + return sb.toString(); + } + + public String getStatusAndProgressAsString() { + StringBuilder sb = new StringBuilder(); + sb.append("id: ").append(getBackupId()).append(" state: ").append(getState()) + .append(" progress: ").append(getProgress()); + return sb.toString(); + } + + public String getTableListAsString() { + return StringUtils.join(backupStatusMap.keySet(), ","); + } + + @Override + public int compareTo(BackupInfo o) { + Long thisTS = + Long.valueOf(this.getBackupId().substring(this.getBackupId().lastIndexOf("_") + 1)); + Long otherTS = + Long.valueOf(o.getBackupId().substring(o.getBackupId().lastIndexOf("_") + 1)); + return thisTS.compareTo(otherTS); + } + + @Override + public boolean equals(Object obj) { + if (!(obj instanceof BackupInfo)) return false; + BackupInfo other = (BackupInfo) obj; + return compareTo(other) == 0; + } + + @Override + public int hashCode() { + int hash = 31 * type.hashCode() + (backupId != null ? backupId.hashCode() : 0); + if (targetRootDir != null) { + hash = 31 * hash + targetRootDir.hashCode(); + } + hash = 31 * hash + state.hashCode(); + hash = 31 * hash + phase.hashCode(); + hash = 31 * hash + (int)(startTs ^ (startTs >>> 32)); + hash = 31 * hash + (int)(endTs ^ (endTs >>> 32)); + hash = 31 * hash + (int)(totalBytesCopied ^ (totalBytesCopied >>> 32)); + if (hlogTargetDir != null) { + hash = 31 * hash + hlogTargetDir.hashCode(); + } + if (jobId != null) { + hash = 31 * hash + jobId.hashCode(); + } + return hash; + } +} diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/backup/BackupRequest.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/backup/BackupRequest.java new file mode 100644 index 0000000..bbf421c --- /dev/null +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/backup/BackupRequest.java @@ -0,0 +1,82 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership.
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.backup; + +import java.util.List; + +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.backup.BackupType; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; + +/** + * POJO class for backup request + */ +@InterfaceAudience.Public +@InterfaceStability.Evolving +public final class BackupRequest { + private BackupType type; + private List tableList; + private String targetRootDir; + private int workers = -1; + private long bandwidth = -1L; + + public BackupRequest() { + } + + public BackupRequest setBackupType(BackupType type) { + this.type = type; + return this; + } + public BackupType getBackupType() { + return this.type; + } + + public BackupRequest setTableList(List tableList) { + this.tableList = tableList; + return this; + } + public List getTableList() { + return this.tableList; + } + + public BackupRequest setTargetRootDir(String targetRootDir) { + this.targetRootDir = targetRootDir; + return this; + } + public String getTargetRootDir() { + return this.targetRootDir; + } + + public BackupRequest setWorkers(int workers) { + this.workers = workers; + return this; + } + public int getWorkers() { + return this.workers; + } + + public BackupRequest setBandwidth(long bandwidth) { + this.bandwidth = bandwidth; + return this; + } + public long getBandwidth() { + return this.bandwidth; + } +} diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/backup/BackupRestoreClientFactory.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/backup/BackupRestoreClientFactory.java new file mode 100644 index 0000000..b60ab21 --- /dev/null +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/backup/BackupRestoreClientFactory.java @@ -0,0 +1,55 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.backup; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; +import org.apache.hadoop.util.ReflectionUtils; + +@InterfaceAudience.Private +@InterfaceStability.Evolving +public final class BackupRestoreClientFactory { + private static final Log LOG = LogFactory.getLog(BackupRestoreClientFactory.class); + + private BackupRestoreClientFactory(){ + throw new AssertionError("Instantiating utility class..."); + } + + + /** + * Gets restore client implementation + * @param conf - configuration + * @return backup client + */ + public static RestoreClient getRestoreClient(Configuration conf) { + try{ + Class cls = + conf.getClassByName("org.apache.hadoop.hbase.backup.impl.RestoreClientImpl"); + + RestoreClient client = (RestoreClient) ReflectionUtils.newInstance(cls, conf); + client.setConf(conf); + return client; + } catch(Exception e){ + LOG.error("Can not instantiate RestoreClient. Make sure you have hbase-server jar in CLASSPATH", e); + } + return null; + } +} diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/backup/BackupStatus.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/backup/BackupStatus.java new file mode 100644 index 0000000..4b030fd --- /dev/null +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/backup/BackupStatus.java @@ -0,0 +1,104 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.backup; + +import java.io.Serializable; + +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.backup.util.BackupClientUtil; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; +import org.apache.hadoop.hbase.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.protobuf.generated.BackupProtos; + +/** + * Backup status and related information encapsulated for a table. + * At this moment only TargetDir and SnapshotName is encapsulated here. 
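+ * The target directory is derived from the backup root directory, the backup id and the table name (see BackupClientUtil.getTableBackupDir).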
+ */ + +@InterfaceAudience.Private +@InterfaceStability.Evolving +public class BackupStatus implements Serializable { + + private static final long serialVersionUID = -5968397963548535982L; + + // table name for backup + private transient TableName table; + + // target directory of the backup image for this table + private String targetDir; + + // snapshot name for offline/online snapshot + private String snapshotName = null; + + public BackupStatus() { + + } + + public BackupStatus(TableName table, String targetRootDir, String backupId) { + this.table = table; + this.targetDir = BackupClientUtil.getTableBackupDir(targetRootDir, backupId, table); + } + + public String getSnapshotName() { + return snapshotName; + } + + public void setSnapshotName(String snapshotName) { + this.snapshotName = snapshotName; + } + + public String getTargetDir() { + return targetDir; + } + + public TableName getTable() { + return table; + } + + public void setTable(TableName table) { + this.table = table; + } + + public void setTargetDir(String targetDir) { + this.targetDir = targetDir; + } + + public static BackupStatus convert(BackupProtos.TableBackupStatus proto) + { + BackupStatus bs = new BackupStatus(); + bs.setTable(ProtobufUtil.toTableName(proto.getTable())); + bs.setTargetDir(proto.getTargetDir()); + if(proto.hasSnapshot()){ + bs.setSnapshotName(proto.getSnapshot()); + } + return bs; + } + + public BackupProtos.TableBackupStatus toProto() { + BackupProtos.TableBackupStatus.Builder builder = + BackupProtos.TableBackupStatus.newBuilder(); + if(snapshotName != null) { + builder.setSnapshot(snapshotName); + } + builder.setTable(ProtobufUtil.toProtoTableName(table)); + builder.setTargetDir(targetDir); + return builder.build(); + } +} diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/backup/RestoreClient.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/backup/RestoreClient.java new file mode 100644 index 0000000..07f573e --- /dev/null +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/backup/RestoreClient.java @@ -0,0 +1,48 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.backup; + +import java.io.IOException; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; + +@InterfaceAudience.Private +@InterfaceStability.Evolving +public interface RestoreClient { + + public void setConf(Configuration conf); + + /** + * Restore operation. 
+ * @param backupRootDir The root dir for backup image + * @param backupId The backup id for image to be restored + * @param check True if only do dependency check + * @param sTableArray The array of tables to be restored + * @param tTableArray The array of mapping tables to restore to + * @param isOverwrite True then do restore overwrite if target table exists, otherwise fail the + * request if target table exists + * @throws IOException if any failure during restore + */ + public void restore( + String backupRootDir, + String backupId, boolean check, TableName[] sTableArray, + TableName[] tTableArray, boolean isOverwrite) throws IOException; +} diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/backup/RestoreRequest.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/backup/RestoreRequest.java new file mode 100644 index 0000000..c78f0bc --- /dev/null +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/backup/RestoreRequest.java @@ -0,0 +1,98 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.backup; + +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; + +@InterfaceAudience.Public +@InterfaceStability.Evolving + +/** + * POJO class for restore request + */ +public class RestoreRequest { + + private String backupRootDir; + private String backupId; + private boolean check = false; + private TableName[] fromTables; + private TableName[] toTables; + private boolean overwrite = false; + + public RestoreRequest(){ + } + + public String getBackupRootDir() { + return backupRootDir; + } + + public RestoreRequest setBackupRootDir(String backupRootDir) { + this.backupRootDir = backupRootDir; + return this; + } + + public String getBackupId() { + return backupId; + } + + public RestoreRequest setBackupId(String backupId) { + this.backupId = backupId; + return this; + } + + public boolean isCheck() { + return check; + } + + public RestoreRequest setCheck(boolean check) { + this.check = check; + return this; + } + + public TableName[] getFromTables() { + return fromTables; + } + + public RestoreRequest setFromTables(TableName[] fromTables) { + this.fromTables = fromTables; + return this; + + } + + public TableName[] getToTables() { + return toTables; + } + + public RestoreRequest setToTables(TableName[] toTables) { + this.toTables = toTables; + return this; + } + + public boolean isOverwrite() { + return overwrite; + } + + public RestoreRequest setOverwrite(boolean overwrite) { + this.overwrite = overwrite; + return this; + } + + +} diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupCommands.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupCommands.java new file mode 100644 index 0000000..0339ca0 --- /dev/null +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupCommands.java @@ -0,0 +1,558 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hbase.backup.impl; + +import java.io.IOException; +import java.util.List; + +import org.apache.commons.cli.CommandLine; +import org.apache.commons.lang.StringUtils; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.conf.Configured; +import org.apache.hadoop.hbase.HBaseConfiguration; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.backup.BackupInfo; +import org.apache.hadoop.hbase.backup.BackupRequest; +import org.apache.hadoop.hbase.backup.BackupType; +import org.apache.hadoop.hbase.backup.impl.BackupRestoreConstants.BackupCommand; +import org.apache.hadoop.hbase.backup.util.BackupClientUtil; +import org.apache.hadoop.hbase.backup.util.BackupSet; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; +import org.apache.hadoop.hbase.client.Admin; +import org.apache.hadoop.hbase.client.BackupAdmin; +import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.client.ConnectionFactory; + +import com.google.common.collect.Lists; + +/** + * General backup commands, options and usage messages + */ +@InterfaceAudience.Private +@InterfaceStability.Evolving +public final class BackupCommands { + + private static final String USAGE = "Usage: hbase backup COMMAND\n" + + "where COMMAND is one of:\n" + + " create create a new backup image\n" + + " cancel cancel an ongoing backup\n" + + " delete delete an existing backup image\n" + + " describe show the detailed information of a backup image\n" + + " history show history of all successful backups\n" + + " progress show the progress of the latest backup request\n" + + " set backup set management\n" + + "Enter \'help COMMAND\' to see help message for each command\n"; + + private static final String CREATE_CMD_USAGE = + "Usage: hbase backup create [tables] [-s name] [-convert] " + + "[-silent] [-w workers][-b bandwith]\n" + " type \"full\" to create a full backup image;\n" + + " \"incremental\" to create an incremental backup image\n" + + " backup_root_path The full root path to store the backup image,\n" + + " the prefix can be hdfs, webhdfs or gpfs\n" + " Options:\n" + + " tables If no tables (\"\") are specified, all tables are backed up. " + + "Otherwise it is a\n" + " comma separated list of tables.\n" + + " -w number of parallel workers.\n" + + " -b bandwith per one worker (in MB sec)\n" + + " -set name of backup set" ; + + private static final String PROGRESS_CMD_USAGE = "Usage: hbase backup progress \n" + + " backupId backup image id;\n"; + + private static final String DESCRIBE_CMD_USAGE = "Usage: hbase backup decsribe \n" + + " backupId backup image id\n"; + + private static final String HISTORY_CMD_USAGE = "Usage: hbase backup history [-n N]\n" + + " -n N show up to N last backup sessions, default - 10;\n"; + + private static final String DELETE_CMD_USAGE = "Usage: hbase backup delete \n" + + " backupId backup image id;\n"; + + private static final String CANCEL_CMD_USAGE = "Usage: hbase backup cancel \n" + + " backupId backup image id;\n"; + + private static final String SET_CMD_USAGE = "Usage: hbase backup set COMMAND [name] [tables]\n" + + " name Backup set name\n" + + " tables If no tables (\"\") are specified, all tables will belong to the set. 
" + + "Otherwise it is a\n" + " comma separated list of tables.\n" + + "where COMMAND is one of:\n" + + " add add tables to a set, crete set if needed\n" + + " remove remove tables from set\n" + + " list list all sets\n" + + " describe describes set\n" + + " delete delete backup set\n"; + + public static abstract class Command extends Configured { + Command(Configuration conf) { + super(conf); + } + public abstract void execute() throws IOException; + } + + private BackupCommands() { + throw new AssertionError("Instantiating utility class..."); + } + + public static Command createCommand(Configuration conf, BackupCommand type, CommandLine cmdline) { + Command cmd = null; + switch (type) { + case CREATE: + cmd = new CreateCommand(conf, cmdline); + break; + case DESCRIBE: + cmd = new DescribeCommand(conf, cmdline); + break; + case PROGRESS: + cmd = new ProgressCommand(conf, cmdline); + break; + case DELETE: + cmd = new DeleteCommand(conf, cmdline); + break; + case CANCEL: + cmd = new CancelCommand(conf, cmdline); + break; + case HISTORY: + cmd = new HistoryCommand(conf, cmdline); + break; + case SET: + cmd = new BackupSetCommand(conf, cmdline); + break; + case HELP: + default: + cmd = new HelpCommand(conf, cmdline); + break; + } + return cmd; + } + + + public static class CreateCommand extends Command { + CommandLine cmdline; + + CreateCommand(Configuration conf, CommandLine cmdline) { + super(conf); + this.cmdline = cmdline; + } + + @Override + public void execute() throws IOException { + if (cmdline == null || cmdline.getArgs() == null) { + System.out.println("ERROR: missing arguments"); + System.out.println(CREATE_CMD_USAGE); + System.exit(-1); + } + String[] args = cmdline.getArgs(); + if (args.length < 3 || args.length > 4) { + System.out.println("ERROR: wrong number of arguments"); + System.out.println(CREATE_CMD_USAGE); + System.exit(-1); + } + + if (!BackupType.FULL.toString().equalsIgnoreCase(args[1]) + && !BackupType.INCREMENTAL.toString().equalsIgnoreCase(args[1])) { + System.out.println("ERROR: invalid backup type"); + System.out.println(CREATE_CMD_USAGE); + System.exit(-1); + } + + String tables = null; + Configuration conf = getConf() != null? getConf(): HBaseConfiguration.create(); + + // Check backup set + if (cmdline.hasOption("set")) { + String setName = cmdline.getOptionValue("set"); + tables = getTablesForSet(setName, conf); + + if (tables == null) throw new IOException("Backup set '" + setName + + "' is either empty or does not exist"); + } else { + tables = (args.length == 4) ? args[3] : null; + } + int bandwidth = cmdline.hasOption('b') ? Integer.parseInt(cmdline.getOptionValue('b')) : -1; + int workers = cmdline.hasOption('w') ? 
Integer.parseInt(cmdline.getOptionValue('w')) : -1; + + try (Connection conn = ConnectionFactory.createConnection(getConf()); + Admin admin = conn.getAdmin(); + BackupAdmin backupAdmin = admin.getBackupAdmin();) { + BackupRequest request = new BackupRequest(); + request.setBackupType(BackupType.valueOf(args[1].toUpperCase())) + .setTableList(tables != null?Lists.newArrayList(BackupClientUtil.parseTableNames(tables)): null) + .setTargetRootDir(args[2]).setWorkers(workers).setBandwidth(bandwidth); + backupAdmin.backupTables(request); + } catch (IOException e) { + throw e; + } + } + private String getTablesForSet(String name, Configuration conf) + throws IOException { + try (final Connection conn = ConnectionFactory.createConnection(conf); + final BackupSystemTable table = new BackupSystemTable(conn)) { + List tables = table.describeBackupSet(name); + if (tables == null) return null; + return StringUtils.join(tables, BackupRestoreConstants.TABLENAME_DELIMITER_IN_COMMAND); + } + } + } + + private static class HelpCommand extends Command { + CommandLine cmdline; + + HelpCommand(Configuration conf, CommandLine cmdline) { + super(conf); + this.cmdline = cmdline; + } + + @Override + public void execute() throws IOException { + if (cmdline == null) { + System.out.println(USAGE); + System.exit(0); + } + + String[] args = cmdline.getArgs(); + if (args == null || args.length == 0) { + System.out.println(USAGE); + System.exit(0); + } + + if (args.length != 2) { + System.out.println("Only support check help message of a single command type"); + System.out.println(USAGE); + System.exit(0); + } + + String type = args[1]; + + if (BackupCommand.CREATE.name().equalsIgnoreCase(type)) { + System.out.println(CREATE_CMD_USAGE); + } else if (BackupCommand.DESCRIBE.name().equalsIgnoreCase(type)) { + System.out.println(DESCRIBE_CMD_USAGE); + } else if (BackupCommand.HISTORY.name().equalsIgnoreCase(type)) { + System.out.println(HISTORY_CMD_USAGE); + } else if (BackupCommand.PROGRESS.name().equalsIgnoreCase(type)) { + System.out.println(PROGRESS_CMD_USAGE); + } else if (BackupCommand.DELETE.name().equalsIgnoreCase(type)) { + System.out.println(DELETE_CMD_USAGE); + } else if (BackupCommand.CANCEL.name().equalsIgnoreCase(type)) { + System.out.println(CANCEL_CMD_USAGE); + } else if (BackupCommand.SET.name().equalsIgnoreCase(type)) { + System.out.println(SET_CMD_USAGE); + } else { + System.out.println("Unknown command : " + type); + System.out.println(USAGE); + } + System.exit(0); + } + } + + private static class DescribeCommand extends Command { + CommandLine cmdline; + + DescribeCommand(Configuration conf, CommandLine cmdline) { + super(conf); + this.cmdline = cmdline; + } + + @Override + public void execute() throws IOException { + if (cmdline == null || cmdline.getArgs() == null) { + System.out.println("ERROR: missing arguments"); + System.out.println(DESCRIBE_CMD_USAGE); + System.exit(-1); + } + String[] args = cmdline.getArgs(); + if (args.length != 2) { + System.out.println("ERROR: wrong number of arguments"); + System.out.println(DESCRIBE_CMD_USAGE); + System.exit(-1); + } + + String backupId = args[1]; + Configuration conf = getConf() != null ? 
getConf() : HBaseConfiguration.create(); + try (final Connection conn = ConnectionFactory.createConnection(conf); + final BackupAdmin admin = conn.getAdmin().getBackupAdmin();) { + BackupInfo info = admin.getBackupInfo(backupId); + System.out.println(info.getShortDescription()); + } + } + } + + private static class ProgressCommand extends Command { + CommandLine cmdline; + + ProgressCommand(Configuration conf, CommandLine cmdline) { + super(conf); + this.cmdline = cmdline; + } + + @Override + public void execute() throws IOException { + if (cmdline == null || cmdline.getArgs() == null || + cmdline.getArgs().length != 2) { + System.out.println("No backup id was specified, " + + "will retrieve the most recent (ongoing) session"); + } + String[] args = cmdline == null ? null : cmdline.getArgs(); + if (args != null && args.length > 2) { + System.out.println("ERROR: wrong number of arguments: " + args.length); + System.out.println(PROGRESS_CMD_USAGE); + System.exit(-1); + } + + String backupId = (args == null || args.length < 2) ? null : args[1]; + Configuration conf = getConf() != null? getConf(): HBaseConfiguration.create(); + try(final Connection conn = ConnectionFactory.createConnection(conf); + final BackupAdmin admin = conn.getAdmin().getBackupAdmin();){ + int progress = admin.getProgress(backupId); + if(progress < 0){ + System.out.println("No info was found for backup id: "+backupId); + } else{ + System.out.println(backupId+" progress=" + progress+"%"); + } + } + } + } + + private static class DeleteCommand extends Command { + + CommandLine cmdline; + DeleteCommand(Configuration conf, CommandLine cmdline) { + super(conf); + this.cmdline = cmdline; + } + + @Override + public void execute() throws IOException { + if (cmdline == null || cmdline.getArgs() == null || cmdline.getArgs().length < 2) { + System.out.println("No backup id(s) specified"); + System.out.println(DELETE_CMD_USAGE); + System.exit(-1); + } + String[] args = cmdline.getArgs(); + + String[] backupIds = new String[args.length - 1]; + System.arraycopy(args, 1, backupIds, 0, backupIds.length); + Configuration conf = getConf() != null ? getConf() : HBaseConfiguration.create(); + try (final Connection conn = ConnectionFactory.createConnection(conf); + final BackupAdmin admin = conn.getAdmin().getBackupAdmin();) { + int deleted = admin.deleteBackups(backupIds); + System.out.println("Deleted " + deleted + " backups. Total requested: " + backupIds.length); + } + + } + } + +// TODO Cancel command + + private static class CancelCommand extends Command { + CommandLine cmdline; + + CancelCommand(Configuration conf, CommandLine cmdline) { + super(conf); + this.cmdline = cmdline; + } + + @Override + public void execute() throws IOException { + if (cmdline == null || cmdline.getArgs() == null || cmdline.getArgs().length < 2) { + System.out.println("No backup id was specified, will use the most recent one"); + } + // String[] args = cmdline.getArgs(); + // String backupId = args == null || args.length == 0 ? null : args[1]; + Configuration conf = getConf() != null ?
getConf() : HBaseConfiguration.create(); + try (final Connection conn = ConnectionFactory.createConnection(conf); + final BackupAdmin admin = conn.getAdmin().getBackupAdmin();) { + // TODO cancel backup + } + } + } + + private static class HistoryCommand extends Command { + CommandLine cmdline; + private final static int DEFAULT_HISTORY_LENGTH = 10; + + HistoryCommand(Configuration conf, CommandLine cmdline) { + super(conf); + this.cmdline = cmdline; + } + + @Override + public void execute() throws IOException { + + int n = parseHistoryLength(); + Configuration conf = getConf() != null? getConf(): HBaseConfiguration.create(); + try(final Connection conn = ConnectionFactory.createConnection(conf); + final BackupAdmin admin = conn.getAdmin().getBackupAdmin();){ + List history = admin.getHistory(n); + for(BackupInfo info: history){ + System.out.println(info.getShortDescription()); + } + } + } + + private int parseHistoryLength() { + String value = cmdline.getOptionValue("n"); + if (value == null) return DEFAULT_HISTORY_LENGTH; + return Integer.parseInt(value); + } + } + + private static class BackupSetCommand extends Command { + private final static String SET_ADD_CMD = "add"; + private final static String SET_REMOVE_CMD = "remove"; + private final static String SET_DELETE_CMD = "delete"; + private final static String SET_DESCRIBE_CMD = "describe"; + private final static String SET_LIST_CMD = "list"; + + CommandLine cmdline; + + BackupSetCommand(Configuration conf, CommandLine cmdline) { + super(conf); + this.cmdline = cmdline; + } + + @Override + public void execute() throws IOException { + + // Command-line must have at least one element + if (cmdline == null || cmdline.getArgs() == null || cmdline.getArgs().length < 2) { + throw new IOException("command line format"); + } + String[] args = cmdline.getArgs(); + String cmdStr = args[1]; + BackupCommand cmd = getCommand(cmdStr); + + switch (cmd) { + case SET_ADD: + processSetAdd(args); + break; + case SET_REMOVE: + processSetRemove(args); + break; + case SET_DELETE: + processSetDelete(args); + break; + case SET_DESCRIBE: + processSetDescribe(args); + break; + case SET_LIST: + processSetList(args); + break; + default: + break; + + } + } + + private void processSetList(String[] args) throws IOException { + // List all backup set names + // does not expect any args + Configuration conf = getConf() != null? getConf(): HBaseConfiguration.create(); + try(final Connection conn = ConnectionFactory.createConnection(conf); + final BackupAdmin admin = conn.getAdmin().getBackupAdmin();){ + List list = admin.listBackupSets(); + for(BackupSet bs: list){ + System.out.println(bs); + } + } + } + + private void processSetDescribe(String[] args) throws IOException { + if (args == null || args.length != 3) { + throw new RuntimeException("Wrong number of args: " + (args == null ? 0 : args.length)); + } + String setName = args[2]; + Configuration conf = getConf() != null? getConf(): HBaseConfiguration.create(); + try(final Connection conn = ConnectionFactory.createConnection(conf); + final BackupAdmin admin = conn.getAdmin().getBackupAdmin();){ + BackupSet set = admin.getBackupSet(setName); + if(set == null) { + System.out.println("Set '"+setName+"' does not exist."); + } else{ + System.out.println(set); + } + } + } + + private void processSetDelete(String[] args) throws IOException { + if (args == null || args.length != 3) { + throw new RuntimeException("Wrong number of args"); + } + String setName = args[2]; + Configuration conf = getConf() != null? 
getConf(): HBaseConfiguration.create(); + try(final Connection conn = ConnectionFactory.createConnection(conf); + final BackupAdmin admin = conn.getAdmin().getBackupAdmin();){ + boolean result = admin.deleteBackupSet(setName); + if(result){ + System.out.println("Delete set "+setName+" OK."); + } else{ + System.out.println("Set "+setName+" does not exist"); + } + } + } + + private void processSetRemove(String[] args) throws IOException { + if (args == null || args.length != 4) { + throw new RuntimeException("Wrong args"); + } + String setName = args[2]; + String[] tables = args[3].split(","); + Configuration conf = getConf() != null? getConf(): HBaseConfiguration.create(); + try(final Connection conn = ConnectionFactory.createConnection(conf); + final BackupAdmin admin = conn.getAdmin().getBackupAdmin();){ + admin.removeFromBackupSet(setName, tables); + } + } + + private void processSetAdd(String[] args) throws IOException { + if (args == null || args.length != 4) { + throw new RuntimeException("Wrong args"); + } + String setName = args[2]; + String[] tables = args[3].split(","); + TableName[] tableNames = new TableName[tables.length]; + for(int i=0; i < tables.length; i++){ + tableNames[i] = TableName.valueOf(tables[i]); + } + Configuration conf = getConf() != null? getConf():HBaseConfiguration.create(); + try(final Connection conn = ConnectionFactory.createConnection(conf); + final BackupAdmin admin = conn.getAdmin().getBackupAdmin();){ + admin.addToBackupSet(setName, tableNames); + } + + } + + private BackupCommand getCommand(String cmdStr) throws IOException { + if (cmdStr.equals(SET_ADD_CMD)) { + return BackupCommand.SET_ADD; + } else if (cmdStr.equals(SET_REMOVE_CMD)) { + return BackupCommand.SET_REMOVE; + } else if (cmdStr.equals(SET_DELETE_CMD)) { + return BackupCommand.SET_DELETE; + } else if (cmdStr.equals(SET_DESCRIBE_CMD)) { + return BackupCommand.SET_DESCRIBE; + } else if (cmdStr.equals(SET_LIST_CMD)) { + return BackupCommand.SET_LIST; + } else { + throw new IOException("Unknown command for 'set' :" + cmdStr); + } + } + + } +} diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupException.java new file mode 100644 index 0000000..ca204b4 --- /dev/null +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupException.java @@ -0,0 +1,86 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hbase.backup.impl; + +import org.apache.hadoop.hbase.HBaseIOException; +import org.apache.hadoop.hbase.backup.BackupInfo; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; + +/** + * Backup exception + */ +@SuppressWarnings("serial") +@InterfaceAudience.Private +@InterfaceStability.Evolving +public class BackupException extends HBaseIOException { + private BackupInfo description; + + /** + * Some exception happened for a backup and don't even know the backup that it was about + * @param msg Full description of the failure + */ + public BackupException(String msg) { + super(msg); + } + + /** + * Some exception happened for a backup with a cause + * @param cause the cause + */ + public BackupException(Throwable cause) { + super(cause); + } + + /** + * Exception for the given backup that has no previous root cause + * @param msg reason why the backup failed + * @param desc description of the backup that is being failed + */ + public BackupException(String msg, BackupInfo desc) { + super(msg); + this.description = desc; + } + + /** + * Exception for the given backup due to another exception + * @param msg reason why the backup failed + * @param cause root cause of the failure + * @param desc description of the backup that is being failed + */ + public BackupException(String msg, Throwable cause, BackupInfo desc) { + super(msg, cause); + this.description = desc; + } + + /** + * Exception when the description of the backup cannot be determined, due to some other root + * cause + * @param message description of what caused the failure + * @param e root cause + */ + public BackupException(String message, Exception e) { + super(message, e); + } + + public BackupInfo getBackupContext() { + return this.description; + } + +} diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupRestoreConstants.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupRestoreConstants.java new file mode 100644 index 0000000..ac1d2bc --- /dev/null +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupRestoreConstants.java @@ -0,0 +1,47 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hbase.backup.impl; + +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; + +/** + * BackupRestoreConstants holds a bunch of HBase Backup and Restore constants + */ +@InterfaceAudience.Private +@InterfaceStability.Stable +public final class BackupRestoreConstants { + + + // delimiter in tablename list in restore command + public static final String TABLENAME_DELIMITER_IN_COMMAND = ","; + + public static final String CONF_STAGING_ROOT = "snapshot.export.staging.root"; + + public static final String BACKUPID_PREFIX = "backup_"; + + public static enum BackupCommand { + CREATE, CANCEL, DELETE, DESCRIBE, HISTORY, STATUS, CONVERT, MERGE, STOP, SHOW, HELP, PROGRESS, SET, + SET_ADD, SET_REMOVE, SET_DELETE, SET_DESCRIBE, SET_LIST + } + + private BackupRestoreConstants() { + // Can't be instantiated with this ctor. + } +} diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java new file mode 100644 index 0000000..33f04f6 --- /dev/null +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java @@ -0,0 +1,805 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.backup.impl; + +import java.io.Closeable; +import java.io.IOException; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Map.Entry; +import java.util.Set; +import java.util.TreeSet; + +import org.apache.commons.lang.StringUtils; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.HBaseConfiguration; +import org.apache.hadoop.hbase.HColumnDescriptor; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.backup.BackupInfo; +import org.apache.hadoop.hbase.backup.BackupInfo.BackupState; +import org.apache.hadoop.hbase.backup.util.BackupClientUtil; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; +import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.client.Delete; +import org.apache.hadoop.hbase.client.Get; +import org.apache.hadoop.hbase.client.Put; +import org.apache.hadoop.hbase.client.Result; +import org.apache.hadoop.hbase.client.ResultScanner; +import org.apache.hadoop.hbase.client.Scan; +import org.apache.hadoop.hbase.client.Table; +import org.apache.hadoop.hbase.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.protobuf.generated.BackupProtos; + +/** + * This class provides 'hbase:backup' table API + */ +@InterfaceAudience.Private +@InterfaceStability.Evolving +public final class BackupSystemTable implements Closeable { + + static class WALItem { + String backupId; + String walFile; + String backupRoot; + + WALItem(String backupId, String walFile, String backupRoot) + { + this.backupId = backupId; + this.walFile = walFile; + this.backupRoot = backupRoot; + } + + public String getBackupId() { + return backupId; + } + + public String getWalFile() { + return walFile; + } + + public String getBackupRoot() { + return backupRoot; + } + + public String toString() { + return "/"+ backupRoot + "/"+backupId + "/" + walFile; + } + + } + + private static final Log LOG = LogFactory.getLog(BackupSystemTable.class); + private final static TableName tableName = TableName.BACKUP_TABLE_NAME; + // Stores backup sessions (contexts) + final static byte[] SESSIONS_FAMILY = "session".getBytes(); + // Stores other meta + final static byte[] META_FAMILY = "meta".getBytes(); + // Connection to HBase cluster, shared + // among all instances + private final Connection connection; + + public BackupSystemTable(Connection conn) throws IOException { + this.connection = conn; + } + + + public void close() { + // do nothing + } + + /** + * Updates status (state) of a backup session in hbase:backup table + * @param context context + * @throws IOException exception + */ + public void updateBackupInfo(BackupInfo context) throws IOException { + + if (LOG.isDebugEnabled()) { + LOG.debug("update backup status in hbase:backup for: " + context.getBackupId() + + " set status=" + context.getState()); + } + try (Table table = connection.getTable(tableName)) { + Put put = BackupSystemTableHelper.createPutForBackupContext(context); + table.put(put); + } + } + + /** + * Deletes backup status from hbase:backup table + * @param backupId backup id + * @throws IOException exception + 
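* <p>A minimal usage sketch, assuming an open Connection named conn (the backup id shown is hypothetical): + * <pre> + * try (BackupSystemTable systemTable = new BackupSystemTable(conn)) { + * systemTable.deleteBackupInfo("backup_1462273620000"); + * } + * </pre> +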
*/ + public void deleteBackupInfo(String backupId) throws IOException { + + if (LOG.isDebugEnabled()) { + LOG.debug("delete backup status in hbase:backup for " + backupId); + } + try (Table table = connection.getTable(tableName)) { + Delete del = BackupSystemTableHelper.createDeleteForBackupInfo(backupId); + table.delete(del); + } + } + + /** + * Reads backup status object (instance of BackupContext) from hbase:backup table + * @param backupId - backupId + * @return Current status of backup session or null + */ + + public BackupInfo readBackupInfo(String backupId) throws IOException { + if (LOG.isDebugEnabled()) { + LOG.debug("read backup status from hbase:backup for: " + backupId); + } + + try (Table table = connection.getTable(tableName)) { + Get get = BackupSystemTableHelper.createGetForBackupContext(backupId); + Result res = table.get(get); + if(res.isEmpty()){ + return null; + } + return BackupSystemTableHelper.resultToBackupInfo(res); + } + } + + /** + * Read the last backup start code (timestamp) of last successful backup. Will return null if + * there is no start code stored on hbase or the value is of length 0. These two cases indicate + * there is no successful backup completed so far. + * @param backupRoot root directory path to backup + * @return the timestamp of last successful backup + * @throws IOException exception + */ + public String readBackupStartCode(String backupRoot) throws IOException { + if (LOG.isDebugEnabled()) { + LOG.debug("read backup start code from hbase:backup"); + } + try (Table table = connection.getTable(tableName)) { + Get get = BackupSystemTableHelper.createGetForStartCode(backupRoot); + Result res = table.get(get); + if (res.isEmpty()) { + return null; + } + Cell cell = res.listCells().get(0); + byte[] val = CellUtil.cloneValue(cell); + if (val.length == 0){ + return null; + } + return new String(val); + } + } + + /** + * Write the start code (timestamp) to hbase:backup. If passed in null, then write 0 byte. + * @param startCode start code + * @param backupRoot root directory path to backup + * @throws IOException exception + */ + public void writeBackupStartCode(Long startCode, String backupRoot) throws IOException { + if (LOG.isDebugEnabled()) { + LOG.debug("write backup start code to hbase:backup " + startCode); + } + try (Table table = connection.getTable(tableName)) { + Put put = BackupSystemTableHelper.createPutForStartCode(startCode.toString(), backupRoot); + table.put(put); + } + } + + /** + * Get the Region Servers log information after the last log roll from hbase:backup. 
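+ * <p>Each entry of the returned map associates a region server with the timestamp of its last + * log roll, for example rs1.example.com:16020 -> 1463754484800 (server name and value are illustrative).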
+ * @param backupRoot root directory path to backup + * @return RS log info + * @throws IOException exception + */ + public HashMap readRegionServerLastLogRollResult(String backupRoot) + throws IOException { + if (LOG.isDebugEnabled()) { + LOG.debug("read region server last roll log result to hbase:backup"); + } + + Scan scan = BackupSystemTableHelper.createScanForReadRegionServerLastLogRollResult(backupRoot); + + try (Table table = connection.getTable(tableName); + ResultScanner scanner = table.getScanner(scan)) { + Result res = null; + HashMap rsTimestampMap = new HashMap(); + while ((res = scanner.next()) != null) { + res.advance(); + Cell cell = res.current(); + byte[] row = CellUtil.cloneRow(cell); + String server = + BackupSystemTableHelper.getServerNameForReadRegionServerLastLogRollResult(row); + byte[] data = CellUtil.cloneValue(cell); + rsTimestampMap.put(server, Long.parseLong(new String(data))); + } + return rsTimestampMap; + } + } + + /** + * Writes Region Server last roll log result (timestamp) to hbase:backup table + * @param server - Region Server name + * @param ts - last log timestamp + * @param backupRoot root directory path to backup + * @throws IOException exception + */ + public void writeRegionServerLastLogRollResult(String server, Long ts, String backupRoot) + throws IOException { + if (LOG.isDebugEnabled()) { + LOG.debug("write region server last roll log result to hbase:backup"); + } + try (Table table = connection.getTable(tableName)) { + Put put = + BackupSystemTableHelper.createPutForRegionServerLastLogRollResult(server,ts,backupRoot); + table.put(put); + } + } + + /** + * Get all completed backup information (in desc order by time) + * @param onlyCompleted, true, if only successfully completed sessions + * @return history info of BackupCompleteData + * @throws IOException exception + */ + public ArrayList getBackupHistory(boolean onlyCompleted) throws IOException { + if (LOG.isDebugEnabled()) { + LOG.debug("get backup history from hbase:backup"); + } + ArrayList list ; + BackupState state = onlyCompleted? BackupState.COMPLETE: BackupState.ANY; + list = getBackupContexts(state); + return BackupClientUtil.sortHistoryListDesc(list); + } + + public ArrayList getBackupHistory() throws IOException { + return getBackupHistory(false); + } + + /** + * Get all backup session with a given status (in desc order by time) + * @param status status + * @return history info of backup contexts + * @throws IOException exception + */ + public ArrayList getBackupContexts(BackupState status) throws IOException { + if (LOG.isDebugEnabled()) { + LOG.debug("get backup contexts from hbase:backup"); + } + + Scan scan = BackupSystemTableHelper.createScanForBackupHistory(); + ArrayList list = new ArrayList(); + + try (Table table = connection.getTable(tableName); + ResultScanner scanner = table.getScanner(scan)) { + Result res = null; + while ((res = scanner.next()) != null) { + res.advance(); + BackupInfo context = BackupSystemTableHelper.cellToBackupInfo(res.current()); + if (status != BackupState.ANY && context.getState() != status){ + continue; + } + list.add(context); + } + return list; + } + } + + /** + * Write the current timestamps for each regionserver to hbase:backup + * after a successful full or incremental backup. The saved timestamp is of the last + * log file that was backed up already. 
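+ * <p>A sketch of a typical call site (variable names are illustrative): + * <pre> + * // tables in this backup and the RS -> last WAL timestamp map collected after log roll + * systemTable.writeRegionServerLogTimestamp(backedUpTables, newTimestamps, backupRootDir); + * </pre>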
+ * @param tables tables + * @param newTimestamps timestamps + * @param backupRoot root directory path to backup + * @throws IOException exception + */ + public void writeRegionServerLogTimestamp(Set tables, + HashMap newTimestamps, String backupRoot) throws IOException { + if (LOG.isDebugEnabled()) { + LOG.debug("write RS log time stamps to hbase:backup for tables ["+ + StringUtils.join(tables, ",")+"]"); + } + List puts = new ArrayList(); + for (TableName table : tables) { + byte[] smapData = toTableServerTimestampProto(table, newTimestamps).toByteArray(); + Put put = + BackupSystemTableHelper.createPutForWriteRegionServerLogTimestamp(table, + smapData, backupRoot); + puts.add(put); + } + try (Table table = connection.getTable(tableName)) { + table.put(puts); + } + } + + /** + * Read the timestamp for each region server log after the last successful backup. Each table has + * its own set of the timestamps. The info is stored for each table as a concatenated string of + * rs->timestapmp + * @param backupRoot root directory path to backup + * @return the timestamp for each region server. key: tableName value: + * RegionServer,PreviousTimeStamp + * @throws IOException exception + */ + public HashMap> readLogTimestampMap(String backupRoot) + throws IOException { + if (LOG.isDebugEnabled()) { + LOG.debug("read RS log ts from hbase:backup for root="+ backupRoot); + } + + HashMap> tableTimestampMap = + new HashMap>(); + + Scan scan = BackupSystemTableHelper.createScanForReadLogTimestampMap(backupRoot); + try (Table table = connection.getTable(tableName); + ResultScanner scanner = table.getScanner(scan)) { + Result res = null; + while ((res = scanner.next()) != null) { + res.advance(); + Cell cell = res.current(); + byte[] row = CellUtil.cloneRow(cell); + String tabName = BackupSystemTableHelper.getTableNameForReadLogTimestampMap(row); + TableName tn = TableName.valueOf(tabName); + byte[] data = CellUtil.cloneValue(cell); + if (data == null) { + throw new IOException("Data of last backup data from hbase:backup " + + "is empty. Create a backup first."); + } + if (data != null && data.length > 0) { + HashMap lastBackup = + fromTableServerTimestampProto(BackupProtos.TableServerTimestamp.parseFrom(data)); + tableTimestampMap.put(tn, lastBackup); + } + } + return tableTimestampMap; + } + } + + private BackupProtos.TableServerTimestamp toTableServerTimestampProto(TableName table, + Map map) { + BackupProtos.TableServerTimestamp.Builder tstBuilder = + BackupProtos.TableServerTimestamp.newBuilder(); + tstBuilder.setTable(ProtobufUtil.toProtoTableName(table)); + + for(Entry entry: map.entrySet()) { + BackupProtos.ServerTimestamp.Builder builder = BackupProtos.ServerTimestamp.newBuilder(); + builder.setServer(entry.getKey()); + builder.setTimestamp(entry.getValue()); + tstBuilder.addServerTimestamp(builder.build()); + } + + return tstBuilder.build(); + } + + private HashMap fromTableServerTimestampProto( + BackupProtos.TableServerTimestamp proto) { + HashMap map = new HashMap (); + List list = proto.getServerTimestampList(); + for(BackupProtos.ServerTimestamp st: list) { + map.put(st.getServer(), st.getTimestamp()); + } + return map; + } + + /** + * Return the current tables covered by incremental backup. 
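+ * <p>Table names are stored as column qualifiers of a single row keyed by the backup root, so + * the returned set accumulates every table that has been part of an incremental backup for that + * root.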
+ * @param backupRoot root directory path to backup + * @return set of tableNames + * @throws IOException exception + */ + public Set getIncrementalBackupTableSet(String backupRoot) + throws IOException { + if (LOG.isDebugEnabled()) { + LOG.debug("get incr backup table set from hbase:backup"); + } + TreeSet set = new TreeSet<>(); + + try (Table table = connection.getTable(tableName)) { + Get get = BackupSystemTableHelper.createGetForIncrBackupTableSet(backupRoot); + Result res = table.get(get); + if (res.isEmpty()) { + return set; + } + List cells = res.listCells(); + for (Cell cell : cells) { + // qualifier = table name - we use table names as qualifiers + set.add(TableName.valueOf(CellUtil.cloneQualifier(cell))); + } + return set; + } + } + + /** + * Add tables to global incremental backup set + * @param tables - set of tables + * @param backupRoot root directory path to backup + * @throws IOException exception + */ + public void addIncrementalBackupTableSet(Set tables, String backupRoot) throws IOException { + if (LOG.isDebugEnabled()) { + LOG.debug("Add incremental backup table set to hbase:backup. ROOT="+backupRoot + + " tables ["+ StringUtils.join(tables, " ")+"]"); + for (TableName table : tables) { + LOG.debug(table); + } + } + try (Table table = connection.getTable(tableName)) { + Put put = BackupSystemTableHelper.createPutForIncrBackupTableSet(tables, backupRoot); + table.put(put); + } + } + + /** + * Register WAL files as eligible for deletion + * @param files files + * @param backupId backup id + * @param backupRoot root directory path to backup + * @throws IOException exception + */ + public void addWALFiles(List files, String backupId, + String backupRoot) throws IOException { + if (LOG.isDebugEnabled()) { + LOG.debug("add WAL files to hbase:backup: "+backupId +" "+backupRoot+" files ["+ + StringUtils.join(files, ",")+"]"); + for(String f: files){ + LOG.debug("add :"+f); + } + } + try (Table table = connection.getTable(tableName)) { + List puts = + BackupSystemTableHelper.createPutsForAddWALFiles(files, backupId, backupRoot); + table.put(puts); + } + } + + /** + * Register WAL files as eligible for deletion + * @param backupRoot root directory path to backup + * @throws IOException exception + */ + public Iterator getWALFilesIterator(String backupRoot) throws IOException { + if (LOG.isDebugEnabled()) { + LOG.debug("get WAL files from hbase:backup"); + } + final Table table = connection.getTable(tableName); + Scan scan = BackupSystemTableHelper.createScanForGetWALs(backupRoot); + final ResultScanner scanner = table.getScanner(scan); + final Iterator it = scanner.iterator(); + return new Iterator() { + + @Override + public boolean hasNext() { + boolean next = it.hasNext(); + if (!next) { + // close all + try { + scanner.close(); + table.close(); + } catch (IOException e) { + LOG.error("Close WAL Iterator", e); + } + } + return next; + } + + @Override + public WALItem next() { + Result next = it.next(); + List cells = next.listCells(); + byte[] buf = cells.get(0).getValueArray(); + int len = cells.get(0).getValueLength(); + int offset = cells.get(0).getValueOffset(); + String backupId = new String(buf, offset, len); + buf = cells.get(1).getValueArray(); + len = cells.get(1).getValueLength(); + offset = cells.get(1).getValueOffset(); + String walFile = new String(buf, offset, len); + buf = cells.get(2).getValueArray(); + len = cells.get(2).getValueLength(); + offset = cells.get(2).getValueOffset(); + String backupRoot = new String(buf, offset, len); + return new WALItem(backupId, 
walFile, backupRoot); + } + + @Override + public void remove() { + // not implemented + throw new RuntimeException("remove is not supported"); + } + }; + + } + + /** + * Check if WAL file is eligible for deletion + * Future: to support all backup destinations + * @param file file + * @return true, if - yes. + * @throws IOException exception + */ + public boolean isWALFileDeletable(String file) throws IOException { + if (LOG.isDebugEnabled()) { + LOG.debug("Check if WAL file has been already backed up in hbase:backup "+ file); + } + try (Table table = connection.getTable(tableName)) { + Get get = BackupSystemTableHelper.createGetForCheckWALFile(file); + Result res = table.get(get); + if (res.isEmpty()){ + return false; + } + return true; + } + } + + /** + * Checks if we have at least one backup session in hbase:backup This API is used by + * BackupLogCleaner + * @return true, if - at least one session exists in hbase:backup table + * @throws IOException exception + */ + public boolean hasBackupSessions() throws IOException { + if (LOG.isDebugEnabled()) { + LOG.debug("Has backup sessions from hbase:backup"); + } + boolean result = false; + Scan scan = BackupSystemTableHelper.createScanForBackupHistory(); + scan.setCaching(1); + try (Table table = connection.getTable(tableName); + ResultScanner scanner = table.getScanner(scan)) { + if (scanner.next() != null) { + result = true; + } + return result; + } + } + + /** + * BACKUP SETS + */ + + /** + * Get backup set list + * @return backup set list + * @throws IOException + */ + public List listBackupSets() throws IOException { + if (LOG.isDebugEnabled()) { + LOG.debug(" Backup set list"); + } + List list = new ArrayList(); + Table table = null; + ResultScanner scanner = null; + try { + table = connection.getTable(tableName); + Scan scan = BackupSystemTableHelper.createScanForBackupSetList(); + scan.setMaxVersions(1); + scanner = table.getScanner(scan); + Result res = null; + while ((res = scanner.next()) != null) { + res.advance(); + list.add(BackupSystemTableHelper.cellKeyToBackupSetName(res.current())); + } + return list; + } finally { + if(scanner != null) { + scanner.close(); + } + if (table != null) { + table.close(); + } + } + } + + /** + * Get backup set description (list of tables) + * @param name - set's name + * @return list of tables in a backup set + * @throws IOException + */ + public List describeBackupSet(String name) throws IOException { + if (LOG.isDebugEnabled()) { + LOG.debug(" Backup set describe: "+name); + } + Table table = null; + try { + table = connection.getTable(tableName); + Get get = BackupSystemTableHelper.createGetForBackupSet(name); + Result res = table.get(get); + if(res.isEmpty()) return null; + res.advance(); + String[] tables = + BackupSystemTableHelper.cellValueToBackupSet(res.current()); + return toList(tables); + } finally { + if (table != null) { + table.close(); + } + } + } + + private List toList(String[] tables) + { + List list = new ArrayList(tables.length); + for(String name: tables) { + list.add(TableName.valueOf(name)); + } + return list; + } + + /** + * Add backup set (list of tables) + * @param name - set name + * @param newTables - array of tables + * @throws IOException + */ + public void addToBackupSet(String name, String[] newTables) throws IOException { + if (LOG.isDebugEnabled()) { + LOG.debug("Backup set add: "+name+" tables ["+ StringUtils.join(newTables, " ")+"]"); + } + Table table = null; + String[] union = null; + try { + table = connection.getTable(tableName); + Get get = 
BackupSystemTableHelper.createGetForBackupSet(name); + Result res = table.get(get); + if(res.isEmpty()) { + union = newTables; + } else { + res.advance(); + String[] tables = + BackupSystemTableHelper.cellValueToBackupSet(res.current()); + union = merge(tables, newTables); + } + Put put = BackupSystemTableHelper.createPutForBackupSet(name, union); + table.put(put); + } finally { + if (table != null) { + table.close(); + } + } + } + + private String[] merge(String[] tables, String[] newTables) { + List list = new ArrayList(); + // Add all from tables + for(String t: tables){ + list.add(t); + } + for(String nt: newTables){ + if(list.contains(nt)) continue; + list.add(nt); + } + String[] arr = new String[list.size()]; + list.toArray(arr); + return arr; + } + + /** + * Remove tables from backup set (list of tables) + * @param name - set name + * @param toRemove - array of table names + * @throws IOException + */ + public void removeFromBackupSet(String name, String[] toRemove) throws IOException { + if (LOG.isDebugEnabled()) { + LOG.debug(" Backup set remove from : " + name+" tables ["+ + StringUtils.join(toRemove, " ")+"]"); + } + Table table = null; + String[] disjoint = null; + try { + table = connection.getTable(tableName); + Get get = BackupSystemTableHelper.createGetForBackupSet(name); + Result res = table.get(get); + if (res.isEmpty()) { + LOG.warn("Backup set '"+ name+"' not found."); + return; + } else { + res.advance(); + String[] tables = BackupSystemTableHelper.cellValueToBackupSet(res.current()); + disjoint = disjoin(tables, toRemove); + } + if (disjoint.length > 0) { + Put put = BackupSystemTableHelper.createPutForBackupSet(name, disjoint); + table.put(put); + } else { + // Delete + //describeBackupSet(name); + LOG.warn("Backup set '"+ name+"' does not contain tables ["+ + StringUtils.join(toRemove, " ")+"]"); + } + } finally { + if (table != null) { + table.close(); + } + } + } + + private String[] disjoin(String[] tables, String[] toRemove) { + List list = new ArrayList(); + // Add all from tables + for (String t : tables) { + list.add(t); + } + for (String nt : toRemove) { + if (list.contains(nt)) { + list.remove(nt); + } + } + String[] arr = new String[list.size()]; + list.toArray(arr); + return arr; + } + + /** + * Delete backup set + * @param name set's name + * @throws IOException + */ + public void deleteBackupSet(String name) throws IOException { + if (LOG.isDebugEnabled()) { + LOG.debug(" Backup set delete: " + name); + } + Table table = null; + try { + table = connection.getTable(tableName); + Delete del = BackupSystemTableHelper.createDeleteForBackupSet(name); + table.delete(del); + } finally { + if (table != null) { + table.close(); + } + } + } + + /** + * Get backup system table descriptor + * @return descriptor + */ + public static HTableDescriptor getSystemTableDescriptor() { + HTableDescriptor tableDesc = new HTableDescriptor(tableName); + HColumnDescriptor colSessionsDesc = new HColumnDescriptor(SESSIONS_FAMILY); + colSessionsDesc.setMaxVersions(1); + // Time to keep backup sessions (secs) + Configuration config = HBaseConfiguration.create(); + int ttl = + config.getInt(HConstants.BACKUP_SYSTEM_TTL_KEY, HConstants.BACKUP_SYSTEM_TTL_DEFAULT); + colSessionsDesc.setTimeToLive(ttl); + tableDesc.addFamily(colSessionsDesc); + HColumnDescriptor colMetaDesc = new HColumnDescriptor(META_FAMILY); + //colDesc.setMaxVersions(1); + tableDesc.addFamily(colMetaDesc); + return tableDesc; + } + + public static String getTableNameAsString() { + return tableName.getNameAsString(); 
+ } + + public static TableName getTableName() { + return tableName; + } +} diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTableHelper.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTableHelper.java new file mode 100644 index 0000000..5eeb128 --- /dev/null +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTableHelper.java @@ -0,0 +1,428 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.backup.impl; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.Set; + +import org.apache.commons.lang.StringUtils; +import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.backup.BackupInfo; +import org.apache.hadoop.hbase.backup.util.BackupClientUtil; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; +import org.apache.hadoop.hbase.client.Delete; +import org.apache.hadoop.hbase.client.Get; +import org.apache.hadoop.hbase.client.Put; +import org.apache.hadoop.hbase.client.Result; +import org.apache.hadoop.hbase.client.Scan; +import org.apache.hadoop.hbase.util.Bytes; + + +/** + * A collection for methods used by BackupSystemTable. + */ + +@InterfaceAudience.Private +@InterfaceStability.Evolving +public final class BackupSystemTableHelper { + + /** + * hbase:backup schema: + * 1. Backup sessions rowkey= "session:" + backupId; value = serialized + * BackupContext + * 2. Backup start code rowkey = "startcode:" + backupRoot; value = startcode + * 3. Incremental backup set rowkey="incrbackupset:" + backupRoot; value=[list of tables] + * 4. Table-RS-timestamp map rowkey="trslm:"+ backupRoot+table_name; value = map[RS-> + * last WAL timestamp] + * 5. RS - WAL ts map rowkey="rslogts:"+backupRoot +server; value = last WAL timestamp + * 6. 
WALs recorded rowkey="wals:"+WAL unique file name; value = backupId and full WAL file name + */ + + private final static String BACKUP_INFO_PREFIX = "session:"; + private final static String START_CODE_ROW = "startcode:"; + private final static String INCR_BACKUP_SET = "incrbackupset:"; + private final static String TABLE_RS_LOG_MAP_PREFIX = "trslm:"; + private final static String RS_LOG_TS_PREFIX = "rslogts:"; + private final static String WALS_PREFIX = "wals:"; + private final static String SET_KEY_PREFIX = "backupset:"; + + private final static byte[] EMPTY_VALUE = new byte[] {}; + + // Safe delimiter in a string + private final static String NULL = "\u0000"; + + private BackupSystemTableHelper() { + throw new AssertionError("Instantiating utility class..."); + } + + /** + * Creates Put operation for a given backup context object + * @param context backup context + * @return put operation + * @throws IOException exception + */ + static Put createPutForBackupContext(BackupInfo context) throws IOException { + Put put = new Put(rowkey(BACKUP_INFO_PREFIX, context.getBackupId())); + put.addColumn(BackupSystemTable.SESSIONS_FAMILY, "context".getBytes(), context.toByteArray()); + return put; + } + + /** + * Creates Get operation for a given backup id + * @param backupId - backup's ID + * @return get operation + * @throws IOException exception + */ + static Get createGetForBackupContext(String backupId) throws IOException { + Get get = new Get(rowkey(BACKUP_INFO_PREFIX, backupId)); + get.addFamily(BackupSystemTable.SESSIONS_FAMILY); + get.setMaxVersions(1); + return get; + } + + /** + * Creates Delete operation for a given backup id + * @param backupId - backup's ID + * @return delete operation + * @throws IOException exception + */ + public static Delete createDeleteForBackupInfo(String backupId) { + Delete del = new Delete(rowkey(BACKUP_INFO_PREFIX, backupId)); + del.addFamily(BackupSystemTable.SESSIONS_FAMILY); + return del; + } + + /** + * Converts Result to BackupContext + * @param res - HBase result + * @return backup context instance + * @throws IOException exception + */ + static BackupInfo resultToBackupInfo(Result res) throws IOException { + res.advance(); + Cell cell = res.current(); + return cellToBackupInfo(cell); + } + + /** + * Creates Get operation to retrieve start code from hbase:backup + * @return get operation + * @throws IOException exception + */ + static Get createGetForStartCode(String rootPath) throws IOException { + Get get = new Get(rowkey(START_CODE_ROW, rootPath)); + get.addFamily(BackupSystemTable.META_FAMILY); + get.setMaxVersions(1); + return get; + } + + /** + * Creates Put operation to store start code to hbase:backup + * @return put operation + * @throws IOException exception + */ + static Put createPutForStartCode(String startCode, String rootPath) { + Put put = new Put(rowkey(START_CODE_ROW, rootPath)); + put.addColumn(BackupSystemTable.META_FAMILY, "startcode".getBytes(), startCode.getBytes()); + return put; + } + + /** + * Creates Get to retrieve incremental backup table set from hbase:backup + * @return get operation + * @throws IOException exception + */ + static Get createGetForIncrBackupTableSet(String backupRoot) throws IOException { + Get get = new Get(rowkey(INCR_BACKUP_SET, backupRoot)); + get.addFamily(BackupSystemTable.META_FAMILY); + get.setMaxVersions(1); + return get; + } + + /** + * Creates Put to store incremental backup table set + * @param tables tables + * @return put operation + */ + static Put createPutForIncrBackupTableSet(Set tables, 
String backupRoot) { + Put put = new Put(rowkey(INCR_BACKUP_SET, backupRoot)); + for (TableName table : tables) { + put.addColumn(BackupSystemTable.META_FAMILY, Bytes.toBytes(table.getNameAsString()), + EMPTY_VALUE); + } + return put; + } + + /** + * Creates Scan operation to load backup history + * @return scan operation + */ + static Scan createScanForBackupHistory() { + Scan scan = new Scan(); + byte[] startRow = BACKUP_INFO_PREFIX.getBytes(); + byte[] stopRow = Arrays.copyOf(startRow, startRow.length); + stopRow[stopRow.length - 1] = (byte) (stopRow[stopRow.length - 1] + 1); + scan.setStartRow(startRow); + scan.setStopRow(stopRow); + scan.addFamily(BackupSystemTable.SESSIONS_FAMILY); + scan.setMaxVersions(1); + return scan; + } + + /** + * Converts cell to backup context instance. + * @param current - cell + * @return backup context instance + * @throws IOException exception + */ + static BackupInfo cellToBackupInfo(Cell current) throws IOException { + byte[] data = CellUtil.cloneValue(current); + return BackupInfo.fromByteArray(data); + } + + /** + * Creates Put to write RS last roll log timestamp map + * @param table - table + * @param smap - map, containing RS:ts + * @return put operation + */ + static Put createPutForWriteRegionServerLogTimestamp(TableName table, byte[] smap, + String backupRoot) { + Put put = new Put(rowkey(TABLE_RS_LOG_MAP_PREFIX, backupRoot, NULL, table.getNameAsString())); + put.addColumn(BackupSystemTable.META_FAMILY, "log-roll-map".getBytes(), smap); + return put; + } + + /** + * Creates Scan to load table-> { RS -> ts} map of maps + * @return scan operation + */ + static Scan createScanForReadLogTimestampMap(String backupRoot) { + Scan scan = new Scan(); + byte[] startRow = rowkey(TABLE_RS_LOG_MAP_PREFIX, backupRoot); + byte[] stopRow = Arrays.copyOf(startRow, startRow.length); + stopRow[stopRow.length - 1] = (byte) (stopRow[stopRow.length - 1] + 1); + scan.setStartRow(startRow); + scan.setStopRow(stopRow); + scan.addFamily(BackupSystemTable.META_FAMILY); + + return scan; + } + + /** + * Get table name from rowkey + * @param cloneRow rowkey + * @return table name + */ + static String getTableNameForReadLogTimestampMap(byte[] cloneRow) { + String s = new String(cloneRow); + int index = s.lastIndexOf(NULL); + return s.substring(index +1); + } + + /** + * Creates Put to store RS last log result + * @param server - server name + * @param timestamp - log roll result (timestamp) + * @return put operation + */ + static Put createPutForRegionServerLastLogRollResult(String server, + Long timestamp, String backupRoot ) { + Put put = new Put(rowkey(RS_LOG_TS_PREFIX, backupRoot, NULL, server)); + put.addColumn(BackupSystemTable.META_FAMILY, "rs-log-ts".getBytes(), + timestamp.toString().getBytes()); + return put; + } + + /** + * Creates Scan operation to load last RS log roll results + * @return scan operation + */ + static Scan createScanForReadRegionServerLastLogRollResult(String backupRoot) { + Scan scan = new Scan(); + byte[] startRow = rowkey(RS_LOG_TS_PREFIX, backupRoot); + byte[] stopRow = Arrays.copyOf(startRow, startRow.length); + stopRow[stopRow.length - 1] = (byte) (stopRow[stopRow.length - 1] + 1); + scan.setStartRow(startRow); + scan.setStopRow(stopRow); + scan.addFamily(BackupSystemTable.META_FAMILY); + scan.setMaxVersions(1); + + return scan; + } + + /** + * Get server's name from rowkey + * @param row - rowkey + * @return server's name + */ + static String getServerNameForReadRegionServerLastLogRollResult(byte[] row) { + String s = new String(row); + int 
index = s.lastIndexOf(NULL); + return s.substring(index +1); + } + + /** + * Creates put list for list of WAL files + * @param files list of WAL file paths + * @param backupId backup id + * @return put list + * @throws IOException exception + */ + public static List createPutsForAddWALFiles(List files, + String backupId, String backupRoot) + throws IOException { + + List puts = new ArrayList(); + for (String file : files) { + Put put = new Put(rowkey(WALS_PREFIX, BackupClientUtil.getUniqueWALFileNamePart(file))); + put.addColumn(BackupSystemTable.META_FAMILY, "backupId".getBytes(), backupId.getBytes()); + put.addColumn(BackupSystemTable.META_FAMILY, "file".getBytes(), file.getBytes()); + put.addColumn(BackupSystemTable.META_FAMILY, "root".getBytes(), backupRoot.getBytes()); + puts.add(put); + } + return puts; + } + + /** + * Creates Scan operation to load WALs + * TODO: support for backupRoot + * @param backupRoot - path to backup destination + * @return scan operation + */ + public static Scan createScanForGetWALs(String backupRoot) { + Scan scan = new Scan(); + byte[] startRow = WALS_PREFIX.getBytes(); + byte[] stopRow = Arrays.copyOf(startRow, startRow.length); + stopRow[stopRow.length - 1] = (byte) (stopRow[stopRow.length - 1] + 1); + scan.setStartRow(startRow); + scan.setStopRow(stopRow); + scan.addFamily(BackupSystemTable.META_FAMILY); + return scan; + } + /** + * Creates Get operation for a given wal file name + * TODO: support for backup destination + * @param file file + * @return get operation + * @throws IOException exception + */ + public static Get createGetForCheckWALFile(String file) throws IOException { + Get get = new Get(rowkey(WALS_PREFIX, BackupClientUtil.getUniqueWALFileNamePart(file))); + // add backup root column + get.addFamily(BackupSystemTable.META_FAMILY); + return get; + } + + + /** + * Creates Scan operation to load backup set list + * @return scan operation + */ + static Scan createScanForBackupSetList() { + Scan scan = new Scan(); + byte[] startRow = SET_KEY_PREFIX.getBytes(); + byte[] stopRow = Arrays.copyOf(startRow, startRow.length); + stopRow[stopRow.length - 1] = (byte) (stopRow[stopRow.length - 1] + 1); + scan.setStartRow(startRow); + scan.setStopRow(stopRow); + scan.addFamily(BackupSystemTable.META_FAMILY); + return scan; + } + + /** + * Creates Get operation to load backup set content + * @return get operation + */ + static Get createGetForBackupSet(String name) { + Get get = new Get(rowkey(SET_KEY_PREFIX, name)); + get.addFamily(BackupSystemTable.META_FAMILY); + return get; + } + + /** + * Creates Delete operation to delete backup set content + * @param name - backup set's name + * @return delete operation + */ + static Delete createDeleteForBackupSet(String name) { + Delete del = new Delete(rowkey(SET_KEY_PREFIX, name)); + del.addFamily(BackupSystemTable.META_FAMILY); + return del; + } + + + /** + * Creates Put operation to update backup set content + * @param name - backup set's name + * @param tables - list of tables + * @return put operation + */ + static Put createPutForBackupSet(String name, String[] tables) { + Put put = new Put(rowkey(SET_KEY_PREFIX, name)); + byte[] value = convertToByteArray(tables); + put.addColumn(BackupSystemTable.META_FAMILY, "tables".getBytes(), value); + return put; + } + + private static byte[] convertToByteArray(String[] tables) { + return StringUtils.join(tables, ",").getBytes(); + } + + + /** + * Converts cell to backup set list. 
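+ * The cell value is the comma separated table list written by createPutForBackupSet, for + * example table1,ns2:table2 (table names are illustrative); an empty value yields an empty array.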
+ * @param current - cell + * @return backup set + * @throws IOException + */ + static String[] cellValueToBackupSet(Cell current) throws IOException { + byte[] data = CellUtil.cloneValue(current); + if( data != null && data.length > 0){ + return new String(data).split(","); + } else{ + return new String[0]; + } + } + + /** + * Converts cell key to backup set name. + * @param current - cell + * @return backup set name + * @throws IOException + */ + static String cellKeyToBackupSetName(Cell current) throws IOException { + byte[] data = CellUtil.cloneRow(current); + return new String(data).substring(SET_KEY_PREFIX.length()); + } + + static byte[] rowkey(String s, String ... other){ + StringBuilder sb = new StringBuilder(s); + for(String ss: other){ + sb.append(ss); + } + return sb.toString().getBytes(); + } + +} diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/backup/util/BackupClientUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/backup/util/BackupClientUtil.java new file mode 100644 index 0000000..54dd9ff --- /dev/null +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/backup/util/BackupClientUtil.java @@ -0,0 +1,368 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.backup.util; + +import java.io.FileNotFoundException; +import java.io.IOException; +import java.net.URLDecoder; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.Iterator; +import java.util.List; +import java.util.TreeMap; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.LocatedFileStatus; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.PathFilter; +import org.apache.hadoop.fs.RemoteIterator; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.backup.BackupInfo; +import org.apache.hadoop.hbase.backup.impl.BackupRestoreConstants; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; + +/** + * A collection of methods used by multiple classes to backup HBase tables. 
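The createScanFor*() helpers above all bound a prefix scan the same way: the start row is the raw prefix and the stop row is a copy of it with the last byte incremented, so the scan covers exactly the rows under that prefix. A minimal standalone sketch of that pattern follows (class, method and prefix names are illustrative, not part of the patch; it assumes the prefix ends in an ordinary ASCII byte, as the string prefixes above do):

import java.util.Arrays;

// Sketch of the exclusive-stop-row trick used by the BackupSystemTable scan helpers.
public class PrefixScanBoundsSketch {

  // Copy the prefix and bump its last byte to get the first row *after* the prefix range.
  static byte[] stopRowForPrefix(byte[] prefix) {
    byte[] stopRow = Arrays.copyOf(prefix, prefix.length);
    stopRow[stopRow.length - 1] = (byte) (stopRow[stopRow.length - 1] + 1);
    return stopRow;
  }

  public static void main(String[] args) {
    byte[] startRow = "session:".getBytes();   // hypothetical key prefix
    byte[] stopRow = stopRowForPrefix(startRow);
    // Prints: session: -> session;  (':' + 1 == ';'), i.e. [startRow, stopRow) spans the prefix.
    System.out.println(new String(startRow) + " -> " + new String(stopRow));
  }
}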
+ */ +@InterfaceAudience.Private +@InterfaceStability.Evolving +public final class BackupClientUtil { + protected static final Log LOG = LogFactory.getLog(BackupClientUtil.class); + public static final String LOGNAME_SEPARATOR = "."; + + private BackupClientUtil(){ + throw new AssertionError("Instantiating utility class..."); + } + + /** + * Check whether the backup path exist + * @param backupStr backup + * @param conf configuration + * @return Yes if path exists + * @throws IOException exception + */ + public static boolean checkPathExist(String backupStr, Configuration conf) + throws IOException { + boolean isExist = false; + Path backupPath = new Path(backupStr); + FileSystem fileSys = backupPath.getFileSystem(conf); + String targetFsScheme = fileSys.getUri().getScheme(); + if (LOG.isTraceEnabled()) { + LOG.trace("Schema of given url: " + backupStr + " is: " + targetFsScheme); + } + if (fileSys.exists(backupPath)) { + isExist = true; + } + return isExist; + } + + // check target path first, confirm it doesn't exist before backup + public static void checkTargetDir(String backupRootPath, Configuration conf) throws IOException { + boolean targetExists = false; + try { + targetExists = checkPathExist(backupRootPath, conf); + } catch (IOException e) { + String expMsg = e.getMessage(); + String newMsg = null; + if (expMsg.contains("No FileSystem for scheme")) { + newMsg = + "Unsupported filesystem scheme found in the backup target url. Error Message: " + + newMsg; + LOG.error(newMsg); + throw new IOException(newMsg); + } else { + throw e; + } + } + + if (targetExists) { + LOG.info("Using existing backup root dir: " + backupRootPath); + } else { + LOG.info("Backup root dir " + backupRootPath + " does not exist. Will be created."); + } + } + + /** + * Get the min value for all the Values a map. + * @param map map + * @return the min value + */ + public static Long getMinValue(HashMap map) { + Long minTimestamp = null; + if (map != null) { + ArrayList timestampList = new ArrayList(map.values()); + Collections.sort(timestampList); + // The min among all the RS log timestamps will be kept in hbase:backup table. + minTimestamp = timestampList.get(0); + } + return minTimestamp; + } + + /** + * Parses host name:port from archived WAL path + * @param p path + * @return host name + * @throws IOException exception + */ + public static String parseHostFromOldLog(Path p) { + try { + String n = p.getName(); + int idx = n.lastIndexOf(LOGNAME_SEPARATOR); + String s = URLDecoder.decode(n.substring(0, idx), "UTF8"); + return ServerName.parseHostname(s) + ":" + ServerName.parsePort(s); + } catch (Exception e) { + LOG.warn("Skip log file (can't parse): " + p); + return null; + } + } + + /** + * Given the log file, parse the timestamp from the file name. The timestamp is the last number. 
+ * @param p a path to the log file + * @return the timestamp + * @throws IOException exception + */ + public static Long getCreationTime(Path p) throws IOException { + int idx = p.getName().lastIndexOf(LOGNAME_SEPARATOR); + if (idx < 0) { + throw new IOException("Cannot parse timestamp from path " + p); + } + String ts = p.getName().substring(idx + 1); + return Long.parseLong(ts); + } + + public static List getFiles(FileSystem fs, Path rootDir, List files, + PathFilter filter) throws FileNotFoundException, IOException { + RemoteIterator it = fs.listFiles(rootDir, true); + + while (it.hasNext()) { + LocatedFileStatus lfs = it.next(); + if (lfs.isDirectory()) { + continue; + } + // apply filter + if (filter.accept(lfs.getPath())) { + files.add(lfs.getPath().toString()); + } + } + return files; + } + + public static void cleanupBackupData(BackupInfo context, Configuration conf) + throws IOException + { + cleanupHLogDir(context, conf); + cleanupTargetDir(context, conf); + } + + /** + * Clean up directories which are generated when DistCp copying hlogs. + * @throws IOException + */ + private static void cleanupHLogDir(BackupInfo backupContext, Configuration conf) + throws IOException { + + String logDir = backupContext.getHLogTargetDir(); + if (logDir == null) { + LOG.warn("No log directory specified for " + backupContext.getBackupId()); + return; + } + + Path rootPath = new Path(logDir).getParent(); + FileSystem fs = FileSystem.get(rootPath.toUri(), conf); + FileStatus[] files = listStatus(fs, rootPath, null); + if (files == null) { + return; + } + for (FileStatus file : files) { + LOG.debug("Delete log files: " + file.getPath().getName()); + fs.delete(file.getPath(), true); + } + } + + /** + * Clean up the data at target directory + */ + private static void cleanupTargetDir(BackupInfo backupContext, Configuration conf) { + try { + // clean up the data at target directory + LOG.debug("Trying to cleanup up target dir : " + backupContext.getBackupId()); + String targetDir = backupContext.getTargetRootDir(); + if (targetDir == null) { + LOG.warn("No target directory specified for " + backupContext.getBackupId()); + return; + } + + FileSystem outputFs = + FileSystem.get(new Path(backupContext.getTargetRootDir()).toUri(), conf); + + for (TableName table : backupContext.getTables()) { + Path targetDirPath = + new Path(getTableBackupDir(backupContext.getTargetRootDir(), + backupContext.getBackupId(), table)); + if (outputFs.delete(targetDirPath, true)) { + LOG.info("Cleaning up backup data at " + targetDirPath.toString() + " done."); + } else { + LOG.info("No data has been found in " + targetDirPath.toString() + "."); + } + + Path tableDir = targetDirPath.getParent(); + FileStatus[] backups = listStatus(outputFs, tableDir, null); + if (backups == null || backups.length == 0) { + outputFs.delete(tableDir, true); + LOG.debug(tableDir.toString() + " is empty, remove it."); + } + } + outputFs.delete(new Path(targetDir, backupContext.getBackupId()), true); + } catch (IOException e1) { + LOG.error("Cleaning up backup data of " + backupContext.getBackupId() + " at " + + backupContext.getTargetRootDir() + " failed due to " + e1.getMessage() + "."); + } + } + + /** + * Given the backup root dir, backup id and the table name, return the backup image location, + * which is also where the backup manifest file is. 
return value look like: + * "hdfs://backup.hbase.org:9000/user/biadmin/backup1/backup_1396650096738/default/t1_dn/" + * @param backupRootDir backup root directory + * @param backupId backup id + * @param tableName table name + * @return backupPath String for the particular table + */ + public static String getTableBackupDir(String backupRootDir, String backupId, + TableName tableName) { + return backupRootDir + Path.SEPARATOR+ backupId + Path.SEPARATOR + + tableName.getNamespaceAsString() + Path.SEPARATOR + + tableName.getQualifierAsString() + Path.SEPARATOR ; + } + + public static TableName[] parseTableNames(String tables) { + if (tables == null) { + return null; + } + String[] tableArray = tables.split(BackupRestoreConstants.TABLENAME_DELIMITER_IN_COMMAND); + + TableName[] ret = new TableName[tableArray.length]; + for (int i = 0; i < tableArray.length; i++) { + ret[i] = TableName.valueOf(tableArray[i]); + } + return ret; + } + + /** + * Sort history list by start time in descending order. + * @param historyList history list + * @return sorted list of BackupCompleteData + */ + public static ArrayList sortHistoryListDesc( + ArrayList historyList) { + ArrayList list = new ArrayList(); + TreeMap map = new TreeMap(); + for (BackupInfo h : historyList) { + map.put(Long.toString(h.getStartTs()), h); + } + Iterator i = map.descendingKeySet().iterator(); + while (i.hasNext()) { + list.add(map.get(i.next())); + } + return list; + } + + /** + * Returns WAL file name + * @param walFileName WAL file name + * @return WAL file name + * @throws IOException exception + * @throws IllegalArgumentException exception + */ + public static String getUniqueWALFileNamePart(String walFileName) throws IOException { + return getUniqueWALFileNamePart(new Path(walFileName)); + } + + /** + * Returns WAL file name + * @param p - WAL file path + * @return WAL file name + * @throws IOException exception + */ + public static String getUniqueWALFileNamePart(Path p) throws IOException { + return p.getName(); + } + + /** + * Calls fs.listStatus() and treats FileNotFoundException as non-fatal + * This accommodates differences between hadoop versions, where hadoop 1 + * does not throw a FileNotFoundException, and return an empty FileStatus[] + * while Hadoop 2 will throw FileNotFoundException. + * + * @param fs file system + * @param dir directory + * @param filter path filter + * @return null if dir is empty or doesn't exist, otherwise FileStatus array + */ + public static FileStatus [] listStatus(final FileSystem fs, + final Path dir, final PathFilter filter) throws IOException { + FileStatus [] status = null; + try { + status = filter == null ? fs.listStatus(dir) : fs.listStatus(dir, filter); + } catch (FileNotFoundException fnfe) { + // if directory doesn't exist, return null + if (LOG.isTraceEnabled()) { + LOG.trace(dir + " doesn't exist"); + } + } + if (status == null || status.length < 1) return null; + return status; + } + + /** + * Return the 'path' component of a Path. In Hadoop, Path is an URI. This + * method returns the 'path' component of a Path's URI: e.g. If a Path is + * hdfs://example.org:9000/hbase_trunk/TestTable/compaction.dir, + * this method returns /hbase_trunk/TestTable/compaction.dir. + * This method is useful if you want to print out a Path without qualifying + * Filesystem instance. + * @param p Filesystem Path whose 'path' component we are to return. 
+ * @return Path portion of the Filesystem + */ + public static String getPath(Path p) { + return p.toUri().getPath(); + } + + /** + * Given the backup root dir and the backup id, return the log file location for an incremental + * backup. + * @param backupRootDir backup root directory + * @param backupId backup id + * @return logBackupDir: ".../user/biadmin/backup1/WALs/backup_1396650096738" + */ + public static String getLogBackupDir(String backupRootDir, String backupId) { + return backupRootDir + Path.SEPARATOR + backupId+ Path.SEPARATOR + + HConstants.HREGION_LOGDIR_NAME; + } + +} diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/backup/util/BackupSet.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/backup/util/BackupSet.java new file mode 100644 index 0000000..76402c7 --- /dev/null +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/backup/util/BackupSet.java @@ -0,0 +1,62 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.backup.util; +import java.util.List; + +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; +/** + * Backup set is a named group of HBase tables, + * which are managed together by Backup/Restore + * framework. Instead of using list of tables in backup or restore + * operation, one can use set's name instead. + */ +@InterfaceAudience.Public +@InterfaceStability.Evolving +public class BackupSet { + private final String name; + private final List tables; + + public BackupSet(String name, List tables) { + this.name = name; + this.tables = tables; + } + + public String getName() { + return name; + } + + public List getTables() { + return tables; + } + + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append(name).append("={"); + for (int i = 0; i < tables.size(); i++) { + sb.append(tables.get(i)); + if (i < tables.size() - 1) { + sb.append(","); + } + } + sb.append("}"); + return sb.toString(); + } + +} diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BackupAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BackupAdmin.java new file mode 100644 index 0000000..2d382ca --- /dev/null +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BackupAdmin.java @@ -0,0 +1,155 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
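For orientation, the BackupClientUtil path helpers above lay a backup image out as backupRootDir/backupId/namespace/table/ for table data and the manifest, with incremental-backup WALs kept under the same backup id directory. A small sketch of the resulting locations, using made-up root dir and backup id values (plain string concatenation stands in for Path.SEPARATOR; only the layout itself comes from the patch):

// Illustrative only: mirrors BackupClientUtil.getTableBackupDir()/getLogBackupDir()
// with hypothetical values.
public class BackupLayoutSketch {
  public static void main(String[] args) {
    String backupRootDir = "hdfs://backup.hbase.org:9000/user/biadmin/backup1"; // assumed value
    String backupId = "backup_1396650096738";                                   // assumed value

    // Table data and manifest: <root>/<backupId>/<namespace>/<table>/
    String tableBackupDir = backupRootDir + "/" + backupId + "/default/t1_dn/";

    // Incremental-backup WALs: <root>/<backupId>/WALs (HConstants.HREGION_LOGDIR_NAME)
    String logBackupDir = backupRootDir + "/" + backupId + "/WALs";

    System.out.println(tableBackupDir);
    System.out.println(logBackupDir);
  }
}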
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.client; + +import java.io.Closeable; +import java.io.IOException; +import java.util.List; +import java.util.concurrent.Future; + +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.backup.BackupInfo; +import org.apache.hadoop.hbase.backup.BackupRequest; +import org.apache.hadoop.hbase.backup.RestoreRequest; +import org.apache.hadoop.hbase.backup.util.BackupSet; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; +/** + * The administrative API for HBase Backup. Obtain an instance from + * an {@link Admin#getBackupAdmin()} and call {@link #close()} afterwards. + *

BackupAdmin can be used to create backups, restore data from backups and for + * other backup-related operations. + * + * @see Admin + * @since 2.0 + */ +@InterfaceAudience.Public +@InterfaceStability.Evolving + +public interface BackupAdmin extends Closeable{ + + /** + * Backs up given list of tables fully. Synchronous operation. + * + * @param userRequest BackupRequest instance which contains the following members: + * type whether the backup is full or incremental + * tableList list of tables to backup + * targetRootDir root directory for saving the backup + * workers number of parallel workers. -1 - system defined + * bandwidth bandwidth per worker in MB per second. -1 - unlimited + * @return the backup Id + */ + + public String backupTables(final BackupRequest userRequest) throws IOException; + + /** + * Backs up given list of tables fully. Asynchronous operation. + * + * @param userRequest BackupRequest instance which contains the following members: + * type whether the backup is full or incremental + * tableList list of tables to backup + * targetRootDir root dir for saving the backup + * workers number of paralle workers. -1 - system defined + * bandwidth bandwidth per worker in MB per sec. -1 - unlimited + * @return the backup Id future + */ + public Future backupTablesAsync(final BackupRequest userRequest) throws IOException; + + /** + * Describe backup image command + * @param backupId - backup id + * @return backup info + * @throws IOException exception + */ + public BackupInfo getBackupInfo(String backupId) throws IOException; + + /** + * Show backup progress command + * @param backupId - backup id (may be null) + * @return backup progress (0-100%), -1 if no active sessions + * or session not found + * @throws IOException exception + */ + public int getProgress(String backupId) throws IOException; + + /** + * Delete backup image command + * @param backupIds - backup id + * @return total number of deleted sessions + * @throws IOException exception + */ + public int deleteBackups(String[] backupIds) throws IOException; + + /** + * Show backup history command + * @param n - last n backup sessions + * @return list of backup infos + * @throws IOException exception + */ + public List getHistory(int n) throws IOException; + + /** + * Backup sets list command - list all backup sets. Backup set is + * a named group of tables. + * @return all registered backup sets + * @throws IOException exception + */ + public List listBackupSets() throws IOException; + + /** + * Backup set describe command. Shows list of tables in + * this particular backup set. + * @param name set name + * @return backup set description or null + * @throws IOException exception + */ + public BackupSet getBackupSet(String name) throws IOException; + + /** + * Delete backup set command + * @param name - backup set name + * @return true, if success, false - otherwise + * @throws IOException exception + */ + public boolean deleteBackupSet(String name) throws IOException; + + /** + * Add tables to backup set command + * @param name - name of backup set. + * @param tables - list of tables to be added to this set. + * @throws IOException exception + */ + public void addToBackupSet(String name, TableName[] tables) throws IOException; + + /** + * Remove tables from backup set + * @param name - name of backup set. + * @param tables - list of tables to be removed from this set. 
+ * @throws IOException exception + */ + public void removeFromBackupSet(String name, String[] tables) throws IOException; + + /** + * Restore backup + * @param request - restore request + * @throws IOException exception + */ + public void restore(RestoreRequest request) throws IOException; + +} diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseBackupAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseBackupAdmin.java new file mode 100644 index 0000000..81413c6 --- /dev/null +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseBackupAdmin.java @@ -0,0 +1,226 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.client; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.Future; + +import org.apache.commons.lang.StringUtils; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.backup.BackupInfo; +import org.apache.hadoop.hbase.backup.BackupInfo.BackupState; +import org.apache.hadoop.hbase.backup.BackupRequest; +import org.apache.hadoop.hbase.backup.BackupRestoreClientFactory; +import org.apache.hadoop.hbase.backup.RestoreClient; +import org.apache.hadoop.hbase.backup.RestoreRequest; +import org.apache.hadoop.hbase.backup.impl.BackupSystemTable; +import org.apache.hadoop.hbase.backup.util.BackupClientUtil; +import org.apache.hadoop.hbase.backup.util.BackupSet; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; + +/** + * The administrative API implementation for HBase Backup . Obtain an instance from + * an {@link Admin#getBackupAdmin()} and call {@link #close()} afterwards. + *
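A hedged sketch of how a caller might drive the BackupAdmin interface defined above. Only Admin#getBackupAdmin(), backupTables(), getProgress() and close() are taken from the patch; the way the BackupRequest is populated below (a no-arg constructor plus setters) is an assumption made for illustration, since the patch only documents the request's members:

import java.util.Arrays;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.backup.BackupRequest;
import org.apache.hadoop.hbase.backup.BackupType;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.BackupAdmin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

// Usage sketch only; not part of the patch.
public class BackupAdminUsageSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin();
         BackupAdmin backupAdmin = admin.getBackupAdmin()) {

      // Hypothetical request wiring: the member names follow the BackupAdmin javadoc,
      // but the constructor/setter shapes are assumptions.
      BackupRequest request = new BackupRequest();
      request.setBackupType(BackupType.FULL);
      request.setTableList(Arrays.asList(TableName.valueOf("t1_dn")));
      request.setTargetRootDir("hdfs://backup.hbase.org:9000/user/biadmin/backup1");

      String backupId = backupAdmin.backupTables(request);  // synchronous full backup
      System.out.println("backup " + backupId + " progress: "
          + backupAdmin.getProgress(backupId) + "%");
    }
  }
}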

BackupAdmin can be used to create backups, restore data from backups and for + * other backup-related operations. + * + * @see Admin + * @since 2.0 + */ +@InterfaceAudience.Private +@InterfaceStability.Evolving + +public class HBaseBackupAdmin implements BackupAdmin { + private static final Log LOG = LogFactory.getLog(HBaseBackupAdmin.class); + + private final HBaseAdmin admin; + private final Connection conn; + + HBaseBackupAdmin(HBaseAdmin admin) { + this.admin = admin; + this.conn = admin.getConnection(); + } + + + @Override + public void close() throws IOException { + } + + @Override + public BackupInfo getBackupInfo(String backupId) throws IOException { + BackupInfo backupInfo = null; + try (final BackupSystemTable table = new BackupSystemTable(conn)) { + backupInfo = table.readBackupInfo(backupId); + return backupInfo; + } + } + + @Override + public int getProgress(String backupId) throws IOException { + BackupInfo backupInfo = null; + try (final BackupSystemTable table = new BackupSystemTable(conn)) { + if (backupId == null) { + ArrayList recentSessions = + table.getBackupContexts(BackupState.RUNNING); + if (recentSessions.isEmpty()) { + LOG.warn("No ongoing sessions found."); + return -1; + } + // else show status for ongoing session + // must be one maximum + return recentSessions.get(0).getProgress(); + } else { + + backupInfo = table.readBackupInfo(backupId); + if (backupInfo != null) { + return backupInfo.getProgress(); + } else { + LOG.warn("No information found for backupID=" + backupId); + return -1; + } + } + } + } + + @Override + public int deleteBackups(String[] backupIds) throws IOException { + BackupInfo backupInfo = null; + String backupId = null; + int totalDeleted = 0; + try (final BackupSystemTable table = new BackupSystemTable(conn)) { + for (int i = 0; i < backupIds.length; i++) { + backupId = backupIds[i]; + LOG.info("Deleting backup for backupID=" + backupId + " ..."); + backupInfo = table.readBackupInfo(backupId); + if (backupInfo != null) { + BackupClientUtil.cleanupBackupData(backupInfo, admin.getConfiguration()); + table.deleteBackupInfo(backupInfo.getBackupId()); + LOG.info("Delete backup for backupID=" + backupId + " completed."); + totalDeleted++; + } else { + LOG.warn("Delete backup failed: no information found for backupID=" + backupId); + } + } + } + return totalDeleted; + } + + @Override + public List getHistory(int n) throws IOException { + try (final BackupSystemTable table = new BackupSystemTable(conn)) { + List history = table.getBackupHistory(); + if( history.size() <= n) return history; + List list = new ArrayList(); + for(int i=0; i < n; i++){ + list.add(history.get(i)); + } + return list; + } + } + + @Override + public List listBackupSets() throws IOException { + try (final BackupSystemTable table = new BackupSystemTable(conn)) { + List list = table.listBackupSets(); + List bslist = new ArrayList(); + for (String s : list) { + List tables = table.describeBackupSet(s); + if(tables != null){ + bslist.add( new BackupSet(s, tables)); + } + } + return bslist; + } + } + + @Override + public BackupSet getBackupSet(String name) throws IOException { + try (final BackupSystemTable table = new BackupSystemTable(conn)) { + List list = table.describeBackupSet(name); + if(list == null) return null; + return new BackupSet(name, list); + } + } + + @Override + public boolean deleteBackupSet(String name) throws IOException { + try (final BackupSystemTable table = new BackupSystemTable(conn)) { + if(table.describeBackupSet(name) == null) { + return false; + } + 
table.deleteBackupSet(name); + return true; + } + } + + @Override + public void addToBackupSet(String name, TableName[] tables) throws IOException { + String[] tableNames = new String[tables.length]; + for(int i = 0; i < tables.length; i++){ + tableNames[i] = tables[i].getNameAsString(); + if (!admin.tableExists(TableName.valueOf(tableNames[i]))) { + throw new IOException("Cannot add " + tableNames[i] + " because it doesn't exist"); + } + } + try (final BackupSystemTable table = new BackupSystemTable(conn)) { + table.addToBackupSet(name, tableNames); + LOG.info("Added tables ["+StringUtils.join(tableNames, " ")+"] to '" + name + "' backup set"); + } + } + + @Override + public void removeFromBackupSet(String name, String[] tables) throws IOException { + LOG.info("Removing tables ["+ StringUtils.join(tables, " ")+"] from '" + name + "'"); + try (final BackupSystemTable table = new BackupSystemTable(conn)) { + table.removeFromBackupSet(name, tables); + LOG.info("Removing tables ["+ StringUtils.join(tables, " ")+"] from '" + name + "' completed."); + } + } + + @Override + public void restore(RestoreRequest request) throws IOException { + RestoreClient client = BackupRestoreClientFactory.getRestoreClient(admin.getConfiguration()); + client.restore(request.getBackupRootDir(), + request.getBackupId(), + request.isCheck(), + request.getFromTables(), + request.getToTables(), + request.isOverwrite()); + + } + + @Override + public String backupTables(final BackupRequest userRequest) + throws IOException { + return admin.backupTables(userRequest); + } + + + @Override + public Future backupTablesAsync(final BackupRequest userRequest) + throws IOException { + return admin.backupTablesAsync(userRequest); + } + + +} diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/backup/BackupType.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/backup/BackupType.java new file mode 100644 index 0000000..79f4636 --- /dev/null +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/backup/BackupType.java @@ -0,0 +1,25 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.backup; +import org.apache.hadoop.hbase.classification.InterfaceAudience; + +@InterfaceAudience.Private +public enum BackupType { + FULL, INCREMENTAL +} diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/BackupProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/BackupProtos.java new file mode 100644 index 0000000..4699c81 --- /dev/null +++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/BackupProtos.java @@ -0,0 +1,10860 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+// source: Backup.proto + +package org.apache.hadoop.hbase.protobuf.generated; + +public final class BackupProtos { + private BackupProtos() {} + public static void registerAllExtensions( + com.google.protobuf.ExtensionRegistry registry) { + } + /** + * Protobuf enum {@code hbase.pb.FullTableBackupState} + */ + public enum FullTableBackupState + implements com.google.protobuf.ProtocolMessageEnum { + /** + * PRE_SNAPSHOT_TABLE = 1; + */ + PRE_SNAPSHOT_TABLE(0, 1), + /** + * SNAPSHOT_TABLES = 2; + */ + SNAPSHOT_TABLES(1, 2), + /** + * SNAPSHOT_COPY = 3; + */ + SNAPSHOT_COPY(2, 3), + /** + * BACKUP_COMPLETE = 4; + */ + BACKUP_COMPLETE(3, 4), + ; + + /** + * PRE_SNAPSHOT_TABLE = 1; + */ + public static final int PRE_SNAPSHOT_TABLE_VALUE = 1; + /** + * SNAPSHOT_TABLES = 2; + */ + public static final int SNAPSHOT_TABLES_VALUE = 2; + /** + * SNAPSHOT_COPY = 3; + */ + public static final int SNAPSHOT_COPY_VALUE = 3; + /** + * BACKUP_COMPLETE = 4; + */ + public static final int BACKUP_COMPLETE_VALUE = 4; + + + public final int getNumber() { return value; } + + public static FullTableBackupState valueOf(int value) { + switch (value) { + case 1: return PRE_SNAPSHOT_TABLE; + case 2: return SNAPSHOT_TABLES; + case 3: return SNAPSHOT_COPY; + case 4: return BACKUP_COMPLETE; + default: return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap + internalGetValueMap() { + return internalValueMap; + } + private static com.google.protobuf.Internal.EnumLiteMap + internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public FullTableBackupState findValueByNumber(int number) { + return FullTableBackupState.valueOf(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor + getValueDescriptor() { + return getDescriptor().getValues().get(index); + } + public final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptorForType() { + return getDescriptor(); + } + public static final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.getDescriptor().getEnumTypes().get(0); + } + + private static final FullTableBackupState[] VALUES = values(); + + public static FullTableBackupState valueOf( + com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "EnumValueDescriptor is not for this type."); + } + return VALUES[desc.getIndex()]; + } + + private final int index; + private final int value; + + private FullTableBackupState(int index, int value) { + this.index = index; + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:hbase.pb.FullTableBackupState) + } + + /** + * Protobuf enum {@code hbase.pb.IncrementalTableBackupState} + */ + public enum IncrementalTableBackupState + implements com.google.protobuf.ProtocolMessageEnum { + /** + * PREPARE_INCREMENTAL = 1; + */ + PREPARE_INCREMENTAL(0, 1), + /** + * INCREMENTAL_COPY = 2; + */ + INCREMENTAL_COPY(1, 2), + /** + * INCR_BACKUP_COMPLETE = 3; + */ + INCR_BACKUP_COMPLETE(2, 3), + ; + + /** + * PREPARE_INCREMENTAL = 1; + */ + public static final int PREPARE_INCREMENTAL_VALUE = 1; + /** + * INCREMENTAL_COPY = 2; + */ + public static final int INCREMENTAL_COPY_VALUE = 2; + /** + * INCR_BACKUP_COMPLETE = 3; + */ + public static final int INCR_BACKUP_COMPLETE_VALUE = 3; + + + public final int getNumber() { return value; } + + public static IncrementalTableBackupState valueOf(int value) { + switch (value) { + case 1: 
return PREPARE_INCREMENTAL; + case 2: return INCREMENTAL_COPY; + case 3: return INCR_BACKUP_COMPLETE; + default: return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap + internalGetValueMap() { + return internalValueMap; + } + private static com.google.protobuf.Internal.EnumLiteMap + internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public IncrementalTableBackupState findValueByNumber(int number) { + return IncrementalTableBackupState.valueOf(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor + getValueDescriptor() { + return getDescriptor().getValues().get(index); + } + public final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptorForType() { + return getDescriptor(); + } + public static final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.getDescriptor().getEnumTypes().get(1); + } + + private static final IncrementalTableBackupState[] VALUES = values(); + + public static IncrementalTableBackupState valueOf( + com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "EnumValueDescriptor is not for this type."); + } + return VALUES[desc.getIndex()]; + } + + private final int index; + private final int value; + + private IncrementalTableBackupState(int index, int value) { + this.index = index; + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:hbase.pb.IncrementalTableBackupState) + } + + /** + * Protobuf enum {@code hbase.pb.BackupType} + */ + public enum BackupType + implements com.google.protobuf.ProtocolMessageEnum { + /** + * FULL = 0; + */ + FULL(0, 0), + /** + * INCREMENTAL = 1; + */ + INCREMENTAL(1, 1), + ; + + /** + * FULL = 0; + */ + public static final int FULL_VALUE = 0; + /** + * INCREMENTAL = 1; + */ + public static final int INCREMENTAL_VALUE = 1; + + + public final int getNumber() { return value; } + + public static BackupType valueOf(int value) { + switch (value) { + case 0: return FULL; + case 1: return INCREMENTAL; + default: return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap + internalGetValueMap() { + return internalValueMap; + } + private static com.google.protobuf.Internal.EnumLiteMap + internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public BackupType findValueByNumber(int number) { + return BackupType.valueOf(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor + getValueDescriptor() { + return getDescriptor().getValues().get(index); + } + public final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptorForType() { + return getDescriptor(); + } + public static final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.getDescriptor().getEnumTypes().get(2); + } + + private static final BackupType[] VALUES = values(); + + public static BackupType valueOf( + com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "EnumValueDescriptor is not for this type."); + } + return VALUES[desc.getIndex()]; + } + + private final int index; + private final int value; + + private BackupType(int index, int value) { + this.index = index; + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:hbase.pb.BackupType) + 
} + + public interface SnapshotTableStateDataOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required .hbase.pb.TableName table = 1; + /** + * required .hbase.pb.TableName table = 1; + */ + boolean hasTable(); + /** + * required .hbase.pb.TableName table = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTable(); + /** + * required .hbase.pb.TableName table = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableOrBuilder(); + + // required string snapshotName = 2; + /** + * required string snapshotName = 2; + */ + boolean hasSnapshotName(); + /** + * required string snapshotName = 2; + */ + java.lang.String getSnapshotName(); + /** + * required string snapshotName = 2; + */ + com.google.protobuf.ByteString + getSnapshotNameBytes(); + } + /** + * Protobuf type {@code hbase.pb.SnapshotTableStateData} + */ + public static final class SnapshotTableStateData extends + com.google.protobuf.GeneratedMessage + implements SnapshotTableStateDataOrBuilder { + // Use SnapshotTableStateData.newBuilder() to construct. + private SnapshotTableStateData(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private SnapshotTableStateData(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final SnapshotTableStateData defaultInstance; + public static SnapshotTableStateData getDefaultInstance() { + return defaultInstance; + } + + public SnapshotTableStateData getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private SnapshotTableStateData( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder subBuilder = null; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + subBuilder = table_.toBuilder(); + } + table_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(table_); + table_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000001; + break; + } + case 18: { + bitField0_ |= 0x00000002; + snapshotName_ = input.readBytes(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return 
org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_SnapshotTableStateData_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_SnapshotTableStateData_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.SnapshotTableStateData.class, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.SnapshotTableStateData.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public SnapshotTableStateData parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new SnapshotTableStateData(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required .hbase.pb.TableName table = 1; + public static final int TABLE_FIELD_NUMBER = 1; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName table_; + /** + * required .hbase.pb.TableName table = 1; + */ + public boolean hasTable() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .hbase.pb.TableName table = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTable() { + return table_; + } + /** + * required .hbase.pb.TableName table = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableOrBuilder() { + return table_; + } + + // required string snapshotName = 2; + public static final int SNAPSHOTNAME_FIELD_NUMBER = 2; + private java.lang.Object snapshotName_; + /** + * required string snapshotName = 2; + */ + public boolean hasSnapshotName() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required string snapshotName = 2; + */ + public java.lang.String getSnapshotName() { + java.lang.Object ref = snapshotName_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + snapshotName_ = s; + } + return s; + } + } + /** + * required string snapshotName = 2; + */ + public com.google.protobuf.ByteString + getSnapshotNameBytes() { + java.lang.Object ref = snapshotName_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + snapshotName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private void initFields() { + table_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); + snapshotName_ = ""; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasTable()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasSnapshotName()) { + memoizedIsInitialized = 0; + return false; + } + if (!getTable().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream 
output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeMessage(1, table_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeBytes(2, getSnapshotNameBytes()); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(1, table_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(2, getSnapshotNameBytes()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.BackupProtos.SnapshotTableStateData)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.SnapshotTableStateData other = (org.apache.hadoop.hbase.protobuf.generated.BackupProtos.SnapshotTableStateData) obj; + + boolean result = true; + result = result && (hasTable() == other.hasTable()); + if (hasTable()) { + result = result && getTable() + .equals(other.getTable()); + } + result = result && (hasSnapshotName() == other.hasSnapshotName()); + if (hasSnapshotName()) { + result = result && getSnapshotName() + .equals(other.getSnapshotName()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasTable()) { + hash = (37 * hash) + TABLE_FIELD_NUMBER; + hash = (53 * hash) + getTable().hashCode(); + } + if (hasSnapshotName()) { + hash = (37 * hash) + SNAPSHOTNAME_FIELD_NUMBER; + hash = (53 * hash) + getSnapshotName().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.SnapshotTableStateData parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.SnapshotTableStateData parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.SnapshotTableStateData parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.SnapshotTableStateData parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, 
extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.SnapshotTableStateData parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.SnapshotTableStateData parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.SnapshotTableStateData parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.SnapshotTableStateData parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.SnapshotTableStateData parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.SnapshotTableStateData parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.SnapshotTableStateData prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code hbase.pb.SnapshotTableStateData} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.BackupProtos.SnapshotTableStateDataOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_SnapshotTableStateData_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_SnapshotTableStateData_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.SnapshotTableStateData.class, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.SnapshotTableStateData.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.BackupProtos.SnapshotTableStateData.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getTableFieldBuilder(); + } + } + private static Builder 
create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + if (tableBuilder_ == null) { + table_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); + } else { + tableBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + snapshotName_ = ""; + bitField0_ = (bitField0_ & ~0x00000002); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_SnapshotTableStateData_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.SnapshotTableStateData getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.SnapshotTableStateData.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.SnapshotTableStateData build() { + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.SnapshotTableStateData result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.SnapshotTableStateData buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.SnapshotTableStateData result = new org.apache.hadoop.hbase.protobuf.generated.BackupProtos.SnapshotTableStateData(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + if (tableBuilder_ == null) { + result.table_ = table_; + } else { + result.table_ = tableBuilder_.build(); + } + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.snapshotName_ = snapshotName_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.BackupProtos.SnapshotTableStateData) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.BackupProtos.SnapshotTableStateData)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.SnapshotTableStateData other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.BackupProtos.SnapshotTableStateData.getDefaultInstance()) return this; + if (other.hasTable()) { + mergeTable(other.getTable()); + } + if (other.hasSnapshotName()) { + bitField0_ |= 0x00000002; + snapshotName_ = other.snapshotName_; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasTable()) { + + return false; + } + if (!hasSnapshotName()) { + + return false; + } + if (!getTable().isInitialized()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.SnapshotTableStateData parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.BackupProtos.SnapshotTableStateData) 
e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required .hbase.pb.TableName table = 1; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName table_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> tableBuilder_; + /** + * required .hbase.pb.TableName table = 1; + */ + public boolean hasTable() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .hbase.pb.TableName table = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTable() { + if (tableBuilder_ == null) { + return table_; + } else { + return tableBuilder_.getMessage(); + } + } + /** + * required .hbase.pb.TableName table = 1; + */ + public Builder setTable(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { + if (tableBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + table_ = value; + onChanged(); + } else { + tableBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .hbase.pb.TableName table = 1; + */ + public Builder setTable( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) { + if (tableBuilder_ == null) { + table_ = builderForValue.build(); + onChanged(); + } else { + tableBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .hbase.pb.TableName table = 1; + */ + public Builder mergeTable(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { + if (tableBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001) && + table_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance()) { + table_ = + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.newBuilder(table_).mergeFrom(value).buildPartial(); + } else { + table_ = value; + } + onChanged(); + } else { + tableBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .hbase.pb.TableName table = 1; + */ + public Builder clearTable() { + if (tableBuilder_ == null) { + table_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); + onChanged(); + } else { + tableBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + /** + * required .hbase.pb.TableName table = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder getTableBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return getTableFieldBuilder().getBuilder(); + } + /** + * required .hbase.pb.TableName table = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableOrBuilder() { + if (tableBuilder_ != null) { + return tableBuilder_.getMessageOrBuilder(); + } else { + return table_; + } + } + /** + * required .hbase.pb.TableName table = 1; + */ + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> + getTableFieldBuilder() { + if (tableBuilder_ == null) { + tableBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder>( + table_, + getParentForChildren(), + isClean()); + table_ = null; + } + return tableBuilder_; + } + + // required string snapshotName = 2; + private java.lang.Object snapshotName_ = ""; + /** + * required string snapshotName = 2; + */ + public boolean hasSnapshotName() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required string snapshotName = 2; + */ + public java.lang.String getSnapshotName() { + java.lang.Object ref = snapshotName_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + snapshotName_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * required string snapshotName = 2; + */ + public com.google.protobuf.ByteString + getSnapshotNameBytes() { + java.lang.Object ref = snapshotName_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + snapshotName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * required string snapshotName = 2; + */ + public Builder setSnapshotName( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000002; + snapshotName_ = value; + onChanged(); + return this; + } + /** + * required string snapshotName = 2; + */ + public Builder clearSnapshotName() { + bitField0_ = (bitField0_ & ~0x00000002); + snapshotName_ = getDefaultInstance().getSnapshotName(); + onChanged(); + return this; + } + /** + * required string snapshotName = 2; + */ + public Builder setSnapshotNameBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000002; + snapshotName_ = value; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:hbase.pb.SnapshotTableStateData) + } + + static { + defaultInstance = new SnapshotTableStateData(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:hbase.pb.SnapshotTableStateData) + } + + public interface BackupImageOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required string backup_id = 1; + /** + * required string backup_id = 1; + */ + boolean hasBackupId(); + /** + * required string backup_id = 1; + */ + java.lang.String getBackupId(); + /** + * required string backup_id = 1; + */ + com.google.protobuf.ByteString + getBackupIdBytes(); + + // required .hbase.pb.BackupType backup_type = 2; + /** + * required .hbase.pb.BackupType backup_type = 2; + */ + boolean hasBackupType(); + /** + * required .hbase.pb.BackupType backup_type = 2; + */ + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType getBackupType(); + + // required string root_dir = 3; + /** + * required string root_dir = 3; + */ + boolean hasRootDir(); + /** + * required string root_dir = 3; + */ + java.lang.String getRootDir(); + /** + * required string root_dir = 3; + */ + com.google.protobuf.ByteString + getRootDirBytes(); + + // repeated .hbase.pb.TableName table_list = 4; + /** + * repeated 
.hbase.pb.TableName table_list = 4; + */ + java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName> + getTableListList(); + /** + * repeated .hbase.pb.TableName table_list = 4; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableList(int index); + /** + * repeated .hbase.pb.TableName table_list = 4; + */ + int getTableListCount(); + /** + * repeated .hbase.pb.TableName table_list = 4; + */ + java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> + getTableListOrBuilderList(); + /** + * repeated .hbase.pb.TableName table_list = 4; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableListOrBuilder( + int index); + + // required uint64 start_ts = 5; + /** + * required uint64 start_ts = 5; + */ + boolean hasStartTs(); + /** + * required uint64 start_ts = 5; + */ + long getStartTs(); + + // required uint64 complete_ts = 6; + /** + * required uint64 complete_ts = 6; + */ + boolean hasCompleteTs(); + /** + * required uint64 complete_ts = 6; + */ + long getCompleteTs(); + + // repeated .hbase.pb.BackupImage ancestors = 7; + /** + * repeated .hbase.pb.BackupImage ancestors = 7; + */ + java.util.List<org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage> + getAncestorsList(); + /** + * repeated .hbase.pb.BackupImage ancestors = 7; + */ + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage getAncestors(int index); + /** + * repeated .hbase.pb.BackupImage ancestors = 7; + */ + int getAncestorsCount(); + /** + * repeated .hbase.pb.BackupImage ancestors = 7; + */ + java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImageOrBuilder> + getAncestorsOrBuilderList(); + /** + * repeated .hbase.pb.BackupImage ancestors = 7; + */ + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImageOrBuilder getAncestorsOrBuilder( + int index); + } + /** + * Protobuf type {@code hbase.pb.BackupImage} + */ + public static final class BackupImage extends + com.google.protobuf.GeneratedMessage + implements BackupImageOrBuilder { + // Use BackupImage.newBuilder() to construct. 
+ private BackupImage(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private BackupImage(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final BackupImage defaultInstance; + public static BackupImage getDefaultInstance() { + return defaultInstance; + } + + public BackupImage getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private BackupImage( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + bitField0_ |= 0x00000001; + backupId_ = input.readBytes(); + break; + } + case 16: { + int rawValue = input.readEnum(); + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType value = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType.valueOf(rawValue); + if (value == null) { + unknownFields.mergeVarintField(2, rawValue); + } else { + bitField0_ |= 0x00000002; + backupType_ = value; + } + break; + } + case 26: { + bitField0_ |= 0x00000004; + rootDir_ = input.readBytes(); + break; + } + case 34: { + if (!((mutable_bitField0_ & 0x00000008) == 0x00000008)) { + tableList_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000008; + } + tableList_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.PARSER, extensionRegistry)); + break; + } + case 40: { + bitField0_ |= 0x00000008; + startTs_ = input.readUInt64(); + break; + } + case 48: { + bitField0_ |= 0x00000010; + completeTs_ = input.readUInt64(); + break; + } + case 58: { + if (!((mutable_bitField0_ & 0x00000040) == 0x00000040)) { + ancestors_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000040; + } + ancestors_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage.PARSER, extensionRegistry)); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000008) == 0x00000008)) { + tableList_ = java.util.Collections.unmodifiableList(tableList_); + } + if (((mutable_bitField0_ & 0x00000040) == 0x00000040)) { + ancestors_ = java.util.Collections.unmodifiableList(ancestors_); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_BackupImage_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return 
org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_BackupImage_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage.class, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public BackupImage parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new BackupImage(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required string backup_id = 1; + public static final int BACKUP_ID_FIELD_NUMBER = 1; + private java.lang.Object backupId_; + /** + * required string backup_id = 1; + */ + public boolean hasBackupId() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string backup_id = 1; + */ + public java.lang.String getBackupId() { + java.lang.Object ref = backupId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + backupId_ = s; + } + return s; + } + } + /** + * required string backup_id = 1; + */ + public com.google.protobuf.ByteString + getBackupIdBytes() { + java.lang.Object ref = backupId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + backupId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + // required .hbase.pb.BackupType backup_type = 2; + public static final int BACKUP_TYPE_FIELD_NUMBER = 2; + private org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType backupType_; + /** + * required .hbase.pb.BackupType backup_type = 2; + */ + public boolean hasBackupType() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required .hbase.pb.BackupType backup_type = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType getBackupType() { + return backupType_; + } + + // required string root_dir = 3; + public static final int ROOT_DIR_FIELD_NUMBER = 3; + private java.lang.Object rootDir_; + /** + * required string root_dir = 3; + */ + public boolean hasRootDir() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * required string root_dir = 3; + */ + public java.lang.String getRootDir() { + java.lang.Object ref = rootDir_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + rootDir_ = s; + } + return s; + } + } + /** + * required string root_dir = 3; + */ + public com.google.protobuf.ByteString + getRootDirBytes() { + java.lang.Object ref = rootDir_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + rootDir_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + // repeated .hbase.pb.TableName table_list = 4; + public static final int TABLE_LIST_FIELD_NUMBER = 4; + 
private java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName> tableList_; + /** + * repeated .hbase.pb.TableName table_list = 4; + */ + public java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName> getTableListList() { + return tableList_; + } + /** + * repeated .hbase.pb.TableName table_list = 4; + */ + public java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> + getTableListOrBuilderList() { + return tableList_; + } + /** + * repeated .hbase.pb.TableName table_list = 4; + */ + public int getTableListCount() { + return tableList_.size(); + } + /** + * repeated .hbase.pb.TableName table_list = 4; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableList(int index) { + return tableList_.get(index); + } + /** + * repeated .hbase.pb.TableName table_list = 4; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableListOrBuilder( + int index) { + return tableList_.get(index); + } + + // required uint64 start_ts = 5; + public static final int START_TS_FIELD_NUMBER = 5; + private long startTs_; + /** + * required uint64 start_ts = 5; + */ + public boolean hasStartTs() { + return ((bitField0_ & 0x00000008) == 0x00000008); + } + /** + * required uint64 start_ts = 5; + */ + public long getStartTs() { + return startTs_; + } + + // required uint64 complete_ts = 6; + public static final int COMPLETE_TS_FIELD_NUMBER = 6; + private long completeTs_; + /** + * required uint64 complete_ts = 6; + */ + public boolean hasCompleteTs() { + return ((bitField0_ & 0x00000010) == 0x00000010); + } + /** + * required uint64 complete_ts = 6; + */ + public long getCompleteTs() { + return completeTs_; + } + + // repeated .hbase.pb.BackupImage ancestors = 7; + public static final int ANCESTORS_FIELD_NUMBER = 7; + private java.util.List<org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage> ancestors_; + /** + * repeated .hbase.pb.BackupImage ancestors = 7; + */ + public java.util.List<org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage> getAncestorsList() { + return ancestors_; + } + /** + * repeated .hbase.pb.BackupImage ancestors = 7; + */ + public java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImageOrBuilder> + getAncestorsOrBuilderList() { + return ancestors_; + } + /** + * repeated .hbase.pb.BackupImage ancestors = 7; + */ + public int getAncestorsCount() { + return ancestors_.size(); + } + /** + * repeated .hbase.pb.BackupImage ancestors = 7; + */ + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage getAncestors(int index) { + return ancestors_.get(index); + } + /** + * repeated .hbase.pb.BackupImage ancestors = 7; + */ + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImageOrBuilder getAncestorsOrBuilder( + int index) { + return ancestors_.get(index); + } + + private void initFields() { + backupId_ = ""; + backupType_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType.FULL; + rootDir_ = ""; + tableList_ = java.util.Collections.emptyList(); + startTs_ = 0L; + completeTs_ = 0L; + ancestors_ = java.util.Collections.emptyList(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasBackupId()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasBackupType()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasRootDir()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasStartTs()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasCompleteTs()) { + memoizedIsInitialized = 0; + return false; + } + for (int i = 0; i < getTableListCount(); i++) { + if (!getTableList(i).isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + for (int i = 0; i < 
getAncestorsCount(); i++) { + if (!getAncestors(i).isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(1, getBackupIdBytes()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeEnum(2, backupType_.getNumber()); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + output.writeBytes(3, getRootDirBytes()); + } + for (int i = 0; i < tableList_.size(); i++) { + output.writeMessage(4, tableList_.get(i)); + } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + output.writeUInt64(5, startTs_); + } + if (((bitField0_ & 0x00000010) == 0x00000010)) { + output.writeUInt64(6, completeTs_); + } + for (int i = 0; i < ancestors_.size(); i++) { + output.writeMessage(7, ancestors_.get(i)); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(1, getBackupIdBytes()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeEnumSize(2, backupType_.getNumber()); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(3, getRootDirBytes()); + } + for (int i = 0; i < tableList_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(4, tableList_.get(i)); + } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt64Size(5, startTs_); + } + if (((bitField0_ & 0x00000010) == 0x00000010)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt64Size(6, completeTs_); + } + for (int i = 0; i < ancestors_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(7, ancestors_.get(i)); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage other = (org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage) obj; + + boolean result = true; + result = result && (hasBackupId() == other.hasBackupId()); + if (hasBackupId()) { + result = result && getBackupId() + .equals(other.getBackupId()); + } + result = result && (hasBackupType() == other.hasBackupType()); + if (hasBackupType()) { + result = result && + (getBackupType() == other.getBackupType()); + } + result = result && (hasRootDir() == other.hasRootDir()); + if (hasRootDir()) { + result = result && getRootDir() + .equals(other.getRootDir()); + } + result = result && getTableListList() + .equals(other.getTableListList()); + result = result && (hasStartTs() == other.hasStartTs()); + if (hasStartTs()) { + result = result 
&& (getStartTs() + == other.getStartTs()); + } + result = result && (hasCompleteTs() == other.hasCompleteTs()); + if (hasCompleteTs()) { + result = result && (getCompleteTs() + == other.getCompleteTs()); + } + result = result && getAncestorsList() + .equals(other.getAncestorsList()); + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasBackupId()) { + hash = (37 * hash) + BACKUP_ID_FIELD_NUMBER; + hash = (53 * hash) + getBackupId().hashCode(); + } + if (hasBackupType()) { + hash = (37 * hash) + BACKUP_TYPE_FIELD_NUMBER; + hash = (53 * hash) + hashEnum(getBackupType()); + } + if (hasRootDir()) { + hash = (37 * hash) + ROOT_DIR_FIELD_NUMBER; + hash = (53 * hash) + getRootDir().hashCode(); + } + if (getTableListCount() > 0) { + hash = (37 * hash) + TABLE_LIST_FIELD_NUMBER; + hash = (53 * hash) + getTableListList().hashCode(); + } + if (hasStartTs()) { + hash = (37 * hash) + START_TS_FIELD_NUMBER; + hash = (53 * hash) + hashLong(getStartTs()); + } + if (hasCompleteTs()) { + hash = (37 * hash) + COMPLETE_TS_FIELD_NUMBER; + hash = (53 * hash) + hashLong(getCompleteTs()); + } + if (getAncestorsCount() > 0) { + hash = (37 * hash) + ANCESTORS_FIELD_NUMBER; + hash = (53 * hash) + getAncestorsList().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + 
return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code hbase.pb.BackupImage} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImageOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_BackupImage_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_BackupImage_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage.class, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getTableListFieldBuilder(); + getAncestorsFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + backupId_ = ""; + bitField0_ = (bitField0_ & ~0x00000001); + backupType_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType.FULL; + bitField0_ = (bitField0_ & ~0x00000002); + rootDir_ = ""; + bitField0_ = (bitField0_ & ~0x00000004); + if (tableListBuilder_ == null) { + tableList_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000008); + } else { + tableListBuilder_.clear(); + } + startTs_ = 0L; + bitField0_ = (bitField0_ & ~0x00000010); + completeTs_ = 0L; + bitField0_ = (bitField0_ & ~0x00000020); + if (ancestorsBuilder_ == null) { + ancestors_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000040); + } else { + ancestorsBuilder_.clear(); + } + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return 
org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_BackupImage_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage build() { + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage result = new org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.backupId_ = backupId_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.backupType_ = backupType_; + if (((from_bitField0_ & 0x00000004) == 0x00000004)) { + to_bitField0_ |= 0x00000004; + } + result.rootDir_ = rootDir_; + if (tableListBuilder_ == null) { + if (((bitField0_ & 0x00000008) == 0x00000008)) { + tableList_ = java.util.Collections.unmodifiableList(tableList_); + bitField0_ = (bitField0_ & ~0x00000008); + } + result.tableList_ = tableList_; + } else { + result.tableList_ = tableListBuilder_.build(); + } + if (((from_bitField0_ & 0x00000010) == 0x00000010)) { + to_bitField0_ |= 0x00000008; + } + result.startTs_ = startTs_; + if (((from_bitField0_ & 0x00000020) == 0x00000020)) { + to_bitField0_ |= 0x00000010; + } + result.completeTs_ = completeTs_; + if (ancestorsBuilder_ == null) { + if (((bitField0_ & 0x00000040) == 0x00000040)) { + ancestors_ = java.util.Collections.unmodifiableList(ancestors_); + bitField0_ = (bitField0_ & ~0x00000040); + } + result.ancestors_ = ancestors_; + } else { + result.ancestors_ = ancestorsBuilder_.build(); + } + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage.getDefaultInstance()) return this; + if (other.hasBackupId()) { + bitField0_ |= 0x00000001; + backupId_ = other.backupId_; + onChanged(); + } + if (other.hasBackupType()) { + setBackupType(other.getBackupType()); + } + if (other.hasRootDir()) { + bitField0_ |= 0x00000004; + rootDir_ = other.rootDir_; + onChanged(); + } + if (tableListBuilder_ == null) { + if (!other.tableList_.isEmpty()) { + if (tableList_.isEmpty()) { + tableList_ = other.tableList_; + bitField0_ = (bitField0_ & ~0x00000008); + } else { + ensureTableListIsMutable(); + tableList_.addAll(other.tableList_); + } + onChanged(); + } + } else { + if (!other.tableList_.isEmpty()) { + if (tableListBuilder_.isEmpty()) { + tableListBuilder_.dispose(); + tableListBuilder_ = null; + tableList_ = other.tableList_; + bitField0_ = (bitField0_ & ~0x00000008); + 
tableListBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? + getTableListFieldBuilder() : null; + } else { + tableListBuilder_.addAllMessages(other.tableList_); + } + } + } + if (other.hasStartTs()) { + setStartTs(other.getStartTs()); + } + if (other.hasCompleteTs()) { + setCompleteTs(other.getCompleteTs()); + } + if (ancestorsBuilder_ == null) { + if (!other.ancestors_.isEmpty()) { + if (ancestors_.isEmpty()) { + ancestors_ = other.ancestors_; + bitField0_ = (bitField0_ & ~0x00000040); + } else { + ensureAncestorsIsMutable(); + ancestors_.addAll(other.ancestors_); + } + onChanged(); + } + } else { + if (!other.ancestors_.isEmpty()) { + if (ancestorsBuilder_.isEmpty()) { + ancestorsBuilder_.dispose(); + ancestorsBuilder_ = null; + ancestors_ = other.ancestors_; + bitField0_ = (bitField0_ & ~0x00000040); + ancestorsBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? + getAncestorsFieldBuilder() : null; + } else { + ancestorsBuilder_.addAllMessages(other.ancestors_); + } + } + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasBackupId()) { + + return false; + } + if (!hasBackupType()) { + + return false; + } + if (!hasRootDir()) { + + return false; + } + if (!hasStartTs()) { + + return false; + } + if (!hasCompleteTs()) { + + return false; + } + for (int i = 0; i < getTableListCount(); i++) { + if (!getTableList(i).isInitialized()) { + + return false; + } + } + for (int i = 0; i < getAncestorsCount(); i++) { + if (!getAncestors(i).isInitialized()) { + + return false; + } + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required string backup_id = 1; + private java.lang.Object backupId_ = ""; + /** + * required string backup_id = 1; + */ + public boolean hasBackupId() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string backup_id = 1; + */ + public java.lang.String getBackupId() { + java.lang.Object ref = backupId_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + backupId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * required string backup_id = 1; + */ + public com.google.protobuf.ByteString + getBackupIdBytes() { + java.lang.Object ref = backupId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + backupId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * required string backup_id = 1; + */ + public Builder setBackupId( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + backupId_ = value; + onChanged(); + return this; + } + /** + * required string backup_id = 1; + */ + public Builder 
clearBackupId() { + bitField0_ = (bitField0_ & ~0x00000001); + backupId_ = getDefaultInstance().getBackupId(); + onChanged(); + return this; + } + /** + * required string backup_id = 1; + */ + public Builder setBackupIdBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + backupId_ = value; + onChanged(); + return this; + } + + // required .hbase.pb.BackupType backup_type = 2; + private org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType backupType_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType.FULL; + /** + * required .hbase.pb.BackupType backup_type = 2; + */ + public boolean hasBackupType() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required .hbase.pb.BackupType backup_type = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType getBackupType() { + return backupType_; + } + /** + * required .hbase.pb.BackupType backup_type = 2; + */ + public Builder setBackupType(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000002; + backupType_ = value; + onChanged(); + return this; + } + /** + * required .hbase.pb.BackupType backup_type = 2; + */ + public Builder clearBackupType() { + bitField0_ = (bitField0_ & ~0x00000002); + backupType_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType.FULL; + onChanged(); + return this; + } + + // required string root_dir = 3; + private java.lang.Object rootDir_ = ""; + /** + * required string root_dir = 3; + */ + public boolean hasRootDir() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * required string root_dir = 3; + */ + public java.lang.String getRootDir() { + java.lang.Object ref = rootDir_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + rootDir_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * required string root_dir = 3; + */ + public com.google.protobuf.ByteString + getRootDirBytes() { + java.lang.Object ref = rootDir_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + rootDir_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * required string root_dir = 3; + */ + public Builder setRootDir( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000004; + rootDir_ = value; + onChanged(); + return this; + } + /** + * required string root_dir = 3; + */ + public Builder clearRootDir() { + bitField0_ = (bitField0_ & ~0x00000004); + rootDir_ = getDefaultInstance().getRootDir(); + onChanged(); + return this; + } + /** + * required string root_dir = 3; + */ + public Builder setRootDirBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000004; + rootDir_ = value; + onChanged(); + return this; + } + + // repeated .hbase.pb.TableName table_list = 4; + private java.util.List tableList_ = + java.util.Collections.emptyList(); + private void ensureTableListIsMutable() { + if (!((bitField0_ & 0x00000008) == 0x00000008)) { + tableList_ = new java.util.ArrayList(tableList_); + bitField0_ |= 0x00000008; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> tableListBuilder_; + + /** + * repeated .hbase.pb.TableName table_list = 4; + */ + public java.util.List getTableListList() { + if (tableListBuilder_ == null) { + return java.util.Collections.unmodifiableList(tableList_); + } else { + return tableListBuilder_.getMessageList(); + } + } + /** + * repeated .hbase.pb.TableName table_list = 4; + */ + public int getTableListCount() { + if (tableListBuilder_ == null) { + return tableList_.size(); + } else { + return tableListBuilder_.getCount(); + } + } + /** + * repeated .hbase.pb.TableName table_list = 4; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableList(int index) { + if (tableListBuilder_ == null) { + return tableList_.get(index); + } else { + return tableListBuilder_.getMessage(index); + } + } + /** + * repeated .hbase.pb.TableName table_list = 4; + */ + public Builder setTableList( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { + if (tableListBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureTableListIsMutable(); + tableList_.set(index, value); + onChanged(); + } else { + tableListBuilder_.setMessage(index, value); + } + return this; + } + /** + * repeated .hbase.pb.TableName table_list = 4; + */ + public Builder setTableList( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) { + if (tableListBuilder_ == null) { + ensureTableListIsMutable(); + tableList_.set(index, builderForValue.build()); + onChanged(); + } else { + tableListBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.TableName table_list = 4; + */ + public Builder addTableList(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { + if (tableListBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureTableListIsMutable(); + tableList_.add(value); + onChanged(); + } else { + tableListBuilder_.addMessage(value); + } + return this; + } + /** + * repeated .hbase.pb.TableName table_list = 4; + */ + public Builder addTableList( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { + if (tableListBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureTableListIsMutable(); + tableList_.add(index, value); + onChanged(); + } else { + tableListBuilder_.addMessage(index, value); + } + return this; + } + /** + * repeated .hbase.pb.TableName table_list = 4; + */ + public Builder addTableList( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) { + if (tableListBuilder_ == null) { + ensureTableListIsMutable(); + tableList_.add(builderForValue.build()); + onChanged(); + } else { + tableListBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.TableName table_list = 4; + */ + public Builder addTableList( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) { + if (tableListBuilder_ == null) { + ensureTableListIsMutable(); + tableList_.add(index, builderForValue.build()); + onChanged(); + } else { + tableListBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated 
.hbase.pb.TableName table_list = 4; + */ + public Builder addAllTableList( + java.lang.Iterable values) { + if (tableListBuilder_ == null) { + ensureTableListIsMutable(); + super.addAll(values, tableList_); + onChanged(); + } else { + tableListBuilder_.addAllMessages(values); + } + return this; + } + /** + * repeated .hbase.pb.TableName table_list = 4; + */ + public Builder clearTableList() { + if (tableListBuilder_ == null) { + tableList_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000008); + onChanged(); + } else { + tableListBuilder_.clear(); + } + return this; + } + /** + * repeated .hbase.pb.TableName table_list = 4; + */ + public Builder removeTableList(int index) { + if (tableListBuilder_ == null) { + ensureTableListIsMutable(); + tableList_.remove(index); + onChanged(); + } else { + tableListBuilder_.remove(index); + } + return this; + } + /** + * repeated .hbase.pb.TableName table_list = 4; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder getTableListBuilder( + int index) { + return getTableListFieldBuilder().getBuilder(index); + } + /** + * repeated .hbase.pb.TableName table_list = 4; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableListOrBuilder( + int index) { + if (tableListBuilder_ == null) { + return tableList_.get(index); } else { + return tableListBuilder_.getMessageOrBuilder(index); + } + } + /** + * repeated .hbase.pb.TableName table_list = 4; + */ + public java.util.List + getTableListOrBuilderList() { + if (tableListBuilder_ != null) { + return tableListBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(tableList_); + } + } + /** + * repeated .hbase.pb.TableName table_list = 4; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder addTableListBuilder() { + return getTableListFieldBuilder().addBuilder( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance()); + } + /** + * repeated .hbase.pb.TableName table_list = 4; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder addTableListBuilder( + int index) { + return getTableListFieldBuilder().addBuilder( + index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance()); + } + /** + * repeated .hbase.pb.TableName table_list = 4; + */ + public java.util.List + getTableListBuilderList() { + return getTableListFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> + getTableListFieldBuilder() { + if (tableListBuilder_ == null) { + tableListBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder>( + tableList_, + ((bitField0_ & 0x00000008) == 0x00000008), + getParentForChildren(), + isClean()); + tableList_ = null; + } + return tableListBuilder_; + } + + // required uint64 start_ts = 5; + private long startTs_ ; + /** + * required uint64 start_ts = 5; + */ + public boolean hasStartTs() { + return ((bitField0_ & 0x00000010) == 0x00000010); + } + /** + * required uint64 start_ts = 5; + */ + 
public long getStartTs() { + return startTs_; + } + /** + * required uint64 start_ts = 5; + */ + public Builder setStartTs(long value) { + bitField0_ |= 0x00000010; + startTs_ = value; + onChanged(); + return this; + } + /** + * required uint64 start_ts = 5; + */ + public Builder clearStartTs() { + bitField0_ = (bitField0_ & ~0x00000010); + startTs_ = 0L; + onChanged(); + return this; + } + + // required uint64 complete_ts = 6; + private long completeTs_ ; + /** + * required uint64 complete_ts = 6; + */ + public boolean hasCompleteTs() { + return ((bitField0_ & 0x00000020) == 0x00000020); + } + /** + * required uint64 complete_ts = 6; + */ + public long getCompleteTs() { + return completeTs_; + } + /** + * required uint64 complete_ts = 6; + */ + public Builder setCompleteTs(long value) { + bitField0_ |= 0x00000020; + completeTs_ = value; + onChanged(); + return this; + } + /** + * required uint64 complete_ts = 6; + */ + public Builder clearCompleteTs() { + bitField0_ = (bitField0_ & ~0x00000020); + completeTs_ = 0L; + onChanged(); + return this; + } + + // repeated .hbase.pb.BackupImage ancestors = 7; + private java.util.List ancestors_ = + java.util.Collections.emptyList(); + private void ensureAncestorsIsMutable() { + if (!((bitField0_ & 0x00000040) == 0x00000040)) { + ancestors_ = new java.util.ArrayList(ancestors_); + bitField0_ |= 0x00000040; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage.Builder, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImageOrBuilder> ancestorsBuilder_; + + /** + * repeated .hbase.pb.BackupImage ancestors = 7; + */ + public java.util.List getAncestorsList() { + if (ancestorsBuilder_ == null) { + return java.util.Collections.unmodifiableList(ancestors_); + } else { + return ancestorsBuilder_.getMessageList(); + } + } + /** + * repeated .hbase.pb.BackupImage ancestors = 7; + */ + public int getAncestorsCount() { + if (ancestorsBuilder_ == null) { + return ancestors_.size(); + } else { + return ancestorsBuilder_.getCount(); + } + } + /** + * repeated .hbase.pb.BackupImage ancestors = 7; + */ + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage getAncestors(int index) { + if (ancestorsBuilder_ == null) { + return ancestors_.get(index); + } else { + return ancestorsBuilder_.getMessage(index); + } + } + /** + * repeated .hbase.pb.BackupImage ancestors = 7; + */ + public Builder setAncestors( + int index, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage value) { + if (ancestorsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureAncestorsIsMutable(); + ancestors_.set(index, value); + onChanged(); + } else { + ancestorsBuilder_.setMessage(index, value); + } + return this; + } + /** + * repeated .hbase.pb.BackupImage ancestors = 7; + */ + public Builder setAncestors( + int index, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage.Builder builderForValue) { + if (ancestorsBuilder_ == null) { + ensureAncestorsIsMutable(); + ancestors_.set(index, builderForValue.build()); + onChanged(); + } else { + ancestorsBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.BackupImage ancestors = 7; + */ + public Builder addAncestors(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage value) { + if (ancestorsBuilder_ == null) { + if (value == null) { + 
throw new NullPointerException(); + } + ensureAncestorsIsMutable(); + ancestors_.add(value); + onChanged(); + } else { + ancestorsBuilder_.addMessage(value); + } + return this; + } + /** + * repeated .hbase.pb.BackupImage ancestors = 7; + */ + public Builder addAncestors( + int index, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage value) { + if (ancestorsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureAncestorsIsMutable(); + ancestors_.add(index, value); + onChanged(); + } else { + ancestorsBuilder_.addMessage(index, value); + } + return this; + } + /** + * repeated .hbase.pb.BackupImage ancestors = 7; + */ + public Builder addAncestors( + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage.Builder builderForValue) { + if (ancestorsBuilder_ == null) { + ensureAncestorsIsMutable(); + ancestors_.add(builderForValue.build()); + onChanged(); + } else { + ancestorsBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.BackupImage ancestors = 7; + */ + public Builder addAncestors( + int index, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage.Builder builderForValue) { + if (ancestorsBuilder_ == null) { + ensureAncestorsIsMutable(); + ancestors_.add(index, builderForValue.build()); + onChanged(); + } else { + ancestorsBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.BackupImage ancestors = 7; + */ + public Builder addAllAncestors( + java.lang.Iterable values) { + if (ancestorsBuilder_ == null) { + ensureAncestorsIsMutable(); + super.addAll(values, ancestors_); + onChanged(); + } else { + ancestorsBuilder_.addAllMessages(values); + } + return this; + } + /** + * repeated .hbase.pb.BackupImage ancestors = 7; + */ + public Builder clearAncestors() { + if (ancestorsBuilder_ == null) { + ancestors_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000040); + onChanged(); + } else { + ancestorsBuilder_.clear(); + } + return this; + } + /** + * repeated .hbase.pb.BackupImage ancestors = 7; + */ + public Builder removeAncestors(int index) { + if (ancestorsBuilder_ == null) { + ensureAncestorsIsMutable(); + ancestors_.remove(index); + onChanged(); + } else { + ancestorsBuilder_.remove(index); + } + return this; + } + /** + * repeated .hbase.pb.BackupImage ancestors = 7; + */ + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage.Builder getAncestorsBuilder( + int index) { + return getAncestorsFieldBuilder().getBuilder(index); + } + /** + * repeated .hbase.pb.BackupImage ancestors = 7; + */ + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImageOrBuilder getAncestorsOrBuilder( + int index) { + if (ancestorsBuilder_ == null) { + return ancestors_.get(index); } else { + return ancestorsBuilder_.getMessageOrBuilder(index); + } + } + /** + * repeated .hbase.pb.BackupImage ancestors = 7; + */ + public java.util.List + getAncestorsOrBuilderList() { + if (ancestorsBuilder_ != null) { + return ancestorsBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(ancestors_); + } + } + /** + * repeated .hbase.pb.BackupImage ancestors = 7; + */ + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage.Builder addAncestorsBuilder() { + return getAncestorsFieldBuilder().addBuilder( + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage.getDefaultInstance()); + } + /** + * repeated 
.hbase.pb.BackupImage ancestors = 7; + */ + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage.Builder addAncestorsBuilder( + int index) { + return getAncestorsFieldBuilder().addBuilder( + index, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage.getDefaultInstance()); + } + /** + * repeated .hbase.pb.BackupImage ancestors = 7; + */ + public java.util.List + getAncestorsBuilderList() { + return getAncestorsFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage.Builder, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImageOrBuilder> + getAncestorsFieldBuilder() { + if (ancestorsBuilder_ == null) { + ancestorsBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage.Builder, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImageOrBuilder>( + ancestors_, + ((bitField0_ & 0x00000040) == 0x00000040), + getParentForChildren(), + isClean()); + ancestors_ = null; + } + return ancestorsBuilder_; + } + + // @@protoc_insertion_point(builder_scope:hbase.pb.BackupImage) + } + + static { + defaultInstance = new BackupImage(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:hbase.pb.BackupImage) + } + + public interface ServerTimestampOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required string server = 1; + /** + * required string server = 1; + */ + boolean hasServer(); + /** + * required string server = 1; + */ + java.lang.String getServer(); + /** + * required string server = 1; + */ + com.google.protobuf.ByteString + getServerBytes(); + + // required uint64 timestamp = 2; + /** + * required uint64 timestamp = 2; + */ + boolean hasTimestamp(); + /** + * required uint64 timestamp = 2; + */ + long getTimestamp(); + } + /** + * Protobuf type {@code hbase.pb.ServerTimestamp} + */ + public static final class ServerTimestamp extends + com.google.protobuf.GeneratedMessage + implements ServerTimestampOrBuilder { + // Use ServerTimestamp.newBuilder() to construct. 
+ private ServerTimestamp(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private ServerTimestamp(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final ServerTimestamp defaultInstance; + public static ServerTimestamp getDefaultInstance() { + return defaultInstance; + } + + public ServerTimestamp getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private ServerTimestamp( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + bitField0_ |= 0x00000001; + server_ = input.readBytes(); + break; + } + case 16: { + bitField0_ |= 0x00000002; + timestamp_ = input.readUInt64(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_ServerTimestamp_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_ServerTimestamp_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp.class, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public ServerTimestamp parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new ServerTimestamp(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required string server = 1; + public static final int SERVER_FIELD_NUMBER = 1; + private java.lang.Object server_; + /** + * required string server = 1; + */ + public boolean hasServer() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string server = 1; + */ + public java.lang.String getServer() { + java.lang.Object ref = server_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = 
bs.toStringUtf8(); + if (bs.isValidUtf8()) { + server_ = s; + } + return s; + } + } + /** + * required string server = 1; + */ + public com.google.protobuf.ByteString + getServerBytes() { + java.lang.Object ref = server_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + server_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + // required uint64 timestamp = 2; + public static final int TIMESTAMP_FIELD_NUMBER = 2; + private long timestamp_; + /** + * required uint64 timestamp = 2; + */ + public boolean hasTimestamp() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required uint64 timestamp = 2; + */ + public long getTimestamp() { + return timestamp_; + } + + private void initFields() { + server_ = ""; + timestamp_ = 0L; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasServer()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasTimestamp()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(1, getServerBytes()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeUInt64(2, timestamp_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(1, getServerBytes()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt64Size(2, timestamp_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp other = (org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp) obj; + + boolean result = true; + result = result && (hasServer() == other.hasServer()); + if (hasServer()) { + result = result && getServer() + .equals(other.getServer()); + } + result = result && (hasTimestamp() == other.hasTimestamp()); + if (hasTimestamp()) { + result = result && (getTimestamp() + == other.getTimestamp()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasServer()) { + hash = (37 * hash) + SERVER_FIELD_NUMBER; + hash = (53 * hash) + getServer().hashCode(); + } + if 
(hasTimestamp()) { + hash = (37 * hash) + TIMESTAMP_FIELD_NUMBER; + hash = (53 * hash) + hashLong(getTimestamp()); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code hbase.pb.ServerTimestamp} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements 
org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestampOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_ServerTimestamp_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_ServerTimestamp_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp.class, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + server_ = ""; + bitField0_ = (bitField0_ & ~0x00000001); + timestamp_ = 0L; + bitField0_ = (bitField0_ & ~0x00000002); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_ServerTimestamp_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp build() { + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp result = new org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.server_ = server_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.timestamp_ = timestamp_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp.getDefaultInstance()) return this; + if (other.hasServer()) { + bitField0_ |= 0x00000001; + server_ = other.server_; + onChanged(); + } + if (other.hasTimestamp()) { + 
setTimestamp(other.getTimestamp()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasServer()) { + + return false; + } + if (!hasTimestamp()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required string server = 1; + private java.lang.Object server_ = ""; + /** + * required string server = 1; + */ + public boolean hasServer() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string server = 1; + */ + public java.lang.String getServer() { + java.lang.Object ref = server_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + server_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * required string server = 1; + */ + public com.google.protobuf.ByteString + getServerBytes() { + java.lang.Object ref = server_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + server_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * required string server = 1; + */ + public Builder setServer( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + server_ = value; + onChanged(); + return this; + } + /** + * required string server = 1; + */ + public Builder clearServer() { + bitField0_ = (bitField0_ & ~0x00000001); + server_ = getDefaultInstance().getServer(); + onChanged(); + return this; + } + /** + * required string server = 1; + */ + public Builder setServerBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + server_ = value; + onChanged(); + return this; + } + + // required uint64 timestamp = 2; + private long timestamp_ ; + /** + * required uint64 timestamp = 2; + */ + public boolean hasTimestamp() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required uint64 timestamp = 2; + */ + public long getTimestamp() { + return timestamp_; + } + /** + * required uint64 timestamp = 2; + */ + public Builder setTimestamp(long value) { + bitField0_ |= 0x00000002; + timestamp_ = value; + onChanged(); + return this; + } + /** + * required uint64 timestamp = 2; + */ + public Builder clearTimestamp() { + bitField0_ = (bitField0_ & ~0x00000002); + timestamp_ = 0L; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:hbase.pb.ServerTimestamp) + } + + static { + defaultInstance = new ServerTimestamp(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:hbase.pb.ServerTimestamp) + } + + public interface TableServerTimestampOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required 
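The generated ServerTimestamp message above is just a (server, timestamp) pair; the TableServerTimestamp message that follows groups a repeated list of them under a single table name. A minimal round-trip sketch against the generated API shown here — the values and the snippet itself are illustrative only, not part of the patch:

    // Assumes: import org.apache.hadoop.hbase.protobuf.generated.BackupProtos;
    BackupProtos.ServerTimestamp st = BackupProtos.ServerTimestamp.newBuilder()
        .setServer("rs1.example.com,16020,1469564789000")  // required string server = 1 (hypothetical value)
        .setTimestamp(1469564790123L)                      // required uint64 timestamp = 2 (hypothetical value)
        .build();                                          // build() throws if a required field is unset
    byte[] bytes = st.toByteArray();
    BackupProtos.ServerTimestamp copy = BackupProtos.ServerTimestamp.parseFrom(bytes);
    assert copy.getServer().equals(st.getServer()) && copy.getTimestamp() == st.getTimestamp();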
.hbase.pb.TableName table = 1; + /** + * required .hbase.pb.TableName table = 1; + */ + boolean hasTable(); + /** + * required .hbase.pb.TableName table = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTable(); + /** + * required .hbase.pb.TableName table = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableOrBuilder(); + + // repeated .hbase.pb.ServerTimestamp server_timestamp = 2; + /** + * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; + */ + java.util.List + getServerTimestampList(); + /** + * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; + */ + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp getServerTimestamp(int index); + /** + * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; + */ + int getServerTimestampCount(); + /** + * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; + */ + java.util.List + getServerTimestampOrBuilderList(); + /** + * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; + */ + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestampOrBuilder getServerTimestampOrBuilder( + int index); + } + /** + * Protobuf type {@code hbase.pb.TableServerTimestamp} + */ + public static final class TableServerTimestamp extends + com.google.protobuf.GeneratedMessage + implements TableServerTimestampOrBuilder { + // Use TableServerTimestamp.newBuilder() to construct. + private TableServerTimestamp(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private TableServerTimestamp(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final TableServerTimestamp defaultInstance; + public static TableServerTimestamp getDefaultInstance() { + return defaultInstance; + } + + public TableServerTimestamp getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private TableServerTimestamp( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder subBuilder = null; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + subBuilder = table_.toBuilder(); + } + table_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(table_); + table_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000001; + break; + } + case 18: { + if (!((mutable_bitField0_ & 0x00000002) == 0x00000002)) { + serverTimestamp_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000002; + } + serverTimestamp_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp.PARSER, 
extensionRegistry)); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000002) == 0x00000002)) { + serverTimestamp_ = java.util.Collections.unmodifiableList(serverTimestamp_); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_TableServerTimestamp_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_TableServerTimestamp_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp.class, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public TableServerTimestamp parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new TableServerTimestamp(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required .hbase.pb.TableName table = 1; + public static final int TABLE_FIELD_NUMBER = 1; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName table_; + /** + * required .hbase.pb.TableName table = 1; + */ + public boolean hasTable() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .hbase.pb.TableName table = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTable() { + return table_; + } + /** + * required .hbase.pb.TableName table = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableOrBuilder() { + return table_; + } + + // repeated .hbase.pb.ServerTimestamp server_timestamp = 2; + public static final int SERVER_TIMESTAMP_FIELD_NUMBER = 2; + private java.util.List serverTimestamp_; + /** + * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; + */ + public java.util.List getServerTimestampList() { + return serverTimestamp_; + } + /** + * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; + */ + public java.util.List + getServerTimestampOrBuilderList() { + return serverTimestamp_; + } + /** + * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; + */ + public int getServerTimestampCount() { + return serverTimestamp_.size(); + } + /** + * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp getServerTimestamp(int index) { + return serverTimestamp_.get(index); + } + /** + * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestampOrBuilder getServerTimestampOrBuilder( + int index) { + return serverTimestamp_.get(index); + } + + private void initFields() { + table_ = 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); + serverTimestamp_ = java.util.Collections.emptyList(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasTable()) { + memoizedIsInitialized = 0; + return false; + } + if (!getTable().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + for (int i = 0; i < getServerTimestampCount(); i++) { + if (!getServerTimestamp(i).isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeMessage(1, table_); + } + for (int i = 0; i < serverTimestamp_.size(); i++) { + output.writeMessage(2, serverTimestamp_.get(i)); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(1, table_); + } + for (int i = 0; i < serverTimestamp_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(2, serverTimestamp_.get(i)); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp other = (org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp) obj; + + boolean result = true; + result = result && (hasTable() == other.hasTable()); + if (hasTable()) { + result = result && getTable() + .equals(other.getTable()); + } + result = result && getServerTimestampList() + .equals(other.getServerTimestampList()); + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasTable()) { + hash = (37 * hash) + TABLE_FIELD_NUMBER; + hash = (53 * hash) + getTable().hashCode(); + } + if (getServerTimestampCount() > 0) { + hash = (37 * hash) + SERVER_TIMESTAMP_FIELD_NUMBER; + hash = (53 * hash) + getServerTimestampList().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp parseFrom( + 
com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code hbase.pb.TableServerTimestamp} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestampOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_TableServerTimestamp_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_TableServerTimestamp_fieldAccessorTable + .ensureFieldAccessorsInitialized( + 
org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp.class, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getTableFieldBuilder(); + getServerTimestampFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + if (tableBuilder_ == null) { + table_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); + } else { + tableBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + if (serverTimestampBuilder_ == null) { + serverTimestamp_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000002); + } else { + serverTimestampBuilder_.clear(); + } + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_TableServerTimestamp_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp build() { + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp result = new org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + if (tableBuilder_ == null) { + result.table_ = table_; + } else { + result.table_ = tableBuilder_.build(); + } + if (serverTimestampBuilder_ == null) { + if (((bitField0_ & 0x00000002) == 0x00000002)) { + serverTimestamp_ = java.util.Collections.unmodifiableList(serverTimestamp_); + bitField0_ = (bitField0_ & ~0x00000002); + } + result.serverTimestamp_ = serverTimestamp_; + } else { + result.serverTimestamp_ = serverTimestampBuilder_.build(); + } + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp other) { + if (other == 
org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp.getDefaultInstance()) return this; + if (other.hasTable()) { + mergeTable(other.getTable()); + } + if (serverTimestampBuilder_ == null) { + if (!other.serverTimestamp_.isEmpty()) { + if (serverTimestamp_.isEmpty()) { + serverTimestamp_ = other.serverTimestamp_; + bitField0_ = (bitField0_ & ~0x00000002); + } else { + ensureServerTimestampIsMutable(); + serverTimestamp_.addAll(other.serverTimestamp_); + } + onChanged(); + } + } else { + if (!other.serverTimestamp_.isEmpty()) { + if (serverTimestampBuilder_.isEmpty()) { + serverTimestampBuilder_.dispose(); + serverTimestampBuilder_ = null; + serverTimestamp_ = other.serverTimestamp_; + bitField0_ = (bitField0_ & ~0x00000002); + serverTimestampBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? + getServerTimestampFieldBuilder() : null; + } else { + serverTimestampBuilder_.addAllMessages(other.serverTimestamp_); + } + } + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasTable()) { + + return false; + } + if (!getTable().isInitialized()) { + + return false; + } + for (int i = 0; i < getServerTimestampCount(); i++) { + if (!getServerTimestamp(i).isInitialized()) { + + return false; + } + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required .hbase.pb.TableName table = 1; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName table_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> tableBuilder_; + /** + * required .hbase.pb.TableName table = 1; + */ + public boolean hasTable() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .hbase.pb.TableName table = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTable() { + if (tableBuilder_ == null) { + return table_; + } else { + return tableBuilder_.getMessage(); + } + } + /** + * required .hbase.pb.TableName table = 1; + */ + public Builder setTable(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { + if (tableBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + table_ = value; + onChanged(); + } else { + tableBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .hbase.pb.TableName table = 1; + */ + public Builder setTable( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) { + if (tableBuilder_ == null) { + table_ = builderForValue.build(); + onChanged(); + } 
else { + tableBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .hbase.pb.TableName table = 1; + */ + public Builder mergeTable(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { + if (tableBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001) && + table_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance()) { + table_ = + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.newBuilder(table_).mergeFrom(value).buildPartial(); + } else { + table_ = value; + } + onChanged(); + } else { + tableBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .hbase.pb.TableName table = 1; + */ + public Builder clearTable() { + if (tableBuilder_ == null) { + table_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); + onChanged(); + } else { + tableBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + /** + * required .hbase.pb.TableName table = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder getTableBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return getTableFieldBuilder().getBuilder(); + } + /** + * required .hbase.pb.TableName table = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableOrBuilder() { + if (tableBuilder_ != null) { + return tableBuilder_.getMessageOrBuilder(); + } else { + return table_; + } + } + /** + * required .hbase.pb.TableName table = 1; + */ + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> + getTableFieldBuilder() { + if (tableBuilder_ == null) { + tableBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder>( + table_, + getParentForChildren(), + isClean()); + table_ = null; + } + return tableBuilder_; + } + + // repeated .hbase.pb.ServerTimestamp server_timestamp = 2; + private java.util.List serverTimestamp_ = + java.util.Collections.emptyList(); + private void ensureServerTimestampIsMutable() { + if (!((bitField0_ & 0x00000002) == 0x00000002)) { + serverTimestamp_ = new java.util.ArrayList(serverTimestamp_); + bitField0_ |= 0x00000002; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp.Builder, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestampOrBuilder> serverTimestampBuilder_; + + /** + * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; + */ + public java.util.List getServerTimestampList() { + if (serverTimestampBuilder_ == null) { + return java.util.Collections.unmodifiableList(serverTimestamp_); + } else { + return serverTimestampBuilder_.getMessageList(); + } + } + /** + * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; + */ + public int getServerTimestampCount() { + if (serverTimestampBuilder_ == null) { + return serverTimestamp_.size(); + } else { + return serverTimestampBuilder_.getCount(); + } 
+ } + /** + * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp getServerTimestamp(int index) { + if (serverTimestampBuilder_ == null) { + return serverTimestamp_.get(index); + } else { + return serverTimestampBuilder_.getMessage(index); + } + } + /** + * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; + */ + public Builder setServerTimestamp( + int index, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp value) { + if (serverTimestampBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureServerTimestampIsMutable(); + serverTimestamp_.set(index, value); + onChanged(); + } else { + serverTimestampBuilder_.setMessage(index, value); + } + return this; + } + /** + * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; + */ + public Builder setServerTimestamp( + int index, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp.Builder builderForValue) { + if (serverTimestampBuilder_ == null) { + ensureServerTimestampIsMutable(); + serverTimestamp_.set(index, builderForValue.build()); + onChanged(); + } else { + serverTimestampBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; + */ + public Builder addServerTimestamp(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp value) { + if (serverTimestampBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureServerTimestampIsMutable(); + serverTimestamp_.add(value); + onChanged(); + } else { + serverTimestampBuilder_.addMessage(value); + } + return this; + } + /** + * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; + */ + public Builder addServerTimestamp( + int index, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp value) { + if (serverTimestampBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureServerTimestampIsMutable(); + serverTimestamp_.add(index, value); + onChanged(); + } else { + serverTimestampBuilder_.addMessage(index, value); + } + return this; + } + /** + * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; + */ + public Builder addServerTimestamp( + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp.Builder builderForValue) { + if (serverTimestampBuilder_ == null) { + ensureServerTimestampIsMutable(); + serverTimestamp_.add(builderForValue.build()); + onChanged(); + } else { + serverTimestampBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; + */ + public Builder addServerTimestamp( + int index, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp.Builder builderForValue) { + if (serverTimestampBuilder_ == null) { + ensureServerTimestampIsMutable(); + serverTimestamp_.add(index, builderForValue.build()); + onChanged(); + } else { + serverTimestampBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; + */ + public Builder addAllServerTimestamp( + java.lang.Iterable values) { + if (serverTimestampBuilder_ == null) { + ensureServerTimestampIsMutable(); + super.addAll(values, serverTimestamp_); + onChanged(); + } else { + serverTimestampBuilder_.addAllMessages(values); + } + return this; + } + /** + * repeated 
.hbase.pb.ServerTimestamp server_timestamp = 2; + */ + public Builder clearServerTimestamp() { + if (serverTimestampBuilder_ == null) { + serverTimestamp_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + } else { + serverTimestampBuilder_.clear(); + } + return this; + } + /** + * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; + */ + public Builder removeServerTimestamp(int index) { + if (serverTimestampBuilder_ == null) { + ensureServerTimestampIsMutable(); + serverTimestamp_.remove(index); + onChanged(); + } else { + serverTimestampBuilder_.remove(index); + } + return this; + } + /** + * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp.Builder getServerTimestampBuilder( + int index) { + return getServerTimestampFieldBuilder().getBuilder(index); + } + /** + * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestampOrBuilder getServerTimestampOrBuilder( + int index) { + if (serverTimestampBuilder_ == null) { + return serverTimestamp_.get(index); } else { + return serverTimestampBuilder_.getMessageOrBuilder(index); + } + } + /** + * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; + */ + public java.util.List + getServerTimestampOrBuilderList() { + if (serverTimestampBuilder_ != null) { + return serverTimestampBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(serverTimestamp_); + } + } + /** + * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp.Builder addServerTimestampBuilder() { + return getServerTimestampFieldBuilder().addBuilder( + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp.getDefaultInstance()); + } + /** + * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp.Builder addServerTimestampBuilder( + int index) { + return getServerTimestampFieldBuilder().addBuilder( + index, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp.getDefaultInstance()); + } + /** + * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; + */ + public java.util.List + getServerTimestampBuilderList() { + return getServerTimestampFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp.Builder, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestampOrBuilder> + getServerTimestampFieldBuilder() { + if (serverTimestampBuilder_ == null) { + serverTimestampBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp.Builder, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestampOrBuilder>( + serverTimestamp_, + ((bitField0_ & 0x00000002) == 0x00000002), + getParentForChildren(), + isClean()); + serverTimestamp_ = null; + } + return serverTimestampBuilder_; + } + + // @@protoc_insertion_point(builder_scope:hbase.pb.TableServerTimestamp) + } + + static { + defaultInstance = new TableServerTimestamp(true); + defaultInstance.initFields(); + } + + // 
@@protoc_insertion_point(class_scope:hbase.pb.TableServerTimestamp) + } + + public interface BackupManifestOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required string version = 1; + /** + * required string version = 1; + */ + boolean hasVersion(); + /** + * required string version = 1; + */ + java.lang.String getVersion(); + /** + * required string version = 1; + */ + com.google.protobuf.ByteString + getVersionBytes(); + + // required string backup_id = 2; + /** + * required string backup_id = 2; + */ + boolean hasBackupId(); + /** + * required string backup_id = 2; + */ + java.lang.String getBackupId(); + /** + * required string backup_id = 2; + */ + com.google.protobuf.ByteString + getBackupIdBytes(); + + // required .hbase.pb.BackupType type = 3; + /** + * required .hbase.pb.BackupType type = 3; + */ + boolean hasType(); + /** + * required .hbase.pb.BackupType type = 3; + */ + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType getType(); + + // repeated .hbase.pb.TableName table_list = 4; + /** + * repeated .hbase.pb.TableName table_list = 4; + */ + java.util.List + getTableListList(); + /** + * repeated .hbase.pb.TableName table_list = 4; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableList(int index); + /** + * repeated .hbase.pb.TableName table_list = 4; + */ + int getTableListCount(); + /** + * repeated .hbase.pb.TableName table_list = 4; + */ + java.util.List + getTableListOrBuilderList(); + /** + * repeated .hbase.pb.TableName table_list = 4; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableListOrBuilder( + int index); + + // required uint64 start_ts = 5; + /** + * required uint64 start_ts = 5; + */ + boolean hasStartTs(); + /** + * required uint64 start_ts = 5; + */ + long getStartTs(); + + // required uint64 complete_ts = 6; + /** + * required uint64 complete_ts = 6; + */ + boolean hasCompleteTs(); + /** + * required uint64 complete_ts = 6; + */ + long getCompleteTs(); + + // repeated .hbase.pb.TableServerTimestamp tst_map = 7; + /** + * repeated .hbase.pb.TableServerTimestamp tst_map = 7; + */ + java.util.List + getTstMapList(); + /** + * repeated .hbase.pb.TableServerTimestamp tst_map = 7; + */ + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp getTstMap(int index); + /** + * repeated .hbase.pb.TableServerTimestamp tst_map = 7; + */ + int getTstMapCount(); + /** + * repeated .hbase.pb.TableServerTimestamp tst_map = 7; + */ + java.util.List + getTstMapOrBuilderList(); + /** + * repeated .hbase.pb.TableServerTimestamp tst_map = 7; + */ + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestampOrBuilder getTstMapOrBuilder( + int index); + + // repeated .hbase.pb.BackupImage dependent_backup_image = 8; + /** + * repeated .hbase.pb.BackupImage dependent_backup_image = 8; + */ + java.util.List + getDependentBackupImageList(); + /** + * repeated .hbase.pb.BackupImage dependent_backup_image = 8; + */ + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage getDependentBackupImage(int index); + /** + * repeated .hbase.pb.BackupImage dependent_backup_image = 8; + */ + int getDependentBackupImageCount(); + /** + * repeated .hbase.pb.BackupImage dependent_backup_image = 8; + */ + java.util.List + getDependentBackupImageOrBuilderList(); + /** + * repeated .hbase.pb.BackupImage dependent_backup_image = 8; + */ + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImageOrBuilder getDependentBackupImageOrBuilder( + 
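The BackupManifest accessor interface above carries the backup's version, backup_id, type, table_list, start_ts, complete_ts, the per-table tst_map of TableServerTimestamp entries, and any dependent_backup_image list. A small sketch of assembling one tst_map entry from the generated builders shown earlier — the helper name, the Map input, and the already-built proto TableName are assumptions for illustration, not code from this patch:

    // Assumes: import java.util.Map;
    // Assumes: import org.apache.hadoop.hbase.protobuf.generated.BackupProtos;
    // Assumes: import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
    static BackupProtos.TableServerTimestamp toTableServerTimestamp(
        HBaseProtos.TableName table, Map<String, Long> serverToTimestamp) {
      // required .hbase.pb.TableName table = 1
      BackupProtos.TableServerTimestamp.Builder builder =
          BackupProtos.TableServerTimestamp.newBuilder().setTable(table);
      // repeated .hbase.pb.ServerTimestamp server_timestamp = 2
      for (Map.Entry<String, Long> e : serverToTimestamp.entrySet()) {
        builder.addServerTimestamp(BackupProtos.ServerTimestamp.newBuilder()
            .setServer(e.getKey())
            .setTimestamp(e.getValue())
            .build());
      }
      return builder.build();
    }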
int index); + } + /** + * Protobuf type {@code hbase.pb.BackupManifest} + */ + public static final class BackupManifest extends + com.google.protobuf.GeneratedMessage + implements BackupManifestOrBuilder { + // Use BackupManifest.newBuilder() to construct. + private BackupManifest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private BackupManifest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final BackupManifest defaultInstance; + public static BackupManifest getDefaultInstance() { + return defaultInstance; + } + + public BackupManifest getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private BackupManifest( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + bitField0_ |= 0x00000001; + version_ = input.readBytes(); + break; + } + case 18: { + bitField0_ |= 0x00000002; + backupId_ = input.readBytes(); + break; + } + case 24: { + int rawValue = input.readEnum(); + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType value = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType.valueOf(rawValue); + if (value == null) { + unknownFields.mergeVarintField(3, rawValue); + } else { + bitField0_ |= 0x00000004; + type_ = value; + } + break; + } + case 34: { + if (!((mutable_bitField0_ & 0x00000008) == 0x00000008)) { + tableList_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000008; + } + tableList_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.PARSER, extensionRegistry)); + break; + } + case 40: { + bitField0_ |= 0x00000008; + startTs_ = input.readUInt64(); + break; + } + case 48: { + bitField0_ |= 0x00000010; + completeTs_ = input.readUInt64(); + break; + } + case 58: { + if (!((mutable_bitField0_ & 0x00000040) == 0x00000040)) { + tstMap_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000040; + } + tstMap_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp.PARSER, extensionRegistry)); + break; + } + case 66: { + if (!((mutable_bitField0_ & 0x00000080) == 0x00000080)) { + dependentBackupImage_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000080; + } + dependentBackupImage_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage.PARSER, extensionRegistry)); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000008) == 0x00000008)) { + tableList_ = 
java.util.Collections.unmodifiableList(tableList_); + } + if (((mutable_bitField0_ & 0x00000040) == 0x00000040)) { + tstMap_ = java.util.Collections.unmodifiableList(tstMap_); + } + if (((mutable_bitField0_ & 0x00000080) == 0x00000080)) { + dependentBackupImage_ = java.util.Collections.unmodifiableList(dependentBackupImage_); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_BackupManifest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_BackupManifest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest.class, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public BackupManifest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new BackupManifest(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required string version = 1; + public static final int VERSION_FIELD_NUMBER = 1; + private java.lang.Object version_; + /** + * required string version = 1; + */ + public boolean hasVersion() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string version = 1; + */ + public java.lang.String getVersion() { + java.lang.Object ref = version_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + version_ = s; + } + return s; + } + } + /** + * required string version = 1; + */ + public com.google.protobuf.ByteString + getVersionBytes() { + java.lang.Object ref = version_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + version_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + // required string backup_id = 2; + public static final int BACKUP_ID_FIELD_NUMBER = 2; + private java.lang.Object backupId_; + /** + * required string backup_id = 2; + */ + public boolean hasBackupId() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required string backup_id = 2; + */ + public java.lang.String getBackupId() { + java.lang.Object ref = backupId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + backupId_ = s; + } + return s; + } + } + /** + * required string backup_id = 2; + */ + public com.google.protobuf.ByteString + getBackupIdBytes() { + java.lang.Object ref = backupId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); 
+ backupId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + // required .hbase.pb.BackupType type = 3; + public static final int TYPE_FIELD_NUMBER = 3; + private org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType type_; + /** + * required .hbase.pb.BackupType type = 3; + */ + public boolean hasType() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * required .hbase.pb.BackupType type = 3; + */ + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType getType() { + return type_; + } + + // repeated .hbase.pb.TableName table_list = 4; + public static final int TABLE_LIST_FIELD_NUMBER = 4; + private java.util.List tableList_; + /** + * repeated .hbase.pb.TableName table_list = 4; + */ + public java.util.List getTableListList() { + return tableList_; + } + /** + * repeated .hbase.pb.TableName table_list = 4; + */ + public java.util.List + getTableListOrBuilderList() { + return tableList_; + } + /** + * repeated .hbase.pb.TableName table_list = 4; + */ + public int getTableListCount() { + return tableList_.size(); + } + /** + * repeated .hbase.pb.TableName table_list = 4; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableList(int index) { + return tableList_.get(index); + } + /** + * repeated .hbase.pb.TableName table_list = 4; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableListOrBuilder( + int index) { + return tableList_.get(index); + } + + // required uint64 start_ts = 5; + public static final int START_TS_FIELD_NUMBER = 5; + private long startTs_; + /** + * required uint64 start_ts = 5; + */ + public boolean hasStartTs() { + return ((bitField0_ & 0x00000008) == 0x00000008); + } + /** + * required uint64 start_ts = 5; + */ + public long getStartTs() { + return startTs_; + } + + // required uint64 complete_ts = 6; + public static final int COMPLETE_TS_FIELD_NUMBER = 6; + private long completeTs_; + /** + * required uint64 complete_ts = 6; + */ + public boolean hasCompleteTs() { + return ((bitField0_ & 0x00000010) == 0x00000010); + } + /** + * required uint64 complete_ts = 6; + */ + public long getCompleteTs() { + return completeTs_; + } + + // repeated .hbase.pb.TableServerTimestamp tst_map = 7; + public static final int TST_MAP_FIELD_NUMBER = 7; + private java.util.List tstMap_; + /** + * repeated .hbase.pb.TableServerTimestamp tst_map = 7; + */ + public java.util.List getTstMapList() { + return tstMap_; + } + /** + * repeated .hbase.pb.TableServerTimestamp tst_map = 7; + */ + public java.util.List + getTstMapOrBuilderList() { + return tstMap_; + } + /** + * repeated .hbase.pb.TableServerTimestamp tst_map = 7; + */ + public int getTstMapCount() { + return tstMap_.size(); + } + /** + * repeated .hbase.pb.TableServerTimestamp tst_map = 7; + */ + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp getTstMap(int index) { + return tstMap_.get(index); + } + /** + * repeated .hbase.pb.TableServerTimestamp tst_map = 7; + */ + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestampOrBuilder getTstMapOrBuilder( + int index) { + return tstMap_.get(index); + } + + // repeated .hbase.pb.BackupImage dependent_backup_image = 8; + public static final int DEPENDENT_BACKUP_IMAGE_FIELD_NUMBER = 8; + private java.util.List dependentBackupImage_; + /** + * repeated .hbase.pb.BackupImage dependent_backup_image = 8; + */ + public java.util.List getDependentBackupImageList() { + 
return dependentBackupImage_; + } + /** + * repeated .hbase.pb.BackupImage dependent_backup_image = 8; + */ + public java.util.List + getDependentBackupImageOrBuilderList() { + return dependentBackupImage_; + } + /** + * repeated .hbase.pb.BackupImage dependent_backup_image = 8; + */ + public int getDependentBackupImageCount() { + return dependentBackupImage_.size(); + } + /** + * repeated .hbase.pb.BackupImage dependent_backup_image = 8; + */ + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage getDependentBackupImage(int index) { + return dependentBackupImage_.get(index); + } + /** + * repeated .hbase.pb.BackupImage dependent_backup_image = 8; + */ + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImageOrBuilder getDependentBackupImageOrBuilder( + int index) { + return dependentBackupImage_.get(index); + } + + private void initFields() { + version_ = ""; + backupId_ = ""; + type_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType.FULL; + tableList_ = java.util.Collections.emptyList(); + startTs_ = 0L; + completeTs_ = 0L; + tstMap_ = java.util.Collections.emptyList(); + dependentBackupImage_ = java.util.Collections.emptyList(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasVersion()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasBackupId()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasType()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasStartTs()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasCompleteTs()) { + memoizedIsInitialized = 0; + return false; + } + for (int i = 0; i < getTableListCount(); i++) { + if (!getTableList(i).isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + for (int i = 0; i < getTstMapCount(); i++) { + if (!getTstMap(i).isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + for (int i = 0; i < getDependentBackupImageCount(); i++) { + if (!getDependentBackupImage(i).isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(1, getVersionBytes()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeBytes(2, getBackupIdBytes()); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + output.writeEnum(3, type_.getNumber()); + } + for (int i = 0; i < tableList_.size(); i++) { + output.writeMessage(4, tableList_.get(i)); + } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + output.writeUInt64(5, startTs_); + } + if (((bitField0_ & 0x00000010) == 0x00000010)) { + output.writeUInt64(6, completeTs_); + } + for (int i = 0; i < tstMap_.size(); i++) { + output.writeMessage(7, tstMap_.get(i)); + } + for (int i = 0; i < dependentBackupImage_.size(); i++) { + output.writeMessage(8, dependentBackupImage_.get(i)); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(1, getVersionBytes()); + } + if (((bitField0_ & 0x00000002) == 
0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(2, getBackupIdBytes()); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + size += com.google.protobuf.CodedOutputStream + .computeEnumSize(3, type_.getNumber()); + } + for (int i = 0; i < tableList_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(4, tableList_.get(i)); + } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt64Size(5, startTs_); + } + if (((bitField0_ & 0x00000010) == 0x00000010)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt64Size(6, completeTs_); + } + for (int i = 0; i < tstMap_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(7, tstMap_.get(i)); + } + for (int i = 0; i < dependentBackupImage_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(8, dependentBackupImage_.get(i)); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest other = (org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest) obj; + + boolean result = true; + result = result && (hasVersion() == other.hasVersion()); + if (hasVersion()) { + result = result && getVersion() + .equals(other.getVersion()); + } + result = result && (hasBackupId() == other.hasBackupId()); + if (hasBackupId()) { + result = result && getBackupId() + .equals(other.getBackupId()); + } + result = result && (hasType() == other.hasType()); + if (hasType()) { + result = result && + (getType() == other.getType()); + } + result = result && getTableListList() + .equals(other.getTableListList()); + result = result && (hasStartTs() == other.hasStartTs()); + if (hasStartTs()) { + result = result && (getStartTs() + == other.getStartTs()); + } + result = result && (hasCompleteTs() == other.hasCompleteTs()); + if (hasCompleteTs()) { + result = result && (getCompleteTs() + == other.getCompleteTs()); + } + result = result && getTstMapList() + .equals(other.getTstMapList()); + result = result && getDependentBackupImageList() + .equals(other.getDependentBackupImageList()); + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasVersion()) { + hash = (37 * hash) + VERSION_FIELD_NUMBER; + hash = (53 * hash) + getVersion().hashCode(); + } + if (hasBackupId()) { + hash = (37 * hash) + BACKUP_ID_FIELD_NUMBER; + hash = (53 * hash) + getBackupId().hashCode(); + } + if (hasType()) { + hash = (37 * hash) + TYPE_FIELD_NUMBER; + hash = (53 * hash) + hashEnum(getType()); + } + if (getTableListCount() > 0) { + hash = (37 * hash) + TABLE_LIST_FIELD_NUMBER; + hash = (53 * hash) + getTableListList().hashCode(); + } + if (hasStartTs()) { + hash = (37 * 
hash) + START_TS_FIELD_NUMBER; + hash = (53 * hash) + hashLong(getStartTs()); + } + if (hasCompleteTs()) { + hash = (37 * hash) + COMPLETE_TS_FIELD_NUMBER; + hash = (53 * hash) + hashLong(getCompleteTs()); + } + if (getTstMapCount() > 0) { + hash = (37 * hash) + TST_MAP_FIELD_NUMBER; + hash = (53 * hash) + getTstMapList().hashCode(); + } + if (getDependentBackupImageCount() > 0) { + hash = (37 * hash) + DEPENDENT_BACKUP_IMAGE_FIELD_NUMBER; + hash = (53 * hash) + getDependentBackupImageList().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected 
Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code hbase.pb.BackupManifest} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_BackupManifest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_BackupManifest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest.class, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getTableListFieldBuilder(); + getTstMapFieldBuilder(); + getDependentBackupImageFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + version_ = ""; + bitField0_ = (bitField0_ & ~0x00000001); + backupId_ = ""; + bitField0_ = (bitField0_ & ~0x00000002); + type_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType.FULL; + bitField0_ = (bitField0_ & ~0x00000004); + if (tableListBuilder_ == null) { + tableList_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000008); + } else { + tableListBuilder_.clear(); + } + startTs_ = 0L; + bitField0_ = (bitField0_ & ~0x00000010); + completeTs_ = 0L; + bitField0_ = (bitField0_ & ~0x00000020); + if (tstMapBuilder_ == null) { + tstMap_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000040); + } else { + tstMapBuilder_.clear(); + } + if (dependentBackupImageBuilder_ == null) { + dependentBackupImage_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000080); + } else { + dependentBackupImageBuilder_.clear(); + } + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_BackupManifest_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest build() { + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest buildPartial() { + 
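+ // buildPartial() copies the builder's state into an immutable BackupManifest: repeated fields
+ // (table_list, tst_map, dependent_backup_image) are frozen, and the builder's has-bits for
+ // start_ts/complete_ts (0x10/0x20) are remapped to the message layout (0x08/0x10).
+ //
+ // A minimal usage sketch of this generated builder; the version string, backup id and
+ // timestamps below are illustrative placeholders, not values taken from this patch:
+ //
+ //   BackupProtos.BackupManifest manifest = BackupProtos.BackupManifest.newBuilder()
+ //       .setVersion("1.0")                     // required string version = 1
+ //       .setBackupId("backup_1462384015000")   // required string backup_id = 2
+ //       .setType(BackupProtos.BackupType.FULL) // required .hbase.pb.BackupType type = 3
+ //       .setStartTs(1462384015000L)            // required uint64 start_ts = 5
+ //       .setCompleteTs(1462384035000L)         // required uint64 complete_ts = 6
+ //       .build();                              // build() throws if a required field is unset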
org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest result = new org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.version_ = version_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.backupId_ = backupId_; + if (((from_bitField0_ & 0x00000004) == 0x00000004)) { + to_bitField0_ |= 0x00000004; + } + result.type_ = type_; + if (tableListBuilder_ == null) { + if (((bitField0_ & 0x00000008) == 0x00000008)) { + tableList_ = java.util.Collections.unmodifiableList(tableList_); + bitField0_ = (bitField0_ & ~0x00000008); + } + result.tableList_ = tableList_; + } else { + result.tableList_ = tableListBuilder_.build(); + } + if (((from_bitField0_ & 0x00000010) == 0x00000010)) { + to_bitField0_ |= 0x00000008; + } + result.startTs_ = startTs_; + if (((from_bitField0_ & 0x00000020) == 0x00000020)) { + to_bitField0_ |= 0x00000010; + } + result.completeTs_ = completeTs_; + if (tstMapBuilder_ == null) { + if (((bitField0_ & 0x00000040) == 0x00000040)) { + tstMap_ = java.util.Collections.unmodifiableList(tstMap_); + bitField0_ = (bitField0_ & ~0x00000040); + } + result.tstMap_ = tstMap_; + } else { + result.tstMap_ = tstMapBuilder_.build(); + } + if (dependentBackupImageBuilder_ == null) { + if (((bitField0_ & 0x00000080) == 0x00000080)) { + dependentBackupImage_ = java.util.Collections.unmodifiableList(dependentBackupImage_); + bitField0_ = (bitField0_ & ~0x00000080); + } + result.dependentBackupImage_ = dependentBackupImage_; + } else { + result.dependentBackupImage_ = dependentBackupImageBuilder_.build(); + } + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest.getDefaultInstance()) return this; + if (other.hasVersion()) { + bitField0_ |= 0x00000001; + version_ = other.version_; + onChanged(); + } + if (other.hasBackupId()) { + bitField0_ |= 0x00000002; + backupId_ = other.backupId_; + onChanged(); + } + if (other.hasType()) { + setType(other.getType()); + } + if (tableListBuilder_ == null) { + if (!other.tableList_.isEmpty()) { + if (tableList_.isEmpty()) { + tableList_ = other.tableList_; + bitField0_ = (bitField0_ & ~0x00000008); + } else { + ensureTableListIsMutable(); + tableList_.addAll(other.tableList_); + } + onChanged(); + } + } else { + if (!other.tableList_.isEmpty()) { + if (tableListBuilder_.isEmpty()) { + tableListBuilder_.dispose(); + tableListBuilder_ = null; + tableList_ = other.tableList_; + bitField0_ = (bitField0_ & ~0x00000008); + tableListBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? 
+ getTableListFieldBuilder() : null; + } else { + tableListBuilder_.addAllMessages(other.tableList_); + } + } + } + if (other.hasStartTs()) { + setStartTs(other.getStartTs()); + } + if (other.hasCompleteTs()) { + setCompleteTs(other.getCompleteTs()); + } + if (tstMapBuilder_ == null) { + if (!other.tstMap_.isEmpty()) { + if (tstMap_.isEmpty()) { + tstMap_ = other.tstMap_; + bitField0_ = (bitField0_ & ~0x00000040); + } else { + ensureTstMapIsMutable(); + tstMap_.addAll(other.tstMap_); + } + onChanged(); + } + } else { + if (!other.tstMap_.isEmpty()) { + if (tstMapBuilder_.isEmpty()) { + tstMapBuilder_.dispose(); + tstMapBuilder_ = null; + tstMap_ = other.tstMap_; + bitField0_ = (bitField0_ & ~0x00000040); + tstMapBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? + getTstMapFieldBuilder() : null; + } else { + tstMapBuilder_.addAllMessages(other.tstMap_); + } + } + } + if (dependentBackupImageBuilder_ == null) { + if (!other.dependentBackupImage_.isEmpty()) { + if (dependentBackupImage_.isEmpty()) { + dependentBackupImage_ = other.dependentBackupImage_; + bitField0_ = (bitField0_ & ~0x00000080); + } else { + ensureDependentBackupImageIsMutable(); + dependentBackupImage_.addAll(other.dependentBackupImage_); + } + onChanged(); + } + } else { + if (!other.dependentBackupImage_.isEmpty()) { + if (dependentBackupImageBuilder_.isEmpty()) { + dependentBackupImageBuilder_.dispose(); + dependentBackupImageBuilder_ = null; + dependentBackupImage_ = other.dependentBackupImage_; + bitField0_ = (bitField0_ & ~0x00000080); + dependentBackupImageBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? + getDependentBackupImageFieldBuilder() : null; + } else { + dependentBackupImageBuilder_.addAllMessages(other.dependentBackupImage_); + } + } + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasVersion()) { + + return false; + } + if (!hasBackupId()) { + + return false; + } + if (!hasType()) { + + return false; + } + if (!hasStartTs()) { + + return false; + } + if (!hasCompleteTs()) { + + return false; + } + for (int i = 0; i < getTableListCount(); i++) { + if (!getTableList(i).isInitialized()) { + + return false; + } + } + for (int i = 0; i < getTstMapCount(); i++) { + if (!getTstMap(i).isInitialized()) { + + return false; + } + } + for (int i = 0; i < getDependentBackupImageCount(); i++) { + if (!getDependentBackupImage(i).isInitialized()) { + + return false; + } + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupManifest) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required string version = 1; + private java.lang.Object version_ = ""; + /** + * required string version = 1; + */ + public boolean hasVersion() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string version = 1; + */ + public java.lang.String getVersion() { + java.lang.Object ref = version_; + if (!(ref instanceof 
java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + version_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * required string version = 1; + */ + public com.google.protobuf.ByteString + getVersionBytes() { + java.lang.Object ref = version_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + version_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * required string version = 1; + */ + public Builder setVersion( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + version_ = value; + onChanged(); + return this; + } + /** + * required string version = 1; + */ + public Builder clearVersion() { + bitField0_ = (bitField0_ & ~0x00000001); + version_ = getDefaultInstance().getVersion(); + onChanged(); + return this; + } + /** + * required string version = 1; + */ + public Builder setVersionBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + version_ = value; + onChanged(); + return this; + } + + // required string backup_id = 2; + private java.lang.Object backupId_ = ""; + /** + * required string backup_id = 2; + */ + public boolean hasBackupId() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required string backup_id = 2; + */ + public java.lang.String getBackupId() { + java.lang.Object ref = backupId_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + backupId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * required string backup_id = 2; + */ + public com.google.protobuf.ByteString + getBackupIdBytes() { + java.lang.Object ref = backupId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + backupId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * required string backup_id = 2; + */ + public Builder setBackupId( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000002; + backupId_ = value; + onChanged(); + return this; + } + /** + * required string backup_id = 2; + */ + public Builder clearBackupId() { + bitField0_ = (bitField0_ & ~0x00000002); + backupId_ = getDefaultInstance().getBackupId(); + onChanged(); + return this; + } + /** + * required string backup_id = 2; + */ + public Builder setBackupIdBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000002; + backupId_ = value; + onChanged(); + return this; + } + + // required .hbase.pb.BackupType type = 3; + private org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType type_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType.FULL; + /** + * required .hbase.pb.BackupType type = 3; + */ + public boolean hasType() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * required .hbase.pb.BackupType type = 3; + */ + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType getType() { + return type_; + } + /** + * required .hbase.pb.BackupType type = 3; + */ + public Builder 
setType(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000004; + type_ = value; + onChanged(); + return this; + } + /** + * required .hbase.pb.BackupType type = 3; + */ + public Builder clearType() { + bitField0_ = (bitField0_ & ~0x00000004); + type_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType.FULL; + onChanged(); + return this; + } + + // repeated .hbase.pb.TableName table_list = 4; + private java.util.List tableList_ = + java.util.Collections.emptyList(); + private void ensureTableListIsMutable() { + if (!((bitField0_ & 0x00000008) == 0x00000008)) { + tableList_ = new java.util.ArrayList(tableList_); + bitField0_ |= 0x00000008; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> tableListBuilder_; + + /** + * repeated .hbase.pb.TableName table_list = 4; + */ + public java.util.List getTableListList() { + if (tableListBuilder_ == null) { + return java.util.Collections.unmodifiableList(tableList_); + } else { + return tableListBuilder_.getMessageList(); + } + } + /** + * repeated .hbase.pb.TableName table_list = 4; + */ + public int getTableListCount() { + if (tableListBuilder_ == null) { + return tableList_.size(); + } else { + return tableListBuilder_.getCount(); + } + } + /** + * repeated .hbase.pb.TableName table_list = 4; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableList(int index) { + if (tableListBuilder_ == null) { + return tableList_.get(index); + } else { + return tableListBuilder_.getMessage(index); + } + } + /** + * repeated .hbase.pb.TableName table_list = 4; + */ + public Builder setTableList( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { + if (tableListBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureTableListIsMutable(); + tableList_.set(index, value); + onChanged(); + } else { + tableListBuilder_.setMessage(index, value); + } + return this; + } + /** + * repeated .hbase.pb.TableName table_list = 4; + */ + public Builder setTableList( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) { + if (tableListBuilder_ == null) { + ensureTableListIsMutable(); + tableList_.set(index, builderForValue.build()); + onChanged(); + } else { + tableListBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.TableName table_list = 4; + */ + public Builder addTableList(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { + if (tableListBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureTableListIsMutable(); + tableList_.add(value); + onChanged(); + } else { + tableListBuilder_.addMessage(value); + } + return this; + } + /** + * repeated .hbase.pb.TableName table_list = 4; + */ + public Builder addTableList( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { + if (tableListBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureTableListIsMutable(); + tableList_.add(index, value); + onChanged(); + } else { + tableListBuilder_.addMessage(index, value); + } + return this; + } + /** + * 
repeated .hbase.pb.TableName table_list = 4; + */ + public Builder addTableList( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) { + if (tableListBuilder_ == null) { + ensureTableListIsMutable(); + tableList_.add(builderForValue.build()); + onChanged(); + } else { + tableListBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.TableName table_list = 4; + */ + public Builder addTableList( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) { + if (tableListBuilder_ == null) { + ensureTableListIsMutable(); + tableList_.add(index, builderForValue.build()); + onChanged(); + } else { + tableListBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.TableName table_list = 4; + */ + public Builder addAllTableList( + java.lang.Iterable values) { + if (tableListBuilder_ == null) { + ensureTableListIsMutable(); + super.addAll(values, tableList_); + onChanged(); + } else { + tableListBuilder_.addAllMessages(values); + } + return this; + } + /** + * repeated .hbase.pb.TableName table_list = 4; + */ + public Builder clearTableList() { + if (tableListBuilder_ == null) { + tableList_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000008); + onChanged(); + } else { + tableListBuilder_.clear(); + } + return this; + } + /** + * repeated .hbase.pb.TableName table_list = 4; + */ + public Builder removeTableList(int index) { + if (tableListBuilder_ == null) { + ensureTableListIsMutable(); + tableList_.remove(index); + onChanged(); + } else { + tableListBuilder_.remove(index); + } + return this; + } + /** + * repeated .hbase.pb.TableName table_list = 4; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder getTableListBuilder( + int index) { + return getTableListFieldBuilder().getBuilder(index); + } + /** + * repeated .hbase.pb.TableName table_list = 4; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableListOrBuilder( + int index) { + if (tableListBuilder_ == null) { + return tableList_.get(index); } else { + return tableListBuilder_.getMessageOrBuilder(index); + } + } + /** + * repeated .hbase.pb.TableName table_list = 4; + */ + public java.util.List + getTableListOrBuilderList() { + if (tableListBuilder_ != null) { + return tableListBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(tableList_); + } + } + /** + * repeated .hbase.pb.TableName table_list = 4; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder addTableListBuilder() { + return getTableListFieldBuilder().addBuilder( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance()); + } + /** + * repeated .hbase.pb.TableName table_list = 4; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder addTableListBuilder( + int index) { + return getTableListFieldBuilder().addBuilder( + index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance()); + } + /** + * repeated .hbase.pb.TableName table_list = 4; + */ + public java.util.List + getTableListBuilderList() { + return getTableListFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> + getTableListFieldBuilder() { + if (tableListBuilder_ == null) { + tableListBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder>( + tableList_, + ((bitField0_ & 0x00000008) == 0x00000008), + getParentForChildren(), + isClean()); + tableList_ = null; + } + return tableListBuilder_; + } + + // required uint64 start_ts = 5; + private long startTs_ ; + /** + * required uint64 start_ts = 5; + */ + public boolean hasStartTs() { + return ((bitField0_ & 0x00000010) == 0x00000010); + } + /** + * required uint64 start_ts = 5; + */ + public long getStartTs() { + return startTs_; + } + /** + * required uint64 start_ts = 5; + */ + public Builder setStartTs(long value) { + bitField0_ |= 0x00000010; + startTs_ = value; + onChanged(); + return this; + } + /** + * required uint64 start_ts = 5; + */ + public Builder clearStartTs() { + bitField0_ = (bitField0_ & ~0x00000010); + startTs_ = 0L; + onChanged(); + return this; + } + + // required uint64 complete_ts = 6; + private long completeTs_ ; + /** + * required uint64 complete_ts = 6; + */ + public boolean hasCompleteTs() { + return ((bitField0_ & 0x00000020) == 0x00000020); + } + /** + * required uint64 complete_ts = 6; + */ + public long getCompleteTs() { + return completeTs_; + } + /** + * required uint64 complete_ts = 6; + */ + public Builder setCompleteTs(long value) { + bitField0_ |= 0x00000020; + completeTs_ = value; + onChanged(); + return this; + } + /** + * required uint64 complete_ts = 6; + */ + public Builder clearCompleteTs() { + bitField0_ = (bitField0_ & ~0x00000020); + completeTs_ = 0L; + onChanged(); + return this; + } + + // repeated .hbase.pb.TableServerTimestamp tst_map = 7; + private java.util.List tstMap_ = + java.util.Collections.emptyList(); + private void ensureTstMapIsMutable() { + if (!((bitField0_ & 0x00000040) == 0x00000040)) { + tstMap_ = new java.util.ArrayList(tstMap_); + bitField0_ |= 0x00000040; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp.Builder, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestampOrBuilder> tstMapBuilder_; + + /** + * repeated .hbase.pb.TableServerTimestamp tst_map = 7; + */ + public java.util.List getTstMapList() { + if (tstMapBuilder_ == null) { + return java.util.Collections.unmodifiableList(tstMap_); + } else { + return tstMapBuilder_.getMessageList(); + } + } + /** + * repeated .hbase.pb.TableServerTimestamp tst_map = 7; + */ + public int getTstMapCount() { + if (tstMapBuilder_ == null) { + return tstMap_.size(); + } else { + return tstMapBuilder_.getCount(); + } + } + /** + * repeated .hbase.pb.TableServerTimestamp tst_map = 7; + */ + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp getTstMap(int index) { + if (tstMapBuilder_ == null) { + return tstMap_.get(index); + } else { + return tstMapBuilder_.getMessage(index); + } + } + /** + * repeated .hbase.pb.TableServerTimestamp tst_map = 7; + */ + public Builder setTstMap( + int index, 
org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp value) { + if (tstMapBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureTstMapIsMutable(); + tstMap_.set(index, value); + onChanged(); + } else { + tstMapBuilder_.setMessage(index, value); + } + return this; + } + /** + * repeated .hbase.pb.TableServerTimestamp tst_map = 7; + */ + public Builder setTstMap( + int index, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp.Builder builderForValue) { + if (tstMapBuilder_ == null) { + ensureTstMapIsMutable(); + tstMap_.set(index, builderForValue.build()); + onChanged(); + } else { + tstMapBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.TableServerTimestamp tst_map = 7; + */ + public Builder addTstMap(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp value) { + if (tstMapBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureTstMapIsMutable(); + tstMap_.add(value); + onChanged(); + } else { + tstMapBuilder_.addMessage(value); + } + return this; + } + /** + * repeated .hbase.pb.TableServerTimestamp tst_map = 7; + */ + public Builder addTstMap( + int index, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp value) { + if (tstMapBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureTstMapIsMutable(); + tstMap_.add(index, value); + onChanged(); + } else { + tstMapBuilder_.addMessage(index, value); + } + return this; + } + /** + * repeated .hbase.pb.TableServerTimestamp tst_map = 7; + */ + public Builder addTstMap( + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp.Builder builderForValue) { + if (tstMapBuilder_ == null) { + ensureTstMapIsMutable(); + tstMap_.add(builderForValue.build()); + onChanged(); + } else { + tstMapBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.TableServerTimestamp tst_map = 7; + */ + public Builder addTstMap( + int index, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp.Builder builderForValue) { + if (tstMapBuilder_ == null) { + ensureTstMapIsMutable(); + tstMap_.add(index, builderForValue.build()); + onChanged(); + } else { + tstMapBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.TableServerTimestamp tst_map = 7; + */ + public Builder addAllTstMap( + java.lang.Iterable values) { + if (tstMapBuilder_ == null) { + ensureTstMapIsMutable(); + super.addAll(values, tstMap_); + onChanged(); + } else { + tstMapBuilder_.addAllMessages(values); + } + return this; + } + /** + * repeated .hbase.pb.TableServerTimestamp tst_map = 7; + */ + public Builder clearTstMap() { + if (tstMapBuilder_ == null) { + tstMap_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000040); + onChanged(); + } else { + tstMapBuilder_.clear(); + } + return this; + } + /** + * repeated .hbase.pb.TableServerTimestamp tst_map = 7; + */ + public Builder removeTstMap(int index) { + if (tstMapBuilder_ == null) { + ensureTstMapIsMutable(); + tstMap_.remove(index); + onChanged(); + } else { + tstMapBuilder_.remove(index); + } + return this; + } + /** + * repeated .hbase.pb.TableServerTimestamp tst_map = 7; + */ + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp.Builder getTstMapBuilder( + int index) { + return 
getTstMapFieldBuilder().getBuilder(index); + } + /** + * repeated .hbase.pb.TableServerTimestamp tst_map = 7; + */ + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestampOrBuilder getTstMapOrBuilder( + int index) { + if (tstMapBuilder_ == null) { + return tstMap_.get(index); } else { + return tstMapBuilder_.getMessageOrBuilder(index); + } + } + /** + * repeated .hbase.pb.TableServerTimestamp tst_map = 7; + */ + public java.util.List + getTstMapOrBuilderList() { + if (tstMapBuilder_ != null) { + return tstMapBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(tstMap_); + } + } + /** + * repeated .hbase.pb.TableServerTimestamp tst_map = 7; + */ + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp.Builder addTstMapBuilder() { + return getTstMapFieldBuilder().addBuilder( + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp.getDefaultInstance()); + } + /** + * repeated .hbase.pb.TableServerTimestamp tst_map = 7; + */ + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp.Builder addTstMapBuilder( + int index) { + return getTstMapFieldBuilder().addBuilder( + index, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp.getDefaultInstance()); + } + /** + * repeated .hbase.pb.TableServerTimestamp tst_map = 7; + */ + public java.util.List + getTstMapBuilderList() { + return getTstMapFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp.Builder, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestampOrBuilder> + getTstMapFieldBuilder() { + if (tstMapBuilder_ == null) { + tstMapBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestamp.Builder, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableServerTimestampOrBuilder>( + tstMap_, + ((bitField0_ & 0x00000040) == 0x00000040), + getParentForChildren(), + isClean()); + tstMap_ = null; + } + return tstMapBuilder_; + } + + // repeated .hbase.pb.BackupImage dependent_backup_image = 8; + private java.util.List dependentBackupImage_ = + java.util.Collections.emptyList(); + private void ensureDependentBackupImageIsMutable() { + if (!((bitField0_ & 0x00000080) == 0x00000080)) { + dependentBackupImage_ = new java.util.ArrayList(dependentBackupImage_); + bitField0_ |= 0x00000080; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage.Builder, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImageOrBuilder> dependentBackupImageBuilder_; + + /** + * repeated .hbase.pb.BackupImage dependent_backup_image = 8; + */ + public java.util.List getDependentBackupImageList() { + if (dependentBackupImageBuilder_ == null) { + return java.util.Collections.unmodifiableList(dependentBackupImage_); + } else { + return dependentBackupImageBuilder_.getMessageList(); + } + } + /** + * repeated .hbase.pb.BackupImage dependent_backup_image = 8; + */ + public int getDependentBackupImageCount() { + if (dependentBackupImageBuilder_ == null) { + return 
dependentBackupImage_.size(); + } else { + return dependentBackupImageBuilder_.getCount(); + } + } + /** + * repeated .hbase.pb.BackupImage dependent_backup_image = 8; + */ + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage getDependentBackupImage(int index) { + if (dependentBackupImageBuilder_ == null) { + return dependentBackupImage_.get(index); + } else { + return dependentBackupImageBuilder_.getMessage(index); + } + } + /** + * repeated .hbase.pb.BackupImage dependent_backup_image = 8; + */ + public Builder setDependentBackupImage( + int index, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage value) { + if (dependentBackupImageBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureDependentBackupImageIsMutable(); + dependentBackupImage_.set(index, value); + onChanged(); + } else { + dependentBackupImageBuilder_.setMessage(index, value); + } + return this; + } + /** + * repeated .hbase.pb.BackupImage dependent_backup_image = 8; + */ + public Builder setDependentBackupImage( + int index, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage.Builder builderForValue) { + if (dependentBackupImageBuilder_ == null) { + ensureDependentBackupImageIsMutable(); + dependentBackupImage_.set(index, builderForValue.build()); + onChanged(); + } else { + dependentBackupImageBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.BackupImage dependent_backup_image = 8; + */ + public Builder addDependentBackupImage(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage value) { + if (dependentBackupImageBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureDependentBackupImageIsMutable(); + dependentBackupImage_.add(value); + onChanged(); + } else { + dependentBackupImageBuilder_.addMessage(value); + } + return this; + } + /** + * repeated .hbase.pb.BackupImage dependent_backup_image = 8; + */ + public Builder addDependentBackupImage( + int index, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage value) { + if (dependentBackupImageBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureDependentBackupImageIsMutable(); + dependentBackupImage_.add(index, value); + onChanged(); + } else { + dependentBackupImageBuilder_.addMessage(index, value); + } + return this; + } + /** + * repeated .hbase.pb.BackupImage dependent_backup_image = 8; + */ + public Builder addDependentBackupImage( + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage.Builder builderForValue) { + if (dependentBackupImageBuilder_ == null) { + ensureDependentBackupImageIsMutable(); + dependentBackupImage_.add(builderForValue.build()); + onChanged(); + } else { + dependentBackupImageBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.BackupImage dependent_backup_image = 8; + */ + public Builder addDependentBackupImage( + int index, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage.Builder builderForValue) { + if (dependentBackupImageBuilder_ == null) { + ensureDependentBackupImageIsMutable(); + dependentBackupImage_.add(index, builderForValue.build()); + onChanged(); + } else { + dependentBackupImageBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.BackupImage dependent_backup_image = 8; + */ + public Builder addAllDependentBackupImage( + java.lang.Iterable values) { + 
if (dependentBackupImageBuilder_ == null) { + ensureDependentBackupImageIsMutable(); + super.addAll(values, dependentBackupImage_); + onChanged(); + } else { + dependentBackupImageBuilder_.addAllMessages(values); + } + return this; + } + /** + * repeated .hbase.pb.BackupImage dependent_backup_image = 8; + */ + public Builder clearDependentBackupImage() { + if (dependentBackupImageBuilder_ == null) { + dependentBackupImage_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000080); + onChanged(); + } else { + dependentBackupImageBuilder_.clear(); + } + return this; + } + /** + * repeated .hbase.pb.BackupImage dependent_backup_image = 8; + */ + public Builder removeDependentBackupImage(int index) { + if (dependentBackupImageBuilder_ == null) { + ensureDependentBackupImageIsMutable(); + dependentBackupImage_.remove(index); + onChanged(); + } else { + dependentBackupImageBuilder_.remove(index); + } + return this; + } + /** + * repeated .hbase.pb.BackupImage dependent_backup_image = 8; + */ + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage.Builder getDependentBackupImageBuilder( + int index) { + return getDependentBackupImageFieldBuilder().getBuilder(index); + } + /** + * repeated .hbase.pb.BackupImage dependent_backup_image = 8; + */ + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImageOrBuilder getDependentBackupImageOrBuilder( + int index) { + if (dependentBackupImageBuilder_ == null) { + return dependentBackupImage_.get(index); } else { + return dependentBackupImageBuilder_.getMessageOrBuilder(index); + } + } + /** + * repeated .hbase.pb.BackupImage dependent_backup_image = 8; + */ + public java.util.List + getDependentBackupImageOrBuilderList() { + if (dependentBackupImageBuilder_ != null) { + return dependentBackupImageBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(dependentBackupImage_); + } + } + /** + * repeated .hbase.pb.BackupImage dependent_backup_image = 8; + */ + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage.Builder addDependentBackupImageBuilder() { + return getDependentBackupImageFieldBuilder().addBuilder( + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage.getDefaultInstance()); + } + /** + * repeated .hbase.pb.BackupImage dependent_backup_image = 8; + */ + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage.Builder addDependentBackupImageBuilder( + int index) { + return getDependentBackupImageFieldBuilder().addBuilder( + index, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage.getDefaultInstance()); + } + /** + * repeated .hbase.pb.BackupImage dependent_backup_image = 8; + */ + public java.util.List + getDependentBackupImageBuilderList() { + return getDependentBackupImageFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage.Builder, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImageOrBuilder> + getDependentBackupImageFieldBuilder() { + if (dependentBackupImageBuilder_ == null) { + dependentBackupImageBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImage.Builder, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupImageOrBuilder>( + 
dependentBackupImage_, + ((bitField0_ & 0x00000080) == 0x00000080), + getParentForChildren(), + isClean()); + dependentBackupImage_ = null; + } + return dependentBackupImageBuilder_; + } + + // @@protoc_insertion_point(builder_scope:hbase.pb.BackupManifest) + } + + static { + defaultInstance = new BackupManifest(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:hbase.pb.BackupManifest) + } + + public interface TableBackupStatusOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required .hbase.pb.TableName table = 1; + /** + * required .hbase.pb.TableName table = 1; + */ + boolean hasTable(); + /** + * required .hbase.pb.TableName table = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTable(); + /** + * required .hbase.pb.TableName table = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableOrBuilder(); + + // required string target_dir = 2; + /** + * required string target_dir = 2; + */ + boolean hasTargetDir(); + /** + * required string target_dir = 2; + */ + java.lang.String getTargetDir(); + /** + * required string target_dir = 2; + */ + com.google.protobuf.ByteString + getTargetDirBytes(); + + // optional string snapshot = 3; + /** + * optional string snapshot = 3; + */ + boolean hasSnapshot(); + /** + * optional string snapshot = 3; + */ + java.lang.String getSnapshot(); + /** + * optional string snapshot = 3; + */ + com.google.protobuf.ByteString + getSnapshotBytes(); + } + /** + * Protobuf type {@code hbase.pb.TableBackupStatus} + */ + public static final class TableBackupStatus extends + com.google.protobuf.GeneratedMessage + implements TableBackupStatusOrBuilder { + // Use TableBackupStatus.newBuilder() to construct. + private TableBackupStatus(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private TableBackupStatus(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final TableBackupStatus defaultInstance; + public static TableBackupStatus getDefaultInstance() { + return defaultInstance; + } + + public TableBackupStatus getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private TableBackupStatus( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder subBuilder = null; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + subBuilder = table_.toBuilder(); + } + table_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(table_); + table_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000001; 
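+ // End of tag 10 (field 1, table; wire type 2): a repeated occurrence of this singular
+ // message field is merged into the previously parsed TableName above.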
+ break; + } + case 18: { + bitField0_ |= 0x00000002; + targetDir_ = input.readBytes(); + break; + } + case 26: { + bitField0_ |= 0x00000004; + snapshot_ = input.readBytes(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_TableBackupStatus_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_TableBackupStatus_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus.class, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public TableBackupStatus parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new TableBackupStatus(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required .hbase.pb.TableName table = 1; + public static final int TABLE_FIELD_NUMBER = 1; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName table_; + /** + * required .hbase.pb.TableName table = 1; + */ + public boolean hasTable() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .hbase.pb.TableName table = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTable() { + return table_; + } + /** + * required .hbase.pb.TableName table = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableOrBuilder() { + return table_; + } + + // required string target_dir = 2; + public static final int TARGET_DIR_FIELD_NUMBER = 2; + private java.lang.Object targetDir_; + /** + * required string target_dir = 2; + */ + public boolean hasTargetDir() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required string target_dir = 2; + */ + public java.lang.String getTargetDir() { + java.lang.Object ref = targetDir_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + targetDir_ = s; + } + return s; + } + } + /** + * required string target_dir = 2; + */ + public com.google.protobuf.ByteString + getTargetDirBytes() { + java.lang.Object ref = targetDir_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + targetDir_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + // optional string snapshot = 3; + public static final int SNAPSHOT_FIELD_NUMBER = 3; + private 
java.lang.Object snapshot_; + /** + * optional string snapshot = 3; + */ + public boolean hasSnapshot() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * optional string snapshot = 3; + */ + public java.lang.String getSnapshot() { + java.lang.Object ref = snapshot_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + snapshot_ = s; + } + return s; + } + } + /** + * optional string snapshot = 3; + */ + public com.google.protobuf.ByteString + getSnapshotBytes() { + java.lang.Object ref = snapshot_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + snapshot_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private void initFields() { + table_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); + targetDir_ = ""; + snapshot_ = ""; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasTable()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasTargetDir()) { + memoizedIsInitialized = 0; + return false; + } + if (!getTable().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeMessage(1, table_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeBytes(2, getTargetDirBytes()); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + output.writeBytes(3, getSnapshotBytes()); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(1, table_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(2, getTargetDirBytes()); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(3, getSnapshotBytes()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus other = (org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus) obj; + + boolean result = true; + result = result && (hasTable() == other.hasTable()); + if (hasTable()) { + result = result && getTable() + .equals(other.getTable()); + } + result = result && (hasTargetDir() == 
other.hasTargetDir()); + if (hasTargetDir()) { + result = result && getTargetDir() + .equals(other.getTargetDir()); + } + result = result && (hasSnapshot() == other.hasSnapshot()); + if (hasSnapshot()) { + result = result && getSnapshot() + .equals(other.getSnapshot()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasTable()) { + hash = (37 * hash) + TABLE_FIELD_NUMBER; + hash = (53 * hash) + getTable().hashCode(); + } + if (hasTargetDir()) { + hash = (37 * hash) + TARGET_DIR_FIELD_NUMBER; + hash = (53 * hash) + getTargetDir().hashCode(); + } + if (hasSnapshot()) { + hash = (37 * hash) + SNAPSHOT_FIELD_NUMBER; + hash = (53 * hash) + getSnapshot().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws 
java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code hbase.pb.TableBackupStatus} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatusOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_TableBackupStatus_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_TableBackupStatus_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus.class, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getTableFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + if (tableBuilder_ == null) { + table_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); + } else { + tableBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + targetDir_ = ""; + bitField0_ = (bitField0_ & ~0x00000002); + snapshot_ = ""; + bitField0_ = (bitField0_ & ~0x00000004); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_TableBackupStatus_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus build() { + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus result = new org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus(this); + 
int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + if (tableBuilder_ == null) { + result.table_ = table_; + } else { + result.table_ = tableBuilder_.build(); + } + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.targetDir_ = targetDir_; + if (((from_bitField0_ & 0x00000004) == 0x00000004)) { + to_bitField0_ |= 0x00000004; + } + result.snapshot_ = snapshot_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus.getDefaultInstance()) return this; + if (other.hasTable()) { + mergeTable(other.getTable()); + } + if (other.hasTargetDir()) { + bitField0_ |= 0x00000002; + targetDir_ = other.targetDir_; + onChanged(); + } + if (other.hasSnapshot()) { + bitField0_ |= 0x00000004; + snapshot_ = other.snapshot_; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasTable()) { + + return false; + } + if (!hasTargetDir()) { + + return false; + } + if (!getTable().isInitialized()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required .hbase.pb.TableName table = 1; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName table_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> tableBuilder_; + /** + * required .hbase.pb.TableName table = 1; + */ + public boolean hasTable() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .hbase.pb.TableName table = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTable() { + if (tableBuilder_ == null) { + return table_; + } else { + return tableBuilder_.getMessage(); + } + } + /** + * required .hbase.pb.TableName table = 1; + */ + public Builder setTable(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { + if (tableBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + table_ = value; + onChanged(); + } else 
{ + tableBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .hbase.pb.TableName table = 1; + */ + public Builder setTable( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) { + if (tableBuilder_ == null) { + table_ = builderForValue.build(); + onChanged(); + } else { + tableBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .hbase.pb.TableName table = 1; + */ + public Builder mergeTable(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { + if (tableBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001) && + table_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance()) { + table_ = + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.newBuilder(table_).mergeFrom(value).buildPartial(); + } else { + table_ = value; + } + onChanged(); + } else { + tableBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .hbase.pb.TableName table = 1; + */ + public Builder clearTable() { + if (tableBuilder_ == null) { + table_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); + onChanged(); + } else { + tableBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + /** + * required .hbase.pb.TableName table = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder getTableBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return getTableFieldBuilder().getBuilder(); + } + /** + * required .hbase.pb.TableName table = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableOrBuilder() { + if (tableBuilder_ != null) { + return tableBuilder_.getMessageOrBuilder(); + } else { + return table_; + } + } + /** + * required .hbase.pb.TableName table = 1; + */ + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> + getTableFieldBuilder() { + if (tableBuilder_ == null) { + tableBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder>( + table_, + getParentForChildren(), + isClean()); + table_ = null; + } + return tableBuilder_; + } + + // required string target_dir = 2; + private java.lang.Object targetDir_ = ""; + /** + * required string target_dir = 2; + */ + public boolean hasTargetDir() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required string target_dir = 2; + */ + public java.lang.String getTargetDir() { + java.lang.Object ref = targetDir_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + targetDir_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * required string target_dir = 2; + */ + public com.google.protobuf.ByteString + getTargetDirBytes() { + java.lang.Object ref = targetDir_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + targetDir_ = b; + 
return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * required string target_dir = 2; + */ + public Builder setTargetDir( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000002; + targetDir_ = value; + onChanged(); + return this; + } + /** + * required string target_dir = 2; + */ + public Builder clearTargetDir() { + bitField0_ = (bitField0_ & ~0x00000002); + targetDir_ = getDefaultInstance().getTargetDir(); + onChanged(); + return this; + } + /** + * required string target_dir = 2; + */ + public Builder setTargetDirBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000002; + targetDir_ = value; + onChanged(); + return this; + } + + // optional string snapshot = 3; + private java.lang.Object snapshot_ = ""; + /** + * optional string snapshot = 3; + */ + public boolean hasSnapshot() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * optional string snapshot = 3; + */ + public java.lang.String getSnapshot() { + java.lang.Object ref = snapshot_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + snapshot_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * optional string snapshot = 3; + */ + public com.google.protobuf.ByteString + getSnapshotBytes() { + java.lang.Object ref = snapshot_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + snapshot_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * optional string snapshot = 3; + */ + public Builder setSnapshot( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000004; + snapshot_ = value; + onChanged(); + return this; + } + /** + * optional string snapshot = 3; + */ + public Builder clearSnapshot() { + bitField0_ = (bitField0_ & ~0x00000004); + snapshot_ = getDefaultInstance().getSnapshot(); + onChanged(); + return this; + } + /** + * optional string snapshot = 3; + */ + public Builder setSnapshotBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000004; + snapshot_ = value; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:hbase.pb.TableBackupStatus) + } + + static { + defaultInstance = new TableBackupStatus(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:hbase.pb.TableBackupStatus) + } + + public interface BackupInfoOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required string backup_id = 1; + /** + * required string backup_id = 1; + */ + boolean hasBackupId(); + /** + * required string backup_id = 1; + */ + java.lang.String getBackupId(); + /** + * required string backup_id = 1; + */ + com.google.protobuf.ByteString + getBackupIdBytes(); + + // required .hbase.pb.BackupType type = 2; + /** + * required .hbase.pb.BackupType type = 2; + */ + boolean hasType(); + /** + * required .hbase.pb.BackupType type = 2; + */ + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType getType(); + + // required string target_root_dir = 3; + /** + * required string target_root_dir = 3; + */ + boolean hasTargetRootDir(); + /** + * required string target_root_dir = 3; + */ + java.lang.String 
getTargetRootDir(); + /** + * required string target_root_dir = 3; + */ + com.google.protobuf.ByteString + getTargetRootDirBytes(); + + // optional .hbase.pb.BackupInfo.BackupState state = 4; + /** + * optional .hbase.pb.BackupInfo.BackupState state = 4; + */ + boolean hasState(); + /** + * optional .hbase.pb.BackupInfo.BackupState state = 4; + */ + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo.BackupState getState(); + + // optional .hbase.pb.BackupInfo.BackupPhase phase = 5; + /** + * optional .hbase.pb.BackupInfo.BackupPhase phase = 5; + */ + boolean hasPhase(); + /** + * optional .hbase.pb.BackupInfo.BackupPhase phase = 5; + */ + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo.BackupPhase getPhase(); + + // optional string failed_message = 6; + /** + * optional string failed_message = 6; + */ + boolean hasFailedMessage(); + /** + * optional string failed_message = 6; + */ + java.lang.String getFailedMessage(); + /** + * optional string failed_message = 6; + */ + com.google.protobuf.ByteString + getFailedMessageBytes(); + + // repeated .hbase.pb.TableBackupStatus table_backup_status = 7; + /** + * repeated .hbase.pb.TableBackupStatus table_backup_status = 7; + */ + java.util.List + getTableBackupStatusList(); + /** + * repeated .hbase.pb.TableBackupStatus table_backup_status = 7; + */ + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus getTableBackupStatus(int index); + /** + * repeated .hbase.pb.TableBackupStatus table_backup_status = 7; + */ + int getTableBackupStatusCount(); + /** + * repeated .hbase.pb.TableBackupStatus table_backup_status = 7; + */ + java.util.List + getTableBackupStatusOrBuilderList(); + /** + * repeated .hbase.pb.TableBackupStatus table_backup_status = 7; + */ + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatusOrBuilder getTableBackupStatusOrBuilder( + int index); + + // optional uint64 start_ts = 8; + /** + * optional uint64 start_ts = 8; + */ + boolean hasStartTs(); + /** + * optional uint64 start_ts = 8; + */ + long getStartTs(); + + // optional uint64 end_ts = 9; + /** + * optional uint64 end_ts = 9; + */ + boolean hasEndTs(); + /** + * optional uint64 end_ts = 9; + */ + long getEndTs(); + + // optional uint32 progress = 10; + /** + * optional uint32 progress = 10; + */ + boolean hasProgress(); + /** + * optional uint32 progress = 10; + */ + int getProgress(); + + // optional string job_id = 11; + /** + * optional string job_id = 11; + */ + boolean hasJobId(); + /** + * optional string job_id = 11; + */ + java.lang.String getJobId(); + /** + * optional string job_id = 11; + */ + com.google.protobuf.ByteString + getJobIdBytes(); + + // required uint32 workers_number = 12; + /** + * required uint32 workers_number = 12; + */ + boolean hasWorkersNumber(); + /** + * required uint32 workers_number = 12; + */ + int getWorkersNumber(); + + // required uint64 bandwidth = 13; + /** + * required uint64 bandwidth = 13; + */ + boolean hasBandwidth(); + /** + * required uint64 bandwidth = 13; + */ + long getBandwidth(); + } + /** + * Protobuf type {@code hbase.pb.BackupInfo} + */ + public static final class BackupInfo extends + com.google.protobuf.GeneratedMessage + implements BackupInfoOrBuilder { + // Use BackupInfo.newBuilder() to construct. 
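The generated comment above points at BackupInfo.newBuilder() as the construction path. A minimal construction sketch, assuming only the setters and the isInitialized() requirements that appear later in this generated class (backup_id, type, target_root_dir, workers_number and bandwidth are required); the id, path and numeric values are purely illustrative and not taken from this patch:

    import org.apache.hadoop.hbase.protobuf.generated.BackupProtos;

    public class BackupInfoBuilderSketch {
      // Sketch only: all five required fields must be populated before build(),
      // otherwise the generated build() throws an uninitialized-message exception.
      static BackupProtos.BackupInfo newFullBackupInfo() {
        return BackupProtos.BackupInfo.newBuilder()
            .setBackupId("backup_1465678901234")                    // required field 1 (illustrative id)
            .setType(BackupProtos.BackupType.FULL)                  // required field 2
            .setTargetRootDir("hdfs://nn:8020/backup")              // required field 3 (illustrative path)
            .setWorkersNumber(3)                                    // required field 12
            .setBandwidth(100L)                                     // required field 13 (units not defined by this message)
            .setState(BackupProtos.BackupInfo.BackupState.WAITING)  // optional field 4
            .build();
      }
    }

The repeated table_backup_status entries would be attached through the Builder's standard repeated-field add methods, which are not shown in this hunk.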
+ private BackupInfo(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private BackupInfo(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final BackupInfo defaultInstance; + public static BackupInfo getDefaultInstance() { + return defaultInstance; + } + + public BackupInfo getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private BackupInfo( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + bitField0_ |= 0x00000001; + backupId_ = input.readBytes(); + break; + } + case 16: { + int rawValue = input.readEnum(); + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType value = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType.valueOf(rawValue); + if (value == null) { + unknownFields.mergeVarintField(2, rawValue); + } else { + bitField0_ |= 0x00000002; + type_ = value; + } + break; + } + case 26: { + bitField0_ |= 0x00000004; + targetRootDir_ = input.readBytes(); + break; + } + case 32: { + int rawValue = input.readEnum(); + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo.BackupState value = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo.BackupState.valueOf(rawValue); + if (value == null) { + unknownFields.mergeVarintField(4, rawValue); + } else { + bitField0_ |= 0x00000008; + state_ = value; + } + break; + } + case 40: { + int rawValue = input.readEnum(); + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo.BackupPhase value = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo.BackupPhase.valueOf(rawValue); + if (value == null) { + unknownFields.mergeVarintField(5, rawValue); + } else { + bitField0_ |= 0x00000010; + phase_ = value; + } + break; + } + case 50: { + bitField0_ |= 0x00000020; + failedMessage_ = input.readBytes(); + break; + } + case 58: { + if (!((mutable_bitField0_ & 0x00000040) == 0x00000040)) { + tableBackupStatus_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000040; + } + tableBackupStatus_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus.PARSER, extensionRegistry)); + break; + } + case 64: { + bitField0_ |= 0x00000040; + startTs_ = input.readUInt64(); + break; + } + case 72: { + bitField0_ |= 0x00000080; + endTs_ = input.readUInt64(); + break; + } + case 80: { + bitField0_ |= 0x00000100; + progress_ = input.readUInt32(); + break; + } + case 90: { + bitField0_ |= 0x00000200; + jobId_ = input.readBytes(); + break; + } + case 96: { + bitField0_ |= 0x00000400; + workersNumber_ = input.readUInt32(); + break; + } + case 104: { + bitField0_ |= 0x00000800; + bandwidth_ = input.readUInt64(); + break; + } + } + } + } catch 
(com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000040) == 0x00000040)) { + tableBackupStatus_ = java.util.Collections.unmodifiableList(tableBackupStatus_); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_BackupInfo_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_BackupInfo_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo.class, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public BackupInfo parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new BackupInfo(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + /** + * Protobuf enum {@code hbase.pb.BackupInfo.BackupState} + */ + public enum BackupState + implements com.google.protobuf.ProtocolMessageEnum { + /** + * WAITING = 0; + */ + WAITING(0, 0), + /** + * RUNNING = 1; + */ + RUNNING(1, 1), + /** + * COMPLETE = 2; + */ + COMPLETE(2, 2), + /** + * FAILED = 3; + */ + FAILED(3, 3), + /** + * CANCELLED = 4; + */ + CANCELLED(4, 4), + ; + + /** + * WAITING = 0; + */ + public static final int WAITING_VALUE = 0; + /** + * RUNNING = 1; + */ + public static final int RUNNING_VALUE = 1; + /** + * COMPLETE = 2; + */ + public static final int COMPLETE_VALUE = 2; + /** + * FAILED = 3; + */ + public static final int FAILED_VALUE = 3; + /** + * CANCELLED = 4; + */ + public static final int CANCELLED_VALUE = 4; + + + public final int getNumber() { return value; } + + public static BackupState valueOf(int value) { + switch (value) { + case 0: return WAITING; + case 1: return RUNNING; + case 2: return COMPLETE; + case 3: return FAILED; + case 4: return CANCELLED; + default: return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap + internalGetValueMap() { + return internalValueMap; + } + private static com.google.protobuf.Internal.EnumLiteMap + internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public BackupState findValueByNumber(int number) { + return BackupState.valueOf(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor + getValueDescriptor() { + return getDescriptor().getValues().get(index); + } + public final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptorForType() { + return getDescriptor(); + } + public static final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo.getDescriptor().getEnumTypes().get(0); + } + + private static final BackupState[] VALUES = values(); + + public static BackupState valueOf( + 
com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "EnumValueDescriptor is not for this type."); + } + return VALUES[desc.getIndex()]; + } + + private final int index; + private final int value; + + private BackupState(int index, int value) { + this.index = index; + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:hbase.pb.BackupInfo.BackupState) + } + + /** + * Protobuf enum {@code hbase.pb.BackupInfo.BackupPhase} + */ + public enum BackupPhase + implements com.google.protobuf.ProtocolMessageEnum { + /** + * REQUEST = 0; + */ + REQUEST(0, 0), + /** + * SNAPSHOT = 1; + */ + SNAPSHOT(1, 1), + /** + * PREPARE_INCREMENTAL = 2; + */ + PREPARE_INCREMENTAL(2, 2), + /** + * SNAPSHOTCOPY = 3; + */ + SNAPSHOTCOPY(3, 3), + /** + * INCREMENTAL_COPY = 4; + */ + INCREMENTAL_COPY(4, 4), + /** + * STORE_MANIFEST = 5; + */ + STORE_MANIFEST(5, 5), + ; + + /** + * REQUEST = 0; + */ + public static final int REQUEST_VALUE = 0; + /** + * SNAPSHOT = 1; + */ + public static final int SNAPSHOT_VALUE = 1; + /** + * PREPARE_INCREMENTAL = 2; + */ + public static final int PREPARE_INCREMENTAL_VALUE = 2; + /** + * SNAPSHOTCOPY = 3; + */ + public static final int SNAPSHOTCOPY_VALUE = 3; + /** + * INCREMENTAL_COPY = 4; + */ + public static final int INCREMENTAL_COPY_VALUE = 4; + /** + * STORE_MANIFEST = 5; + */ + public static final int STORE_MANIFEST_VALUE = 5; + + + public final int getNumber() { return value; } + + public static BackupPhase valueOf(int value) { + switch (value) { + case 0: return REQUEST; + case 1: return SNAPSHOT; + case 2: return PREPARE_INCREMENTAL; + case 3: return SNAPSHOTCOPY; + case 4: return INCREMENTAL_COPY; + case 5: return STORE_MANIFEST; + default: return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap + internalGetValueMap() { + return internalValueMap; + } + private static com.google.protobuf.Internal.EnumLiteMap + internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public BackupPhase findValueByNumber(int number) { + return BackupPhase.valueOf(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor + getValueDescriptor() { + return getDescriptor().getValues().get(index); + } + public final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptorForType() { + return getDescriptor(); + } + public static final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo.getDescriptor().getEnumTypes().get(1); + } + + private static final BackupPhase[] VALUES = values(); + + public static BackupPhase valueOf( + com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "EnumValueDescriptor is not for this type."); + } + return VALUES[desc.getIndex()]; + } + + private final int index; + private final int value; + + private BackupPhase(int index, int value) { + this.index = index; + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:hbase.pb.BackupInfo.BackupPhase) + } + + private int bitField0_; + // required string backup_id = 1; + public static final int BACKUP_ID_FIELD_NUMBER = 1; + private java.lang.Object backupId_; + /** + * required string backup_id = 1; + */ + public boolean hasBackupId() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string backup_id = 1; + */ + 
public java.lang.String getBackupId() { + java.lang.Object ref = backupId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + backupId_ = s; + } + return s; + } + } + /** + * required string backup_id = 1; + */ + public com.google.protobuf.ByteString + getBackupIdBytes() { + java.lang.Object ref = backupId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + backupId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + // required .hbase.pb.BackupType type = 2; + public static final int TYPE_FIELD_NUMBER = 2; + private org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType type_; + /** + * required .hbase.pb.BackupType type = 2; + */ + public boolean hasType() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required .hbase.pb.BackupType type = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType getType() { + return type_; + } + + // required string target_root_dir = 3; + public static final int TARGET_ROOT_DIR_FIELD_NUMBER = 3; + private java.lang.Object targetRootDir_; + /** + * required string target_root_dir = 3; + */ + public boolean hasTargetRootDir() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * required string target_root_dir = 3; + */ + public java.lang.String getTargetRootDir() { + java.lang.Object ref = targetRootDir_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + targetRootDir_ = s; + } + return s; + } + } + /** + * required string target_root_dir = 3; + */ + public com.google.protobuf.ByteString + getTargetRootDirBytes() { + java.lang.Object ref = targetRootDir_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + targetRootDir_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + // optional .hbase.pb.BackupInfo.BackupState state = 4; + public static final int STATE_FIELD_NUMBER = 4; + private org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo.BackupState state_; + /** + * optional .hbase.pb.BackupInfo.BackupState state = 4; + */ + public boolean hasState() { + return ((bitField0_ & 0x00000008) == 0x00000008); + } + /** + * optional .hbase.pb.BackupInfo.BackupState state = 4; + */ + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo.BackupState getState() { + return state_; + } + + // optional .hbase.pb.BackupInfo.BackupPhase phase = 5; + public static final int PHASE_FIELD_NUMBER = 5; + private org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo.BackupPhase phase_; + /** + * optional .hbase.pb.BackupInfo.BackupPhase phase = 5; + */ + public boolean hasPhase() { + return ((bitField0_ & 0x00000010) == 0x00000010); + } + /** + * optional .hbase.pb.BackupInfo.BackupPhase phase = 5; + */ + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo.BackupPhase getPhase() { + return phase_; + } + + // optional string failed_message = 6; + public static final int FAILED_MESSAGE_FIELD_NUMBER = 6; + 
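Each optional or required scalar above is paired with a has*() accessor backed by the bitField0_ presence mask, so a reader should check presence before trusting a default value. A reader-side sketch under that assumption, using only accessors visible in this generated class; `serialized` is a placeholder byte[] obtained elsewhere:

    import org.apache.hadoop.hbase.protobuf.generated.BackupProtos;

    public class BackupInfoReaderSketch {
      // Sketch only: required fields can be read directly, optional ones are guarded.
      static void dump(byte[] serialized) throws com.google.protobuf.InvalidProtocolBufferException {
        BackupProtos.BackupInfo info = BackupProtos.BackupInfo.parseFrom(serialized);
        System.out.println(info.getBackupId() + " " + info.getType() + " -> " + info.getTargetRootDir());
        if (info.hasState()) {        // optional field 4: check the presence bit first
          System.out.println("state=" + info.getState());
        }
        if (info.hasProgress()) {     // optional field 10
          System.out.println("progress=" + info.getProgress() + "%");
        }
        // repeated field 7: one TableBackupStatus per backed-up table
        for (int i = 0; i < info.getTableBackupStatusCount(); i++) {
          System.out.println("  " + info.getTableBackupStatus(i).getTargetDir());
        }
      }
    }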
private java.lang.Object failedMessage_; + /** + * optional string failed_message = 6; + */ + public boolean hasFailedMessage() { + return ((bitField0_ & 0x00000020) == 0x00000020); + } + /** + * optional string failed_message = 6; + */ + public java.lang.String getFailedMessage() { + java.lang.Object ref = failedMessage_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + failedMessage_ = s; + } + return s; + } + } + /** + * optional string failed_message = 6; + */ + public com.google.protobuf.ByteString + getFailedMessageBytes() { + java.lang.Object ref = failedMessage_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + failedMessage_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + // repeated .hbase.pb.TableBackupStatus table_backup_status = 7; + public static final int TABLE_BACKUP_STATUS_FIELD_NUMBER = 7; + private java.util.List tableBackupStatus_; + /** + * repeated .hbase.pb.TableBackupStatus table_backup_status = 7; + */ + public java.util.List getTableBackupStatusList() { + return tableBackupStatus_; + } + /** + * repeated .hbase.pb.TableBackupStatus table_backup_status = 7; + */ + public java.util.List + getTableBackupStatusOrBuilderList() { + return tableBackupStatus_; + } + /** + * repeated .hbase.pb.TableBackupStatus table_backup_status = 7; + */ + public int getTableBackupStatusCount() { + return tableBackupStatus_.size(); + } + /** + * repeated .hbase.pb.TableBackupStatus table_backup_status = 7; + */ + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus getTableBackupStatus(int index) { + return tableBackupStatus_.get(index); + } + /** + * repeated .hbase.pb.TableBackupStatus table_backup_status = 7; + */ + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatusOrBuilder getTableBackupStatusOrBuilder( + int index) { + return tableBackupStatus_.get(index); + } + + // optional uint64 start_ts = 8; + public static final int START_TS_FIELD_NUMBER = 8; + private long startTs_; + /** + * optional uint64 start_ts = 8; + */ + public boolean hasStartTs() { + return ((bitField0_ & 0x00000040) == 0x00000040); + } + /** + * optional uint64 start_ts = 8; + */ + public long getStartTs() { + return startTs_; + } + + // optional uint64 end_ts = 9; + public static final int END_TS_FIELD_NUMBER = 9; + private long endTs_; + /** + * optional uint64 end_ts = 9; + */ + public boolean hasEndTs() { + return ((bitField0_ & 0x00000080) == 0x00000080); + } + /** + * optional uint64 end_ts = 9; + */ + public long getEndTs() { + return endTs_; + } + + // optional uint32 progress = 10; + public static final int PROGRESS_FIELD_NUMBER = 10; + private int progress_; + /** + * optional uint32 progress = 10; + */ + public boolean hasProgress() { + return ((bitField0_ & 0x00000100) == 0x00000100); + } + /** + * optional uint32 progress = 10; + */ + public int getProgress() { + return progress_; + } + + // optional string job_id = 11; + public static final int JOB_ID_FIELD_NUMBER = 11; + private java.lang.Object jobId_; + /** + * optional string job_id = 11; + */ + public boolean hasJobId() { + return ((bitField0_ & 0x00000200) == 0x00000200); + } + /** + * optional string job_id = 11; + */ + public java.lang.String getJobId() { + 
java.lang.Object ref = jobId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + jobId_ = s; + } + return s; + } + } + /** + * optional string job_id = 11; + */ + public com.google.protobuf.ByteString + getJobIdBytes() { + java.lang.Object ref = jobId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + jobId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + // required uint32 workers_number = 12; + public static final int WORKERS_NUMBER_FIELD_NUMBER = 12; + private int workersNumber_; + /** + * required uint32 workers_number = 12; + */ + public boolean hasWorkersNumber() { + return ((bitField0_ & 0x00000400) == 0x00000400); + } + /** + * required uint32 workers_number = 12; + */ + public int getWorkersNumber() { + return workersNumber_; + } + + // required uint64 bandwidth = 13; + public static final int BANDWIDTH_FIELD_NUMBER = 13; + private long bandwidth_; + /** + * required uint64 bandwidth = 13; + */ + public boolean hasBandwidth() { + return ((bitField0_ & 0x00000800) == 0x00000800); + } + /** + * required uint64 bandwidth = 13; + */ + public long getBandwidth() { + return bandwidth_; + } + + private void initFields() { + backupId_ = ""; + type_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType.FULL; + targetRootDir_ = ""; + state_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo.BackupState.WAITING; + phase_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo.BackupPhase.REQUEST; + failedMessage_ = ""; + tableBackupStatus_ = java.util.Collections.emptyList(); + startTs_ = 0L; + endTs_ = 0L; + progress_ = 0; + jobId_ = ""; + workersNumber_ = 0; + bandwidth_ = 0L; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasBackupId()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasType()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasTargetRootDir()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasWorkersNumber()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasBandwidth()) { + memoizedIsInitialized = 0; + return false; + } + for (int i = 0; i < getTableBackupStatusCount(); i++) { + if (!getTableBackupStatus(i).isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(1, getBackupIdBytes()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeEnum(2, type_.getNumber()); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + output.writeBytes(3, getTargetRootDirBytes()); + } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + output.writeEnum(4, state_.getNumber()); + } + if (((bitField0_ & 0x00000010) == 0x00000010)) { + output.writeEnum(5, phase_.getNumber()); + } + if (((bitField0_ & 0x00000020) == 0x00000020)) { + output.writeBytes(6, getFailedMessageBytes()); + } + for (int i = 0; i < tableBackupStatus_.size(); i++) { + output.writeMessage(7, 
tableBackupStatus_.get(i)); + } + if (((bitField0_ & 0x00000040) == 0x00000040)) { + output.writeUInt64(8, startTs_); + } + if (((bitField0_ & 0x00000080) == 0x00000080)) { + output.writeUInt64(9, endTs_); + } + if (((bitField0_ & 0x00000100) == 0x00000100)) { + output.writeUInt32(10, progress_); + } + if (((bitField0_ & 0x00000200) == 0x00000200)) { + output.writeBytes(11, getJobIdBytes()); + } + if (((bitField0_ & 0x00000400) == 0x00000400)) { + output.writeUInt32(12, workersNumber_); + } + if (((bitField0_ & 0x00000800) == 0x00000800)) { + output.writeUInt64(13, bandwidth_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(1, getBackupIdBytes()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeEnumSize(2, type_.getNumber()); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(3, getTargetRootDirBytes()); + } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + size += com.google.protobuf.CodedOutputStream + .computeEnumSize(4, state_.getNumber()); + } + if (((bitField0_ & 0x00000010) == 0x00000010)) { + size += com.google.protobuf.CodedOutputStream + .computeEnumSize(5, phase_.getNumber()); + } + if (((bitField0_ & 0x00000020) == 0x00000020)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(6, getFailedMessageBytes()); + } + for (int i = 0; i < tableBackupStatus_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(7, tableBackupStatus_.get(i)); + } + if (((bitField0_ & 0x00000040) == 0x00000040)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt64Size(8, startTs_); + } + if (((bitField0_ & 0x00000080) == 0x00000080)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt64Size(9, endTs_); + } + if (((bitField0_ & 0x00000100) == 0x00000100)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt32Size(10, progress_); + } + if (((bitField0_ & 0x00000200) == 0x00000200)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(11, getJobIdBytes()); + } + if (((bitField0_ & 0x00000400) == 0x00000400)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt32Size(12, workersNumber_); + } + if (((bitField0_ & 0x00000800) == 0x00000800)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt64Size(13, bandwidth_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo other = (org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo) obj; + + boolean result = true; + result = result && (hasBackupId() == other.hasBackupId()); + if (hasBackupId()) { + result = result && getBackupId() + 
.equals(other.getBackupId()); + } + result = result && (hasType() == other.hasType()); + if (hasType()) { + result = result && + (getType() == other.getType()); + } + result = result && (hasTargetRootDir() == other.hasTargetRootDir()); + if (hasTargetRootDir()) { + result = result && getTargetRootDir() + .equals(other.getTargetRootDir()); + } + result = result && (hasState() == other.hasState()); + if (hasState()) { + result = result && + (getState() == other.getState()); + } + result = result && (hasPhase() == other.hasPhase()); + if (hasPhase()) { + result = result && + (getPhase() == other.getPhase()); + } + result = result && (hasFailedMessage() == other.hasFailedMessage()); + if (hasFailedMessage()) { + result = result && getFailedMessage() + .equals(other.getFailedMessage()); + } + result = result && getTableBackupStatusList() + .equals(other.getTableBackupStatusList()); + result = result && (hasStartTs() == other.hasStartTs()); + if (hasStartTs()) { + result = result && (getStartTs() + == other.getStartTs()); + } + result = result && (hasEndTs() == other.hasEndTs()); + if (hasEndTs()) { + result = result && (getEndTs() + == other.getEndTs()); + } + result = result && (hasProgress() == other.hasProgress()); + if (hasProgress()) { + result = result && (getProgress() + == other.getProgress()); + } + result = result && (hasJobId() == other.hasJobId()); + if (hasJobId()) { + result = result && getJobId() + .equals(other.getJobId()); + } + result = result && (hasWorkersNumber() == other.hasWorkersNumber()); + if (hasWorkersNumber()) { + result = result && (getWorkersNumber() + == other.getWorkersNumber()); + } + result = result && (hasBandwidth() == other.hasBandwidth()); + if (hasBandwidth()) { + result = result && (getBandwidth() + == other.getBandwidth()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasBackupId()) { + hash = (37 * hash) + BACKUP_ID_FIELD_NUMBER; + hash = (53 * hash) + getBackupId().hashCode(); + } + if (hasType()) { + hash = (37 * hash) + TYPE_FIELD_NUMBER; + hash = (53 * hash) + hashEnum(getType()); + } + if (hasTargetRootDir()) { + hash = (37 * hash) + TARGET_ROOT_DIR_FIELD_NUMBER; + hash = (53 * hash) + getTargetRootDir().hashCode(); + } + if (hasState()) { + hash = (37 * hash) + STATE_FIELD_NUMBER; + hash = (53 * hash) + hashEnum(getState()); + } + if (hasPhase()) { + hash = (37 * hash) + PHASE_FIELD_NUMBER; + hash = (53 * hash) + hashEnum(getPhase()); + } + if (hasFailedMessage()) { + hash = (37 * hash) + FAILED_MESSAGE_FIELD_NUMBER; + hash = (53 * hash) + getFailedMessage().hashCode(); + } + if (getTableBackupStatusCount() > 0) { + hash = (37 * hash) + TABLE_BACKUP_STATUS_FIELD_NUMBER; + hash = (53 * hash) + getTableBackupStatusList().hashCode(); + } + if (hasStartTs()) { + hash = (37 * hash) + START_TS_FIELD_NUMBER; + hash = (53 * hash) + hashLong(getStartTs()); + } + if (hasEndTs()) { + hash = (37 * hash) + END_TS_FIELD_NUMBER; + hash = (53 * hash) + hashLong(getEndTs()); + } + if (hasProgress()) { + hash = (37 * hash) + PROGRESS_FIELD_NUMBER; + hash = (53 * hash) + getProgress(); + } + if (hasJobId()) { + hash = (37 * hash) + JOB_ID_FIELD_NUMBER; + hash = (53 * hash) + getJobId().hashCode(); + } + if (hasWorkersNumber()) { + hash = (37 * hash) + WORKERS_NUMBER_FIELD_NUMBER; + 
hash = (53 * hash) + getWorkersNumber(); + } + if (hasBandwidth()) { + hash = (37 * hash) + BANDWIDTH_FIELD_NUMBER; + hash = (53 * hash) + hashLong(getBandwidth()); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code hbase.pb.BackupInfo} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfoOrBuilder { + 
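Generated messages are immutable, so updating a stored BackupInfo (for example to record that the backup has started) goes through toBuilder() and a fresh build(). A small sketch using only setters visible in this Builder; the phase and progress values are chosen for illustration:

    import org.apache.hadoop.hbase.protobuf.generated.BackupProtos;

    public class BackupInfoUpdateSketch {
      // Sketch only: derive an updated copy of an existing, immutable BackupInfo.
      static BackupProtos.BackupInfo markRunning(BackupProtos.BackupInfo info) {
        return info.toBuilder()
            .setState(BackupProtos.BackupInfo.BackupState.RUNNING)   // optional field 4
            .setPhase(BackupProtos.BackupInfo.BackupPhase.SNAPSHOT)  // optional field 5
            .setStartTs(System.currentTimeMillis())                  // optional field 8
            .setProgress(0)                                          // optional field 10
            .build();
      }
    }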
public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_BackupInfo_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_BackupInfo_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo.class, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getTableBackupStatusFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + backupId_ = ""; + bitField0_ = (bitField0_ & ~0x00000001); + type_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType.FULL; + bitField0_ = (bitField0_ & ~0x00000002); + targetRootDir_ = ""; + bitField0_ = (bitField0_ & ~0x00000004); + state_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo.BackupState.WAITING; + bitField0_ = (bitField0_ & ~0x00000008); + phase_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo.BackupPhase.REQUEST; + bitField0_ = (bitField0_ & ~0x00000010); + failedMessage_ = ""; + bitField0_ = (bitField0_ & ~0x00000020); + if (tableBackupStatusBuilder_ == null) { + tableBackupStatus_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000040); + } else { + tableBackupStatusBuilder_.clear(); + } + startTs_ = 0L; + bitField0_ = (bitField0_ & ~0x00000080); + endTs_ = 0L; + bitField0_ = (bitField0_ & ~0x00000100); + progress_ = 0; + bitField0_ = (bitField0_ & ~0x00000200); + jobId_ = ""; + bitField0_ = (bitField0_ & ~0x00000400); + workersNumber_ = 0; + bitField0_ = (bitField0_ & ~0x00000800); + bandwidth_ = 0L; + bitField0_ = (bitField0_ & ~0x00001000); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_BackupInfo_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo build() { + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo result = new org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 
0x00000001; + } + result.backupId_ = backupId_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.type_ = type_; + if (((from_bitField0_ & 0x00000004) == 0x00000004)) { + to_bitField0_ |= 0x00000004; + } + result.targetRootDir_ = targetRootDir_; + if (((from_bitField0_ & 0x00000008) == 0x00000008)) { + to_bitField0_ |= 0x00000008; + } + result.state_ = state_; + if (((from_bitField0_ & 0x00000010) == 0x00000010)) { + to_bitField0_ |= 0x00000010; + } + result.phase_ = phase_; + if (((from_bitField0_ & 0x00000020) == 0x00000020)) { + to_bitField0_ |= 0x00000020; + } + result.failedMessage_ = failedMessage_; + if (tableBackupStatusBuilder_ == null) { + if (((bitField0_ & 0x00000040) == 0x00000040)) { + tableBackupStatus_ = java.util.Collections.unmodifiableList(tableBackupStatus_); + bitField0_ = (bitField0_ & ~0x00000040); + } + result.tableBackupStatus_ = tableBackupStatus_; + } else { + result.tableBackupStatus_ = tableBackupStatusBuilder_.build(); + } + if (((from_bitField0_ & 0x00000080) == 0x00000080)) { + to_bitField0_ |= 0x00000040; + } + result.startTs_ = startTs_; + if (((from_bitField0_ & 0x00000100) == 0x00000100)) { + to_bitField0_ |= 0x00000080; + } + result.endTs_ = endTs_; + if (((from_bitField0_ & 0x00000200) == 0x00000200)) { + to_bitField0_ |= 0x00000100; + } + result.progress_ = progress_; + if (((from_bitField0_ & 0x00000400) == 0x00000400)) { + to_bitField0_ |= 0x00000200; + } + result.jobId_ = jobId_; + if (((from_bitField0_ & 0x00000800) == 0x00000800)) { + to_bitField0_ |= 0x00000400; + } + result.workersNumber_ = workersNumber_; + if (((from_bitField0_ & 0x00001000) == 0x00001000)) { + to_bitField0_ |= 0x00000800; + } + result.bandwidth_ = bandwidth_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo.getDefaultInstance()) return this; + if (other.hasBackupId()) { + bitField0_ |= 0x00000001; + backupId_ = other.backupId_; + onChanged(); + } + if (other.hasType()) { + setType(other.getType()); + } + if (other.hasTargetRootDir()) { + bitField0_ |= 0x00000004; + targetRootDir_ = other.targetRootDir_; + onChanged(); + } + if (other.hasState()) { + setState(other.getState()); + } + if (other.hasPhase()) { + setPhase(other.getPhase()); + } + if (other.hasFailedMessage()) { + bitField0_ |= 0x00000020; + failedMessage_ = other.failedMessage_; + onChanged(); + } + if (tableBackupStatusBuilder_ == null) { + if (!other.tableBackupStatus_.isEmpty()) { + if (tableBackupStatus_.isEmpty()) { + tableBackupStatus_ = other.tableBackupStatus_; + bitField0_ = (bitField0_ & ~0x00000040); + } else { + ensureTableBackupStatusIsMutable(); + tableBackupStatus_.addAll(other.tableBackupStatus_); + } + onChanged(); + } + } else { + if (!other.tableBackupStatus_.isEmpty()) { + if (tableBackupStatusBuilder_.isEmpty()) { + tableBackupStatusBuilder_.dispose(); + tableBackupStatusBuilder_ = null; + tableBackupStatus_ = other.tableBackupStatus_; + bitField0_ = (bitField0_ & ~0x00000040); + tableBackupStatusBuilder_ = + 
com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? + getTableBackupStatusFieldBuilder() : null; + } else { + tableBackupStatusBuilder_.addAllMessages(other.tableBackupStatus_); + } + } + } + if (other.hasStartTs()) { + setStartTs(other.getStartTs()); + } + if (other.hasEndTs()) { + setEndTs(other.getEndTs()); + } + if (other.hasProgress()) { + setProgress(other.getProgress()); + } + if (other.hasJobId()) { + bitField0_ |= 0x00000400; + jobId_ = other.jobId_; + onChanged(); + } + if (other.hasWorkersNumber()) { + setWorkersNumber(other.getWorkersNumber()); + } + if (other.hasBandwidth()) { + setBandwidth(other.getBandwidth()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasBackupId()) { + + return false; + } + if (!hasType()) { + + return false; + } + if (!hasTargetRootDir()) { + + return false; + } + if (!hasWorkersNumber()) { + + return false; + } + if (!hasBandwidth()) { + + return false; + } + for (int i = 0; i < getTableBackupStatusCount(); i++) { + if (!getTableBackupStatus(i).isInitialized()) { + + return false; + } + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required string backup_id = 1; + private java.lang.Object backupId_ = ""; + /** + * required string backup_id = 1; + */ + public boolean hasBackupId() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string backup_id = 1; + */ + public java.lang.String getBackupId() { + java.lang.Object ref = backupId_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + backupId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * required string backup_id = 1; + */ + public com.google.protobuf.ByteString + getBackupIdBytes() { + java.lang.Object ref = backupId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + backupId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * required string backup_id = 1; + */ + public Builder setBackupId( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + backupId_ = value; + onChanged(); + return this; + } + /** + * required string backup_id = 1; + */ + public Builder clearBackupId() { + bitField0_ = (bitField0_ & ~0x00000001); + backupId_ = getDefaultInstance().getBackupId(); + onChanged(); + return this; + } + /** + * required string backup_id = 1; + */ + public Builder setBackupIdBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + backupId_ = value; + onChanged(); + return this; + } + + // required .hbase.pb.BackupType type = 2; + private 
org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType type_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType.FULL; + /** + * required .hbase.pb.BackupType type = 2; + */ + public boolean hasType() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required .hbase.pb.BackupType type = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType getType() { + return type_; + } + /** + * required .hbase.pb.BackupType type = 2; + */ + public Builder setType(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000002; + type_ = value; + onChanged(); + return this; + } + /** + * required .hbase.pb.BackupType type = 2; + */ + public Builder clearType() { + bitField0_ = (bitField0_ & ~0x00000002); + type_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType.FULL; + onChanged(); + return this; + } + + // required string target_root_dir = 3; + private java.lang.Object targetRootDir_ = ""; + /** + * required string target_root_dir = 3; + */ + public boolean hasTargetRootDir() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * required string target_root_dir = 3; + */ + public java.lang.String getTargetRootDir() { + java.lang.Object ref = targetRootDir_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + targetRootDir_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * required string target_root_dir = 3; + */ + public com.google.protobuf.ByteString + getTargetRootDirBytes() { + java.lang.Object ref = targetRootDir_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + targetRootDir_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * required string target_root_dir = 3; + */ + public Builder setTargetRootDir( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000004; + targetRootDir_ = value; + onChanged(); + return this; + } + /** + * required string target_root_dir = 3; + */ + public Builder clearTargetRootDir() { + bitField0_ = (bitField0_ & ~0x00000004); + targetRootDir_ = getDefaultInstance().getTargetRootDir(); + onChanged(); + return this; + } + /** + * required string target_root_dir = 3; + */ + public Builder setTargetRootDirBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000004; + targetRootDir_ = value; + onChanged(); + return this; + } + + // optional .hbase.pb.BackupInfo.BackupState state = 4; + private org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo.BackupState state_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo.BackupState.WAITING; + /** + * optional .hbase.pb.BackupInfo.BackupState state = 4; + */ + public boolean hasState() { + return ((bitField0_ & 0x00000008) == 0x00000008); + } + /** + * optional .hbase.pb.BackupInfo.BackupState state = 4; + */ + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo.BackupState getState() { + return state_; + } + /** + * optional .hbase.pb.BackupInfo.BackupState state = 4; + */ + public Builder setState(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo.BackupState value) { + if (value == 
null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000008; + state_ = value; + onChanged(); + return this; + } + /** + * optional .hbase.pb.BackupInfo.BackupState state = 4; + */ + public Builder clearState() { + bitField0_ = (bitField0_ & ~0x00000008); + state_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo.BackupState.WAITING; + onChanged(); + return this; + } + + // optional .hbase.pb.BackupInfo.BackupPhase phase = 5; + private org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo.BackupPhase phase_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo.BackupPhase.REQUEST; + /** + * optional .hbase.pb.BackupInfo.BackupPhase phase = 5; + */ + public boolean hasPhase() { + return ((bitField0_ & 0x00000010) == 0x00000010); + } + /** + * optional .hbase.pb.BackupInfo.BackupPhase phase = 5; + */ + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo.BackupPhase getPhase() { + return phase_; + } + /** + * optional .hbase.pb.BackupInfo.BackupPhase phase = 5; + */ + public Builder setPhase(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo.BackupPhase value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000010; + phase_ = value; + onChanged(); + return this; + } + /** + * optional .hbase.pb.BackupInfo.BackupPhase phase = 5; + */ + public Builder clearPhase() { + bitField0_ = (bitField0_ & ~0x00000010); + phase_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo.BackupPhase.REQUEST; + onChanged(); + return this; + } + + // optional string failed_message = 6; + private java.lang.Object failedMessage_ = ""; + /** + * optional string failed_message = 6; + */ + public boolean hasFailedMessage() { + return ((bitField0_ & 0x00000020) == 0x00000020); + } + /** + * optional string failed_message = 6; + */ + public java.lang.String getFailedMessage() { + java.lang.Object ref = failedMessage_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + failedMessage_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * optional string failed_message = 6; + */ + public com.google.protobuf.ByteString + getFailedMessageBytes() { + java.lang.Object ref = failedMessage_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + failedMessage_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * optional string failed_message = 6; + */ + public Builder setFailedMessage( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000020; + failedMessage_ = value; + onChanged(); + return this; + } + /** + * optional string failed_message = 6; + */ + public Builder clearFailedMessage() { + bitField0_ = (bitField0_ & ~0x00000020); + failedMessage_ = getDefaultInstance().getFailedMessage(); + onChanged(); + return this; + } + /** + * optional string failed_message = 6; + */ + public Builder setFailedMessageBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000020; + failedMessage_ = value; + onChanged(); + return this; + } + + // repeated .hbase.pb.TableBackupStatus table_backup_status = 7; + private java.util.List tableBackupStatus_ = + java.util.Collections.emptyList(); + private void 
ensureTableBackupStatusIsMutable() { + if (!((bitField0_ & 0x00000040) == 0x00000040)) { + tableBackupStatus_ = new java.util.ArrayList(tableBackupStatus_); + bitField0_ |= 0x00000040; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus.Builder, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatusOrBuilder> tableBackupStatusBuilder_; + + /** + * repeated .hbase.pb.TableBackupStatus table_backup_status = 7; + */ + public java.util.List getTableBackupStatusList() { + if (tableBackupStatusBuilder_ == null) { + return java.util.Collections.unmodifiableList(tableBackupStatus_); + } else { + return tableBackupStatusBuilder_.getMessageList(); + } + } + /** + * repeated .hbase.pb.TableBackupStatus table_backup_status = 7; + */ + public int getTableBackupStatusCount() { + if (tableBackupStatusBuilder_ == null) { + return tableBackupStatus_.size(); + } else { + return tableBackupStatusBuilder_.getCount(); + } + } + /** + * repeated .hbase.pb.TableBackupStatus table_backup_status = 7; + */ + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus getTableBackupStatus(int index) { + if (tableBackupStatusBuilder_ == null) { + return tableBackupStatus_.get(index); + } else { + return tableBackupStatusBuilder_.getMessage(index); + } + } + /** + * repeated .hbase.pb.TableBackupStatus table_backup_status = 7; + */ + public Builder setTableBackupStatus( + int index, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus value) { + if (tableBackupStatusBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureTableBackupStatusIsMutable(); + tableBackupStatus_.set(index, value); + onChanged(); + } else { + tableBackupStatusBuilder_.setMessage(index, value); + } + return this; + } + /** + * repeated .hbase.pb.TableBackupStatus table_backup_status = 7; + */ + public Builder setTableBackupStatus( + int index, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus.Builder builderForValue) { + if (tableBackupStatusBuilder_ == null) { + ensureTableBackupStatusIsMutable(); + tableBackupStatus_.set(index, builderForValue.build()); + onChanged(); + } else { + tableBackupStatusBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.TableBackupStatus table_backup_status = 7; + */ + public Builder addTableBackupStatus(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus value) { + if (tableBackupStatusBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureTableBackupStatusIsMutable(); + tableBackupStatus_.add(value); + onChanged(); + } else { + tableBackupStatusBuilder_.addMessage(value); + } + return this; + } + /** + * repeated .hbase.pb.TableBackupStatus table_backup_status = 7; + */ + public Builder addTableBackupStatus( + int index, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus value) { + if (tableBackupStatusBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureTableBackupStatusIsMutable(); + tableBackupStatus_.add(index, value); + onChanged(); + } else { + tableBackupStatusBuilder_.addMessage(index, value); + } + return this; + } + /** + * repeated .hbase.pb.TableBackupStatus table_backup_status = 7; + */ + public Builder addTableBackupStatus( + 
org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus.Builder builderForValue) { + if (tableBackupStatusBuilder_ == null) { + ensureTableBackupStatusIsMutable(); + tableBackupStatus_.add(builderForValue.build()); + onChanged(); + } else { + tableBackupStatusBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.TableBackupStatus table_backup_status = 7; + */ + public Builder addTableBackupStatus( + int index, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus.Builder builderForValue) { + if (tableBackupStatusBuilder_ == null) { + ensureTableBackupStatusIsMutable(); + tableBackupStatus_.add(index, builderForValue.build()); + onChanged(); + } else { + tableBackupStatusBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.TableBackupStatus table_backup_status = 7; + */ + public Builder addAllTableBackupStatus( + java.lang.Iterable values) { + if (tableBackupStatusBuilder_ == null) { + ensureTableBackupStatusIsMutable(); + super.addAll(values, tableBackupStatus_); + onChanged(); + } else { + tableBackupStatusBuilder_.addAllMessages(values); + } + return this; + } + /** + * repeated .hbase.pb.TableBackupStatus table_backup_status = 7; + */ + public Builder clearTableBackupStatus() { + if (tableBackupStatusBuilder_ == null) { + tableBackupStatus_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000040); + onChanged(); + } else { + tableBackupStatusBuilder_.clear(); + } + return this; + } + /** + * repeated .hbase.pb.TableBackupStatus table_backup_status = 7; + */ + public Builder removeTableBackupStatus(int index) { + if (tableBackupStatusBuilder_ == null) { + ensureTableBackupStatusIsMutable(); + tableBackupStatus_.remove(index); + onChanged(); + } else { + tableBackupStatusBuilder_.remove(index); + } + return this; + } + /** + * repeated .hbase.pb.TableBackupStatus table_backup_status = 7; + */ + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus.Builder getTableBackupStatusBuilder( + int index) { + return getTableBackupStatusFieldBuilder().getBuilder(index); + } + /** + * repeated .hbase.pb.TableBackupStatus table_backup_status = 7; + */ + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatusOrBuilder getTableBackupStatusOrBuilder( + int index) { + if (tableBackupStatusBuilder_ == null) { + return tableBackupStatus_.get(index); } else { + return tableBackupStatusBuilder_.getMessageOrBuilder(index); + } + } + /** + * repeated .hbase.pb.TableBackupStatus table_backup_status = 7; + */ + public java.util.List + getTableBackupStatusOrBuilderList() { + if (tableBackupStatusBuilder_ != null) { + return tableBackupStatusBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(tableBackupStatus_); + } + } + /** + * repeated .hbase.pb.TableBackupStatus table_backup_status = 7; + */ + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus.Builder addTableBackupStatusBuilder() { + return getTableBackupStatusFieldBuilder().addBuilder( + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus.getDefaultInstance()); + } + /** + * repeated .hbase.pb.TableBackupStatus table_backup_status = 7; + */ + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus.Builder addTableBackupStatusBuilder( + int index) { + return getTableBackupStatusFieldBuilder().addBuilder( + index, 
org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus.getDefaultInstance()); + } + /** + * repeated .hbase.pb.TableBackupStatus table_backup_status = 7; + */ + public java.util.List + getTableBackupStatusBuilderList() { + return getTableBackupStatusFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus.Builder, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatusOrBuilder> + getTableBackupStatusFieldBuilder() { + if (tableBackupStatusBuilder_ == null) { + tableBackupStatusBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatus.Builder, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.TableBackupStatusOrBuilder>( + tableBackupStatus_, + ((bitField0_ & 0x00000040) == 0x00000040), + getParentForChildren(), + isClean()); + tableBackupStatus_ = null; + } + return tableBackupStatusBuilder_; + } + + // optional uint64 start_ts = 8; + private long startTs_ ; + /** + * optional uint64 start_ts = 8; + */ + public boolean hasStartTs() { + return ((bitField0_ & 0x00000080) == 0x00000080); + } + /** + * optional uint64 start_ts = 8; + */ + public long getStartTs() { + return startTs_; + } + /** + * optional uint64 start_ts = 8; + */ + public Builder setStartTs(long value) { + bitField0_ |= 0x00000080; + startTs_ = value; + onChanged(); + return this; + } + /** + * optional uint64 start_ts = 8; + */ + public Builder clearStartTs() { + bitField0_ = (bitField0_ & ~0x00000080); + startTs_ = 0L; + onChanged(); + return this; + } + + // optional uint64 end_ts = 9; + private long endTs_ ; + /** + * optional uint64 end_ts = 9; + */ + public boolean hasEndTs() { + return ((bitField0_ & 0x00000100) == 0x00000100); + } + /** + * optional uint64 end_ts = 9; + */ + public long getEndTs() { + return endTs_; + } + /** + * optional uint64 end_ts = 9; + */ + public Builder setEndTs(long value) { + bitField0_ |= 0x00000100; + endTs_ = value; + onChanged(); + return this; + } + /** + * optional uint64 end_ts = 9; + */ + public Builder clearEndTs() { + bitField0_ = (bitField0_ & ~0x00000100); + endTs_ = 0L; + onChanged(); + return this; + } + + // optional uint32 progress = 10; + private int progress_ ; + /** + * optional uint32 progress = 10; + */ + public boolean hasProgress() { + return ((bitField0_ & 0x00000200) == 0x00000200); + } + /** + * optional uint32 progress = 10; + */ + public int getProgress() { + return progress_; + } + /** + * optional uint32 progress = 10; + */ + public Builder setProgress(int value) { + bitField0_ |= 0x00000200; + progress_ = value; + onChanged(); + return this; + } + /** + * optional uint32 progress = 10; + */ + public Builder clearProgress() { + bitField0_ = (bitField0_ & ~0x00000200); + progress_ = 0; + onChanged(); + return this; + } + + // optional string job_id = 11; + private java.lang.Object jobId_ = ""; + /** + * optional string job_id = 11; + */ + public boolean hasJobId() { + return ((bitField0_ & 0x00000400) == 0x00000400); + } + /** + * optional string job_id = 11; + */ + public java.lang.String getJobId() { + java.lang.Object ref = jobId_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + jobId_ = s; + return s; + } else 
{ + return (java.lang.String) ref; + } + } + /** + * optional string job_id = 11; + */ + public com.google.protobuf.ByteString + getJobIdBytes() { + java.lang.Object ref = jobId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + jobId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * optional string job_id = 11; + */ + public Builder setJobId( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000400; + jobId_ = value; + onChanged(); + return this; + } + /** + * optional string job_id = 11; + */ + public Builder clearJobId() { + bitField0_ = (bitField0_ & ~0x00000400); + jobId_ = getDefaultInstance().getJobId(); + onChanged(); + return this; + } + /** + * optional string job_id = 11; + */ + public Builder setJobIdBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000400; + jobId_ = value; + onChanged(); + return this; + } + + // required uint32 workers_number = 12; + private int workersNumber_ ; + /** + * required uint32 workers_number = 12; + */ + public boolean hasWorkersNumber() { + return ((bitField0_ & 0x00000800) == 0x00000800); + } + /** + * required uint32 workers_number = 12; + */ + public int getWorkersNumber() { + return workersNumber_; + } + /** + * required uint32 workers_number = 12; + */ + public Builder setWorkersNumber(int value) { + bitField0_ |= 0x00000800; + workersNumber_ = value; + onChanged(); + return this; + } + /** + * required uint32 workers_number = 12; + */ + public Builder clearWorkersNumber() { + bitField0_ = (bitField0_ & ~0x00000800); + workersNumber_ = 0; + onChanged(); + return this; + } + + // required uint64 bandwidth = 13; + private long bandwidth_ ; + /** + * required uint64 bandwidth = 13; + */ + public boolean hasBandwidth() { + return ((bitField0_ & 0x00001000) == 0x00001000); + } + /** + * required uint64 bandwidth = 13; + */ + public long getBandwidth() { + return bandwidth_; + } + /** + * required uint64 bandwidth = 13; + */ + public Builder setBandwidth(long value) { + bitField0_ |= 0x00001000; + bandwidth_ = value; + onChanged(); + return this; + } + /** + * required uint64 bandwidth = 13; + */ + public Builder clearBandwidth() { + bitField0_ = (bitField0_ & ~0x00001000); + bandwidth_ = 0L; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:hbase.pb.BackupInfo) + } + + static { + defaultInstance = new BackupInfo(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:hbase.pb.BackupInfo) + } + + public interface BackupProcContextOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required .hbase.pb.BackupInfo ctx = 1; + /** + * required .hbase.pb.BackupInfo ctx = 1; + */ + boolean hasCtx(); + /** + * required .hbase.pb.BackupInfo ctx = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo getCtx(); + /** + * required .hbase.pb.BackupInfo ctx = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfoOrBuilder getCtxOrBuilder(); + + // repeated .hbase.pb.ServerTimestamp server_timestamp = 2; + /** + * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; + */ + java.util.List + getServerTimestampList(); + /** + * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; + */ + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp 
getServerTimestamp(int index); + /** + * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; + */ + int getServerTimestampCount(); + /** + * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; + */ + java.util.List + getServerTimestampOrBuilderList(); + /** + * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; + */ + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestampOrBuilder getServerTimestampOrBuilder( + int index); + } + /** + * Protobuf type {@code hbase.pb.BackupProcContext} + */ + public static final class BackupProcContext extends + com.google.protobuf.GeneratedMessage + implements BackupProcContextOrBuilder { + // Use BackupProcContext.newBuilder() to construct. + private BackupProcContext(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private BackupProcContext(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final BackupProcContext defaultInstance; + public static BackupProcContext getDefaultInstance() { + return defaultInstance; + } + + public BackupProcContext getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private BackupProcContext( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo.Builder subBuilder = null; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + subBuilder = ctx_.toBuilder(); + } + ctx_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(ctx_); + ctx_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000001; + break; + } + case 18: { + if (!((mutable_bitField0_ & 0x00000002) == 0x00000002)) { + serverTimestamp_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000002; + } + serverTimestamp_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp.PARSER, extensionRegistry)); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000002) == 0x00000002)) { + serverTimestamp_ = java.util.Collections.unmodifiableList(serverTimestamp_); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_BackupProcContext_descriptor; + } + + protected 
com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_BackupProcContext_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupProcContext.class, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupProcContext.Builder.class); + } + + public static com.google.protobuf.Parser<BackupProcContext> PARSER = + new com.google.protobuf.AbstractParser<BackupProcContext>() { + public BackupProcContext parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new BackupProcContext(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser<BackupProcContext> getParserForType() { + return PARSER; + } + + private int bitField0_; + // required .hbase.pb.BackupInfo ctx = 1; + public static final int CTX_FIELD_NUMBER = 1; + private org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo ctx_; + /** + * required .hbase.pb.BackupInfo ctx = 1; + */ + public boolean hasCtx() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .hbase.pb.BackupInfo ctx = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo getCtx() { + return ctx_; + } + /** + * required .hbase.pb.BackupInfo ctx = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfoOrBuilder getCtxOrBuilder() { + return ctx_; + } + + // repeated .hbase.pb.ServerTimestamp server_timestamp = 2; + public static final int SERVER_TIMESTAMP_FIELD_NUMBER = 2; + private java.util.List<org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp> serverTimestamp_; + /** + * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; + */ + public java.util.List<org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp> getServerTimestampList() { + return serverTimestamp_; + } + /** + * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; + */ + public java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestampOrBuilder> + getServerTimestampOrBuilderList() { + return serverTimestamp_; + } + /** + * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; + */ + public int getServerTimestampCount() { + return serverTimestamp_.size(); + } + /** + * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp getServerTimestamp(int index) { + return serverTimestamp_.get(index); + } + /** + * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestampOrBuilder getServerTimestampOrBuilder( + int index) { + return serverTimestamp_.get(index); + } + + private void initFields() { + ctx_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo.getDefaultInstance(); + serverTimestamp_ = java.util.Collections.emptyList(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasCtx()) { + memoizedIsInitialized = 0; + return false; + } + if (!getCtx().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + for (int i = 0; i < getServerTimestampCount(); i++) { + if (!getServerTimestamp(i).isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { +
getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeMessage(1, ctx_); + } + for (int i = 0; i < serverTimestamp_.size(); i++) { + output.writeMessage(2, serverTimestamp_.get(i)); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(1, ctx_); + } + for (int i = 0; i < serverTimestamp_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(2, serverTimestamp_.get(i)); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupProcContext)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupProcContext other = (org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupProcContext) obj; + + boolean result = true; + result = result && (hasCtx() == other.hasCtx()); + if (hasCtx()) { + result = result && getCtx() + .equals(other.getCtx()); + } + result = result && getServerTimestampList() + .equals(other.getServerTimestampList()); + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasCtx()) { + hash = (37 * hash) + CTX_FIELD_NUMBER; + hash = (53 * hash) + getCtx().hashCode(); + } + if (getServerTimestampCount() > 0) { + hash = (37 * hash) + SERVER_TIMESTAMP_FIELD_NUMBER; + hash = (53 * hash) + getServerTimestampList().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupProcContext parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupProcContext parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupProcContext parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupProcContext parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupProcContext parseFrom(java.io.InputStream input) 
+ throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupProcContext parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupProcContext parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupProcContext parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupProcContext parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupProcContext parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupProcContext prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code hbase.pb.BackupProcContext} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupProcContextOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_BackupProcContext_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_BackupProcContext_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupProcContext.class, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupProcContext.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupProcContext.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getCtxFieldBuilder(); + getServerTimestampFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + if (ctxBuilder_ == null) { + ctx_ = 
org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo.getDefaultInstance(); + } else { + ctxBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + if (serverTimestampBuilder_ == null) { + serverTimestamp_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000002); + } else { + serverTimestampBuilder_.clear(); + } + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.internal_static_hbase_pb_BackupProcContext_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupProcContext getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupProcContext.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupProcContext build() { + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupProcContext result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupProcContext buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupProcContext result = new org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupProcContext(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + if (ctxBuilder_ == null) { + result.ctx_ = ctx_; + } else { + result.ctx_ = ctxBuilder_.build(); + } + if (serverTimestampBuilder_ == null) { + if (((bitField0_ & 0x00000002) == 0x00000002)) { + serverTimestamp_ = java.util.Collections.unmodifiableList(serverTimestamp_); + bitField0_ = (bitField0_ & ~0x00000002); + } + result.serverTimestamp_ = serverTimestamp_; + } else { + result.serverTimestamp_ = serverTimestampBuilder_.build(); + } + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupProcContext) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupProcContext)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupProcContext other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupProcContext.getDefaultInstance()) return this; + if (other.hasCtx()) { + mergeCtx(other.getCtx()); + } + if (serverTimestampBuilder_ == null) { + if (!other.serverTimestamp_.isEmpty()) { + if (serverTimestamp_.isEmpty()) { + serverTimestamp_ = other.serverTimestamp_; + bitField0_ = (bitField0_ & ~0x00000002); + } else { + ensureServerTimestampIsMutable(); + serverTimestamp_.addAll(other.serverTimestamp_); + } + onChanged(); + } + } else { + if (!other.serverTimestamp_.isEmpty()) { + if (serverTimestampBuilder_.isEmpty()) { + serverTimestampBuilder_.dispose(); + serverTimestampBuilder_ = null; + serverTimestamp_ = other.serverTimestamp_; + bitField0_ = (bitField0_ & ~0x00000002); + serverTimestampBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? 
+ getServerTimestampFieldBuilder() : null; + } else { + serverTimestampBuilder_.addAllMessages(other.serverTimestamp_); + } + } + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasCtx()) { + + return false; + } + if (!getCtx().isInitialized()) { + + return false; + } + for (int i = 0; i < getServerTimestampCount(); i++) { + if (!getServerTimestamp(i).isInitialized()) { + + return false; + } + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupProcContext parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupProcContext) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required .hbase.pb.BackupInfo ctx = 1; + private org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo ctx_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfoOrBuilder> ctxBuilder_; + /** + * required .hbase.pb.BackupInfo ctx = 1; + */ + public boolean hasCtx() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .hbase.pb.BackupInfo ctx = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo getCtx() { + if (ctxBuilder_ == null) { + return ctx_; + } else { + return ctxBuilder_.getMessage(); + } + } + /** + * required .hbase.pb.BackupInfo ctx = 1; + */ + public Builder setCtx(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo value) { + if (ctxBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ctx_ = value; + onChanged(); + } else { + ctxBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .hbase.pb.BackupInfo ctx = 1; + */ + public Builder setCtx( + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo.Builder builderForValue) { + if (ctxBuilder_ == null) { + ctx_ = builderForValue.build(); + onChanged(); + } else { + ctxBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .hbase.pb.BackupInfo ctx = 1; + */ + public Builder mergeCtx(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo value) { + if (ctxBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001) && + ctx_ != org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo.getDefaultInstance()) { + ctx_ = + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo.newBuilder(ctx_).mergeFrom(value).buildPartial(); + } else { + ctx_ = value; + } + onChanged(); + } else { + ctxBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .hbase.pb.BackupInfo ctx = 1; + */ + public Builder clearCtx() { + if (ctxBuilder_ == null) { + ctx_ = 
org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo.getDefaultInstance(); + onChanged(); + } else { + ctxBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + /** + * required .hbase.pb.BackupInfo ctx = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo.Builder getCtxBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return getCtxFieldBuilder().getBuilder(); + } + /** + * required .hbase.pb.BackupInfo ctx = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfoOrBuilder getCtxOrBuilder() { + if (ctxBuilder_ != null) { + return ctxBuilder_.getMessageOrBuilder(); + } else { + return ctx_; + } + } + /** + * required .hbase.pb.BackupInfo ctx = 1; + */ + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfoOrBuilder> + getCtxFieldBuilder() { + if (ctxBuilder_ == null) { + ctxBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupInfoOrBuilder>( + ctx_, + getParentForChildren(), + isClean()); + ctx_ = null; + } + return ctxBuilder_; + } + + // repeated .hbase.pb.ServerTimestamp server_timestamp = 2; + private java.util.List serverTimestamp_ = + java.util.Collections.emptyList(); + private void ensureServerTimestampIsMutable() { + if (!((bitField0_ & 0x00000002) == 0x00000002)) { + serverTimestamp_ = new java.util.ArrayList(serverTimestamp_); + bitField0_ |= 0x00000002; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp.Builder, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestampOrBuilder> serverTimestampBuilder_; + + /** + * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; + */ + public java.util.List getServerTimestampList() { + if (serverTimestampBuilder_ == null) { + return java.util.Collections.unmodifiableList(serverTimestamp_); + } else { + return serverTimestampBuilder_.getMessageList(); + } + } + /** + * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; + */ + public int getServerTimestampCount() { + if (serverTimestampBuilder_ == null) { + return serverTimestamp_.size(); + } else { + return serverTimestampBuilder_.getCount(); + } + } + /** + * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp getServerTimestamp(int index) { + if (serverTimestampBuilder_ == null) { + return serverTimestamp_.get(index); + } else { + return serverTimestampBuilder_.getMessage(index); + } + } + /** + * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; + */ + public Builder setServerTimestamp( + int index, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp value) { + if (serverTimestampBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureServerTimestampIsMutable(); + serverTimestamp_.set(index, value); + onChanged(); + } else { + serverTimestampBuilder_.setMessage(index, value); + } + return this; + } + /** + * repeated 
.hbase.pb.ServerTimestamp server_timestamp = 2; + */ + public Builder setServerTimestamp( + int index, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp.Builder builderForValue) { + if (serverTimestampBuilder_ == null) { + ensureServerTimestampIsMutable(); + serverTimestamp_.set(index, builderForValue.build()); + onChanged(); + } else { + serverTimestampBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; + */ + public Builder addServerTimestamp(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp value) { + if (serverTimestampBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureServerTimestampIsMutable(); + serverTimestamp_.add(value); + onChanged(); + } else { + serverTimestampBuilder_.addMessage(value); + } + return this; + } + /** + * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; + */ + public Builder addServerTimestamp( + int index, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp value) { + if (serverTimestampBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureServerTimestampIsMutable(); + serverTimestamp_.add(index, value); + onChanged(); + } else { + serverTimestampBuilder_.addMessage(index, value); + } + return this; + } + /** + * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; + */ + public Builder addServerTimestamp( + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp.Builder builderForValue) { + if (serverTimestampBuilder_ == null) { + ensureServerTimestampIsMutable(); + serverTimestamp_.add(builderForValue.build()); + onChanged(); + } else { + serverTimestampBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; + */ + public Builder addServerTimestamp( + int index, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp.Builder builderForValue) { + if (serverTimestampBuilder_ == null) { + ensureServerTimestampIsMutable(); + serverTimestamp_.add(index, builderForValue.build()); + onChanged(); + } else { + serverTimestampBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; + */ + public Builder addAllServerTimestamp( + java.lang.Iterable values) { + if (serverTimestampBuilder_ == null) { + ensureServerTimestampIsMutable(); + super.addAll(values, serverTimestamp_); + onChanged(); + } else { + serverTimestampBuilder_.addAllMessages(values); + } + return this; + } + /** + * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; + */ + public Builder clearServerTimestamp() { + if (serverTimestampBuilder_ == null) { + serverTimestamp_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + } else { + serverTimestampBuilder_.clear(); + } + return this; + } + /** + * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; + */ + public Builder removeServerTimestamp(int index) { + if (serverTimestampBuilder_ == null) { + ensureServerTimestampIsMutable(); + serverTimestamp_.remove(index); + onChanged(); + } else { + serverTimestampBuilder_.remove(index); + } + return this; + } + /** + * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp.Builder getServerTimestampBuilder( + int index) { + return 
getServerTimestampFieldBuilder().getBuilder(index); + } + /** + * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestampOrBuilder getServerTimestampOrBuilder( + int index) { + if (serverTimestampBuilder_ == null) { + return serverTimestamp_.get(index); } else { + return serverTimestampBuilder_.getMessageOrBuilder(index); + } + } + /** + * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; + */ + public java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestampOrBuilder> + getServerTimestampOrBuilderList() { + if (serverTimestampBuilder_ != null) { + return serverTimestampBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(serverTimestamp_); + } + } + /** + * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp.Builder addServerTimestampBuilder() { + return getServerTimestampFieldBuilder().addBuilder( + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp.getDefaultInstance()); + } + /** + * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp.Builder addServerTimestampBuilder( + int index) { + return getServerTimestampFieldBuilder().addBuilder( + index, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp.getDefaultInstance()); + } + /** + * repeated .hbase.pb.ServerTimestamp server_timestamp = 2; + */ + public java.util.List<org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp.Builder> + getServerTimestampBuilderList() { + return getServerTimestampFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp.Builder, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestampOrBuilder> + getServerTimestampFieldBuilder() { + if (serverTimestampBuilder_ == null) { + serverTimestampBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp.Builder, org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestampOrBuilder>( + serverTimestamp_, + ((bitField0_ & 0x00000002) == 0x00000002), + getParentForChildren(), + isClean()); + serverTimestamp_ = null; + } + return serverTimestampBuilder_; + } + + // @@protoc_insertion_point(builder_scope:hbase.pb.BackupProcContext) + } + + static { + defaultInstance = new BackupProcContext(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:hbase.pb.BackupProcContext) + } + + private static com.google.protobuf.Descriptors.Descriptor + internal_static_hbase_pb_SnapshotTableStateData_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_hbase_pb_SnapshotTableStateData_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_hbase_pb_BackupImage_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_hbase_pb_BackupImage_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_hbase_pb_ServerTimestamp_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_hbase_pb_ServerTimestamp_fieldAccessorTable; + private static
com.google.protobuf.Descriptors.Descriptor + internal_static_hbase_pb_TableServerTimestamp_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_hbase_pb_TableServerTimestamp_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_hbase_pb_BackupManifest_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_hbase_pb_BackupManifest_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_hbase_pb_TableBackupStatus_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_hbase_pb_TableBackupStatus_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_hbase_pb_BackupInfo_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_hbase_pb_BackupInfo_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_hbase_pb_BackupProcContext_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_hbase_pb_BackupProcContext_fieldAccessorTable; + + public static com.google.protobuf.Descriptors.FileDescriptor + getDescriptor() { + return descriptor; + } + private static com.google.protobuf.Descriptors.FileDescriptor + descriptor; + static { + java.lang.String[] descriptorData = { + "\n\014Backup.proto\022\010hbase.pb\032\013HBase.proto\"R\n" + + "\026SnapshotTableStateData\022\"\n\005table\030\001 \002(\0132\023" + + ".hbase.pb.TableName\022\024\n\014snapshotName\030\002 \002(" + + "\t\"\327\001\n\013BackupImage\022\021\n\tbackup_id\030\001 \002(\t\022)\n\013" + + "backup_type\030\002 \002(\0162\024.hbase.pb.BackupType\022" + + "\020\n\010root_dir\030\003 \002(\t\022\'\n\ntable_list\030\004 \003(\0132\023." 
+ + "hbase.pb.TableName\022\020\n\010start_ts\030\005 \002(\004\022\023\n\013" + + "complete_ts\030\006 \002(\004\022(\n\tancestors\030\007 \003(\0132\025.h" + + "base.pb.BackupImage\"4\n\017ServerTimestamp\022\016" + + "\n\006server\030\001 \002(\t\022\021\n\ttimestamp\030\002 \002(\004\"o\n\024Tab", + "leServerTimestamp\022\"\n\005table\030\001 \002(\0132\023.hbase" + + ".pb.TableName\0223\n\020server_timestamp\030\002 \003(\0132" + + "\031.hbase.pb.ServerTimestamp\"\220\002\n\016BackupMan" + + "ifest\022\017\n\007version\030\001 \002(\t\022\021\n\tbackup_id\030\002 \002(" + + "\t\022\"\n\004type\030\003 \002(\0162\024.hbase.pb.BackupType\022\'\n" + + "\ntable_list\030\004 \003(\0132\023.hbase.pb.TableName\022\020" + + "\n\010start_ts\030\005 \002(\004\022\023\n\013complete_ts\030\006 \002(\004\022/\n" + + "\007tst_map\030\007 \003(\0132\036.hbase.pb.TableServerTim" + + "estamp\0225\n\026dependent_backup_image\030\010 \003(\0132\025" + + ".hbase.pb.BackupImage\"]\n\021TableBackupStat", + "us\022\"\n\005table\030\001 \002(\0132\023.hbase.pb.TableName\022\022" + + "\n\ntarget_dir\030\002 \002(\t\022\020\n\010snapshot\030\003 \001(\t\"\320\004\n" + + "\nBackupInfo\022\021\n\tbackup_id\030\001 \002(\t\022\"\n\004type\030\002" + + " \002(\0162\024.hbase.pb.BackupType\022\027\n\017target_roo" + + "t_dir\030\003 \002(\t\022/\n\005state\030\004 \001(\0162 .hbase.pb.Ba" + + "ckupInfo.BackupState\022/\n\005phase\030\005 \001(\0162 .hb" + + "ase.pb.BackupInfo.BackupPhase\022\026\n\016failed_" + + "message\030\006 \001(\t\0228\n\023table_backup_status\030\007 \003" + + "(\0132\033.hbase.pb.TableBackupStatus\022\020\n\010start" + + "_ts\030\010 \001(\004\022\016\n\006end_ts\030\t \001(\004\022\020\n\010progress\030\n ", + "\001(\r\022\016\n\006job_id\030\013 \001(\t\022\026\n\016workers_number\030\014 " + + "\002(\r\022\021\n\tbandwidth\030\r \002(\004\"P\n\013BackupState\022\013\n" + + "\007WAITING\020\000\022\013\n\007RUNNING\020\001\022\014\n\010COMPLETE\020\002\022\n\n" + + "\006FAILED\020\003\022\r\n\tCANCELLED\020\004\"}\n\013BackupPhase\022" + + "\013\n\007REQUEST\020\000\022\014\n\010SNAPSHOT\020\001\022\027\n\023PREPARE_IN" + + "CREMENTAL\020\002\022\020\n\014SNAPSHOTCOPY\020\003\022\024\n\020INCREME" + + "NTAL_COPY\020\004\022\022\n\016STORE_MANIFEST\020\005\"k\n\021Backu" + + "pProcContext\022!\n\003ctx\030\001 \002(\0132\024.hbase.pb.Bac" + + "kupInfo\0223\n\020server_timestamp\030\002 \003(\0132\031.hbas" + + "e.pb.ServerTimestamp*k\n\024FullTableBackupS", + "tate\022\026\n\022PRE_SNAPSHOT_TABLE\020\001\022\023\n\017SNAPSHOT" + + "_TABLES\020\002\022\021\n\rSNAPSHOT_COPY\020\003\022\023\n\017BACKUP_C" + + "OMPLETE\020\004*f\n\033IncrementalTableBackupState" + + "\022\027\n\023PREPARE_INCREMENTAL\020\001\022\024\n\020INCREMENTAL" + + "_COPY\020\002\022\030\n\024INCR_BACKUP_COMPLETE\020\003*\'\n\nBac" + + "kupType\022\010\n\004FULL\020\000\022\017\n\013INCREMENTAL\020\001BB\n*or" + + "g.apache.hadoop.hbase.protobuf.generated" + + "B\014BackupProtosH\001\210\001\001\240\001\001" + }; + com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = + new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { + public com.google.protobuf.ExtensionRegistry assignDescriptors( + com.google.protobuf.Descriptors.FileDescriptor root) { + descriptor = root; + internal_static_hbase_pb_SnapshotTableStateData_descriptor = + getDescriptor().getMessageTypes().get(0); + internal_static_hbase_pb_SnapshotTableStateData_fieldAccessorTable = new + 
com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_hbase_pb_SnapshotTableStateData_descriptor, + new java.lang.String[] { "Table", "SnapshotName", }); + internal_static_hbase_pb_BackupImage_descriptor = + getDescriptor().getMessageTypes().get(1); + internal_static_hbase_pb_BackupImage_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_hbase_pb_BackupImage_descriptor, + new java.lang.String[] { "BackupId", "BackupType", "RootDir", "TableList", "StartTs", "CompleteTs", "Ancestors", }); + internal_static_hbase_pb_ServerTimestamp_descriptor = + getDescriptor().getMessageTypes().get(2); + internal_static_hbase_pb_ServerTimestamp_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_hbase_pb_ServerTimestamp_descriptor, + new java.lang.String[] { "Server", "Timestamp", }); + internal_static_hbase_pb_TableServerTimestamp_descriptor = + getDescriptor().getMessageTypes().get(3); + internal_static_hbase_pb_TableServerTimestamp_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_hbase_pb_TableServerTimestamp_descriptor, + new java.lang.String[] { "Table", "ServerTimestamp", }); + internal_static_hbase_pb_BackupManifest_descriptor = + getDescriptor().getMessageTypes().get(4); + internal_static_hbase_pb_BackupManifest_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_hbase_pb_BackupManifest_descriptor, + new java.lang.String[] { "Version", "BackupId", "Type", "TableList", "StartTs", "CompleteTs", "TstMap", "DependentBackupImage", }); + internal_static_hbase_pb_TableBackupStatus_descriptor = + getDescriptor().getMessageTypes().get(5); + internal_static_hbase_pb_TableBackupStatus_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_hbase_pb_TableBackupStatus_descriptor, + new java.lang.String[] { "Table", "TargetDir", "Snapshot", }); + internal_static_hbase_pb_BackupInfo_descriptor = + getDescriptor().getMessageTypes().get(6); + internal_static_hbase_pb_BackupInfo_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_hbase_pb_BackupInfo_descriptor, + new java.lang.String[] { "BackupId", "Type", "TargetRootDir", "State", "Phase", "FailedMessage", "TableBackupStatus", "StartTs", "EndTs", "Progress", "JobId", "WorkersNumber", "Bandwidth", }); + internal_static_hbase_pb_BackupProcContext_descriptor = + getDescriptor().getMessageTypes().get(7); + internal_static_hbase_pb_BackupProcContext_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_hbase_pb_BackupProcContext_descriptor, + new java.lang.String[] { "Ctx", "ServerTimestamp", }); + return null; + } + }; + com.google.protobuf.Descriptors.FileDescriptor + .internalBuildGeneratedFileFrom(descriptorData, + new com.google.protobuf.Descriptors.FileDescriptor[] { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.getDescriptor(), + }, assigner); + } + + // @@protoc_insertion_point(outer_class_scope) +} diff --git a/hbase-protocol/src/main/protobuf/Backup.proto b/hbase-protocol/src/main/protobuf/Backup.proto new file mode 100644 index 0000000..7d1ec4b --- /dev/null +++ b/hbase-protocol/src/main/protobuf/Backup.proto @@ -0,0 +1,127 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
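The generated BackupProtos classes above follow the standard protobuf 2.x builder pattern, and the Backup.proto schema that follows is their source. Below is a minimal sketch (illustrative only, not part of this patch) of building and round-tripping a BackupImage message; the backup id, root dir, timestamps and table name are placeholder values.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
    import org.apache.hadoop.hbase.protobuf.generated.BackupProtos;

    public class BackupImageProtoExample {
      public static void main(String[] args) throws Exception {
        // Describe a FULL backup image of table "default:t1" (all values are placeholders).
        BackupProtos.BackupImage image = BackupProtos.BackupImage.newBuilder()
            .setBackupId("backup_1396650096738")
            .setBackupType(BackupProtos.BackupType.FULL)
            .setRootDir("hdfs://backup.hbase.org:9000/user/biadmin/backup1")
            .addTableList(ProtobufUtil.toProtoTableName(TableName.valueOf("default", "t1")))
            .setStartTs(1396650096738L)
            .setCompleteTs(1396650097000L)
            .build();
        // Round-trip through the wire format, as the manifest read/write path does.
        byte[] bytes = image.toByteArray();
        BackupProtos.BackupImage parsed = BackupProtos.BackupImage.parseFrom(bytes);
        System.out.println(parsed.getBackupId() + " tables=" + parsed.getTableListCount());
      }
    }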
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// This file contains Backup manifest +package hbase.pb; + +option java_package = "org.apache.hadoop.hbase.protobuf.generated"; +option java_outer_classname = "BackupProtos"; +option java_generic_services = true; +option java_generate_equals_and_hash = true; +option optimize_for = SPEED; + +import "HBase.proto"; + +enum FullTableBackupState { + PRE_SNAPSHOT_TABLE = 1; + SNAPSHOT_TABLES = 2; + SNAPSHOT_COPY = 3; + BACKUP_COMPLETE = 4; +} + +enum IncrementalTableBackupState { + PREPARE_INCREMENTAL = 1; + INCREMENTAL_COPY = 2; + INCR_BACKUP_COMPLETE = 3; +} + +message SnapshotTableStateData { + required TableName table = 1; + required string snapshotName = 2; +} + +enum BackupType { + FULL = 0; + INCREMENTAL = 1; +} + +message BackupImage { + required string backup_id = 1; + required BackupType backup_type = 2; + required string root_dir = 3; + repeated TableName table_list = 4; + required uint64 start_ts = 5; + required uint64 complete_ts = 6; + repeated BackupImage ancestors = 7; +} + +message ServerTimestamp { + required string server = 1; + required uint64 timestamp = 2; +} + +message TableServerTimestamp { + required TableName table = 1; + repeated ServerTimestamp server_timestamp = 2; +} + +message BackupManifest { + required string version = 1; + required string backup_id = 2; + required BackupType type = 3; + repeated TableName table_list = 4; + required uint64 start_ts = 5; + required uint64 complete_ts = 6; + repeated TableServerTimestamp tst_map = 7; + repeated BackupImage dependent_backup_image = 8; +} + +message TableBackupStatus { + required TableName table = 1; + required string target_dir = 2; + optional string snapshot = 3; +} + +message BackupInfo { + required string backup_id = 1; + required BackupType type = 2; + required string target_root_dir = 3; + optional BackupState state = 4; + optional BackupPhase phase = 5; + optional string failed_message = 6; + repeated TableBackupStatus table_backup_status = 7; + optional uint64 start_ts = 8; + optional uint64 end_ts = 9; + optional uint32 progress = 10; + optional string job_id = 11; + required uint32 workers_number = 12; + required uint64 bandwidth = 13; + + enum BackupState { + WAITING = 0; + RUNNING = 1; + COMPLETE = 2; + FAILED = 3; + CANCELLED = 4; + } + + enum BackupPhase { + REQUEST = 0; + SNAPSHOT = 1; + PREPARE_INCREMENTAL = 2; + SNAPSHOTCOPY = 3; + INCREMENTAL_COPY = 4; + STORE_MANIFEST = 5; + } +} + +message BackupProcContext { + required BackupInfo ctx = 1; + repeated ServerTimestamp server_timestamp = 2; +} + diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupCopyService.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupCopyService.java new file mode 100644 index 0000000..26e20f1 --- /dev/null +++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupCopyService.java @@ -0,0 +1,56 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.backup; + +import java.io.IOException; + +import org.apache.hadoop.conf.Configurable; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.backup.impl.BackupManager; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; + +@InterfaceAudience.Private +@InterfaceStability.Evolving +public interface BackupCopyService extends Configurable { + static enum Type { + FULL, INCREMENTAL + } + + /** + * Copy backup data + * @param backupContext - context + * @param backupManager - manager + * @param conf - configuration + * @param copyType - copy type + * @param options - array of options (implementation-specific) + * @return result (0 - success) + * @throws IOException + */ + public int copy(BackupInfo backupContext, BackupManager backupManager, Configuration conf, + BackupCopyService.Type copyType, String[] options) throws IOException; + + + /** + * Cancel copy job + * @param jobHandler - copy job handler + * @throws IOException + */ + public void cancelCopyJob(String jobHandler) throws IOException; +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupDriver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupDriver.java new file mode 100644 index 0000000..f692bd0 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupDriver.java @@ -0,0 +1,136 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
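BackupCopyService, defined just above, is the pluggable contract behind the actual data movement; the default implementation wired in by BackupRestoreServerFactory later in this patch is MapReduceBackupCopyService. A skeletal no-op implementation, shown only to illustrate the interface shape (the class name NoOpBackupCopyService is hypothetical):

    package org.apache.hadoop.hbase.backup;

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.backup.impl.BackupManager;

    // Hypothetical implementation used purely to illustrate the BackupCopyService contract.
    public class NoOpBackupCopyService implements BackupCopyService {
      private Configuration conf;

      @Override
      public void setConf(Configuration conf) { this.conf = conf; }

      @Override
      public Configuration getConf() { return conf; }

      @Override
      public int copy(BackupInfo backupContext, BackupManager backupManager, Configuration conf,
          BackupCopyService.Type copyType, String[] options) throws IOException {
        // A real service would launch a snapshot export (FULL) or WAL copy (INCREMENTAL) job here.
        return 0; // 0 means success, per the interface contract
      }

      @Override
      public void cancelCopyJob(String jobHandler) throws IOException {
        // Nothing to cancel for a no-op copy.
      }
    }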
+ */ +package org.apache.hadoop.hbase.backup; + +import java.io.IOException; + +import org.apache.commons.cli.CommandLine; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.HBaseConfiguration; +import org.apache.hadoop.hbase.backup.impl.BackupCommands; +import org.apache.hadoop.hbase.backup.impl.BackupRestoreConstants; +import org.apache.hadoop.hbase.backup.impl.BackupRestoreConstants.BackupCommand; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; +import org.apache.hadoop.hbase.util.AbstractHBaseTool; +import org.apache.hadoop.hbase.util.LogUtils; +import org.apache.hadoop.util.ToolRunner; +import org.apache.log4j.Level; +import org.apache.log4j.Logger; + +@InterfaceAudience.Private +@InterfaceStability.Evolving +public class BackupDriver extends AbstractHBaseTool { + + private static final Log LOG = LogFactory.getLog(BackupDriver.class); + private CommandLine cmd; + + public BackupDriver() throws IOException + { + init(); + } + + protected void init() throws IOException { + // define supported options + addOptNoArg("debug", "Enable debug loggings"); + addOptNoArg("all", "All tables"); + addOptWithArg("t", "Table name"); + addOptWithArg("b", "Bandwidth (MB/s)"); + addOptWithArg("w", "Number of workers"); + addOptWithArg("n", "History length"); + addOptWithArg("set", "Backup set name"); + + // disable irrelevant loggers to avoid it mess up command output + LogUtils.disableUselessLoggers(LOG); + } + + private int parseAndRun(String[] args) throws IOException { + String cmd = null; + String[] remainArgs = null; + if (args == null || args.length == 0) { + BackupCommands.createCommand(getConf(), + BackupRestoreConstants.BackupCommand.HELP, null).execute(); + } else { + cmd = args[0]; + remainArgs = new String[args.length - 1]; + if (args.length > 1) { + System.arraycopy(args, 1, remainArgs, 0, args.length - 1); + } + } + + BackupCommand type = BackupCommand.HELP; + if (BackupCommand.CREATE.name().equalsIgnoreCase(cmd)) { + type = BackupCommand.CREATE; + } else if (BackupCommand.HELP.name().equalsIgnoreCase(cmd)) { + type = BackupCommand.HELP; + } else if (BackupCommand.DELETE.name().equalsIgnoreCase(cmd)) { + type = BackupCommand.DELETE; + } else if (BackupCommand.DESCRIBE.name().equalsIgnoreCase(cmd)) { + type = BackupCommand.DESCRIBE; + } else if (BackupCommand.HISTORY.name().equalsIgnoreCase(cmd)) { + type = BackupCommand.HISTORY; + } else if (BackupCommand.PROGRESS.name().equalsIgnoreCase(cmd)) { + type = BackupCommand.PROGRESS; + } else if (BackupCommand.SET.name().equalsIgnoreCase(cmd)) { + type = BackupCommand.SET; + } else { + System.out.println("Unsupported command for backup: " + cmd); + return -1; + } + + // enable debug logging + Logger backupClientLogger = Logger.getLogger("org.apache.hadoop.hbase.backup"); + if (this.cmd.hasOption("debug")) { + backupClientLogger.setLevel(Level.DEBUG); + } else { + backupClientLogger.setLevel(Level.INFO); + } + + // TODO: get rid of Command altogether? 
+ BackupCommands.Command command = BackupCommands.createCommand(getConf(), type, this.cmd); + if( type == BackupCommand.CREATE && conf != null) { + ((BackupCommands.CreateCommand) command).setConf(conf); + } + command.execute(); + return 0; + } + + @Override + protected void addOptions() { + } + + @Override + protected void processOptions(CommandLine cmd) { + this.cmd = cmd; + } + + @Override + protected int doWork() throws Exception { + return parseAndRun(cmd.getArgs()); + } + + public static void main(String[] args) throws Exception { + Configuration conf = HBaseConfiguration.create(); + int ret = ToolRunner.run(conf, new BackupDriver(), args); + System.exit(ret); + } + + +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupRestoreServerFactory.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupRestoreServerFactory.java new file mode 100644 index 0000000..25ec9d9 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupRestoreServerFactory.java @@ -0,0 +1,65 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
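Since BackupDriver extends AbstractHBaseTool, it can also be driven programmatically through ToolRunner rather than from the command line. A small sketch, assuming a running cluster with backup enabled; it issues the history subcommand, and the -n option is the "History length" option registered above:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.backup.BackupDriver;
    import org.apache.hadoop.util.ToolRunner;

    public class BackupHistoryCli {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Dispatches to BackupCommand.HISTORY in parseAndRun, limited to the last 10 sessions.
        int ret = ToolRunner.run(conf, new BackupDriver(), new String[] { "history", "-n", "10" });
        System.exit(ret);
      }
    }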
+ */ +package org.apache.hadoop.hbase.backup; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.backup.mapreduce.MapReduceBackupCopyService; +import org.apache.hadoop.hbase.backup.mapreduce.MapReduceRestoreService; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; +import org.apache.hadoop.util.ReflectionUtils; + +@InterfaceAudience.Private +@InterfaceStability.Evolving +public final class BackupRestoreServerFactory { + + public final static String HBASE_INCR_RESTORE_IMPL_CLASS = "hbase.incremental.restore.class"; + public final static String HBASE_BACKUP_COPY_IMPL_CLASS = "hbase.backup.copy.class"; + + private BackupRestoreServerFactory(){ + throw new AssertionError("Instantiating utility class..."); + } + + /** + * Gets incremental restore service + * @param conf - configuration + * @return incremental backup service instance + */ + public static IncrementalRestoreService getIncrementalRestoreService(Configuration conf) { + Class cls = + conf.getClass(HBASE_INCR_RESTORE_IMPL_CLASS, MapReduceRestoreService.class, + IncrementalRestoreService.class); + IncrementalRestoreService service = ReflectionUtils.newInstance(cls, conf); + service.setConf(conf); + return service; + } + + /** + * Gets backup copy service + * @param conf - configuration + * @return backup copy service + */ + public static BackupCopyService getBackupCopyService(Configuration conf) { + Class cls = + conf.getClass(HBASE_BACKUP_COPY_IMPL_CLASS, MapReduceBackupCopyService.class, + BackupCopyService.class); + BackupCopyService service = ReflectionUtils.newInstance(cls, conf);; + service.setConf(conf); + return service; + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HBackupFileSystem.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HBackupFileSystem.java new file mode 100644 index 0000000..9a8e14b --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HBackupFileSystem.java @@ -0,0 +1,138 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
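The two configuration keys above (hbase.backup.copy.class and hbase.incremental.restore.class) are what make both services pluggable. A short sketch of the lookup; the commented-out override refers to the hypothetical NoOpBackupCopyService from the earlier sketch:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.backup.BackupCopyService;
    import org.apache.hadoop.hbase.backup.BackupRestoreServerFactory;

    public class CopyServiceLookup {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Optional override of the default MapReduceBackupCopyService:
        // conf.setClass(BackupRestoreServerFactory.HBASE_BACKUP_COPY_IMPL_CLASS,
        //     NoOpBackupCopyService.class, BackupCopyService.class);
        BackupCopyService copy = BackupRestoreServerFactory.getBackupCopyService(conf);
        System.out.println("Copy service: " + copy.getClass().getName());
      }
    }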
+ */ + +package org.apache.hadoop.hbase.backup; + +import java.io.IOException; +import java.util.HashMap; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.backup.impl.BackupManifest; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; + +/** + * View to an on-disk Backup Image FileSytem + * Provides the set of methods necessary to interact with the on-disk Backup Image data. + */ +@InterfaceAudience.Private +@InterfaceStability.Evolving +public class HBackupFileSystem { + public static final Log LOG = LogFactory.getLog(HBackupFileSystem.class); + + /** + * This is utility class. + */ + private HBackupFileSystem() { + } + + /** + * Given the backup root dir, backup id and the table name, return the backup image location, + * which is also where the backup manifest file is. return value look like: + * "hdfs://backup.hbase.org:9000/user/biadmin/backup1/backup_1396650096738/default/t1_dn/" + * @param backupRootDir backup root directory + * @param backupId backup id + * @param tableName table name + * @return backupPath String for the particular table + */ + public static String getTableBackupDir(String backupRootDir, String backupId, + TableName tableName) { + return backupRootDir + Path.SEPARATOR+ backupId + Path.SEPARATOR + + tableName.getNamespaceAsString() + Path.SEPARATOR + + tableName.getQualifierAsString() + Path.SEPARATOR ; + } + + /** + * Given the backup root dir, backup id and the table name, return the backup image location, + * which is also where the backup manifest file is. return value look like: + * "hdfs://backup.hbase.org:9000/user/biadmin/backup_1396650096738/backup1/default/t1_dn/" + * @param backupRootPath backup root path + * @param tableName table name + * @param backupId backup Id + * @return backupPath for the particular table + */ + public static Path getTableBackupPath(TableName tableName, Path backupRootPath, String backupId) { + return new Path(getTableBackupDir(backupRootPath.toString(), backupId, tableName)); + } + + /** + * Given the backup root dir and the backup id, return the log file location for an incremental + * backup. 
+ * @param backupRootDir backup root directory + * @param backupId backup id + * @return logBackupDir: ".../user/biadmin/backup1/WALs/backup_1396650096738" + */ + public static String getLogBackupDir(String backupRootDir, String backupId) { + return backupRootDir + Path.SEPARATOR + backupId+ Path.SEPARATOR + + HConstants.HREGION_LOGDIR_NAME; + } + + public static Path getLogBackupPath(String backupRootDir, String backupId) { + return new Path(getLogBackupDir(backupRootDir, backupId)); + } + + private static Path getManifestPath(TableName tableName, Configuration conf, + Path backupRootPath, String backupId) throws IOException { + Path manifestPath = new Path(getTableBackupPath(tableName, backupRootPath, backupId), + BackupManifest.MANIFEST_FILE_NAME); + FileSystem fs = backupRootPath.getFileSystem(conf); + if (!fs.exists(manifestPath)) { + // check log dir for incremental backup case + manifestPath = + new Path(getLogBackupDir(backupRootPath.toString(), backupId) + Path.SEPARATOR + + BackupManifest.MANIFEST_FILE_NAME); + if (!fs.exists(manifestPath)) { + String errorMsg = + "Could not find backup manifest " + BackupManifest.MANIFEST_FILE_NAME + " for " + + backupId + " in " + backupRootPath.toString() + + ". Did " + backupId + " correspond to previously taken backup ?"; + throw new IOException(errorMsg); + } + } + return manifestPath; + } + + public static BackupManifest getManifest(TableName tableName, Configuration conf, + Path backupRootPath, String backupId) throws IOException { + BackupManifest manifest = new BackupManifest(conf, + getManifestPath(tableName, conf, backupRootPath, backupId)); + return manifest; + } + + /** + * Check whether the backup image path and there is manifest file in the path. + * @param backupManifestMap If all the manifests are found, then they are put into this map + * @param tableArray the tables involved + * @throws IOException exception + */ + public static void checkImageManifestExist(HashMap backupManifestMap, + TableName[] tableArray, Configuration conf, + Path backupRootPath, String backupId) throws IOException { + for (TableName tableName : tableArray) { + BackupManifest manifest = getManifest(tableName, conf, backupRootPath, backupId); + backupManifestMap.put(tableName, manifest); + } + } +} \ No newline at end of file diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/IncrementalRestoreService.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/IncrementalRestoreService.java new file mode 100644 index 0000000..ae48480 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/IncrementalRestoreService.java @@ -0,0 +1,42 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
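Putting the HBackupFileSystem path helpers above together, a small sketch of the on-disk layout they produce for a sample backup root, backup id and table (the sample values are taken from the Javadoc examples and are placeholders):

    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.backup.HBackupFileSystem;

    public class BackupLayoutExample {
      public static void main(String[] args) {
        String root = "hdfs://backup.hbase.org:9000/user/biadmin/backup1";
        String backupId = "backup_1396650096738";
        TableName table = TableName.valueOf("default", "t1_dn");

        // .../backup1/backup_1396650096738/default/t1_dn/
        System.out.println(HBackupFileSystem.getTableBackupDir(root, backupId, table));
        // .../backup1/backup_1396650096738/WALs (log dir used for incremental backups)
        Path logDir = HBackupFileSystem.getLogBackupPath(root, backupId);
        System.out.println(logDir);
      }
    }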
+ */ + +package org.apache.hadoop.hbase.backup; + +import java.io.IOException; + +import org.apache.hadoop.conf.Configurable; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; + +@InterfaceAudience.Private +@InterfaceStability.Evolving +public interface IncrementalRestoreService extends Configurable{ + + /** + * Run restore operation + * @param logDirectoryPaths - path array of WAL log directories + * @param fromTables - from tables + * @param toTables - to tables + * @throws IOException + */ + public void run(Path[] logDirectoryPaths, TableName[] fromTables, TableName[] toTables) + throws IOException; +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/RestoreDriver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/RestoreDriver.java new file mode 100644 index 0000000..c66ac6e --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/RestoreDriver.java @@ -0,0 +1,195 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
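IncrementalRestoreService, shown above, is the server-side hook that replays backed-up WALs into target tables; the default bound by the factory is MapReduceRestoreService. A sketch of obtaining it and invoking run(); the WAL directory and the target table name are placeholders and a running cluster is assumed:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.backup.BackupRestoreServerFactory;
    import org.apache.hadoop.hbase.backup.IncrementalRestoreService;

    public class IncrementalReplayExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        IncrementalRestoreService restore =
            BackupRestoreServerFactory.getIncrementalRestoreService(conf);
        Path[] walDirs = new Path[] { new Path(
            "hdfs://backup.hbase.org:9000/user/biadmin/backup1/backup_1396650096738/WALs") };
        TableName[] from = new TableName[] { TableName.valueOf("default", "t1_dn") };
        TableName[] to = new TableName[] { TableName.valueOf("default", "t1_dn_restored") };
        restore.run(walDirs, from, to); // replays the backed-up WALs into the target table
      }
    }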
+ */ +package org.apache.hadoop.hbase.backup; + +import java.io.IOException; +import java.util.List; + +import org.apache.commons.cli.CommandLine; +import org.apache.commons.lang.StringUtils; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.HBaseConfiguration; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.backup.impl.BackupRestoreConstants; +import org.apache.hadoop.hbase.backup.impl.BackupSystemTable; +import org.apache.hadoop.hbase.backup.util.BackupServerUtil; +import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.client.ConnectionFactory; +import org.apache.hadoop.hbase.util.AbstractHBaseTool; +import org.apache.hadoop.hbase.util.LogUtils; +import org.apache.hadoop.util.ToolRunner; +import org.apache.log4j.Level; +import org.apache.log4j.Logger; + +public class RestoreDriver extends AbstractHBaseTool { + + private static final Log LOG = LogFactory.getLog(RestoreDriver.class); + private CommandLine cmd; + + private static final String OPTION_OVERWRITE = "overwrite"; + private static final String OPTION_CHECK = "check"; + private static final String OPTION_AUTOMATIC = "automatic"; + private static final String OPTION_SET = "set"; + private static final String OPTION_DEBUG = "debug"; + + + private static final String USAGE = + "Usage: hbase restore [-set set_name] [tableMapping] \n" + + " [-overwrite] [-check] [-automatic]\n" + + " backup_root_path The parent location where the backup images are stored\n" + + " backup_id The id identifying the backup image\n" + + " table(s) Table(s) from the backup image to be restored.\n" + + " Tables are separated by comma.\n" + + " Options:\n" + + " tableMapping A comma separated list of target tables.\n" + + " If specified, each table in must have a mapping.\n" + + " -overwrite With this option, restore overwrites to the existing table " + + "if there's any in\n" + + " restore target. 
The existing table must be online before restore.\n" + + " -check With this option, restore sequence and dependencies are checked\n" + + " and verified without executing the restore\n" + + " -set set_name Backup set to restore, mutually exclusive with table list ."; + + + protected RestoreDriver() throws IOException + { + init(); + } + + protected void init() throws IOException { + // define supported options + addOptNoArg(OPTION_OVERWRITE, + "Overwrite the data if any of the restore target tables exists"); + addOptNoArg(OPTION_CHECK, "Check restore sequence and dependencies"); + addOptNoArg(OPTION_AUTOMATIC, "Restore all dependencies"); + addOptNoArg(OPTION_DEBUG, "Enable debug logging"); + addOptWithArg(OPTION_SET, "Backup set name"); + + // disable irrelevant loggers to avoid it mess up command output + LogUtils.disableUselessLoggers(LOG); + } + + private int parseAndRun(String[] args) { + + // enable debug logging + Logger backupClientLogger = Logger.getLogger("org.apache.hadoop.hbase.backup"); + if (cmd.hasOption(OPTION_DEBUG)) { + backupClientLogger.setLevel(Level.DEBUG); + } + + // whether to overwrite to existing table if any, false by default + boolean isOverwrite = cmd.hasOption(OPTION_OVERWRITE); + if (isOverwrite) { + LOG.debug("Found -overwrite option in restore command, " + + "will overwrite to existing table if any in the restore target"); + } + + // whether to only check the dependencies, false by default + boolean check = cmd.hasOption(OPTION_CHECK); + if (check) { + LOG.debug("Found -check option in restore command, " + + "will check and verify the dependencies"); + } + + LOG.debug("Will automatically restore all the dependencies"); + + // parse main restore command options + String[] remainArgs = cmd.getArgs(); + if (remainArgs.length < 3 && !cmd.hasOption(OPTION_SET) || + (cmd.hasOption(OPTION_SET) && remainArgs.length < 2)) { + System.out.println("ERROR: remain args length="+ remainArgs.length); + System.out.println(USAGE); + return -1; + } + + String backupRootDir = remainArgs[0]; + String backupId = remainArgs[1]; + String tables = null; + String tableMapping = null; + // Check backup set + if (cmd.hasOption(OPTION_SET)) { + String setName = cmd.getOptionValue(OPTION_SET); + try{ + tables = getTablesForSet(setName, conf); + } catch(IOException e){ + System.out.println("ERROR: "+ e.getMessage()+" for setName="+setName); + return -2; + } + if (tables == null) { + System.out.println("ERROR: Backup set '" + setName + + "' is either empty or does not exist"); + return -3; + } + tableMapping = (remainArgs.length > 2) ? remainArgs[2] : null; + } else { + tables = remainArgs[2]; + tableMapping = (remainArgs.length > 3) ? 
remainArgs[3] : null; + } + + TableName[] sTableArray = BackupServerUtil.parseTableNames(tables); + TableName[] tTableArray = BackupServerUtil.parseTableNames(tableMapping); + + if (sTableArray != null && tTableArray != null && (sTableArray.length != tTableArray.length)) { + System.out.println("ERROR: table mapping mismatch: " + tables + " : " + tableMapping); + System.out.println(USAGE); + return -4; + } + + + RestoreClient client = BackupRestoreClientFactory.getRestoreClient(getConf()); + try{ + client.restore(backupRootDir, backupId, check, sTableArray, + tTableArray, isOverwrite); + } catch (Exception e){ + e.printStackTrace(); + return -5; + } + return 0; + } + + private String getTablesForSet(String name, Configuration conf) throws IOException { + try (final Connection conn = ConnectionFactory.createConnection(conf); + final BackupSystemTable table = new BackupSystemTable(conn)) { + List tables = table.describeBackupSet(name); + if (tables == null) return null; + return StringUtils.join(tables, BackupRestoreConstants.TABLENAME_DELIMITER_IN_COMMAND); + } + } + + @Override + protected void addOptions() { + } + + @Override + protected void processOptions(CommandLine cmd) { + this.cmd = cmd; + } + + @Override + protected int doWork() throws Exception { + return parseAndRun(cmd.getArgs()); + } + + public static void main(String[] args) throws Exception { + Configuration conf = HBaseConfiguration.create(); + int ret = ToolRunner.run(conf, new RestoreDriver(), args); + System.exit(ret); + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManager.java new file mode 100644 index 0000000..6620fc2 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManager.java @@ -0,0 +1,505 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
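As the RestoreDriver parsing above shows, the positional arguments are, in order: backup root directory, backup id, source tables, and an optional target table mapping. A programmatic sketch of the same invocation; it sits in the org.apache.hadoop.hbase.backup package only because the RestoreDriver constructor is protected, and all values are placeholders:

    package org.apache.hadoop.hbase.backup;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.util.ToolRunner;

    public class RestoreInvocationExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // -overwrite allows restoring over an existing (online) target table.
        String[] restoreArgs = new String[] {
            "-overwrite",
            "hdfs://backup.hbase.org:9000/user/biadmin/backup1", // backup root dir
            "backup_1396650096738",                              // backup id
            "default:t1_dn",                                     // source table(s)
            "default:t1_dn_restored" };                          // target table mapping
        int ret = ToolRunner.run(conf, new RestoreDriver(), restoreArgs);
        System.out.println("restore exit code: " + ret);
      }
    }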
+ */ + +package org.apache.hadoop.hbase.backup.impl; + +import java.io.Closeable; +import java.io.IOException; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.Iterator; +import java.util.List; +import java.util.Set; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.LinkedBlockingQueue; +import java.util.concurrent.ThreadPoolExecutor; +import java.util.concurrent.TimeUnit; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.backup.BackupInfo; +import org.apache.hadoop.hbase.backup.BackupType; +import org.apache.hadoop.hbase.backup.HBackupFileSystem; +import org.apache.hadoop.hbase.backup.BackupInfo.BackupState; +import org.apache.hadoop.hbase.backup.impl.BackupManifest.BackupImage; +import org.apache.hadoop.hbase.backup.master.BackupController; +import org.apache.hadoop.hbase.backup.master.BackupLogCleaner; +import org.apache.hadoop.hbase.backup.master.LogRollMasterProcedureManager; +import org.apache.hadoop.hbase.backup.regionserver.LogRollRegionServerProcedureManager; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; +import org.apache.hadoop.hbase.client.Admin; +import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.client.ConnectionFactory; + +import com.google.common.util.concurrent.ThreadFactoryBuilder; + +/** + * Handles backup requests on server-side, creates backup context records in hbase:backup + * to keep track backup. The timestamps kept in hbase:backup table will be used for future + * incremental backup. Creates BackupContext and DispatchRequest. + */ +@InterfaceAudience.Private +@InterfaceStability.Evolving +public class BackupManager implements Closeable { + private static final Log LOG = LogFactory.getLog(BackupManager.class); + + private Configuration conf = null; + private BackupInfo backupContext = null; + + private ExecutorService pool = null; + + private boolean backupComplete = false; + + private BackupSystemTable systemTable; + + private final Connection conn; + + /** + * Backup manager constructor. + * @param conf configuration + * @throws IOException exception + */ + public BackupManager(Configuration conf) throws IOException { + if (!conf.getBoolean(HConstants.BACKUP_ENABLE_KEY, HConstants.BACKUP_ENABLE_DEFAULT)) { + throw new BackupException("HBase backup is not enabled. 
Check your " + + HConstants.BACKUP_ENABLE_KEY + " setting."); + } + this.conf = conf; + this.conn = ConnectionFactory.createConnection(conf); + this.systemTable = new BackupSystemTable(conn); + + } + + /** + * Return backup context + */ + protected BackupInfo getBackupContext() + { + return backupContext; + } + /** + * This method modifies the master's configuration in order to inject backup-related features + * @param conf configuration + */ + public static void decorateMasterConfiguration(Configuration conf) { + if (!isBackupEnabled(conf)) { + return; + } + // Add WAL archive cleaner plug-in + String plugins = conf.get(HConstants.HBASE_MASTER_LOGCLEANER_PLUGINS); + String cleanerClass = BackupLogCleaner.class.getCanonicalName(); + if (!plugins.contains(cleanerClass)) { + conf.set(HConstants.HBASE_MASTER_LOGCLEANER_PLUGINS, plugins + "," + cleanerClass); + } + + String classes = conf.get("hbase.procedure.master.classes"); + String masterProcedureClass = LogRollMasterProcedureManager.class.getName(); + if(classes == null){ + conf.set("hbase.procedure.master.classes", masterProcedureClass); + } else if(!classes.contains(masterProcedureClass)){ + conf.set("hbase.procedure.master.classes", classes +","+masterProcedureClass); + } + + // Set Master Observer - Backup Controller + classes = conf.get("hbase.coprocessor.master.classes"); + String observerClass = BackupController.class.getName(); + if(classes == null){ + conf.set("hbase.coprocessor.master.classes", observerClass); + } else if(!classes.contains(observerClass)){ + conf.set("hbase.coprocessor.master.classes", classes +","+observerClass); + } + + if (LOG.isDebugEnabled()) { + LOG.debug("Added log cleaner: " + cleanerClass); + LOG.debug("Added master procedure manager: "+masterProcedureClass); + LOG.debug("Added master observer: "+observerClass); + } + + } + + /** + * This method modifies the RS configuration in order to inject backup-related features + * @param conf configuration + */ + public static void decorateRSConfiguration(Configuration conf) { + if (!isBackupEnabled(conf)) { + return; + } + + String classes = conf.get("hbase.procedure.regionserver.classes"); + String regionProcedureClass = LogRollRegionServerProcedureManager.class.getName(); + if(classes == null){ + conf.set("hbase.procedure.regionserver.classes", regionProcedureClass); + } else if(!classes.contains(regionProcedureClass)){ + conf.set("hbase.procedure.regionserver.classes", classes +","+regionProcedureClass); + } + if (LOG.isDebugEnabled()) { + LOG.debug("Added region procedure manager: "+regionProcedureClass); + } + + } + + + private static boolean isBackupEnabled(Configuration conf) { + return conf.getBoolean(HConstants.BACKUP_ENABLE_KEY, HConstants.BACKUP_ENABLE_DEFAULT); + } + + /** + * Get configuration + * @return configuration + */ + Configuration getConf() { + return conf; + } + + /** + * Stop all the work of backup. + */ + @Override + public void close() { + // currently, we shutdown now for all ongoing back handlers, we may need to do something like + // record the failed list somewhere later + if (this.pool != null) { + this.pool.shutdownNow(); + } + if (systemTable != null) { + try { + systemTable.close(); + } catch (Exception e) { + LOG.error(e); + } + } + if (conn != null) { + try { + conn.close(); + } catch (IOException e) { + LOG.error(e); + } + } + } + + /** + * Create a BackupContext based on input backup request. 
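decorateMasterConfiguration and decorateRSConfiguration above only append the backup plug-ins to the relevant keys when backup is enabled. A small sketch showing which master-side keys end up populated; the backup flag is set explicitly here so the decoration is not skipped:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HConstants;
    import org.apache.hadoop.hbase.backup.impl.BackupManager;

    public class DecorateConfExample {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        conf.setBoolean(HConstants.BACKUP_ENABLE_KEY, true); // ensure decoration is applied
        BackupManager.decorateMasterConfiguration(conf);
        // The log cleaner, procedure manager and master observer are appended to existing values.
        System.out.println(conf.get(HConstants.HBASE_MASTER_LOGCLEANER_PLUGINS));
        System.out.println(conf.get("hbase.procedure.master.classes"));
        System.out.println(conf.get("hbase.coprocessor.master.classes"));
      }
    }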
+ * @param backupId backup id + * @param type type + * @param tableList table list + * @param targetRootDir root dir + * @param workers number of parallel workers + * @param bandwidth bandwidth per worker in MB per sec + * @return BackupInfo + * @throws BackupException exception + */ + public BackupInfo createBackupContext(String backupId, BackupType type, + List tableList, String targetRootDir, int workers, long bandwidth) + throws BackupException { + if (targetRootDir == null) { + throw new BackupException("Wrong backup request parameter: target backup root directory"); + } + + if (type == BackupType.FULL && (tableList == null || tableList.isEmpty())) { + // If table list is null for full backup, which means backup all tables. Then fill the table + // list with all user tables from meta. It no table available, throw the request exception. + + HTableDescriptor[] htds = null; + try (Admin hbadmin = conn.getAdmin()) { + htds = hbadmin.listTables(); + } catch (Exception e) { + throw new BackupException(e); + } + + if (htds == null) { + throw new BackupException("No table exists for full backup of all tables."); + } else { + tableList = new ArrayList<>(); + for (HTableDescriptor hTableDescriptor : htds) { + tableList.add(hTableDescriptor.getTableName()); + } + + LOG.info("Full backup all the tables available in the cluster: " + tableList); + } + } + + // there are one or more tables in the table list + backupContext = new BackupInfo(backupId, type, + tableList.toArray(new TableName[tableList.size()]), + targetRootDir); + backupContext.setBandwidth(bandwidth); + backupContext.setWorkers(workers); + return backupContext; + } + + /** + * Check if any ongoing backup. Currently, we only reply on checking status in hbase:backup. We + * need to consider to handle the case of orphan records in the future. Otherwise, all the coming + * request will fail. + * @return the ongoing backup id if on going backup exists, otherwise null + * @throws IOException exception + */ + private String getOngoingBackupId() throws IOException { + + ArrayList sessions = systemTable.getBackupContexts(BackupState.RUNNING); + if (sessions.size() == 0) { + return null; + } + return sessions.get(0).getBackupId(); + } + + /** + * Start the backup manager service. + * @throws IOException exception + */ + public void initialize() throws IOException { + String ongoingBackupId = this.getOngoingBackupId(); + if (ongoingBackupId != null) { + LOG.info("There is a ongoing backup " + ongoingBackupId + + ". Can not launch new backup until no ongoing backup remains."); + throw new BackupException("There is ongoing backup."); + } + + // Initialize thread pools + int nrThreads = this.conf.getInt("hbase.backup.threads.max", 1); + ThreadFactoryBuilder builder = new ThreadFactoryBuilder(); + builder.setNameFormat("BackupHandler-%1$d"); + this.pool = + new ThreadPoolExecutor(nrThreads, nrThreads, 60, TimeUnit.SECONDS, + new LinkedBlockingQueue(), builder.build()); + ((ThreadPoolExecutor) pool).allowCoreThreadTimeOut(true); + } + + public void setBackupContext(BackupInfo backupContext) { + this.backupContext = backupContext; + } + + /** + * Get direct ancestors of the current backup. 
+ * @param backupCtx The backup context for the current backup + * @return The ancestors for the current backup + * @throws IOException exception + * @throws BackupException exception + */ + public ArrayList getAncestors(BackupInfo backupCtx) throws IOException, + BackupException { + LOG.debug("Getting the direct ancestors of the current backup "+ + backupCtx.getBackupId()); + + ArrayList ancestors = new ArrayList(); + + // full backup does not have ancestor + if (backupCtx.getType() == BackupType.FULL) { + LOG.debug("Current backup is a full backup, no direct ancestor for it."); + return ancestors; + } + + // get all backup history list in descending order + + ArrayList allHistoryList = getBackupHistory(true); + for (BackupInfo backup : allHistoryList) { + BackupImage image = + new BackupImage(backup.getBackupId(), backup.getType(), + backup.getTargetRootDir(), + backup.getTableNames(), backup.getStartTs(), backup + .getEndTs()); + // add the full backup image as an ancestor until the last incremental backup + if (backup.getType().equals(BackupType.FULL)) { + // check the backup image coverage, if previous image could be covered by the newer ones, + // then no need to add + if (!BackupManifest.canCoverImage(ancestors, image)) { + ancestors.add(image); + } + } else { + // found last incremental backup, if previously added full backup ancestor images can cover + // it, then this incremental ancestor is not the dependent of the current incremental + // backup, that is to say, this is the backup scope boundary of current table set. + // Otherwise, this incremental backup ancestor is the dependent ancestor of the ongoing + // incremental backup + if (BackupManifest.canCoverImage(ancestors, image)) { + LOG.debug("Met the backup boundary of the current table set. " + + "The root full backup images for the current backup scope:"); + for (BackupImage image1 : ancestors) { + LOG.debug(" BackupId: " + image1.getBackupId() + ", Backup directory: " + + image1.getRootDir()); + } + } else { + Path logBackupPath = + HBackupFileSystem.getLogBackupPath(backup.getTargetRootDir(), + backup.getBackupId()); + LOG.debug("Current backup has an incremental backup ancestor, " + + "touching its image manifest in " + logBackupPath.toString() + + " to construct the dependency."); + BackupManifest lastIncrImgManifest = new BackupManifest(conf, logBackupPath); + BackupImage lastIncrImage = lastIncrImgManifest.getBackupImage(); + ancestors.add(lastIncrImage); + + LOG.debug("Last dependent incremental backup image information:"); + LOG.debug(" Token: " + lastIncrImage.getBackupId()); + LOG.debug(" Backup directory: " + lastIncrImage.getRootDir()); + } + } + } + LOG.debug("Got " + ancestors.size() + " ancestors for the current backup."); + return ancestors; + } + + /** + * Get the direct ancestors of this backup for one table involved. 
+ * @param backupContext backup context + * @param table table + * @return backupImages on the dependency list + * @throws BackupException exception + * @throws IOException exception + */ + public ArrayList getAncestors(BackupInfo backupContext, TableName table) + throws BackupException, IOException { + ArrayList ancestors = getAncestors(backupContext); + ArrayList tableAncestors = new ArrayList(); + for (BackupImage image : ancestors) { + if (image.hasTable(table)) { + tableAncestors.add(image); + if (image.getType() == BackupType.FULL) { + break; + } + } + } + return tableAncestors; + } + + /* + * hbase:backup operations + */ + + /** + * Updates status (state) of a backup session in a persistent store + * @param context context + * @throws IOException exception + */ + public void updateBackupInfo(BackupInfo context) throws IOException { + systemTable.updateBackupInfo(context); + } + + /** + * Read the last backup start code (timestamp) of last successful backup. Will return null + * if there is no startcode stored in hbase:backup or the value is of length 0. These two + * cases indicate there is no successful backup completed so far. + * @return the timestamp of a last successful backup + * @throws IOException exception + */ + public String readBackupStartCode() throws IOException { + return systemTable.readBackupStartCode(backupContext.getTargetRootDir()); + } + + /** + * Write the start code (timestamp) to hbase:backup. If passed in null, then write 0 byte. + * @param startCode start code + * @throws IOException exception + */ + public void writeBackupStartCode(Long startCode) throws IOException { + systemTable.writeBackupStartCode(startCode, backupContext.getTargetRootDir()); + } + + /** + * Get the RS log information after the last log roll from hbase:backup. + * @return RS log info + * @throws IOException exception + */ + public HashMap readRegionServerLastLogRollResult() throws IOException { + return systemTable.readRegionServerLastLogRollResult(backupContext.getTargetRootDir()); + } + + /** + * Get all completed backup information (in desc order by time) + * @return history info of BackupCompleteData + * @throws IOException exception + */ + public ArrayList getBackupHistory() throws IOException { + return systemTable.getBackupHistory(); + } + + public ArrayList getBackupHistory(boolean completed) throws IOException { + return systemTable.getBackupHistory(completed); + } + /** + * Write the current timestamps for each regionserver to hbase:backup after a successful full or + * incremental backup. Each table may have a different set of log timestamps. The saved timestamp + * is of the last log file that was backed up already. + * @param tables tables + * @throws IOException exception + */ + public void writeRegionServerLogTimestamp(Set tables, + HashMap newTimestamps) throws IOException { + systemTable.writeRegionServerLogTimestamp(tables, newTimestamps, + backupContext.getTargetRootDir()); + } + + /** + * Read the timestamp for each region server log after the last successful backup. Each table has + * its own set of the timestamps. + * @return the timestamp for each region server. key: tableName value: + * RegionServer,PreviousTimeStamp + * @throws IOException exception + */ + public HashMap> readLogTimestampMap() throws IOException { + return systemTable.readLogTimestampMap(backupContext.getTargetRootDir()); + } + + /** + * Return the current tables covered by incremental backup. 
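The history accessors above read completed sessions back from hbase:backup. A sketch (assuming a running cluster with backup enabled and at least one completed backup session) that lists them through BackupManager:

    import java.util.ArrayList;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HConstants;
    import org.apache.hadoop.hbase.backup.BackupInfo;
    import org.apache.hadoop.hbase.backup.impl.BackupManager;

    public class BackupHistoryExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        conf.setBoolean(HConstants.BACKUP_ENABLE_KEY, true);
        // BackupManager opens a connection to the cluster and the hbase:backup table.
        try (BackupManager manager = new BackupManager(conf)) {
          ArrayList<BackupInfo> history = manager.getBackupHistory();
          for (BackupInfo info : history) {
            System.out.println(info.getBackupId() + " " + info.getType()
                + " -> " + info.getTargetRootDir());
          }
        }
      }
    }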
+ * @return set of tableNames + * @throws IOException exception + */ + public Set getIncrementalBackupTableSet() throws IOException { + return systemTable.getIncrementalBackupTableSet(backupContext.getTargetRootDir()); + } + + /** + * Adds set of tables to overall incremental backup table set + * @param tables tables + * @throws IOException exception + */ + public void addIncrementalBackupTableSet(Set tables) throws IOException { + systemTable.addIncrementalBackupTableSet(tables, backupContext.getTargetRootDir()); + } + + /** + * Saves list of WAL files after incremental backup operation. These files will be stored until + * TTL expiration and are used by Backup Log Cleaner plugin to determine which WAL files can be + * safely purged. + */ + public void recordWALFiles(List files) throws IOException { + systemTable.addWALFiles(files, + backupContext.getBackupId(), backupContext.getTargetRootDir()); + } + + /** + * Get WAL files iterator + * @return WAL files iterator from hbase:backup + * @throws IOException + */ + public Iterator getWALFilesFromBackupSystem() throws IOException { + return systemTable.getWALFilesIterator(backupContext.getTargetRootDir()); + } + + public Connection getConnection() { + return conn; + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManifest.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManifest.java new file mode 100644 index 0000000..57a93f3 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManifest.java @@ -0,0 +1,787 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hbase.backup.impl; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Map.Entry; +import java.util.TreeMap; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FSDataInputStream; +import org.apache.hadoop.fs.FSDataOutputStream; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.backup.BackupInfo; +import org.apache.hadoop.hbase.backup.BackupType; +import org.apache.hadoop.hbase.backup.util.BackupClientUtil; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; +import org.apache.hadoop.hbase.exceptions.DeserializationException; +import org.apache.hadoop.hbase.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.protobuf.generated.BackupProtos; +import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos; + +import com.google.protobuf.InvalidProtocolBufferException; + + +/** + * Backup manifest Contains all the meta data of a backup image. The manifest info will be bundled + * as manifest file together with data. So that each backup image will contain all the info needed + * for restore. + */ +@InterfaceAudience.Private +@InterfaceStability.Evolving +public class BackupManifest { + + private static final Log LOG = LogFactory.getLog(BackupManifest.class); + + // manifest file name + public static final String MANIFEST_FILE_NAME = ".backup.manifest"; + + // manifest file version, current is 1.0 + public static final String MANIFEST_VERSION = "1.0"; + + // backup image, the dependency graph is made up by series of backup images + + public static class BackupImage implements Comparable { + + private String backupId; + private BackupType type; + private String rootDir; + private List tableList; + private long startTs; + private long completeTs; + private ArrayList ancestors; + + public BackupImage() { + super(); + } + + public BackupImage(String backupId, BackupType type, String rootDir, + List tableList, long startTs, long completeTs) { + this.backupId = backupId; + this.type = type; + this.rootDir = rootDir; + this.tableList = tableList; + this.startTs = startTs; + this.completeTs = completeTs; + } + + static BackupImage fromProto(BackupProtos.BackupImage im) { + String backupId = im.getBackupId(); + String rootDir = im.getRootDir(); + long startTs = im.getStartTs(); + long completeTs = im.getCompleteTs(); + List tableListList = im.getTableListList(); + List tableList = new ArrayList(); + for(HBaseProtos.TableName tn : tableListList) { + tableList.add(ProtobufUtil.toTableName(tn)); + } + + List ancestorList = im.getAncestorsList(); + + BackupType type = + im.getBackupType() == BackupProtos.BackupType.FULL ? 
BackupType.FULL: + BackupType.INCREMENTAL; + + BackupImage image = new BackupImage(backupId, type, rootDir, tableList, startTs, completeTs); + for(BackupProtos.BackupImage img: ancestorList) { + image.addAncestor(fromProto(img)); + } + return image; + } + + BackupProtos.BackupImage toProto() { + BackupProtos.BackupImage.Builder builder = BackupProtos.BackupImage.newBuilder(); + builder.setBackupId(backupId); + builder.setCompleteTs(completeTs); + builder.setStartTs(startTs); + builder.setRootDir(rootDir); + if (type == BackupType.FULL) { + builder.setBackupType(BackupProtos.BackupType.FULL); + } else{ + builder.setBackupType(BackupProtos.BackupType.INCREMENTAL); + } + + for (TableName name: tableList) { + builder.addTableList(ProtobufUtil.toProtoTableName(name)); + } + + if (ancestors != null){ + for (BackupImage im: ancestors){ + builder.addAncestors(im.toProto()); + } + } + + return builder.build(); + } + + public String getBackupId() { + return backupId; + } + + public void setBackupId(String backupId) { + this.backupId = backupId; + } + + public BackupType getType() { + return type; + } + + public void setType(BackupType type) { + this.type = type; + } + + public String getRootDir() { + return rootDir; + } + + public void setRootDir(String rootDir) { + this.rootDir = rootDir; + } + + public List getTableNames() { + return tableList; + } + + public void setTableList(List tableList) { + this.tableList = tableList; + } + + public long getStartTs() { + return startTs; + } + + public void setStartTs(long startTs) { + this.startTs = startTs; + } + + public long getCompleteTs() { + return completeTs; + } + + public void setCompleteTs(long completeTs) { + this.completeTs = completeTs; + } + + public ArrayList getAncestors() { + if (this.ancestors == null) { + this.ancestors = new ArrayList(); + } + return this.ancestors; + } + + public void addAncestor(BackupImage backupImage) { + this.getAncestors().add(backupImage); + } + + public boolean hasAncestor(String token) { + for (BackupImage image : this.getAncestors()) { + if (image.getBackupId().equals(token)) { + return true; + } + } + return false; + } + + public boolean hasTable(TableName table) { + for (TableName t : tableList) { + if (t.equals(table)) { + return true; + } + } + return false; + } + + @Override + public int compareTo(BackupImage other) { + String thisBackupId = this.getBackupId(); + String otherBackupId = other.getBackupId(); + Long thisTS = Long.valueOf(thisBackupId.substring(thisBackupId.lastIndexOf("_") + 1)); + Long otherTS = Long.valueOf(otherBackupId.substring(otherBackupId.lastIndexOf("_") + 1)); + return thisTS.compareTo(otherTS); + } + + @Override + public boolean equals(Object obj) { + if (!(obj instanceof BackupImage)) return false; + BackupImage other = (BackupImage) obj; + return compareTo(other) == 0; + } + + @Override + public int hashCode() { + int hash = 31 * type.hashCode() + backupId != null ? 
backupId.hashCode() : 0; + if (rootDir != null) { + hash = 31 * hash + rootDir.hashCode(); + } + hash = 31 * hash + (int)(startTs ^ (startTs >>> 32)); + hash = 31 * hash + (int)(completeTs ^ (completeTs >>> 32)); + if (tableList != null) { + for (TableName tn : tableList) { + hash = 31 * hash + tn.hashCode(); + } + } + if (ancestors != null) { + for (BackupImage bi : ancestors) { + hash = 31 * hash + bi.hashCode(); + } + } + return hash; + } + } + + // manifest version + private String version = MANIFEST_VERSION; + + // hadoop hbase configuration + protected Configuration config = null; + + // backup root directory + private String rootDir = null; + + // backup image directory + private String tableBackupDir = null; + + // backup log directory if this is an incremental backup + private String logBackupDir = null; + + // backup token + private String backupId; + + // backup type, full or incremental + private BackupType type; + + // the table list for the backup + private ArrayList tableList; + + // actual start timestamp of the backup process + private long startTs; + + // actual complete timestamp of the backup process + private long completeTs; + + // the region server timestamp for tables: + // > + private Map> incrTimeRanges; + + // dependency of this backup, including all the dependent images to do PIT recovery + private Map dependency; + + /** + * Construct manifest for a ongoing backup. + * @param backupCtx The ongoing backup context + */ + public BackupManifest(BackupInfo backupCtx) { + this.backupId = backupCtx.getBackupId(); + this.type = backupCtx.getType(); + this.rootDir = backupCtx.getTargetRootDir(); + if (this.type == BackupType.INCREMENTAL) { + this.logBackupDir = backupCtx.getHLogTargetDir(); + } + this.startTs = backupCtx.getStartTs(); + this.completeTs = backupCtx.getEndTs(); + this.loadTableList(backupCtx.getTableNames()); + } + + + /** + * Construct a table level manifest for a backup of the named table. + * @param backupCtx The ongoing backup context + */ + public BackupManifest(BackupInfo backupCtx, TableName table) { + this.backupId = backupCtx.getBackupId(); + this.type = backupCtx.getType(); + this.rootDir = backupCtx.getTargetRootDir(); + this.tableBackupDir = backupCtx.getBackupStatus(table).getTargetDir(); + if (this.type == BackupType.INCREMENTAL) { + this.logBackupDir = backupCtx.getHLogTargetDir(); + } + this.startTs = backupCtx.getStartTs(); + this.completeTs = backupCtx.getEndTs(); + List tables = new ArrayList(); + tables.add(table); + this.loadTableList(tables); + } + + /** + * Construct manifest from a backup directory. + * @param conf configuration + * @param backupPath backup path + * @throws BackupException exception + */ + + public BackupManifest(Configuration conf, Path backupPath) throws BackupException { + if (LOG.isDebugEnabled()) { + LOG.debug("Loading manifest from: " + backupPath.toString()); + } + // The input backupDir may not exactly be the backup table dir. + // It could be the backup log dir where there is also a manifest file stored. + // This variable's purpose is to keep the correct and original location so + // that we can store/persist it. 
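+ // Note: rootDir is re-derived below once the manifest file has been located,
+ // depending on whether backupPath points at a table backup dir or at the WAL backup dir.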
+ this.tableBackupDir = backupPath.toString(); + this.config = conf; + try { + + FileSystem fs = backupPath.getFileSystem(conf); + FileStatus[] subFiles = BackupClientUtil.listStatus(fs, backupPath, null); + if (subFiles == null) { + String errorMsg = backupPath.toString() + " does not exist"; + LOG.error(errorMsg); + throw new IOException(errorMsg); + } + for (FileStatus subFile : subFiles) { + if (subFile.getPath().getName().equals(MANIFEST_FILE_NAME)) { + + // load and set manifest field from file content + FSDataInputStream in = fs.open(subFile.getPath()); + long len = subFile.getLen(); + byte[] pbBytes = new byte[(int) len]; + in.readFully(pbBytes); + BackupProtos.BackupManifest proto = null; + try{ + proto = parseFrom(pbBytes); + } catch(Exception e){ + throw new BackupException(e); + } + this.version = proto.getVersion(); + this.backupId = proto.getBackupId(); + this.type = BackupType.valueOf(proto.getType().name()); + // Here the parameter backupDir is where the manifest file is. + // There should always be a manifest file under: + // backupRootDir/namespace/table/backupId/.backup.manifest + this.rootDir = backupPath.getParent().getParent().getParent().toString(); + + Path p = backupPath.getParent(); + if (p.getName().equals(HConstants.HREGION_LOGDIR_NAME)) { + this.rootDir = p.getParent().toString(); + } else { + this.rootDir = p.getParent().getParent().toString(); + } + + loadTableList(proto); + this.startTs = proto.getStartTs(); + this.completeTs = proto.getCompleteTs(); + loadIncrementalTimestampMap(proto); + loadDependency(proto); + //TODO: merge will be implemented by future jira + LOG.debug("Loaded manifest instance from manifest file: " + + BackupClientUtil.getPath(subFile.getPath())); + return; + } + } + String errorMsg = "No manifest file found in: " + backupPath.toString(); + throw new IOException(errorMsg); + + } catch (IOException e) { + throw new BackupException(e.getMessage()); + } + } + + private void loadIncrementalTimestampMap(BackupProtos.BackupManifest proto) { + List list = proto.getTstMapList(); + if(list == null || list.size() == 0) return; + this.incrTimeRanges = new HashMap>(); + for(BackupProtos.TableServerTimestamp tst: list){ + TableName tn = ProtobufUtil.toTableName(tst.getTable()); + HashMap map = this.incrTimeRanges.get(tn); + if(map == null){ + map = new HashMap(); + this.incrTimeRanges.put(tn, map); + } + List listSt = tst.getServerTimestampList(); + for(BackupProtos.ServerTimestamp stm: listSt) { + map.put(stm.getServer(), stm.getTimestamp()); + } + } + } + + private void loadDependency(BackupProtos.BackupManifest proto) { + if(LOG.isDebugEnabled()) { + LOG.debug("load dependency for: "+proto.getBackupId()); + } + + dependency = new HashMap(); + List list = proto.getDependentBackupImageList(); + for (BackupProtos.BackupImage im : list) { + BackupImage bim = BackupImage.fromProto(im); + if(im.getBackupId() != null){ + dependency.put(im.getBackupId(), bim); + } else{ + LOG.warn("Load dependency for backup manifest: "+ backupId+ + ". Null backup id in dependent image"); + } + } + } + + private void loadTableList(BackupProtos.BackupManifest proto) { + this.tableList = new ArrayList(); + List list = proto.getTableListList(); + for (HBaseProtos.TableName name: list) { + this.tableList.add(ProtobufUtil.toTableName(name)); + } + } + + public BackupType getType() { + return type; + } + + public void setType(BackupType type) { + this.type = type; + } + + /** + * Loads table list. 
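+ * Any previously loaded table list is cleared and replaced with the given one.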
+ * @param tableList Table list + */ + private void loadTableList(List tableList) { + + this.tableList = this.getTableList(); + if (this.tableList.size() > 0) { + this.tableList.clear(); + } + for (int i = 0; i < tableList.size(); i++) { + this.tableList.add(tableList.get(i)); + } + + LOG.debug(tableList.size() + " tables exist in table set."); + } + + /** + * Get the table set of this image. + * @return The table set list + */ + public ArrayList getTableList() { + if (this.tableList == null) { + this.tableList = new ArrayList(); + } + return this.tableList; + } + + /** + * Persist the manifest file. + * @throws IOException IOException when storing the manifest file. + */ + + public void store(Configuration conf) throws BackupException { + byte[] data = toByteArray(); + + // write the file, overwrite if already exist + Path manifestFilePath = + new Path(new Path((this.tableBackupDir != null ? this.tableBackupDir : this.logBackupDir)) + ,MANIFEST_FILE_NAME); + try { + FSDataOutputStream out = + manifestFilePath.getFileSystem(conf).create(manifestFilePath, true); + out.write(data); + out.close(); + } catch (IOException e) { + throw new BackupException(e.getMessage()); + } + + LOG.info("Manifest file stored to " + manifestFilePath); + } + + /** + * Protobuf serialization + * @return The filter serialized using pb + */ + public byte[] toByteArray() { + BackupProtos.BackupManifest.Builder builder = BackupProtos.BackupManifest.newBuilder(); + builder.setVersion(this.version); + builder.setBackupId(this.backupId); + builder.setType(BackupProtos.BackupType.valueOf(this.type.name())); + setTableList(builder); + builder.setStartTs(this.startTs); + builder.setCompleteTs(this.completeTs); + setIncrementalTimestampMap(builder); + setDependencyMap(builder); + return builder.build().toByteArray(); + } + + private void setIncrementalTimestampMap(BackupProtos.BackupManifest.Builder builder) { + if (this.incrTimeRanges == null) { + return; + } + for (Entry> entry: this.incrTimeRanges.entrySet()) { + TableName key = entry.getKey(); + HashMap value = entry.getValue(); + BackupProtos.TableServerTimestamp.Builder tstBuilder = + BackupProtos.TableServerTimestamp.newBuilder(); + tstBuilder.setTable(ProtobufUtil.toProtoTableName(key)); + + for (Entry entry2 : value.entrySet()) { + String s = entry2.getKey(); + BackupProtos.ServerTimestamp.Builder stBuilder = BackupProtos.ServerTimestamp.newBuilder(); + stBuilder.setServer(s); + stBuilder.setTimestamp(entry2.getValue()); + tstBuilder.addServerTimestamp(stBuilder.build()); + } + builder.addTstMap(tstBuilder.build()); + } + } + + private void setDependencyMap(BackupProtos.BackupManifest.Builder builder) { + for (BackupImage image: getDependency().values()) { + builder.addDependentBackupImage(image.toProto()); + } + } + + private void setTableList(BackupProtos.BackupManifest.Builder builder) { + for(TableName name: tableList){ + builder.addTableList(ProtobufUtil.toProtoTableName(name)); + } + } + + /** + * Parse protobuf from byte array + * @param pbBytes A pb serialized BackupManifest instance + * @return An instance of made from bytes + * @throws DeserializationException + */ + private static BackupProtos.BackupManifest parseFrom(final byte[] pbBytes) + throws DeserializationException { + BackupProtos.BackupManifest proto; + try { + proto = BackupProtos.BackupManifest.parseFrom(pbBytes); + } catch (InvalidProtocolBufferException e) { + throw new DeserializationException(e); + } + return proto; + } + + /** + * Get manifest file version + * @return version + */ + 
public String getVersion() { + return version; + } + + /** + * Get this backup image. + * @return the backup image. + */ + public BackupImage getBackupImage() { + return this.getDependency().get(this.backupId); + } + + /** + * Add dependent backup image for this backup. + * @param image The direct dependent backup image + */ + public void addDependentImage(BackupImage image) { + this.getDependency().get(this.backupId).addAncestor(image); + this.setDependencyMap(this.getDependency(), image); + } + + + + /** + * Get all dependent backup images. The image of this backup is also contained. + * @return The dependent backup images map + */ + public Map getDependency() { + if (this.dependency == null) { + this.dependency = new HashMap(); + LOG.debug(this.rootDir + " " + this.backupId + " " + this.type); + this.dependency.put(this.backupId, + new BackupImage(this.backupId, this.type, this.rootDir, tableList, this.startTs, + this.completeTs)); + } + return this.dependency; + } + + /** + * Set the incremental timestamp map directly. + * @param incrTimestampMap timestamp map + */ + public void setIncrTimestampMap(HashMap> incrTimestampMap) { + this.incrTimeRanges = incrTimestampMap; + } + + + public Map> getIncrTimestampMap() { + if (this.incrTimeRanges == null) { + this.incrTimeRanges = new HashMap>(); + } + return this.incrTimeRanges; + } + + + /** + * Get the image list of this backup for restore in time order. + * @param reverse If true, then output in reverse order, otherwise in time order from old to new + * @return the backup image list for restore in time order + */ + public ArrayList getRestoreDependentList(boolean reverse) { + TreeMap restoreImages = new TreeMap(); + for (BackupImage image : this.getDependency().values()) { + restoreImages.put(Long.valueOf(image.startTs), image); + } + return new ArrayList(reverse ? (restoreImages.descendingMap().values()) + : (restoreImages.values())); + } + + /** + * Get the dependent image list for a specific table of this backup in time order from old to new + * if want to restore to this backup image level. + * @param table table + * @return the backup image list for a table in time order + */ + public ArrayList getDependentListByTable(TableName table) { + ArrayList tableImageList = new ArrayList(); + ArrayList imageList = getRestoreDependentList(true); + for (BackupImage image : imageList) { + if (image.hasTable(table)) { + tableImageList.add(image); + if (image.getType() == BackupType.FULL) { + break; + } + } + } + Collections.reverse(tableImageList); + return tableImageList; + } + + /** + * Get the full dependent image list in the whole dependency scope for a specific table of this + * backup in time order from old to new. + * @param table table + * @return the full backup image list for a table in time order in the whole scope of the + * dependency of this image + */ + public ArrayList getAllDependentListByTable(TableName table) { + ArrayList tableImageList = new ArrayList(); + ArrayList imageList = getRestoreDependentList(false); + for (BackupImage image : imageList) { + if (image.hasTable(table)) { + tableImageList.add(image); + } + } + return tableImageList; + } + + + /** + * Recursively set the dependency map of the backup images. 
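+ * The image and all of its ancestors are added to the map, keyed by backup id.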
+ * @param map The dependency map + * @param image The backup image + */ + private void setDependencyMap(Map map, BackupImage image) { + if (image == null) { + return; + } else { + map.put(image.getBackupId(), image); + for (BackupImage img : image.getAncestors()) { + setDependencyMap(map, img); + } + } + } + + /** + * Check whether backup image1 could cover backup image2 or not. + * @param image1 backup image 1 + * @param image2 backup image 2 + * @return true if image1 can cover image2, otherwise false + */ + public static boolean canCoverImage(BackupImage image1, BackupImage image2) { + // image1 can cover image2 only when the following conditions are satisfied: + // - image1 must not be an incremental image; + // - image1 must be taken after image2 has been taken; + // - table set of image1 must cover the table set of image2. + if (image1.getType() == BackupType.INCREMENTAL) { + return false; + } + if (image1.getStartTs() < image2.getStartTs()) { + return false; + } + List image1TableList = image1.getTableNames(); + List image2TableList = image2.getTableNames(); + boolean found = false; + for (int i = 0; i < image2TableList.size(); i++) { + found = false; + for (int j = 0; j < image1TableList.size(); j++) { + if (image2TableList.get(i).equals(image1TableList.get(j))) { + found = true; + break; + } + } + if (!found) { + return false; + } + } + + LOG.debug("Backup image " + image1.getBackupId() + " can cover " + image2.getBackupId()); + return true; + } + + /** + * Check whether backup image set could cover a backup image or not. + * @param fullImages The backup image set + * @param image The target backup image + * @return true if fullImages can cover image, otherwise false + */ + public static boolean canCoverImage(ArrayList fullImages, BackupImage image) { + // fullImages can cover image only when the following conditions are satisfied: + // - each image of fullImages must not be an incremental image; + // - each image of fullImages must be taken after image has been taken; + // - sum table set of fullImages must cover the table set of image. + for (BackupImage image1 : fullImages) { + if (image1.getType() == BackupType.INCREMENTAL) { + return false; + } + if (image1.getStartTs() < image.getStartTs()) { + return false; + } + } + + ArrayList image1TableList = new ArrayList(); + for (BackupImage image1 : fullImages) { + List tableList = image1.getTableNames(); + for (TableName table : tableList) { + image1TableList.add(table.getNameAsString()); + } + } + ArrayList image2TableList = new ArrayList(); + List tableList = image.getTableNames(); + for (TableName table : tableList) { + image2TableList.add(table.getNameAsString()); + } + + for (int i = 0; i < image2TableList.size(); i++) { + if (image1TableList.contains(image2TableList.get(i)) == false) { + return false; + } + } + + LOG.debug("Full image set can cover image " + image.getBackupId()); + return true; + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSnapshotCopy.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSnapshotCopy.java new file mode 100644 index 0000000..731ccd3 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSnapshotCopy.java @@ -0,0 +1,36 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.backup.impl; + +import org.apache.hadoop.hbase.snapshot.ExportSnapshot; + +/* this class will be extended in future jira to support progress report */ +public class BackupSnapshotCopy extends ExportSnapshot { + private String table; + + public BackupSnapshotCopy(String table) { + super(); + this.table = table; + } + + public String getTable() { + return this.table; + } + +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalBackupManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalBackupManager.java new file mode 100644 index 0000000..4dd2768 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalBackupManager.java @@ -0,0 +1,355 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hbase.backup.impl; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.Iterator; +import java.util.List; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.PathFilter; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.backup.BackupInfo; +import org.apache.hadoop.hbase.backup.master.LogRollMasterProcedureManager; +import org.apache.hadoop.hbase.backup.util.BackupClientUtil; +import org.apache.hadoop.hbase.backup.util.BackupServerUtil; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; +import org.apache.hadoop.hbase.client.Admin; +import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.util.FSUtils; +import org.apache.hadoop.hbase.wal.AbstractFSWALProvider; +import org.apache.hadoop.hbase.backup.impl.BackupSystemTable.WALItem; + +/** + * After a full backup was created, the incremental backup will only store the changes made + * after the last full or incremental backup. + * + * Creating the backup copies the logfiles in .logs and .oldlogs since the last backup timestamp. + */ +@InterfaceAudience.Public +@InterfaceStability.Evolving +public class IncrementalBackupManager { + public static final Log LOG = LogFactory.getLog(IncrementalBackupManager.class); + + // parent manager + private final BackupManager backupManager; + private final Configuration conf; + private final Connection conn; + + public IncrementalBackupManager(BackupManager bm) { + this.backupManager = bm; + this.conf = bm.getConf(); + this.conn = bm.getConnection(); + } + + /** + * Obtain the list of logs that need to be copied out for this incremental backup. The list is set + * in BackupContext. + * @param backupContext backup context + * @return The new HashMap of RS log timestamps after the log roll for this incremental backup. + * @throws IOException exception + */ + public HashMap getIncrBackupLogFileList(BackupInfo backupContext) + throws IOException { + List logList; + HashMap newTimestamps; + HashMap previousTimestampMins; + + String savedStartCode = backupManager.readBackupStartCode(); + + // key: tableName + // value: + HashMap> previousTimestampMap = + backupManager.readLogTimestampMap(); + + previousTimestampMins = BackupServerUtil.getRSLogTimestampMins(previousTimestampMap); + + if (LOG.isDebugEnabled()) { + LOG.debug("StartCode " + savedStartCode + "for backupID " + backupContext.getBackupId()); + } + // get all new log files from .logs and .oldlogs after last TS and before new timestamp + if (savedStartCode == null || + previousTimestampMins == null || + previousTimestampMins.isEmpty()) { + throw new IOException("Cannot read any previous back up timestamps from hbase:backup. 
" + + "In order to create an incremental backup, at least one full backup is needed."); + } + + try (Admin admin = conn.getAdmin()) { + LOG.info("Execute roll log procedure for incremental backup ..."); + HashMap props = new HashMap(); + props.put("backupRoot", backupContext.getTargetRootDir()); + admin.execProcedure(LogRollMasterProcedureManager.ROLLLOG_PROCEDURE_SIGNATURE, + LogRollMasterProcedureManager.ROLLLOG_PROCEDURE_NAME, props); + } + + newTimestamps = backupManager.readRegionServerLastLogRollResult(); + + logList = getLogFilesForNewBackup(previousTimestampMins, newTimestamps, conf, savedStartCode); + List logFromSystemTable = + getLogFilesFromBackupSystem(previousTimestampMins, + newTimestamps, backupManager.getBackupContext().getTargetRootDir()); + addLogsFromBackupSystemToContext(logFromSystemTable); + + logList = excludeAlreadyBackedUpWALs(logList, logFromSystemTable); + backupContext.setIncrBackupFileList(logList); + + return newTimestamps; + } + + + private List excludeAlreadyBackedUpWALs(List logList, + List logFromSystemTable) { + + List backupedWALList = toWALList(logFromSystemTable); + logList.removeAll(backupedWALList); + return logList; + } + + private List toWALList(List logFromSystemTable) { + + List list = new ArrayList(logFromSystemTable.size()); + for(WALItem item : logFromSystemTable){ + list.add(item.getWalFile()); + } + return list; + } + + private void addLogsFromBackupSystemToContext(List logFromSystemTable) { + List walFiles = new ArrayList(); + for(WALItem item : logFromSystemTable){ + Path p = new Path(item.getWalFile()); + String walFileName = p.getName(); + String backupId = item.getBackupId(); + String relWALPath = backupId + Path.SEPARATOR+walFileName; + walFiles.add(relWALPath); + } + } + + + /** + * For each region server: get all log files newer than the last timestamps, + * but not newer than the newest timestamps. FROM hbase:backup table + * @param olderTimestamps - the timestamp for each region server of the last backup. + * @param newestTimestamps - the timestamp for each region server that the backup should lead to. + * @return list of log files which needs to be added to this backup + * @throws IOException + */ + private List getLogFilesFromBackupSystem(HashMap olderTimestamps, + HashMap newestTimestamps, String backupRoot) throws IOException { + List logFiles = new ArrayList(); + Iterator it = backupManager.getWALFilesFromBackupSystem(); + while (it.hasNext()) { + WALItem item = it.next(); + String rootDir = item.getBackupRoot(); + if (!rootDir.equals(backupRoot)) { + continue; + } + String walFileName = item.getWalFile(); + String server = BackupServerUtil.parseHostNameFromLogFile(new Path(walFileName)); + if (server == null) { + continue; + } + Long tss = getTimestamp(walFileName); + Long oldTss = olderTimestamps.get(server); + Long newTss = newestTimestamps.get(server); + if (oldTss == null) { + logFiles.add(item); + continue; + } + if (newTss == null) { + newTss = Long.MAX_VALUE; + } + if (tss > oldTss && tss < newTss) { + logFiles.add(item); + } + } + return logFiles; + } + + private Long getTimestamp(String walFileName) { + int index = walFileName.lastIndexOf(BackupServerUtil.LOGNAME_SEPARATOR); + return Long.parseLong(walFileName.substring(index+1)); + } + + /** + * For each region server: get all log files newer than the last timestamps but not newer than the + * newest timestamps. + * @param olderTimestamps the timestamp for each region server of the last backup. 
+ * @param newestTimestamps the timestamp for each region server that the backup should lead to. + * @param conf the Hadoop and Hbase configuration + * @param savedStartCode the startcode (timestamp) of last successful backup. + * @return a list of log files to be backed up + * @throws IOException exception + */ + private List getLogFilesForNewBackup(HashMap olderTimestamps, + HashMap newestTimestamps, Configuration conf, String savedStartCode) + throws IOException { + LOG.debug("In getLogFilesForNewBackup()\n" + "olderTimestamps: " + olderTimestamps + + "\n newestTimestamps: " + newestTimestamps); + Path rootdir = FSUtils.getRootDir(conf); + Path logDir = new Path(rootdir, HConstants.HREGION_LOGDIR_NAME); + Path oldLogDir = new Path(rootdir, HConstants.HREGION_OLDLOGDIR_NAME); + FileSystem fs = rootdir.getFileSystem(conf); + NewestLogFilter pathFilter = new NewestLogFilter(); + + List resultLogFiles = new ArrayList(); + List newestLogs = new ArrayList(); + + /* + * The old region servers and timestamps info we kept in hbase:backup may be out of sync if new + * region server is added or existing one lost. We'll deal with it here when processing the + * logs. If data in hbase:backup has more hosts, just ignore it. If the .logs directory includes + * more hosts, the additional hosts will not have old timestamps to compare with. We'll just use + * all the logs in that directory. We always write up-to-date region server and timestamp info + * to hbase:backup at the end of successful backup. + */ + + FileStatus[] rss; + Path p; + String host; + Long oldTimeStamp; + String currentLogFile; + Long currentLogTS; + + // Get the files in .logs. + rss = fs.listStatus(logDir); + for (FileStatus rs : rss) { + p = rs.getPath(); + host = BackupServerUtil.parseHostNameFromLogFile(p); + if (host == null) { + continue; + } + FileStatus[] logs; + oldTimeStamp = olderTimestamps.get(host); + // It is possible that there is no old timestamp in hbase:backup for this host if + // this region server is newly added after our last backup. + if (oldTimeStamp == null) { + logs = fs.listStatus(p); + } else { + pathFilter.setLastBackupTS(oldTimeStamp); + logs = fs.listStatus(p, pathFilter); + } + for (FileStatus log : logs) { + LOG.debug("currentLogFile: " + log.getPath().toString()); + if (AbstractFSWALProvider.isMetaFile(log.getPath())) { + if (LOG.isDebugEnabled()) { + LOG.debug("Skip hbase:meta log file: " + log.getPath().getName()); + } + continue; + } + currentLogFile = log.getPath().toString(); + resultLogFiles.add(currentLogFile); + currentLogTS = BackupClientUtil.getCreationTime(log.getPath()); + // newestTimestamps is up-to-date with the current list of hosts + // so newestTimestamps.get(host) will not be null. + if (currentLogTS > newestTimestamps.get(host)) { + newestLogs.add(currentLogFile); + } + } + } + + // Include the .oldlogs files too. + FileStatus[] oldlogs = fs.listStatus(oldLogDir); + for (FileStatus oldlog : oldlogs) { + p = oldlog.getPath(); + currentLogFile = p.toString(); + if (AbstractFSWALProvider.isMetaFile(p)) { + if (LOG.isDebugEnabled()) { + LOG.debug("Skip .meta log file: " + currentLogFile); + } + continue; + } + host = BackupClientUtil.parseHostFromOldLog(p); + if (host == null) { + continue; + } + currentLogTS = BackupClientUtil.getCreationTime(p); + oldTimeStamp = olderTimestamps.get(host); + /* + * It is possible that there is no old timestamp in hbase:backup for this host. At the time of + * our last backup operation, this rs did not exist. The reason can be one of the two: 1. 
The + * rs already left/crashed. Its logs were moved to .oldlogs. 2. The rs was added after our + * last backup. + */ + if (oldTimeStamp == null) { + if (currentLogTS < Long.parseLong(savedStartCode)) { + // This log file is really old, its region server was before our last backup. + continue; + } else { + resultLogFiles.add(currentLogFile); + } + } else if (currentLogTS > oldTimeStamp) { + resultLogFiles.add(currentLogFile); + } + + // It is possible that a host in .oldlogs is an obsolete region server + // so newestTimestamps.get(host) here can be null. + // Even if these logs belong to a obsolete region server, we still need + // to include they to avoid loss of edits for backup. + Long newTimestamp = newestTimestamps.get(host); + if (newTimestamp != null && currentLogTS > newTimestamp) { + newestLogs.add(currentLogFile); + } + } + // remove newest log per host because they are still in use + resultLogFiles.removeAll(newestLogs); + return resultLogFiles; + } + + static class NewestLogFilter implements PathFilter { + private Long lastBackupTS = 0L; + + public NewestLogFilter() { + } + + protected void setLastBackupTS(Long ts) { + this.lastBackupTS = ts; + } + + @Override + public boolean accept(Path path) { + // skip meta table log -- ts.meta file + if (AbstractFSWALProvider.isMetaFile(path)) { + if (LOG.isDebugEnabled()) { + LOG.debug("Skip .meta log file: " + path.getName()); + } + return false; + } + Long timestamp = null; + try { + timestamp = BackupClientUtil.getCreationTime(path); + return timestamp > lastBackupTS; + } catch (Exception e) { + LOG.warn("Cannot read timestamp of log file " + path); + return false; + } + } + } + +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/RestoreClientImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/RestoreClientImpl.java new file mode 100644 index 0000000..a04fc08 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/RestoreClientImpl.java @@ -0,0 +1,295 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hbase.backup.impl; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashMap; +import java.util.Iterator; +import java.util.List; +import java.util.Map.Entry; +import java.util.TreeSet; + +import org.apache.commons.lang.StringUtils; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.backup.BackupType; +import org.apache.hadoop.hbase.backup.HBackupFileSystem; +import org.apache.hadoop.hbase.backup.RestoreClient; +import org.apache.hadoop.hbase.backup.impl.BackupManifest.BackupImage; +import org.apache.hadoop.hbase.backup.util.BackupClientUtil; +import org.apache.hadoop.hbase.backup.util.RestoreServerUtil; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; +import org.apache.hadoop.hbase.client.Admin; +import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.client.ConnectionFactory; + +/** + * The main class which interprets the given arguments and trigger restore operation. + */ +@InterfaceAudience.Public +@InterfaceStability.Evolving +public final class RestoreClientImpl implements RestoreClient { + + private static final Log LOG = LogFactory.getLog(RestoreClientImpl.class); + private Configuration conf; + + public RestoreClientImpl() { + } + + @Override + public void setConf(Configuration conf) { + this.conf = conf; + } + + + + /** + * Restore operation. Stage 1: validate backupManifest, and check target tables + * @param backupRootDir The root dir for backup image + * @param backupId The backup id for image to be restored + * @param check True if only do dependency check + * @param sTableArray The array of tables to be restored + * @param tTableArray The array of mapping tables to restore to + * @param isOverwrite True then do restore overwrite if target table exists, otherwise fail the + * request if target table exists + * @throws IOException if any failure during restore + */ + @Override + public void restore(String backupRootDir, + String backupId, boolean check, TableName[] sTableArray, + TableName[] tTableArray, boolean isOverwrite) throws IOException { + + HashMap backupManifestMap = new HashMap<>(); + // check and load backup image manifest for the tables + Path rootPath = new Path(backupRootDir); + HBackupFileSystem.checkImageManifestExist(backupManifestMap, sTableArray, conf, rootPath, + backupId); + try { + // Check and validate the backup image and its dependencies + if (check) { + if (validate(backupManifestMap)) { + LOG.info("Checking backup images: ok"); + } else { + String errMsg = "Some dependencies are missing for restore"; + LOG.error(errMsg); + throw new IOException(errMsg); + } + } + + if (tTableArray == null) { + tTableArray = sTableArray; + } + // check the target tables + checkTargetTables(tTableArray, isOverwrite); + // start restore process + restoreStage(backupManifestMap, sTableArray, tTableArray, isOverwrite); + LOG.info("Restore for " + Arrays.asList(sTableArray) + " are successful!"); + } catch (IOException e) { + LOG.error("ERROR: restore failed with error: " + e.getMessage()); + throw e; + } + + } + + private boolean validate(HashMap backupManifestMap) + throws IOException { + boolean isValid = true; + + for (Entry manifestEntry : backupManifestMap.entrySet()) { + TableName 
table = manifestEntry.getKey(); + TreeSet imageSet = new TreeSet(); + + ArrayList depList = manifestEntry.getValue().getDependentListByTable(table); + if (depList != null && !depList.isEmpty()) { + imageSet.addAll(depList); + } + + LOG.info("Dependent image(s) from old to new:"); + for (BackupImage image : imageSet) { + String imageDir = + HBackupFileSystem.getTableBackupDir(image.getRootDir(), image.getBackupId(), table); + if (!BackupClientUtil.checkPathExist(imageDir, conf)) { + LOG.error("ERROR: backup image does not exist: " + imageDir); + isValid = false; + break; + } + // TODO More validation? + LOG.info("Backup image: " + image.getBackupId() + " for '" + table + "' is available"); + } + } + + return isValid; + } + + /** + * Validate target Tables + * @param tTableArray: target tables + * @param isOverwrite overwrite existing table + * @throws IOException exception + */ + private void checkTargetTables(TableName[] tTableArray, boolean isOverwrite) + throws IOException { + ArrayList existTableList = new ArrayList<>(); + ArrayList disabledTableList = new ArrayList<>(); + + // check if the tables already exist + try(Connection conn = ConnectionFactory.createConnection(conf); + Admin admin = conn.getAdmin()) { + for (TableName tableName : tTableArray) { + if (admin.tableExists(tableName)) { + existTableList.add(tableName); + if (admin.isTableDisabled(tableName)) { + disabledTableList.add(tableName); + } + } else { + LOG.info("HBase table " + tableName + + " does not exist. It will be created during restore process"); + } + } + } + + if (existTableList.size() > 0) { + if (!isOverwrite) { + LOG.error("Existing table found in the restore target, please add \"-overwrite\" " + + "option in the command if you mean to restore to these existing tables"); + LOG.info("Existing table list in restore target: " + existTableList); + throw new IOException("Existing table found in target while no \"-overwrite\" " + + "option found"); + } else { + if (disabledTableList.size() > 0) { + LOG.error("Found offline table in the restore target, " + + "please enable them before restore with \"-overwrite\" option"); + LOG.info("Offline table list in restore target: " + disabledTableList); + throw new IOException( + "Found offline table in the target when restore with \"-overwrite\" option"); + } + } + } + } + + /** + * Restore operation. Stage 2: resolved Backup Image dependency + * @param backupManifestMap : tableName, Manifest + * @param sTableArray The array of tables to be restored + * @param tTableArray The array of mapping tables to restore to + * @return set of BackupImages restored + * @throws IOException exception + */ + private void restoreStage(HashMap backupManifestMap, + TableName[] sTableArray, TableName[] tTableArray, boolean isOverwrite) throws IOException { + TreeSet restoreImageSet = new TreeSet(); + boolean truncateIfExists = isOverwrite; + try { + for (int i = 0; i < sTableArray.length; i++) { + TableName table = sTableArray[i]; + BackupManifest manifest = backupManifestMap.get(table); + // Get the image list of this backup for restore in time order from old + // to new. + List list = new ArrayList(); + list.add(manifest.getBackupImage()); + List depList = manifest.getDependentListByTable(table); + list.addAll(depList); + TreeSet restoreList = new TreeSet(list); + LOG.debug("need to clear merged Image. 
to be implemented in future jira"); + restoreImages(restoreList.iterator(), table, tTableArray[i], truncateIfExists); + restoreImageSet.addAll(restoreList); + + if (restoreImageSet != null && !restoreImageSet.isEmpty()) { + LOG.info("Restore includes the following image(s):"); + for (BackupImage image : restoreImageSet) { + LOG.info("Backup: " + + image.getBackupId() + + " " + + HBackupFileSystem.getTableBackupDir(image.getRootDir(), image.getBackupId(), + table)); + } + } + } + } catch (Exception e) { + LOG.error("Failed", e); + throw new IOException(e); + } + LOG.debug("restoreStage finished"); + + } + + /** + * Restore operation handle each backupImage in iterator + * @param it: backupImage iterator - ascending + * @param sTable: table to be restored + * @param tTable: table to be restored to + * @throws IOException exception + */ + private void restoreImages(Iterator it, TableName sTable, + TableName tTable, boolean truncateIfExists) + throws IOException { + + // First image MUST be image of a FULL backup + BackupImage image = it.next(); + + String rootDir = image.getRootDir(); + String backupId = image.getBackupId(); + Path backupRoot = new Path(rootDir); + + // We need hFS only for full restore (see the code) + RestoreServerUtil restoreTool = new RestoreServerUtil(conf, backupRoot, backupId); + BackupManifest manifest = HBackupFileSystem.getManifest(sTable, conf, backupRoot, backupId); + + Path tableBackupPath = HBackupFileSystem.getTableBackupPath(sTable, backupRoot, backupId); + + // TODO: convert feature will be provided in a future JIRA + boolean converted = false; + + if (manifest.getType() == BackupType.FULL || converted) { + LOG.info("Restoring '" + sTable + "' to '" + tTable + "' from " + + (converted ? "converted" : "full") + " backup image " + tableBackupPath.toString()); + restoreTool.fullRestoreTable(tableBackupPath, sTable, tTable, + converted, truncateIfExists); + + } else { // incremental Backup + throw new IOException("Unexpected backup type " + image.getType()); + } + + // The rest one are incremental + if (it.hasNext()) { + List logDirList = new ArrayList(); + while (it.hasNext()) { + BackupImage im = it.next(); + String logBackupDir = HBackupFileSystem.getLogBackupDir(im.getRootDir(), im.getBackupId()); + logDirList.add(logBackupDir); + } + String logDirs = StringUtils.join(logDirList, ","); + LOG.info("Restoring '" + sTable + "' to '" + tTable + + "' from log dirs: " + logDirs); + String[] sarr = new String[logDirList.size()]; + logDirList.toArray(sarr); + Path[] paths = org.apache.hadoop.util.StringUtils.stringToPath(sarr); + restoreTool.incrementalRestoreTable(paths, new TableName[] { sTable }, + new TableName[] { tTable }); + } + LOG.info(sTable + " has been successfully restored to " + tTable); + } + +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceBackupCopyService.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceBackupCopyService.java new file mode 100644 index 0000000..9b11d98 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceBackupCopyService.java @@ -0,0 +1,344 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.backup.mapreduce; + +import java.io.IOException; +import java.lang.reflect.Field; +import java.lang.reflect.Method; +import java.math.BigDecimal; +import java.util.Arrays; +import java.util.List; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.backup.BackupCopyService; +import org.apache.hadoop.hbase.backup.BackupInfo; +import org.apache.hadoop.hbase.backup.impl.BackupManager; +import org.apache.hadoop.hbase.backup.util.BackupServerUtil; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; +import org.apache.hadoop.hbase.snapshot.ExportSnapshot; +import org.apache.hadoop.mapreduce.Cluster; +import org.apache.hadoop.mapreduce.Job; +import org.apache.hadoop.mapreduce.JobID; +import org.apache.hadoop.tools.DistCp; +import org.apache.hadoop.tools.DistCpConstants; +import org.apache.hadoop.tools.DistCpOptions; +import org.apache.zookeeper.KeeperException.NoNodeException; +/** + * Copier for backup operation. Basically, there are 2 types of copy. One is copying from snapshot, + * which bases on extending ExportSnapshot's function with copy progress reporting to ZooKeeper + * implementation. The other is copying for incremental log files, which bases on extending + * DistCp's function with copy progress reporting to ZooKeeper implementation. + * + * For now this is only a wrapper. The other features such as progress and increment backup will be + * implemented in future jira + */ + +@InterfaceAudience.Private +@InterfaceStability.Evolving +public class MapReduceBackupCopyService implements BackupCopyService { + private static final Log LOG = LogFactory.getLog(MapReduceBackupCopyService.class); + + private Configuration conf; + // private static final long BYTES_PER_MAP = 2 * 256 * 1024 * 1024; + + // Accumulated progress within the whole backup process for the copy operation + private float progressDone = 0.1f; + private long bytesCopied = 0; + private static float INIT_PROGRESS = 0.1f; + + // The percentage of the current copy task within the whole task if multiple time copies are + // needed. The default value is 100%, which means only 1 copy task for the whole. + private float subTaskPercntgInWholeTask = 1f; + + public MapReduceBackupCopyService() { + } + + @Override + public Configuration getConf() { + return conf; + } + + @Override + public void setConf(Configuration conf) { + this.conf = conf; + } + + /** + * Get the current copy task percentage within the whole task if multiple copies are needed. 
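+ * For example, a backup that needs two copy passes could use 0.5f for each pass so that
+ * the reported progress stays proportional to the overall operation (illustrative value).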
+ * @return the current copy task percentage + */ + public float getSubTaskPercntgInWholeTask() { + return subTaskPercntgInWholeTask; + } + + /** + * Set the current copy task percentage within the whole task if multiple copies are needed. Must + * be called before calling + * {@link #copy(BackupInfo, BackupManager, Configuration, BackupCopyService.Type, String[])} + * @param subTaskPercntgInWholeTask The percentage of the copy subtask + */ + public void setSubTaskPercntgInWholeTask(float subTaskPercntgInWholeTask) { + this.subTaskPercntgInWholeTask = subTaskPercntgInWholeTask; + } + + static class SnapshotCopy extends ExportSnapshot { + private BackupInfo backupContext; + private TableName table; + + public SnapshotCopy(BackupInfo backupContext, TableName table) { + super(); + this.backupContext = backupContext; + this.table = table; + } + + public TableName getTable() { + return this.table; + } + + public BackupInfo getBackupInfo() { + return this.backupContext; + } + } + + /** + * Update the ongoing backup with new progress. + * @param backupContext backup context + * + * @param newProgress progress + * @param bytesCopied bytes copied + * @throws NoNodeException exception + */ + static void updateProgress(BackupInfo backupContext, BackupManager backupManager, + int newProgress, long bytesCopied) throws IOException { + // compose the new backup progress data, using fake number for now + String backupProgressData = newProgress + "%"; + + backupContext.setProgress(newProgress); + backupManager.updateBackupInfo(backupContext); + LOG.debug("Backup progress data \"" + backupProgressData + + "\" has been updated to hbase:backup for " + backupContext.getBackupId()); + } + + // Extends DistCp for progress updating to hbase:backup + // during backup. Using DistCpV2 (MAPREDUCE-2765). + // Simply extend it and override execute() method to get the + // Job reference for progress updating. + // Only the argument "src1, [src2, [...]] dst" is supported, + // no more DistCp options. 
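+ // While the job runs, overall progress is reported as:
+ // progressDone + job.mapProgress() * subTaskPercntgInWholeTask * (1 - INIT_PROGRESS)
+ // i.e. the map progress of the current DistCp job is scaled into the remaining portion
+ // of the whole backup and added to what has already been accumulated.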
+ class BackupDistCp extends DistCp { + + private BackupInfo backupContext; + private BackupManager backupManager; + + public BackupDistCp(Configuration conf, DistCpOptions options, BackupInfo backupContext, + BackupManager backupManager) + throws Exception { + super(conf, options); + this.backupContext = backupContext; + this.backupManager = backupManager; + } + + @Override + public Job execute() throws Exception { + + // reflection preparation for private methods and fields + Class classDistCp = org.apache.hadoop.tools.DistCp.class; + Method methodCreateMetaFolderPath = classDistCp.getDeclaredMethod("createMetaFolderPath"); + Method methodCreateJob = classDistCp.getDeclaredMethod("createJob"); + Method methodCreateInputFileListing = + classDistCp.getDeclaredMethod("createInputFileListing", Job.class); + Method methodCleanup = classDistCp.getDeclaredMethod("cleanup"); + + Field fieldInputOptions = classDistCp.getDeclaredField("inputOptions"); + Field fieldMetaFolder = classDistCp.getDeclaredField("metaFolder"); + Field fieldJobFS = classDistCp.getDeclaredField("jobFS"); + Field fieldSubmitted = classDistCp.getDeclaredField("submitted"); + + methodCreateMetaFolderPath.setAccessible(true); + methodCreateJob.setAccessible(true); + methodCreateInputFileListing.setAccessible(true); + methodCleanup.setAccessible(true); + + fieldInputOptions.setAccessible(true); + fieldMetaFolder.setAccessible(true); + fieldJobFS.setAccessible(true); + fieldSubmitted.setAccessible(true); + + // execute() logic starts here + assert fieldInputOptions.get(this) != null; + // assert getConf() != null; + + Job job = null; + try { + synchronized (this) { + // Don't cleanup while we are setting up. + fieldMetaFolder.set(this, methodCreateMetaFolderPath.invoke(this)); + fieldJobFS.set(this, ((Path) fieldMetaFolder.get(this)).getFileSystem(this.getConf())); + job = (Job) methodCreateJob.invoke(this); + } + methodCreateInputFileListing.invoke(this, job); + + // Get the total length of the source files + List srcs = ((DistCpOptions) fieldInputOptions.get(this)).getSourcePaths(); + + long totalSrcLgth = 0; + for (Path aSrc : srcs) { + totalSrcLgth += BackupServerUtil.getFilesLength(aSrc.getFileSystem(this.getConf()), aSrc); + } + + // submit the copy job + job.submit(); + fieldSubmitted.set(this, true); + + // after submit the MR job, set its handler in backup handler for cancel process + // this.backupHandler.copyJob = job; + + // Update the copy progress to ZK every 0.5s if progress value changed + int progressReportFreq = + this.getConf().getInt("hbase.backup.progressreport.frequency", 500); + float lastProgress = progressDone; + while (!job.isComplete()) { + float newProgress = + progressDone + job.mapProgress() * subTaskPercntgInWholeTask * (1 - INIT_PROGRESS); + + if (newProgress > lastProgress) { + + BigDecimal progressData = + new BigDecimal(newProgress * 100).setScale(1, BigDecimal.ROUND_HALF_UP); + String newProgressStr = progressData + "%"; + LOG.info("Progress: " + newProgressStr); + updateProgress(backupContext, backupManager, progressData.intValue(), + bytesCopied); + LOG.debug("Backup progress data updated to hbase:backup: \"Progress: " + newProgressStr + + ".\""); + lastProgress = newProgress; + } + Thread.sleep(progressReportFreq); + } + // update the progress data after copy job complete + float newProgress = + progressDone + job.mapProgress() * subTaskPercntgInWholeTask * (1 - INIT_PROGRESS); + BigDecimal progressData = + new BigDecimal(newProgress * 100).setScale(1, BigDecimal.ROUND_HALF_UP); + + String 
newProgressStr = progressData + "%"; + LOG.info("Progress: " + newProgressStr); + + // accumulate the overall backup progress + progressDone = newProgress; + bytesCopied += totalSrcLgth; + + updateProgress(backupContext, backupManager, progressData.intValue(), + bytesCopied); + LOG.debug("Backup progress data updated to hbase:backup: \"Progress: " + newProgressStr + + " - " + bytesCopied + " bytes copied.\""); + + } finally { + if (!fieldSubmitted.getBoolean(this)) { + methodCleanup.invoke(this); + } + } + + String jobID = job.getJobID().toString(); + job.getConfiguration().set(DistCpConstants.CONF_LABEL_DISTCP_JOB_ID, jobID); + + LOG.debug("DistCp job-id: " + jobID); + return job; + } + + } + + + /** + * Do backup copy based on different types. + * @param context The backup context + * @param conf The hadoop configuration + * @param copyType The backup copy type + * @param options Options for customized ExportSnapshot or DistCp + * @throws Exception exception + */ + @Override + public int copy(BackupInfo context, BackupManager backupManager, Configuration conf, + BackupCopyService.Type copyType, String[] options) throws IOException { + int res = 0; + + try { + if (copyType == Type.FULL) { + SnapshotCopy snapshotCp = + new SnapshotCopy(context, context.getTableBySnapshot(options[1])); + LOG.debug("Doing SNAPSHOT_COPY"); + // Make a new instance of conf to be used by the snapshot copy class. + snapshotCp.setConf(new Configuration(conf)); + res = snapshotCp.run(options); + + } else if (copyType == Type.INCREMENTAL) { + LOG.debug("Doing COPY_TYPE_DISTCP"); + setSubTaskPercntgInWholeTask(1f); + + BackupDistCp distcp = new BackupDistCp(new Configuration(conf), null, context, + backupManager); + // Handle a special case where the source file is a single file. + // In this case, distcp will not create the target dir. It just take the + // target as a file name and copy source file to the target (as a file name). + // We need to create the target dir before run distcp. + LOG.debug("DistCp options: " + Arrays.toString(options)); + if (options.length == 2) { + Path dest = new Path(options[1]); + FileSystem destfs = dest.getFileSystem(conf); + if (!destfs.exists(dest)) { + destfs.mkdirs(dest); + } + } + res = distcp.run(options); + } + return res; + + } catch (Exception e) { + throw new IOException(e); + } + } + + @Override + public void cancelCopyJob(String jobId) throws IOException { + JobID id = JobID.forName(jobId); + Cluster cluster = new Cluster(this.getConf()); + try { + Job job = cluster.getJob(id); + if (job == null) { + LOG.error("No job found for " + id); + // should we throw exception + return; + } + if (job.isComplete() || job.isRetired()) { + return; + } + + job.killJob(); + LOG.debug("Killed copy job " + id); + } catch (InterruptedException e) { + throw new IOException(e); + } + } + +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceRestoreService.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceRestoreService.java new file mode 100644 index 0000000..fa08adf --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceRestoreService.java @@ -0,0 +1,156 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.backup.mapreduce; + +import java.io.IOException; + +import org.apache.commons.lang.StringUtils; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.backup.IncrementalRestoreService; +import org.apache.hadoop.hbase.backup.util.BackupServerUtil; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; +import org.apache.hadoop.hbase.mapreduce.LoadIncrementalHFiles; +import org.apache.hadoop.hbase.mapreduce.WALPlayer; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; + +@InterfaceAudience.Private +@InterfaceStability.Evolving +public class MapReduceRestoreService implements IncrementalRestoreService { + public static final Log LOG = LogFactory.getLog(MapReduceRestoreService.class); + + private WALPlayer player; + + public MapReduceRestoreService() { + this.player = new WALPlayer(); + } + + @Override + public void run(Path[] logDirPaths, TableName[] tableNames, TableName[] newTableNames) + throws IOException { + + // WALPlayer reads all files in arbitrary directory structure and creates a Map task for each + // log file + String logDirs = StringUtils.join(logDirPaths, ","); + LOG.info("Restore incremental backup from directory " + logDirs + " from hbase tables " + + BackupServerUtil.join(tableNames) + " to tables " + BackupServerUtil.join(newTableNames)); + + for (int i = 0; i < tableNames.length; i++) { + + LOG.info("Restore "+ tableNames[i] + " into "+ newTableNames[i]); + + Path bulkOutputPath = getBulkOutputDir(getFileNameCompatibleString(newTableNames[i])); + String[] playerArgs = + { logDirs, tableNames[i].getNameAsString(), newTableNames[i].getNameAsString()}; + + int result = 0; + int loaderResult = 0; + try { + Configuration conf = getConf(); + conf.set(WALPlayer.BULK_OUTPUT_CONF_KEY, bulkOutputPath.toString()); + player.setConf(getConf()); + result = player.run(playerArgs); + if (succeeded(result)) { + // do bulk load + LoadIncrementalHFiles loader = createLoader(); + if (LOG.isDebugEnabled()) { + LOG.debug("Restoring HFiles from directory " + bulkOutputPath); + } + String[] args = { bulkOutputPath.toString(), newTableNames[i].getNameAsString() }; + loaderResult = loader.run(args); + if(failed(loaderResult)) { + throw new IOException("Can not restore from backup directory " + logDirs + + " (check Hadoop and HBase logs). Bulk loader return code =" + loaderResult); + } + } else { + throw new IOException("Can not restore from backup directory " + logDirs + + " (check Hadoop/MR and HBase logs). 
WALPlayer return code =" + result); + } + LOG.debug("Restore Job finished:" + result); + } catch (Exception e) { + throw new IOException("Can not restore from backup directory " + logDirs + + " (check Hadoop and HBase logs) ", e); + } + + } + } + + private String getFileNameCompatibleString(TableName table) + { + return table.getNamespaceAsString() +"-"+ table.getQualifierAsString(); + } + + private boolean failed(int result) { + return result != 0; + } + + private boolean succeeded(int result) { + return result == 0; + } + + private LoadIncrementalHFiles createLoader() + throws IOException { + // set configuration for restore: + // LoadIncrementalHFile needs more time + // hbase.rpc.timeout 600000 + // calculates + Integer milliSecInHour = 3600000; + Configuration conf = new Configuration(getConf()); + conf.setInt(HConstants.HBASE_RPC_TIMEOUT_KEY, milliSecInHour); + + // By default, it is 32 and loader will fail if # of files in any region exceed this + // limit. Bad for snapshot restore. + conf.setInt(LoadIncrementalHFiles.MAX_FILES_PER_REGION_PER_FAMILY, Integer.MAX_VALUE); + LoadIncrementalHFiles loader = null; + try { + loader = new LoadIncrementalHFiles(conf); + } catch (Exception e) { + throw new IOException(e); + } + return loader; + } + + private Path getBulkOutputDir(String tableName) throws IOException + { + Configuration conf = getConf(); + FileSystem fs = FileSystem.get(conf); + String tmp = conf.get("hbase.tmp.dir"); + Path path = new Path(tmp + Path.SEPARATOR + "bulk_output-"+tableName + "-" + + EnvironmentEdgeManager.currentTime()); + fs.deleteOnExit(path); + return path; + } + + + @Override + public Configuration getConf() { + return player.getConf(); + } + + @Override + public void setConf(Configuration conf) { + this.player.setConf(conf); + } + +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/master/BackupController.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/master/BackupController.java new file mode 100644 index 0000000..f4ae35c --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/master/BackupController.java @@ -0,0 +1,56 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
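The restore service above chains two existing MapReduce tools: WALPlayer in bulk-output mode, then LoadIncrementalHFiles on the generated HFiles. A minimal hand-driven sketch of the same flow follows; the paths and the table pair ns1:t1 restored into ns1:t1_restored are hypothetical.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.mapreduce.LoadIncrementalHFiles;
import org.apache.hadoop.hbase.mapreduce.WALPlayer;

public class ManualIncrementalRestoreSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Step 1: replay the backed-up WALs into HFiles instead of live puts.
    conf.set(WALPlayer.BULK_OUTPUT_CONF_KEY, "/tmp/bulk_output-ns1-t1"); // hypothetical path
    WALPlayer player = new WALPlayer();
    player.setConf(conf);
    int rc = player.run(new String[] { "/backup/WALs", "ns1:t1", "ns1:t1_restored" });
    if (rc == 0) {
      // Step 2: bulk-load the generated HFiles into the restore target table.
      LoadIncrementalHFiles loader = new LoadIncrementalHFiles(conf);
      loader.run(new String[] { "/tmp/bulk_output-ns1-t1", "ns1:t1_restored" });
    }
  }
}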
+ */ +package org.apache.hadoop.hbase.backup.master; + +import java.io.IOException; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.TableExistsException; +import org.apache.hadoop.hbase.backup.impl.BackupSystemTable; +import org.apache.hadoop.hbase.coprocessor.BaseMasterAndRegionObserver; +import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment; +import org.apache.hadoop.hbase.coprocessor.ObserverContext; +import org.apache.hadoop.hbase.master.MasterServices; + +/** + * The current implementation checks whether the backup system table + * (hbase:backup) exists on HBase Master startup and creates it if it + * does not. The backup system table must be created by an HBase user + * with ADMIN privileges. + */ +public class BackupController extends BaseMasterAndRegionObserver { + private static final Log LOG = LogFactory.getLog(BackupController.class.getName()); + + @Override + public void postStartMaster(ObserverContext<MasterCoprocessorEnvironment> ctx) + throws IOException { + // Need to create the new system table for backups (if it does not exist) + MasterServices master = ctx.getEnvironment().getMasterServices(); + HTableDescriptor backupHTD = BackupSystemTable.getSystemTableDescriptor(); + try { + master.createTable(backupHTD, null, HConstants.NO_NONCE, HConstants.NO_NONCE); + LOG.info("Created "+ BackupSystemTable.getTableNameAsString()+" table"); + } catch (TableExistsException e) { + LOG.info("Table "+ BackupSystemTable.getTableNameAsString() +" already exists"); + } + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/master/BackupLogCleaner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/master/BackupLogCleaner.java new file mode 100644 index 0000000..04a91a6 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/master/BackupLogCleaner.java @@ -0,0 +1,116 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
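The BackupController above and the BackupLogCleaner introduced next are both enabled through master-side configuration rather than code. A hedged sketch of that wiring follows; in practice these properties live in hbase-site.xml, the property names are the usual HBase keys as best I recall them, and HConstants.BACKUP_ENABLE_KEY comes from this patch.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;

public class BackupMasterWiringSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Master observer that creates hbase:backup at startup (class name from this patch).
    conf.set("hbase.coprocessor.master.classes",
        "org.apache.hadoop.hbase.backup.master.BackupController");
    // Chain the backup-aware WAL cleaner after the default TTL cleaner.
    conf.set("hbase.master.logcleaner.plugins",
        "org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner,"
            + "org.apache.hadoop.hbase.backup.master.BackupLogCleaner");
    // With backup disabled, BackupLogCleaner deliberately lets every expired WAL be deleted.
    conf.setBoolean(HConstants.BACKUP_ENABLE_KEY, true);
  }
}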
+ */ +package org.apache.hadoop.hbase.backup.master; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.hbase.HBaseInterfaceAudience; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.backup.impl.BackupSystemTable; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; +import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.client.ConnectionFactory; +import org.apache.hadoop.hbase.master.cleaner.BaseLogCleanerDelegate; + +/** + * Implementation of a log cleaner that checks if a log is still scheduled for + * incremental backup before deleting it when its TTL is over. + */ +@InterfaceStability.Evolving +@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG) +public class BackupLogCleaner extends BaseLogCleanerDelegate { + private static final Log LOG = LogFactory.getLog(BackupLogCleaner.class); + + private boolean stopped = false; + + public BackupLogCleaner() { + } + + @Override + public Iterable getDeletableFiles(Iterable files) { + // all members of this class are null if backup is disabled, + // so we cannot filter the files + if (this.getConf() == null) { + return files; + } + + List list = new ArrayList(); + // TODO: LogCleaners do not have a way to get the Connection from Master. We should find a + // way to pass it down here, so that this connection is not re-created every time. + // It is expensive + try (final Connection conn = ConnectionFactory.createConnection(getConf()); + final BackupSystemTable table = new BackupSystemTable(conn)) { + // If we do not have recorded backup sessions + if (!table.hasBackupSessions()) { + LOG.debug("BackupLogCleaner has no backup sessions"); + return files; + } + + for(FileStatus file: files){ + String wal = file.getPath().toString(); + boolean logInSystemTable = table.isWALFileDeletable(wal); + if(LOG.isDebugEnabled()) { + if(logInSystemTable) { + LOG.debug("Found log file in hbase:backup, deleting: " + wal); + list.add(file); + } else { + LOG.debug("Didn't find this log in hbase:backup, keeping: " + wal); + } + } + } + return list; + } catch (IOException e) { + LOG.error("Failed to get hbase:backup table, therefore will keep all files", e); + // nothing to delete + return new ArrayList(); + } + } + + @Override + public void setConf(Configuration config) { + // If backup is disabled, keep all members null + if (!config.getBoolean(HConstants.BACKUP_ENABLE_KEY, HConstants.BACKUP_ENABLE_DEFAULT)) { + LOG.warn("Backup is disabled - allowing all wals to be deleted"); + return; + } + super.setConf(config); + } + + @Override + public void stop(String why) { + if (this.stopped) { + return; + } + this.stopped = true; + LOG.info("Stopping BackupLogCleaner"); + } + + @Override + public boolean isStopped() { + return this.stopped; + } + +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/master/FullTableBackupProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/master/FullTableBackupProcedure.java new file mode 100644 index 0000000..0a12b62 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/master/FullTableBackupProcedure.java @@ -0,0 +1,746 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or 
more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.backup.master; + +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Map.Entry; +import java.util.concurrent.atomic.AtomicBoolean; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.backup.BackupCopyService; +import org.apache.hadoop.hbase.backup.BackupInfo; +import org.apache.hadoop.hbase.backup.BackupRestoreServerFactory; +import org.apache.hadoop.hbase.backup.BackupType; +import org.apache.hadoop.hbase.backup.HBackupFileSystem; +import org.apache.hadoop.hbase.backup.BackupCopyService.Type; +import org.apache.hadoop.hbase.backup.BackupInfo.BackupPhase; +import org.apache.hadoop.hbase.backup.BackupInfo.BackupState; +import org.apache.hadoop.hbase.backup.impl.BackupException; +import org.apache.hadoop.hbase.backup.impl.BackupManager; +import org.apache.hadoop.hbase.backup.impl.BackupManifest; +import org.apache.hadoop.hbase.backup.impl.BackupRestoreConstants; +import org.apache.hadoop.hbase.backup.impl.BackupManifest.BackupImage; +import org.apache.hadoop.hbase.backup.util.BackupClientUtil; +import org.apache.hadoop.hbase.backup.util.BackupServerUtil; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv; +import org.apache.hadoop.hbase.master.procedure.MasterProcedureUtil; +import org.apache.hadoop.hbase.master.procedure.TableProcedureInterface; +import org.apache.hadoop.hbase.procedure.MasterProcedureManager; +import org.apache.hadoop.hbase.procedure2.StateMachineProcedure; +import org.apache.hadoop.hbase.protobuf.generated.BackupProtos; +import org.apache.hadoop.hbase.protobuf.generated.BackupProtos.FullTableBackupState; +import org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp; +import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos; +import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription; +import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; +import org.apache.hadoop.hbase.util.FSUtils; + +@InterfaceAudience.Private +public class FullTableBackupProcedure + extends StateMachineProcedure + implements TableProcedureInterface { + private static final Log LOG = LogFactory.getLog(FullTableBackupProcedure.class); + + private final AtomicBoolean aborted = 
new AtomicBoolean(false); + private Configuration conf; + private String backupId; + private List tableList; + private String targetRootDir; + HashMap newTimestamps = null; + + private BackupManager backupManager; + private BackupInfo backupContext; + + public FullTableBackupProcedure() { + // Required by the Procedure framework to create the procedure on replay + } + + public FullTableBackupProcedure(final MasterProcedureEnv env, + final String backupId, List tableList, String targetRootDir, final int workers, + final long bandwidth) throws IOException { + backupManager = new BackupManager(env.getMasterConfiguration()); + this.backupId = backupId; + this.tableList = tableList; + this.targetRootDir = targetRootDir; + backupContext = + backupManager.createBackupContext(backupId, BackupType.FULL, tableList, targetRootDir, + workers, bandwidth); + if (tableList == null || tableList.isEmpty()) { + this.tableList = new ArrayList<>(backupContext.getTables()); + } + } + + @Override + public byte[] getResult() { + return backupId.getBytes(); + } + + /** + * Begin the overall backup. + * @param backupContext backup context + * @throws IOException exception + */ + static void beginBackup(BackupManager backupManager, BackupInfo backupContext) + throws IOException { + backupManager.setBackupContext(backupContext); + // set the start timestamp of the overall backup + long startTs = EnvironmentEdgeManager.currentTime(); + backupContext.setStartTs(startTs); + // set overall backup status: ongoing + backupContext.setState(BackupState.RUNNING); + LOG.info("Backup " + backupContext.getBackupId() + " started at " + startTs + "."); + + backupManager.updateBackupInfo(backupContext); + if (LOG.isDebugEnabled()) { + LOG.debug("Backup session " + backupContext.getBackupId() + " has been started."); + } + } + + private static String getMessage(Exception e) { + String msg = e.getMessage(); + if (msg == null || msg.equals("")) { + msg = e.getClass().getName(); + } + return msg; + } + + /** + * Delete HBase snapshot for backup. + * @param backupCtx backup context + * @throws Exception exception + */ + private static void deleteSnapshot(final MasterProcedureEnv env, + BackupInfo backupCtx, Configuration conf) + throws IOException { + LOG.debug("Trying to delete snapshot for full backup."); + for (String snapshotName : backupCtx.getSnapshotNames()) { + if (snapshotName == null) { + continue; + } + LOG.debug("Trying to delete snapshot: " + snapshotName); + HBaseProtos.SnapshotDescription.Builder builder = + HBaseProtos.SnapshotDescription.newBuilder(); + builder.setName(snapshotName); + try { + env.getMasterServices().getSnapshotManager().deleteSnapshot(builder.build()); + } catch (IOException ioe) { + LOG.debug("when deleting snapshot " + snapshotName, ioe); + } + LOG.debug("Deleting the snapshot " + snapshotName + " for backup " + + backupCtx.getBackupId() + " succeeded."); + } + } + + /** + * Clean up directories with prefix "exportSnapshot-", which are generated when exporting + * snapshots. 
+ * @throws IOException exception + */ + private static void cleanupExportSnapshotLog(Configuration conf) throws IOException { + FileSystem fs = FSUtils.getCurrentFileSystem(conf); + Path stagingDir = + new Path(conf.get(BackupRestoreConstants.CONF_STAGING_ROOT, fs.getWorkingDirectory() + .toString())); + FileStatus[] files = FSUtils.listStatus(fs, stagingDir); + if (files == null) { + return; + } + for (FileStatus file : files) { + if (file.getPath().getName().startsWith("exportSnapshot-")) { + LOG.debug("Delete log files of exporting snapshot: " + file.getPath().getName()); + if (FSUtils.delete(fs, file.getPath(), true) == false) { + LOG.warn("Can not delete " + file.getPath()); + } + } + } + } + + /** + * Clean up the uncompleted data at target directory if the ongoing backup has already entered the + * copy phase. + */ + static void cleanupTargetDir(BackupInfo backupContext, Configuration conf) { + try { + // clean up the uncompleted data at target directory if the ongoing backup has already entered + // the copy phase + LOG.debug("Trying to cleanup up target dir. Current backup phase: " + + backupContext.getPhase()); + if (backupContext.getPhase().equals(BackupPhase.SNAPSHOTCOPY) + || backupContext.getPhase().equals(BackupPhase.INCREMENTAL_COPY) + || backupContext.getPhase().equals(BackupPhase.STORE_MANIFEST)) { + FileSystem outputFs = + FileSystem.get(new Path(backupContext.getTargetRootDir()).toUri(), conf); + + // now treat one backup as a transaction, clean up data that has been partially copied at + // table level + for (TableName table : backupContext.getTables()) { + Path targetDirPath = + new Path(HBackupFileSystem.getTableBackupDir(backupContext.getTargetRootDir(), + backupContext.getBackupId(), table)); + if (outputFs.delete(targetDirPath, true)) { + LOG.info("Cleaning up uncompleted backup data at " + targetDirPath.toString() + + " done."); + } else { + LOG.info("No data has been copied to " + targetDirPath.toString() + "."); + } + + Path tableDir = targetDirPath.getParent(); + FileStatus[] backups = FSUtils.listStatus(outputFs, tableDir); + if (backups == null || backups.length == 0) { + outputFs.delete(tableDir, true); + LOG.debug(tableDir.toString() + " is empty, remove it."); + } + } + } + + } catch (IOException e1) { + LOG.error("Cleaning up uncompleted backup data of " + backupContext.getBackupId() + " at " + + backupContext.getTargetRootDir() + " failed due to " + e1.getMessage() + "."); + } + } + + /** + * Fail the overall backup. + * @param backupContext backup context + * @param e exception + * @throws Exception exception + */ + static void failBackup(final MasterProcedureEnv env, BackupInfo backupContext, + BackupManager backupManager, Exception e, + String msg, BackupType type, Configuration conf) throws IOException { + LOG.error(msg + getMessage(e)); + // If this is a cancel exception, then we've already cleaned. 
+ + // set the failure timestamp of the overall backup + backupContext.setEndTs(EnvironmentEdgeManager.currentTime()); + + // set failure message + backupContext.setFailedMsg(e.getMessage()); + + // set overall backup status: failed + backupContext.setState(BackupState.FAILED); + + // compose the backup failed data + String backupFailedData = + "BackupId=" + backupContext.getBackupId() + ",startts=" + backupContext.getStartTs() + + ",failedts=" + backupContext.getEndTs() + ",failedphase=" + backupContext.getPhase() + + ",failedmessage=" + backupContext.getFailedMsg(); + LOG.error(backupFailedData); + + backupManager.updateBackupInfo(backupContext); + + // if full backup, then delete HBase snapshots if there already are snapshots taken + // and also clean up export snapshot log files if exist + if (type == BackupType.FULL) { + deleteSnapshot(env, backupContext, conf); + cleanupExportSnapshotLog(conf); + } + + // clean up the uncompleted data at target directory if the ongoing backup has already entered + // the copy phase + // For incremental backup, DistCp logs will be cleaned with the targetDir. + cleanupTargetDir(backupContext, conf); + + LOG.info("Backup " + backupContext.getBackupId() + " failed."); + } + + /** + * Do snapshot copy. + * @param backupContext backup context + * @throws Exception exception + */ + private void snapshotCopy(BackupInfo backupContext) throws Exception { + LOG.info("Snapshot copy is starting."); + + // set overall backup phase: snapshot_copy + backupContext.setPhase(BackupPhase.SNAPSHOTCOPY); + + // call ExportSnapshot to copy files based on hbase snapshot for backup + // ExportSnapshot only support single snapshot export, need loop for multiple tables case + BackupCopyService copyService = BackupRestoreServerFactory.getBackupCopyService(conf); + + // number of snapshots matches number of tables + float numOfSnapshots = backupContext.getSnapshotNames().size(); + + LOG.debug("There are " + (int) numOfSnapshots + " snapshots to be copied."); + + for (TableName table : backupContext.getTables()) { + // Currently we simply set the sub copy tasks by counting the table snapshot number, we can + // calculate the real files' size for the percentage in the future. + // backupCopier.setSubTaskPercntgInWholeTask(1f / numOfSnapshots); + int res = 0; + String[] args = new String[4]; + args[0] = "-snapshot"; + args[1] = backupContext.getSnapshotName(table); + args[2] = "-copy-to"; + args[3] = backupContext.getBackupStatus(table).getTargetDir(); + + LOG.debug("Copy snapshot " + args[1] + " to " + args[3]); + res = copyService.copy(backupContext, backupManager, conf, BackupCopyService.Type.FULL, args); + // if one snapshot export failed, do not continue for remained snapshots + if (res != 0) { + LOG.error("Exporting Snapshot " + args[1] + " failed with return code: " + res + "."); + + throw new IOException("Failed of exporting snapshot " + args[1] + " to " + args[3] + + " with reason code " + res); + } + + LOG.info("Snapshot copy " + args[1] + " finished."); + } + } + + /** + * Add manifest for the current backup. The manifest is stored + * within the table backup directory. 
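The argument vector assembled by snapshotCopy above is the same command-line form ExportSnapshot accepts. Run stand-alone, the copy would look roughly like the sketch below; the snapshot name and target path are hypothetical.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;

public class SnapshotCopySketch {
  public static void main(String[] args) throws Exception {
    ExportSnapshot exporter = new ExportSnapshot();
    exporter.setConf(HBaseConfiguration.create());
    int rc = exporter.run(new String[] {
        "-snapshot", "snapshot_1462214700000_ns1_t1",                              // hypothetical snapshot
        "-copy-to", "hdfs://backup-nn:8020/backup/backup_1462214700000/ns1/t1" }); // hypothetical target
    System.exit(rc);
  }
}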
+ * @param backupContext The current backup context + * @throws IOException exception + * @throws BackupException exception + */ + private static void addManifest(BackupInfo backupContext, BackupManager backupManager, + BackupType type, Configuration conf) throws IOException, BackupException { + // set the overall backup phase : store manifest + backupContext.setPhase(BackupPhase.STORE_MANIFEST); + + BackupManifest manifest; + + // Since we have each table's backup in its own directory structure, + // we'll store its manifest with the table directory. + for (TableName table : backupContext.getTables()) { + manifest = new BackupManifest(backupContext, table); + ArrayList ancestors = backupManager.getAncestors(backupContext, table); + for (BackupImage image : ancestors) { + manifest.addDependentImage(image); + } + + if (type == BackupType.INCREMENTAL) { + // We'll store the log timestamps for this table only in its manifest. + HashMap> tableTimestampMap = + new HashMap>(); + tableTimestampMap.put(table, backupContext.getIncrTimestampMap().get(table)); + manifest.setIncrTimestampMap(tableTimestampMap); + ArrayList ancestorss = backupManager.getAncestors(backupContext); + for (BackupImage image : ancestorss) { + manifest.addDependentImage(image); + } + } + manifest.store(conf); + } + + // For incremental backup, we store a overall manifest in + // /WALs/ + // This is used when created the next incremental backup + if (type == BackupType.INCREMENTAL) { + manifest = new BackupManifest(backupContext); + // set the table region server start and end timestamps for incremental backup + manifest.setIncrTimestampMap(backupContext.getIncrTimestampMap()); + ArrayList ancestors = backupManager.getAncestors(backupContext); + for (BackupImage image : ancestors) { + manifest.addDependentImage(image); + } + manifest.store(conf); + } + } + + /** + * Get backup request meta data dir as string. + * @param backupContext backup context + * @return meta data dir + */ + private static String obtainBackupMetaDataStr(BackupInfo backupContext) { + StringBuffer sb = new StringBuffer(); + sb.append("type=" + backupContext.getType() + ",tablelist="); + for (TableName table : backupContext.getTables()) { + sb.append(table + ";"); + } + if (sb.lastIndexOf(";") > 0) { + sb.delete(sb.lastIndexOf(";"), sb.lastIndexOf(";") + 1); + } + sb.append(",targetRootDir=" + backupContext.getTargetRootDir()); + + return sb.toString(); + } + + /** + * Clean up directories with prefix "_distcp_logs-", which are generated when DistCp copying + * hlogs. + * @throws IOException exception + */ + private static void cleanupDistCpLog(BackupInfo backupContext, Configuration conf) + throws IOException { + Path rootPath = new Path(backupContext.getHLogTargetDir()).getParent(); + FileSystem fs = FileSystem.get(rootPath.toUri(), conf); + FileStatus[] files = FSUtils.listStatus(fs, rootPath); + if (files == null) { + return; + } + for (FileStatus file : files) { + if (file.getPath().getName().startsWith("_distcp_logs")) { + LOG.debug("Delete log files of DistCp: " + file.getPath().getName()); + FSUtils.delete(fs, file.getPath(), true); + } + } + } + + /** + * Complete the overall backup. 
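For reference, the metadata string assembled by obtainBackupMetaDataStr above has the following shape (all values made up):

// Example output of obtainBackupMetaDataStr:
//   type=FULL,tablelist=ns1:t1;ns1:t2,targetRootDir=hdfs://nn:8020/backup
// The trailing ';' after the last table is stripped before targetRootDir is appended.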
+ * @param backupContext backup context + * @throws Exception exception + */ + static void completeBackup(final MasterProcedureEnv env, BackupInfo backupContext, + BackupManager backupManager, BackupType type, Configuration conf) throws IOException { + // set the complete timestamp of the overall backup + backupContext.setEndTs(EnvironmentEdgeManager.currentTime()); + // set overall backup status: complete + backupContext.setState(BackupState.COMPLETE); + backupContext.setProgress(100); + // add and store the manifest for the backup + addManifest(backupContext, backupManager, type, conf); + + // after major steps done and manifest persisted, do convert if needed for incremental backup + /* in-fly convert code here, provided by future jira */ + LOG.debug("in-fly convert code here, provided by future jira"); + + // compose the backup complete data + String backupCompleteData = + obtainBackupMetaDataStr(backupContext) + ",startts=" + backupContext.getStartTs() + + ",completets=" + backupContext.getEndTs() + ",bytescopied=" + + backupContext.getTotalBytesCopied(); + if (LOG.isDebugEnabled()) { + LOG.debug("Backup " + backupContext.getBackupId() + " finished: " + backupCompleteData); + } + backupManager.updateBackupInfo(backupContext); + + // when full backup is done: + // - delete HBase snapshot + // - clean up directories with prefix "exportSnapshot-", which are generated when exporting + // snapshots + if (type == BackupType.FULL) { + deleteSnapshot(env, backupContext, conf); + cleanupExportSnapshotLog(conf); + } else if (type == BackupType.INCREMENTAL) { + cleanupDistCpLog(backupContext, conf); + } + + LOG.info("Backup " + backupContext.getBackupId() + " completed."); + } + + /** + * Wrap a SnapshotDescription for a target table. + * @param table table + * @return a SnapshotDescription especially for backup. + */ + static SnapshotDescription wrapSnapshotDescription(TableName tableName, String snapshotName) { + // Mock a SnapshotDescription from backupContext to call SnapshotManager function, + // Name it in the format "snapshot__" + HBaseProtos.SnapshotDescription.Builder builder = HBaseProtos.SnapshotDescription.newBuilder(); + builder.setTable(tableName.getNameAsString()); + builder.setName(snapshotName); + HBaseProtos.SnapshotDescription backupSnapshot = builder.build(); + + LOG.debug("Wrapped a SnapshotDescription " + backupSnapshot.getName() + + " from backupContext to request snapshot for backup."); + + return backupSnapshot; + } + + @Override + protected Flow executeFromState(final MasterProcedureEnv env, final FullTableBackupState state) + throws InterruptedException { + if (conf == null) { + conf = env.getMasterConfiguration(); + } + if (backupManager == null) { + try { + backupManager = new BackupManager(env.getMasterConfiguration()); + } catch (IOException ioe) { + setFailure("full backup", ioe); + return Flow.NO_MORE_STATE; + } + } + if (LOG.isTraceEnabled()) { + LOG.trace(this + " execute state=" + state); + } + try { + switch (state) { + case PRE_SNAPSHOT_TABLE: + beginBackup(backupManager, backupContext); + String savedStartCode = null; + boolean firstBackup = false; + // do snapshot for full table backup + + try { + savedStartCode = backupManager.readBackupStartCode(); + firstBackup = savedStartCode == null || Long.parseLong(savedStartCode) == 0L; + if (firstBackup) { + // This is our first backup. Let's put some marker on ZK so that we can hold the logs + // while we do the backup. 
+ backupManager.writeBackupStartCode(0L); + } + // We roll log here before we do the snapshot. It is possible there is duplicate data + // in the log that is already in the snapshot. But if we do it after the snapshot, we + // could have data loss. + // A better approach is to do the roll log on each RS in the same global procedure as + // the snapshot. + LOG.info("Execute roll log procedure for full backup ..."); + MasterProcedureManager mpm = env.getMasterServices().getMasterProcedureManagerHost() + .getProcedureManager(LogRollMasterProcedureManager.ROLLLOG_PROCEDURE_SIGNATURE); + Map props= new HashMap(); + props.put("backupRoot", backupContext.getTargetRootDir()); + long waitTime = MasterProcedureUtil.execProcedure(mpm, + LogRollMasterProcedureManager.ROLLLOG_PROCEDURE_SIGNATURE, + LogRollMasterProcedureManager.ROLLLOG_PROCEDURE_NAME, props); + MasterProcedureUtil.waitForProcedure(mpm, + LogRollMasterProcedureManager.ROLLLOG_PROCEDURE_SIGNATURE, + LogRollMasterProcedureManager.ROLLLOG_PROCEDURE_NAME, props, waitTime, + conf.getInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, + HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER), + conf.getLong(HConstants.HBASE_CLIENT_PAUSE, + HConstants.DEFAULT_HBASE_CLIENT_PAUSE)); + + newTimestamps = backupManager.readRegionServerLastLogRollResult(); + if (firstBackup) { + // Updates registered log files + // We record ALL old WAL files as registered, because + // this is a first full backup in the system and these + // files are not needed for next incremental backup + List logFiles = BackupServerUtil.getWALFilesOlderThan(conf, newTimestamps); + backupManager.recordWALFiles(logFiles); + } + } catch (BackupException e) { + setFailure("Failure in full-backup: pre-snapshot phase", e); + // fail the overall backup and return + failBackup(env, backupContext, backupManager, e, "Unexpected BackupException : ", + BackupType.FULL, conf); + return Flow.NO_MORE_STATE; + } + setNextState(FullTableBackupState.SNAPSHOT_TABLES); + break; + case SNAPSHOT_TABLES: + for (TableName tableName : tableList) { + String snapshotName = "snapshot_" + Long.toString(EnvironmentEdgeManager.currentTime()) + + "_" + tableName.getNamespaceAsString() + "_" + tableName.getQualifierAsString(); + HBaseProtos.SnapshotDescription backupSnapshot; + + // wrap a SnapshotDescription for offline/online snapshot + backupSnapshot = wrapSnapshotDescription(tableName,snapshotName); + try { + env.getMasterServices().getSnapshotManager().deleteSnapshot(backupSnapshot); + } catch (IOException e) { + LOG.debug("Unable to delete " + snapshotName, e); + } + // Kick off snapshot for backup + try { + env.getMasterServices().getSnapshotManager().takeSnapshot(backupSnapshot); + } catch (IOException e) { + LOG.debug("Unable to take snapshot: " + snapshotName, e); + } + long waitTime = SnapshotDescriptionUtils.getMaxMasterTimeout( + env.getMasterConfiguration(), + backupSnapshot.getType(), SnapshotDescriptionUtils.DEFAULT_MAX_WAIT_TIME); + BackupServerUtil.waitForSnapshot(backupSnapshot, waitTime, + env.getMasterServices().getSnapshotManager(), env.getMasterConfiguration()); + // set the snapshot name in BackupStatus of this table, only after snapshot success. 
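The per-table snapshot name used in the SNAPSHOT_TABLES state is built from the current time plus namespace and qualifier. A small stand-alone sketch, with a hypothetical table:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;

public class SnapshotNameSketch {
  public static void main(String[] args) {
    TableName table = TableName.valueOf("ns1", "t1");    // hypothetical table
    long now = EnvironmentEdgeManager.currentTime();     // e.g. 1462214700000
    String snapshotName = "snapshot_" + Long.toString(now)
        + "_" + table.getNamespaceAsString()
        + "_" + table.getQualifierAsString();
    System.out.println(snapshotName);                    // snapshot_1462214700000_ns1_t1
  }
}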
+ backupContext.setSnapshotName(tableName, backupSnapshot.getName()); + } + setNextState(FullTableBackupState.SNAPSHOT_COPY); + break; + case SNAPSHOT_COPY: + // do snapshot copy + LOG.debug("snapshot copy for " + backupId); + try { + this.snapshotCopy(backupContext); + } catch (Exception e) { + setFailure("Failure in full-backup: snapshot copy phase" + backupId, e); + // fail the overall backup and return + failBackup(env, backupContext, backupManager, e, "Unexpected BackupException : ", + BackupType.FULL, conf); + return Flow.NO_MORE_STATE; + } + // Updates incremental backup table set + backupManager.addIncrementalBackupTableSet(backupContext.getTables()); + setNextState(FullTableBackupState.BACKUP_COMPLETE); + break; + + case BACKUP_COMPLETE: + // set overall backup status: complete. Here we make sure to complete the backup. + // After this checkpoint, even if entering cancel process, will let the backup finished + backupContext.setState(BackupState.COMPLETE); + // The table list in backupContext is good for both full backup and incremental backup. + // For incremental backup, it contains the incremental backup table set. + backupManager.writeRegionServerLogTimestamp(backupContext.getTables(), newTimestamps); + + HashMap> newTableSetTimestampMap = + backupManager.readLogTimestampMap(); + + Long newStartCode = + BackupClientUtil.getMinValue(BackupServerUtil.getRSLogTimestampMins(newTableSetTimestampMap)); + backupManager.writeBackupStartCode(newStartCode); + + // backup complete + completeBackup(env, backupContext, backupManager, BackupType.FULL, conf); + return Flow.NO_MORE_STATE; + + default: + throw new UnsupportedOperationException("unhandled state=" + state); + } + } catch (IOException e) { + LOG.error("Backup failed in " + state); + setFailure("snapshot-table", e); + } + return Flow.HAS_MORE_STATE; + } + + @Override + protected void rollbackState(final MasterProcedureEnv env, final FullTableBackupState state) + throws IOException { + if (state != FullTableBackupState.PRE_SNAPSHOT_TABLE) { + deleteSnapshot(env, backupContext, conf); + cleanupExportSnapshotLog(conf); + } + + // clean up the uncompleted data at target directory if the ongoing backup has already entered + // the copy phase + // For incremental backup, DistCp logs will be cleaned with the targetDir. 
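The new backup start code written in the BACKUP_COMPLETE state is simply the oldest log-roll timestamp across all region servers, so no WAL newer than it can be cleaned before the next incremental backup. A plain-Java stand-in for the BackupServerUtil/BackupClientUtil helpers, with made-up server names and timestamps:

import java.util.Collections;
import java.util.HashMap;
import java.util.Map;

public class StartCodeSketch {
  public static void main(String[] args) {
    // Hypothetical per-region-server log-roll timestamps recorded in hbase:backup.
    Map<String, Long> rsLogRollTs = new HashMap<>();
    rsLogRollTs.put("rs1.example.com:16020", 1462214705000L);
    rsLogRollTs.put("rs2.example.com:16020", 1462214701000L);
    // The start code is the minimum: any WAL newer than it may still be needed
    // by the next incremental backup and must not be cleaned.
    long newStartCode = Collections.min(rsLogRollTs.values());
    System.out.println(newStartCode);  // 1462214701000
  }
}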
+ if (state == FullTableBackupState.SNAPSHOT_COPY) { + cleanupTargetDir(backupContext, conf); + } + } + + @Override + protected FullTableBackupState getState(final int stateId) { + return FullTableBackupState.valueOf(stateId); + } + + @Override + protected int getStateId(final FullTableBackupState state) { + return state.getNumber(); + } + + @Override + protected FullTableBackupState getInitialState() { + return FullTableBackupState.PRE_SNAPSHOT_TABLE; + } + + @Override + protected void setNextState(final FullTableBackupState state) { + if (aborted.get()) { + setAbortFailure("backup-table", "abort requested"); + } else { + super.setNextState(state); + } + } + + @Override + public boolean abort(final MasterProcedureEnv env) { + aborted.set(true); + return true; + } + + @Override + public void toStringClassDetails(StringBuilder sb) { + sb.append(getClass().getSimpleName()); + sb.append(" (targetRootDir="); + sb.append(targetRootDir); + sb.append("; backupId=").append(backupId); + sb.append("; tables="); + int len = tableList.size(); + for (int i = 0; i < len-1; i++) { + sb.append(tableList.get(i)).append(","); + } + sb.append(tableList.get(len-1)); + sb.append(")"); + } + + BackupProtos.BackupProcContext toBackupContext() { + BackupProtos.BackupProcContext.Builder ctxBuilder = BackupProtos.BackupProcContext.newBuilder(); + ctxBuilder.setCtx(backupContext.toProtosBackupInfo()); + if (newTimestamps != null && !newTimestamps.isEmpty()) { + BackupProtos.ServerTimestamp.Builder tsBuilder = ServerTimestamp.newBuilder(); + for (Entry entry : newTimestamps.entrySet()) { + tsBuilder.clear().setServer(entry.getKey()).setTimestamp(entry.getValue()); + ctxBuilder.addServerTimestamp(tsBuilder.build()); + } + } + return ctxBuilder.build(); + } + + @Override + public void serializeStateData(final OutputStream stream) throws IOException { + super.serializeStateData(stream); + + BackupProtos.BackupProcContext backupProcCtx = toBackupContext(); + backupProcCtx.writeDelimitedTo(stream); + } + + @Override + public void deserializeStateData(final InputStream stream) throws IOException { + super.deserializeStateData(stream); + + BackupProtos.BackupProcContext proto =BackupProtos.BackupProcContext.parseDelimitedFrom(stream); + backupContext = BackupInfo.fromProto(proto.getCtx()); + backupId = backupContext.getBackupId(); + targetRootDir = backupContext.getTargetRootDir(); + tableList = backupContext.getTableNames(); + List svrTimestamps = proto.getServerTimestampList(); + if (svrTimestamps != null && !svrTimestamps.isEmpty()) { + newTimestamps = new HashMap<>(); + for (ServerTimestamp ts : svrTimestamps) { + newTimestamps.put(ts.getServer(), ts.getTimestamp()); + } + } + } + + @Override + public TableName getTableName() { + return TableName.BACKUP_TABLE_NAME; + } + + @Override + public TableOperationType getTableOperationType() { + return TableOperationType.BACKUP; + } + + @Override + protected boolean acquireLock(final MasterProcedureEnv env) { + if (env.waitInitialized(this)) { + return false; + } + return env.getProcedureQueue().tryAcquireTableExclusiveLock(this, TableName.BACKUP_TABLE_NAME); + } + + @Override + protected void releaseLock(final MasterProcedureEnv env) { + env.getProcedureQueue().releaseTableExclusiveLock(this, TableName.BACKUP_TABLE_NAME); + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/master/IncrementalTableBackupProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/master/IncrementalTableBackupProcedure.java new file mode 100644 index 
0000000..6e90309 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/master/IncrementalTableBackupProcedure.java @@ -0,0 +1,395 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.backup.master; + +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map.Entry; +import java.util.concurrent.atomic.AtomicBoolean; + +import org.apache.commons.lang.StringUtils; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.backup.BackupCopyService; +import org.apache.hadoop.hbase.backup.BackupInfo; +import org.apache.hadoop.hbase.backup.BackupRestoreServerFactory; +import org.apache.hadoop.hbase.backup.BackupType; +import org.apache.hadoop.hbase.backup.BackupCopyService.Type; +import org.apache.hadoop.hbase.backup.BackupInfo.BackupPhase; +import org.apache.hadoop.hbase.backup.BackupInfo.BackupState; +import org.apache.hadoop.hbase.backup.impl.BackupManager; +import org.apache.hadoop.hbase.backup.impl.IncrementalBackupManager; +import org.apache.hadoop.hbase.backup.util.BackupClientUtil; +import org.apache.hadoop.hbase.backup.util.BackupServerUtil; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv; +import org.apache.hadoop.hbase.master.procedure.TableProcedureInterface; +import org.apache.hadoop.hbase.procedure2.StateMachineProcedure; +import org.apache.hadoop.hbase.protobuf.generated.BackupProtos; +import org.apache.hadoop.hbase.protobuf.generated.BackupProtos.IncrementalTableBackupState; +import org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp; + +@InterfaceAudience.Private +public class IncrementalTableBackupProcedure + extends StateMachineProcedure + implements TableProcedureInterface { + private static final Log LOG = LogFactory.getLog(IncrementalTableBackupProcedure.class); + + private final AtomicBoolean aborted = new AtomicBoolean(false); + private Configuration conf; + private String backupId; + private List tableList; + private String targetRootDir; + HashMap newTimestamps = null; + + private BackupManager backupManager; + private BackupInfo backupContext; + + public IncrementalTableBackupProcedure() { + // Required by the Procedure framework to create the procedure on replay + } + + public IncrementalTableBackupProcedure(final MasterProcedureEnv env, + final String backupId, 
+ List tableList, String targetRootDir, final int workers, + final long bandwidth) throws IOException { + backupManager = new BackupManager(env.getMasterConfiguration()); + this.backupId = backupId; + this.tableList = tableList; + this.targetRootDir = targetRootDir; + backupContext = backupManager.createBackupContext(backupId, + BackupType.INCREMENTAL, tableList, targetRootDir, workers, (int)bandwidth); + } + + @Override + public byte[] getResult() { + return backupId.getBytes(); + } + + private List filterMissingFiles(List incrBackupFileList) throws IOException { + FileSystem fs = FileSystem.get(conf); + List list = new ArrayList(); + for (String file : incrBackupFileList) { + if (fs.exists(new Path(file))) { + list.add(file); + } else { + LOG.warn("Can't find file: " + file); + } + } + return list; + } + + private List getMissingFiles(List incrBackupFileList) throws IOException { + FileSystem fs = FileSystem.get(conf); + List list = new ArrayList(); + for (String file : incrBackupFileList) { + if (!fs.exists(new Path(file))) { + list.add(file); + } + } + return list; + + } + + /** + * Do incremental copy. + * @param backupContext backup context + */ + private void incrementalCopy(BackupInfo backupContext) throws Exception { + + LOG.info("Incremental copy is starting."); + // set overall backup phase: incremental_copy + backupContext.setPhase(BackupPhase.INCREMENTAL_COPY); + // get incremental backup file list and prepare parms for DistCp + List incrBackupFileList = backupContext.getIncrBackupFileList(); + // filter missing files out (they have been copied by previous backups) + incrBackupFileList = filterMissingFiles(incrBackupFileList); + String[] strArr = incrBackupFileList.toArray(new String[incrBackupFileList.size() + 1]); + strArr[strArr.length - 1] = backupContext.getHLogTargetDir(); + + BackupCopyService copyService = BackupRestoreServerFactory.getBackupCopyService(conf); + int counter = 0; + int MAX_ITERAIONS = 2; + while (counter++ < MAX_ITERAIONS) { + // We run DistCp maximum 2 times + // If it fails on a second time, we throw Exception + int res = copyService.copy(backupContext, backupManager, conf, + BackupCopyService.Type.INCREMENTAL, strArr); + + if (res != 0) { + LOG.error("Copy incremental log files failed with return code: " + res + "."); + throw new IOException("Failed of Hadoop Distributed Copy from "+ + StringUtils.join(incrBackupFileList, ",") +" to " + + backupContext.getHLogTargetDir()); + } + List missingFiles = getMissingFiles(incrBackupFileList); + + if(missingFiles.isEmpty()) { + break; + } else { + // Repeat DistCp, some files have been moved from WALs to oldWALs during previous run + // update backupContext and strAttr + if(counter == MAX_ITERAIONS){ + String msg = "DistCp could not finish the following files: " + + StringUtils.join(missingFiles, ","); + LOG.error(msg); + throw new IOException(msg); + } + List converted = convertFilesFromWALtoOldWAL(missingFiles); + incrBackupFileList.removeAll(missingFiles); + incrBackupFileList.addAll(converted); + backupContext.setIncrBackupFileList(incrBackupFileList); + + // Run DistCp only for missing files (which have been moved from WALs to oldWALs + // during previous run) + strArr = converted.toArray(new String[converted.size() + 1]); + strArr[strArr.length - 1] = backupContext.getHLogTargetDir(); + } + } + + + LOG.info("Incremental copy from " + StringUtils.join(incrBackupFileList, ",") + " to " + + backupContext.getHLogTargetDir() + " finished."); + } + + + private List convertFilesFromWALtoOldWAL(List 
missingFiles) throws IOException { + List list = new ArrayList(); + for(String path: missingFiles){ + if(path.indexOf(Path.SEPARATOR + HConstants.HREGION_LOGDIR_NAME) < 0) { + LOG.error("Copy incremental log files failed, file is missing : " + path); + throw new IOException("Failed of Hadoop Distributed Copy to " + + backupContext.getHLogTargetDir()+", file is missing "+ path); + } + list.add(path.replace(Path.SEPARATOR + HConstants.HREGION_LOGDIR_NAME, + Path.SEPARATOR + HConstants.HREGION_OLDLOGDIR_NAME)); + } + return list; + } + + @Override + protected Flow executeFromState(final MasterProcedureEnv env, + final IncrementalTableBackupState state) + throws InterruptedException { + if (conf == null) { + conf = env.getMasterConfiguration(); + } + if (backupManager == null) { + try { + backupManager = new BackupManager(env.getMasterConfiguration()); + } catch (IOException ioe) { + setFailure("incremental backup", ioe); + } + } + if (LOG.isTraceEnabled()) { + LOG.trace(this + " execute state=" + state); + } + try { + switch (state) { + case PREPARE_INCREMENTAL: + FullTableBackupProcedure.beginBackup(backupManager, backupContext); + LOG.debug("For incremental backup, current table set is " + + backupManager.getIncrementalBackupTableSet()); + try { + IncrementalBackupManager incrBackupManager =new IncrementalBackupManager(backupManager); + + newTimestamps = incrBackupManager.getIncrBackupLogFileList(backupContext); + } catch (Exception e) { + setFailure("Failure in incremental-backup: preparation phase " + backupId, e); + // fail the overall backup and return + FullTableBackupProcedure.failBackup(env, backupContext, backupManager, e, + "Unexpected Exception : ", BackupType.INCREMENTAL, conf); + } + + setNextState(IncrementalTableBackupState.INCREMENTAL_COPY); + break; + case INCREMENTAL_COPY: + try { + // copy out the table and region info files for each table + BackupServerUtil.copyTableRegionInfo(backupContext, conf); + incrementalCopy(backupContext); + // Save list of WAL files copied + backupManager.recordWALFiles(backupContext.getIncrBackupFileList()); + } catch (Exception e) { + String msg = "Unexpected exception in incremental-backup: incremental copy " + backupId; + setFailure(msg, e); + // fail the overall backup and return + FullTableBackupProcedure.failBackup(env, backupContext, backupManager, e, + msg, BackupType.INCREMENTAL, conf); + } + setNextState(IncrementalTableBackupState.INCR_BACKUP_COMPLETE); + break; + case INCR_BACKUP_COMPLETE: + // set overall backup status: complete. Here we make sure to complete the backup. + // After this checkpoint, even if entering cancel process, will let the backup finished + backupContext.setState(BackupState.COMPLETE); + // Set the previousTimestampMap which is before this current log roll to the manifest. + HashMap> previousTimestampMap = + backupManager.readLogTimestampMap(); + backupContext.setIncrTimestampMap(previousTimestampMap); + + // The table list in backupContext is good for both full backup and incremental backup. + // For incremental backup, it contains the incremental backup table set. 
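convertFilesFromWALtoOldWAL above handles WALs that were archived between the file listing and the DistCp run by rewriting their paths from the WALs directory to oldWALs before the second (and last) attempt. A small sketch with a hypothetical path:

import org.apache.hadoop.hbase.HConstants;

public class WalToOldWalSketch {
  public static void main(String[] args) {
    // Hypothetical WAL path that vanished between listing and copying.
    String missing = "hdfs://nn:8020/hbase/WALs/rs1.example.com,16020,1462214000000/wal.1462214600000";
    String archived = missing.replace("/" + HConstants.HREGION_LOGDIR_NAME,     // "/WALs"
        "/" + HConstants.HREGION_OLDLOGDIR_NAME);                               // "/oldWALs"
    System.out.println(archived);  // .../hbase/oldWALs/rs1.example.com,16020,1462214000000/wal.1462214600000
  }
}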
+ backupManager.writeRegionServerLogTimestamp(backupContext.getTables(), newTimestamps); + + HashMap> newTableSetTimestampMap = + backupManager.readLogTimestampMap(); + + Long newStartCode = BackupClientUtil + .getMinValue(BackupServerUtil.getRSLogTimestampMins(newTableSetTimestampMap)); + backupManager.writeBackupStartCode(newStartCode); + // backup complete + FullTableBackupProcedure.completeBackup(env, backupContext, backupManager, + BackupType.INCREMENTAL, conf); + return Flow.NO_MORE_STATE; + + default: + throw new UnsupportedOperationException("unhandled state=" + state); + } + } catch (IOException e) { + setFailure("snapshot-table", e); + } + return Flow.HAS_MORE_STATE; + } + + @Override + protected void rollbackState(final MasterProcedureEnv env, + final IncrementalTableBackupState state) throws IOException { + // clean up the uncompleted data at target directory if the ongoing backup has already entered + // the copy phase + // For incremental backup, DistCp logs will be cleaned with the targetDir. + FullTableBackupProcedure.cleanupTargetDir(backupContext, conf); + } + + @Override + protected IncrementalTableBackupState getState(final int stateId) { + return IncrementalTableBackupState.valueOf(stateId); + } + + @Override + protected int getStateId(final IncrementalTableBackupState state) { + return state.getNumber(); + } + + @Override + protected IncrementalTableBackupState getInitialState() { + return IncrementalTableBackupState.PREPARE_INCREMENTAL; + } + + @Override + protected void setNextState(final IncrementalTableBackupState state) { + if (aborted.get()) { + setAbortFailure("snapshot-table", "abort requested"); + } else { + super.setNextState(state); + } + } + + @Override + public boolean abort(final MasterProcedureEnv env) { + aborted.set(true); + return true; + } + + @Override + public void toStringClassDetails(StringBuilder sb) { + sb.append(getClass().getSimpleName()); + sb.append(" (targetRootDir="); + sb.append(targetRootDir); + sb.append("; backupId=").append(backupId); + sb.append("; tables="); + int len = tableList.size(); + for (int i = 0; i < len-1; i++) { + sb.append(tableList.get(i)).append(","); + } + sb.append(tableList.get(len-1)); + sb.append(")"); + } + + BackupProtos.BackupProcContext toBackupContext() { + BackupProtos.BackupProcContext.Builder ctxBuilder = BackupProtos.BackupProcContext.newBuilder(); + ctxBuilder.setCtx(backupContext.toProtosBackupInfo()); + if (newTimestamps != null && !newTimestamps.isEmpty()) { + BackupProtos.ServerTimestamp.Builder tsBuilder = ServerTimestamp.newBuilder(); + for (Entry entry : newTimestamps.entrySet()) { + tsBuilder.clear().setServer(entry.getKey()).setTimestamp(entry.getValue()); + ctxBuilder.addServerTimestamp(tsBuilder.build()); + } + } + return ctxBuilder.build(); + } + + @Override + public void serializeStateData(final OutputStream stream) throws IOException { + super.serializeStateData(stream); + + BackupProtos.BackupProcContext backupProcCtx = toBackupContext(); + backupProcCtx.writeDelimitedTo(stream); + } + + @Override + public void deserializeStateData(final InputStream stream) throws IOException { + super.deserializeStateData(stream); + + BackupProtos.BackupProcContext proto =BackupProtos.BackupProcContext.parseDelimitedFrom(stream); + backupContext = BackupInfo.fromProto(proto.getCtx()); + backupId = backupContext.getBackupId(); + targetRootDir = backupContext.getTargetRootDir(); + tableList = backupContext.getTableNames(); + List svrTimestamps = proto.getServerTimestampList(); + if (svrTimestamps != null 
&& !svrTimestamps.isEmpty()) { + newTimestamps = new HashMap<>(); + for (ServerTimestamp ts : svrTimestamps) { + newTimestamps.put(ts.getServer(), ts.getTimestamp()); + } + } + } + + @Override + public TableName getTableName() { + return TableName.BACKUP_TABLE_NAME; + } + + @Override + public TableOperationType getTableOperationType() { + return TableOperationType.BACKUP; + } + + @Override + protected boolean acquireLock(final MasterProcedureEnv env) { + if (env.waitInitialized(this)) { + return false; + } + return env.getProcedureQueue().tryAcquireTableExclusiveLock(this, TableName.BACKUP_TABLE_NAME); + } + + @Override + protected void releaseLock(final MasterProcedureEnv env) { + env.getProcedureQueue().releaseTableExclusiveLock(this, TableName.BACKUP_TABLE_NAME); + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/master/LogRollMasterProcedureManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/master/LogRollMasterProcedureManager.java new file mode 100644 index 0000000..a5f32bb --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/master/LogRollMasterProcedureManager.java @@ -0,0 +1,137 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
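Procedure state above is persisted as a length-delimited protobuf message. The round-trip looks roughly like the sketch below against an in-memory stream; BackupProtos.BackupProcContext comes from this patch, everything else is illustrative.

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;

import org.apache.hadoop.hbase.protobuf.generated.BackupProtos;

public class ProcStateRoundTripSketch {
  // Round-trips a procedure's backup context the same way serializeStateData /
  // deserializeStateData do, but against an in-memory stream.
  static BackupProtos.BackupProcContext roundTrip(BackupProtos.BackupProcContext ctx)
      throws IOException {
    ByteArrayOutputStream out = new ByteArrayOutputStream();
    ctx.writeDelimitedTo(out);   // length-prefixed, so several messages can share one stream
    ByteArrayInputStream in = new ByteArrayInputStream(out.toByteArray());
    return BackupProtos.BackupProcContext.parseDelimitedFrom(in);
  }
}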
+ */ + +package org.apache.hadoop.hbase.backup.master; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.ThreadPoolExecutor; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.CoordinatedStateManagerFactory; +import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.coordination.BaseCoordinatedStateManager; +import org.apache.hadoop.hbase.errorhandling.ForeignException; +import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher; +import org.apache.hadoop.hbase.master.MasterServices; +import org.apache.hadoop.hbase.master.MetricsMaster; +import org.apache.hadoop.hbase.procedure.MasterProcedureManager; +import org.apache.hadoop.hbase.procedure.Procedure; +import org.apache.hadoop.hbase.procedure.ProcedureCoordinator; +import org.apache.hadoop.hbase.procedure.ProcedureCoordinatorRpcs; +import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair; +import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription; +import org.apache.zookeeper.KeeperException; + +public class LogRollMasterProcedureManager extends MasterProcedureManager { + + public static final String ROLLLOG_PROCEDURE_SIGNATURE = "rolllog-proc"; + public static final String ROLLLOG_PROCEDURE_NAME = "rolllog"; + private static final Log LOG = LogFactory.getLog(LogRollMasterProcedureManager.class); + + private MasterServices master; + private ProcedureCoordinator coordinator; + private boolean done; + + @Override + public void stop(String why) { + LOG.info("stop: " + why); + } + + @Override + public boolean isStopped() { + return false; + } + + @Override + public void initialize(MasterServices master, MetricsMaster metricsMaster) + throws KeeperException, IOException, UnsupportedOperationException { + this.master = master; + this.done = false; + + // setup the default procedure coordinator + String name = master.getServerName().toString(); + ThreadPoolExecutor tpool = ProcedureCoordinator.defaultPool(name, 1); + BaseCoordinatedStateManager coordManager = + (BaseCoordinatedStateManager) CoordinatedStateManagerFactory + .getCoordinatedStateManager(master.getConfiguration()); + coordManager.initialize(master); + + ProcedureCoordinatorRpcs comms = + coordManager.getProcedureCoordinatorRpcs(getProcedureSignature(), name); + + this.coordinator = new ProcedureCoordinator(comms, tpool); + } + + @Override + public String getProcedureSignature() { + return ROLLLOG_PROCEDURE_SIGNATURE; + } + + @Override + public void execProcedure(ProcedureDescription desc) throws IOException { + this.done = false; + // start the process on the RS + ForeignExceptionDispatcher monitor = new ForeignExceptionDispatcher(desc.getInstance()); + List serverNames = master.getServerManager().getOnlineServersList(); + List servers = new ArrayList(); + for (ServerName sn : serverNames) { + servers.add(sn.toString()); + } + + List conf = desc.getConfigurationList(); + byte[] data = new byte[0]; + if(conf.size() > 0){ + // Get backup root path + data = conf.get(0).getValue().getBytes(); + } + Procedure proc = coordinator.startProcedure(monitor, desc.getInstance(), data, servers); + if (proc == null) { + String msg = "Failed to submit distributed procedure for '" + desc.getInstance() + "'"; + LOG.error(msg); + throw new IOException(msg); + } + + try { + // wait for the procedure to complete. A timer thread is kicked off that should cancel this + // if it takes too long. 
+ proc.waitForCompleted(); + LOG.info("Done waiting - exec procedure for " + desc.getInstance()); + LOG.info("Distributed roll log procedure is successful!"); + this.done = true; + } catch (InterruptedException e) { + ForeignException ee = + new ForeignException("Interrupted while waiting for roll log procdure to finish", e); + monitor.receive(ee); + Thread.currentThread().interrupt(); + } catch (ForeignException e) { + ForeignException ee = + new ForeignException("Exception while waiting for roll log procdure to finish", e); + monitor.receive(ee); + } + monitor.rethrowException(); + } + + @Override + public boolean isProcedureDone(ProcedureDescription desc) throws IOException { + return done; + } + +} \ No newline at end of file diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/regionserver/LogRollBackupSubprocedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/regionserver/LogRollBackupSubprocedure.java new file mode 100644 index 0000000..58cd4b2 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/regionserver/LogRollBackupSubprocedure.java @@ -0,0 +1,147 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.backup.regionserver; + +import java.util.HashMap; +import java.util.concurrent.Callable; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.backup.impl.BackupSystemTable; +import org.apache.hadoop.hbase.backup.master.LogRollMasterProcedureManager; +import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.errorhandling.ForeignException; +import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher; +import org.apache.hadoop.hbase.procedure.ProcedureMember; +import org.apache.hadoop.hbase.procedure.Subprocedure; +import org.apache.hadoop.hbase.regionserver.RegionServerServices; +import org.apache.hadoop.hbase.regionserver.wal.FSHLog; + +/** + * This backup subprocedure implementation forces a log roll on the RS. 
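+ * After the roll, the pre-roll WAL file number of this region server is written to the
+ * hbase:backup system table (unless a newer result is already recorded there), so that later
+ * incremental backups can tell which log files this backup already covers.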
+ */ +public class LogRollBackupSubprocedure extends Subprocedure { + private static final Log LOG = LogFactory.getLog(LogRollBackupSubprocedure.class); + + private final RegionServerServices rss; + private final LogRollBackupSubprocedurePool taskManager; + private FSHLog hlog; + private String backupRoot; + + public LogRollBackupSubprocedure(RegionServerServices rss, ProcedureMember member, + ForeignExceptionDispatcher errorListener, long wakeFrequency, long timeout, + LogRollBackupSubprocedurePool taskManager, byte[] data) { + + super(member, LogRollMasterProcedureManager.ROLLLOG_PROCEDURE_NAME, errorListener, + wakeFrequency, timeout); + LOG.info("Constructing a LogRollBackupSubprocedure."); + this.rss = rss; + this.taskManager = taskManager; + if(data != null) { + backupRoot = new String(data); + } + } + + /** + * Callable task. TODO. We don't need a thread pool to execute roll log. This can be simplified + * with no use of subprocedurepool. + */ + class RSRollLogTask implements Callable { + RSRollLogTask() { + } + + @Override + public Void call() throws Exception { + if (LOG.isDebugEnabled()) { + LOG.debug("++ DRPC started: " + rss.getServerName()); + } + hlog = (FSHLog) rss.getWAL(null); + long filenum = hlog.getFilenum(); + + LOG.info("Trying to roll log in backup subprocedure, current log number: " + filenum); + hlog.rollWriter(true); + LOG.info("After roll log in backup subprocedure, current log number: " + hlog.getFilenum()); + + Connection connection = rss.getConnection(); + try(final BackupSystemTable table = new BackupSystemTable(connection)) { + // sanity check, good for testing + HashMap serverTimestampMap = table.readRegionServerLastLogRollResult(backupRoot); + String host = rss.getServerName().getHostname(); + int port = rss.getServerName().getPort(); + String server = host + ":" + port; + Long sts = serverTimestampMap.get(host); + if (sts != null && sts > filenum) { + LOG.warn("Won't update server's last roll log result: current=" + + sts + " new=" + filenum); + return null; + } + // write the log number to hbase:backup. + table.writeRegionServerLastLogRollResult(server, filenum, backupRoot); + return null; + } catch (Exception e) { + LOG.error(e); + throw e; // TODO: is this correct? + } + } + } + + private void rolllog() throws ForeignException { + monitor.rethrowException(); + + taskManager.submitTask(new RSRollLogTask()); + monitor.rethrowException(); + + // wait for everything to complete. + taskManager.waitForOutstandingTasks(); + monitor.rethrowException(); + + } + + @Override + public void acquireBarrier() throws ForeignException { + // do nothing, executing in inside barrier step. + } + + /** + * do a log roll. + * @return some bytes + */ + @Override + public byte[] insideBarrier() throws ForeignException { + rolllog(); + // FIXME + return null; + } + + /** + * Cancel threads if they haven't finished. + */ + @Override + public void cleanup(Exception e) { + taskManager.abort("Aborting log roll subprocedure tasks for backup due to error", e); + } + + /** + * Hooray! 
+ */ + public void releaseBarrier() { + // NO OP + } + +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/regionserver/LogRollBackupSubprocedurePool.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/regionserver/LogRollBackupSubprocedurePool.java new file mode 100644 index 0000000..1ca638c --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/regionserver/LogRollBackupSubprocedurePool.java @@ -0,0 +1,137 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.backup.regionserver; + +import java.io.Closeable; +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.Callable; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.ExecutorCompletionService; +import java.util.concurrent.Future; +import java.util.concurrent.LinkedBlockingQueue; +import java.util.concurrent.ThreadPoolExecutor; +import java.util.concurrent.TimeUnit; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.Abortable; +import org.apache.hadoop.hbase.DaemonThreadFactory; +import org.apache.hadoop.hbase.errorhandling.ForeignException; + +/** + * Handle running each of the individual tasks for completing a backup procedure + * on a regionserver. + */ +public class LogRollBackupSubprocedurePool implements Closeable, Abortable { + private static final Log LOG = LogFactory.getLog(LogRollBackupSubprocedurePool.class); + + /** Maximum number of concurrent snapshot region tasks that can run concurrently */ + private static final String CONCURENT_BACKUP_TASKS_KEY = "hbase.backup.region.concurrentTasks"; + private static final int DEFAULT_CONCURRENT_BACKUP_TASKS = 3; + + private final ExecutorCompletionService taskPool; + private final ThreadPoolExecutor executor; + private volatile boolean aborted; + private final List> futures = new ArrayList>(); + private final String name; + + public LogRollBackupSubprocedurePool(String name, Configuration conf) { + // configure the executor service + long keepAlive = + conf.getLong(LogRollRegionServerProcedureManager.BACKUP_TIMEOUT_MILLIS_KEY, + LogRollRegionServerProcedureManager.BACKUP_TIMEOUT_MILLIS_DEFAULT); + int threads = conf.getInt(CONCURENT_BACKUP_TASKS_KEY, DEFAULT_CONCURRENT_BACKUP_TASKS); + this.name = name; + executor = + new ThreadPoolExecutor(1, threads, keepAlive, TimeUnit.SECONDS, + new LinkedBlockingQueue(), new DaemonThreadFactory("rs(" + name + + ")-backup-pool")); + taskPool = new ExecutorCompletionService(executor); + } + + /** + * Submit a task to the pool. 
+ */ + public void submitTask(final Callable task) { + Future f = this.taskPool.submit(task); + futures.add(f); + } + + /** + * Wait for all of the currently outstanding tasks submitted via {@link #submitTask(Callable)} + * @return true on success, false otherwise + * @throws ForeignException exception + */ + public boolean waitForOutstandingTasks() throws ForeignException { + LOG.debug("Waiting for backup procedure to finish."); + + try { + for (Future f : futures) { + f.get(); + } + return true; + } catch (InterruptedException e) { + if (aborted) { + throw new ForeignException("Interrupted and found to be aborted while waiting for tasks!", + e); + } + Thread.currentThread().interrupt(); + } catch (ExecutionException e) { + if (e.getCause() instanceof ForeignException) { + throw (ForeignException) e.getCause(); + } + throw new ForeignException(name, e.getCause()); + } finally { + // close off remaining tasks + for (Future f : futures) { + if (!f.isDone()) { + f.cancel(true); + } + } + } + return false; + } + + /** + * Attempt to cleanly shutdown any running tasks - allows currently running tasks to cleanly + * finish + */ + @Override + public void close() { + executor.shutdown(); + } + + @Override + public void abort(String why, Throwable e) { + if (this.aborted) { + return; + } + + this.aborted = true; + LOG.warn("Aborting because: " + why, e); + this.executor.shutdownNow(); + } + + @Override + public boolean isAborted() { + return this.aborted; + } +} \ No newline at end of file diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/regionserver/LogRollRegionServerProcedureManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/regionserver/LogRollRegionServerProcedureManager.java new file mode 100644 index 0000000..3b77183 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/regionserver/LogRollRegionServerProcedureManager.java @@ -0,0 +1,168 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hbase.backup.regionserver; + + +import java.io.IOException; +import java.util.concurrent.ThreadPoolExecutor; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.CoordinatedStateManagerFactory; +import org.apache.hadoop.hbase.backup.master.LogRollMasterProcedureManager; +import org.apache.hadoop.hbase.coordination.BaseCoordinatedStateManager; +import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher; +import org.apache.hadoop.hbase.procedure.ProcedureMember; +import org.apache.hadoop.hbase.procedure.ProcedureMemberRpcs; +import org.apache.hadoop.hbase.procedure.RegionServerProcedureManager; +import org.apache.hadoop.hbase.procedure.Subprocedure; +import org.apache.hadoop.hbase.procedure.SubprocedureFactory; +import org.apache.hadoop.hbase.regionserver.HRegionServer; +import org.apache.hadoop.hbase.regionserver.RegionServerServices; + +/** + * This manager class handles the work dealing with backup for a {@link HRegionServer}. + *

+ * This provides the mechanism necessary to kick off a backup-specific {@link Subprocedure} that + * this region server is responsible for. If any failures occur with the subprocedure, the manager's + * procedure member notifies the procedure coordinator to abort all others. + *

+ * On startup, requires {@link #start()} to be called. + *
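+ * ({@link #initialize(RegionServerServices)} is expected to have been called first, since it
+ * wires up the procedure member RPCs that {@link #start()} relies on.)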

+ * On shutdown, requires org.apache.hadoop.hbase.procedure.ProcedureMember.close() to be + * called + */ +public class LogRollRegionServerProcedureManager extends RegionServerProcedureManager { + + private static final Log LOG = LogFactory.getLog(LogRollRegionServerProcedureManager.class); + + /** Conf key for number of request threads to start backup on regionservers */ + public static final String BACKUP_REQUEST_THREADS_KEY = "hbase.backup.region.pool.threads"; + /** # of threads for backup work on the rs. */ + public static final int BACKUP_REQUEST_THREADS_DEFAULT = 10; + + public static final String BACKUP_TIMEOUT_MILLIS_KEY = "hbase.backup.timeout"; + public static final long BACKUP_TIMEOUT_MILLIS_DEFAULT = 60000; + + /** Conf key for millis between checks to see if backup work completed or if there are errors */ + public static final String BACKUP_REQUEST_WAKE_MILLIS_KEY = "hbase.backup.region.wakefrequency"; + /** Default amount of time to check for errors while regions finish backup work */ + private static final long BACKUP_REQUEST_WAKE_MILLIS_DEFAULT = 500; + + private RegionServerServices rss; + private ProcedureMemberRpcs memberRpcs; + private ProcedureMember member; + + /** + * Create a default backup procedure manager + */ + public LogRollRegionServerProcedureManager() { + } + + /** + * Start accepting backup procedure requests. + */ + @Override + public void start() { + this.memberRpcs.start(rss.getServerName().toString(), member); + LOG.info("Started region server backup manager."); + } + + /** + * Close this and all running backup procedure tasks + * @param force forcefully stop all running tasks + * @throws IOException exception + */ + @Override + public void stop(boolean force) throws IOException { + String mode = force ? "abruptly" : "gracefully"; + LOG.info("Stopping RegionServerBackupManager " + mode + "."); + + try { + this.member.close(); + } finally { + this.memberRpcs.close(); + } + } + + /** + * If in a running state, creates the specified subprocedure for handling a backup procedure. + * @return Subprocedure to submit to the ProcedureMemeber. 
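+ * @param data payload forwarded from the master-side coordinator; in this patch it carries the
+ *          backup root directory path as the bytes of the path string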
+ */ + public Subprocedure buildSubprocedure(byte[] data) { + + // don't run a backup if the parent is stop(ping) + if (rss.isStopping() || rss.isStopped()) { + throw new IllegalStateException("Can't start backup procedure on RS: " + rss.getServerName() + + ", because stopping/stopped!"); + } + + LOG.info("Attempting to run a roll log procedure for backup."); + ForeignExceptionDispatcher errorDispatcher = new ForeignExceptionDispatcher(); + Configuration conf = rss.getConfiguration(); + long timeoutMillis = conf.getLong(BACKUP_TIMEOUT_MILLIS_KEY, BACKUP_TIMEOUT_MILLIS_DEFAULT); + long wakeMillis = + conf.getLong(BACKUP_REQUEST_WAKE_MILLIS_KEY, BACKUP_REQUEST_WAKE_MILLIS_DEFAULT); + + LogRollBackupSubprocedurePool taskManager = + new LogRollBackupSubprocedurePool(rss.getServerName().toString(), conf); + return new LogRollBackupSubprocedure(rss, member, errorDispatcher, wakeMillis, timeoutMillis, + taskManager, data); + + } + + /** + * Build the actual backup procedure runner that will do all the 'hard' work + */ + public class BackupSubprocedureBuilder implements SubprocedureFactory { + + @Override + public Subprocedure buildSubprocedure(String name, byte[] data) { + return LogRollRegionServerProcedureManager.this.buildSubprocedure(data); + } + } + + @Override + public void initialize(RegionServerServices rss) throws IOException { + this.rss = rss; + BaseCoordinatedStateManager coordManager = + (BaseCoordinatedStateManager) CoordinatedStateManagerFactory.getCoordinatedStateManager(rss + .getConfiguration()); + coordManager.initialize(rss); + this.memberRpcs = + coordManager + .getProcedureMemberRpcs(LogRollMasterProcedureManager.ROLLLOG_PROCEDURE_SIGNATURE); + + // read in the backup handler configuration properties + Configuration conf = rss.getConfiguration(); + long keepAlive = conf.getLong(BACKUP_TIMEOUT_MILLIS_KEY, BACKUP_TIMEOUT_MILLIS_DEFAULT); + int opThreads = conf.getInt(BACKUP_REQUEST_THREADS_KEY, BACKUP_REQUEST_THREADS_DEFAULT); + // create the actual cohort member + ThreadPoolExecutor pool = + ProcedureMember.defaultPool(rss.getServerName().toString(), opThreads, keepAlive); + this.member = new ProcedureMember(memberRpcs, pool, new BackupSubprocedureBuilder()); + } + + @Override + public String getProcedureSignature() { + return "backup-proc"; + } + +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/util/BackupServerUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/util/BackupServerUtil.java new file mode 100644 index 0000000..25025e2 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/util/BackupServerUtil.java @@ -0,0 +1,492 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hbase.backup.util; + +import java.io.IOException; +import java.io.InterruptedIOException; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.Iterator; +import java.util.List; +import java.util.Map.Entry; +import java.util.TreeMap; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FSDataOutputStream; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.PathFilter; +import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.backup.BackupInfo; +import org.apache.hadoop.hbase.backup.HBackupFileSystem; +import org.apache.hadoop.hbase.backup.impl.BackupException; +import org.apache.hadoop.hbase.backup.impl.BackupRestoreConstants; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; +import org.apache.hadoop.hbase.client.Admin; +import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.client.ConnectionFactory; +import org.apache.hadoop.hbase.client.HBaseAdmin; +import org.apache.hadoop.hbase.master.snapshot.SnapshotManager; +import org.apache.hadoop.hbase.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription; +import org.apache.hadoop.hbase.regionserver.HRegion; +import org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils; +import org.apache.hadoop.hbase.snapshot.SnapshotCreationException; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; +import org.apache.hadoop.hbase.util.FSTableDescriptors; +import org.apache.hadoop.hbase.util.FSUtils; +import org.apache.hadoop.hbase.wal.AbstractFSWALProvider; + +/** + * A collection for methods used by multiple classes to backup HBase tables. + */ +@InterfaceAudience.Private +@InterfaceStability.Evolving +public final class BackupServerUtil { + protected static final Log LOG = LogFactory.getLog(BackupServerUtil.class); + public static final String LOGNAME_SEPARATOR = "."; + + private BackupServerUtil(){ + throw new AssertionError("Instantiating utility class..."); + } + + public static void waitForSnapshot(SnapshotDescription snapshot, long max, + SnapshotManager snapshotMgr, Configuration conf) throws IOException { + boolean done = false; + long start = EnvironmentEdgeManager.currentTime(); + int numRetries = conf.getInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, + HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER); + long maxPauseTime = max / numRetries; + int tries = 0; + LOG.debug("Waiting a max of " + max + " ms for snapshot '" + + ClientSnapshotDescriptionUtils.toString(snapshot) + "'' to complete. (max " + + maxPauseTime + " ms per retry)"); + while (tries == 0 + || ((EnvironmentEdgeManager.currentTime() - start) < max && !done)) { + try { + // sleep a backoff <= pauseTime amount + long pause = conf.getLong(HConstants.HBASE_CLIENT_PAUSE, + HConstants.DEFAULT_HBASE_CLIENT_PAUSE); + long sleep = HBaseAdmin.getPauseTime(tries++, pause); + sleep = sleep > maxPauseTime ? 
maxPauseTime : sleep; + LOG.debug("(#" + tries + ") Sleeping: " + sleep + + "ms while waiting for snapshot completion."); + Thread.sleep(sleep); + } catch (InterruptedException e) { + throw (InterruptedIOException)new InterruptedIOException("Interrupted").initCause(e); + } + LOG.debug("Getting current status of snapshot ..."); + done = snapshotMgr.isSnapshotDone(snapshot); + } + if (!done) { + throw new SnapshotCreationException("Snapshot '" + snapshot.getName() + + "' wasn't completed in expectedTime:" + max + " ms", + ProtobufUtil.createSnapshotDesc(snapshot)); + } + } + + /** + * Loop through the RS log timestamp map for the tables, for each RS, find the min timestamp + * value for the RS among the tables. + * @param rsLogTimestampMap timestamp map + * @return the min timestamp of each RS + */ + public static HashMap getRSLogTimestampMins( + HashMap> rsLogTimestampMap) { + + if (rsLogTimestampMap == null || rsLogTimestampMap.isEmpty()) { + return null; + } + + HashMap rsLogTimestampMins = new HashMap(); + HashMap> rsLogTimestampMapByRS = + new HashMap>(); + + for (Entry> tableEntry : rsLogTimestampMap.entrySet()) { + TableName table = tableEntry.getKey(); + HashMap rsLogTimestamp = tableEntry.getValue(); + for (Entry rsEntry : rsLogTimestamp.entrySet()) { + String rs = rsEntry.getKey(); + Long ts = rsEntry.getValue(); + if (!rsLogTimestampMapByRS.containsKey(rs)) { + rsLogTimestampMapByRS.put(rs, new HashMap()); + rsLogTimestampMapByRS.get(rs).put(table, ts); + } else { + rsLogTimestampMapByRS.get(rs).put(table, ts); + } + } + } + + for (Entry> entry : rsLogTimestampMapByRS.entrySet()) { + String rs = entry.getKey(); + rsLogTimestampMins.put(rs, BackupClientUtil.getMinValue(entry.getValue())); + } + + return rsLogTimestampMins; + } + + /** + * copy out Table RegionInfo into incremental backup image need to consider move this logic into + * HBackupFileSystem + * @param backupContext backup context + * @param conf configuration + * @throws IOException exception + * @throws InterruptedException exception + */ + public static void copyTableRegionInfo(BackupInfo backupContext, Configuration conf) + throws IOException, InterruptedException { + + Path rootDir = FSUtils.getRootDir(conf); + FileSystem fs = rootDir.getFileSystem(conf); + + // for each table in the table set, copy out the table info and region + // info files in the correct directory structure + try (Connection conn = ConnectionFactory.createConnection(conf); + Admin admin = conn.getAdmin()) { + + for (TableName table : backupContext.getTables()) { + + if(!admin.tableExists(table)) { + LOG.warn("Table "+ table+" does not exists, skipping it."); + continue; + } + LOG.debug("Attempting to copy table info for:" + table); + HTableDescriptor orig = FSTableDescriptors.getTableDescriptorFromFs(fs, rootDir, table); + + // write a copy of descriptor to the target directory + Path target = new Path(backupContext.getBackupStatus(table).getTargetDir()); + FileSystem targetFs = target.getFileSystem(conf); + FSTableDescriptors descriptors = + new FSTableDescriptors(conf, targetFs, FSUtils.getRootDir(conf)); + descriptors.createTableDescriptorForTableDirectory(target, orig, false); + LOG.debug("Finished copying tableinfo."); + List regions = null; + regions = admin.getTableRegions(table); + // For each region, write the region info to disk + LOG.debug("Starting to write region info for table " + table); + for (HRegionInfo regionInfo : regions) { + Path regionDir = + HRegion.getRegionDir(new 
Path(backupContext.getBackupStatus(table).getTargetDir()), + regionInfo); + regionDir = + new Path(backupContext.getBackupStatus(table).getTargetDir(), regionDir.getName()); + writeRegioninfoOnFilesystem(conf, targetFs, regionDir, regionInfo); + } + LOG.debug("Finished writing region info for table " + table); + } + } catch (IOException e) { + throw new BackupException(e); + } + } + + /** + * Write the .regioninfo file on-disk. + */ + public static void writeRegioninfoOnFilesystem(final Configuration conf, final FileSystem fs, + final Path regionInfoDir, HRegionInfo regionInfo) throws IOException { + final byte[] content = regionInfo.toDelimitedByteArray(); + Path regionInfoFile = new Path(regionInfoDir, ".regioninfo"); + // First check to get the permissions + FsPermission perms = FSUtils.getFilePermissions(fs, conf, HConstants.DATA_FILE_UMASK_KEY); + // Write the RegionInfo file content + FSDataOutputStream out = FSUtils.create(conf, fs, regionInfoFile, perms, null); + try { + out.write(content); + } finally { + out.close(); + } + } + + /** + * TODO: return hostname:port + * @param p + * @return host name: port + * @throws IOException + */ + public static String parseHostNameFromLogFile(Path p) { + try { + if (isArchivedLogFile(p)) { + return BackupClientUtil.parseHostFromOldLog(p); + } else { + ServerName sname = AbstractFSWALProvider.getServerNameFromWALDirectoryName(p); + return sname.getHostname() + ":" + sname.getPort(); + } + } catch (Exception e) { + LOG.warn("Skip log file (can't parse): " + p); + return null; + } + } + + private static boolean isArchivedLogFile(Path p) { + String oldLog = Path.SEPARATOR + HConstants.HREGION_OLDLOGDIR_NAME + Path.SEPARATOR; + return p.toString().contains(oldLog); + } + + /** + * Returns WAL file name + * @param walFileName WAL file name + * @return WAL file name + * @throws IOException exception + * @throws IllegalArgumentException exception + */ + public static String getUniqueWALFileNamePart(String walFileName) throws IOException { + return getUniqueWALFileNamePart(new Path(walFileName)); + } + + /** + * Returns WAL file name + * @param p - WAL file path + * @return WAL file name + * @throws IOException exception + */ + public static String getUniqueWALFileNamePart(Path p) throws IOException { + return p.getName(); + } + + /** + * Get the total length of files under the given directory recursively. + * @param fs The hadoop file system + * @param dir The target directory + * @return the total length of files + * @throws IOException exception + */ + public static long getFilesLength(FileSystem fs, Path dir) throws IOException { + long totalLength = 0; + FileStatus[] files = FSUtils.listStatus(fs, dir); + if (files != null) { + for (FileStatus fileStatus : files) { + if (fileStatus.isDirectory()) { + totalLength += getFilesLength(fs, fileStatus.getPath()); + } else { + totalLength += fileStatus.getLen(); + } + } + } + return totalLength; + } + + + + /** + * Sort history list by start time in descending order. 
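+ * (Start timestamps are compared as decimal strings; string order matches numeric order as long
+ * as all timestamps have the same number of digits, which holds for epoch-millisecond values.)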
+ * @param historyList history list + * @return sorted list of BackupCompleteData + */ + public static ArrayList sortHistoryListDesc( + ArrayList historyList) { + ArrayList list = new ArrayList(); + TreeMap map = new TreeMap(); + for (BackupInfo h : historyList) { + map.put(Long.toString(h.getStartTs()), h); + } + Iterator i = map.descendingKeySet().iterator(); + while (i.hasNext()) { + list.add(map.get(i.next())); + } + return list; + } + + /** + * Get list of all WAL files (WALs and archive) + * @param c - configuration + * @return list of WAL files + * @throws IOException exception + */ + public static List getListOfWALFiles(Configuration c) throws IOException { + Path rootDir = FSUtils.getRootDir(c); + Path logDir = new Path(rootDir, HConstants.HREGION_LOGDIR_NAME); + Path oldLogDir = new Path(rootDir, HConstants.HREGION_OLDLOGDIR_NAME); + List logFiles = new ArrayList(); + + FileSystem fs = FileSystem.get(c); + logFiles = BackupClientUtil.getFiles(fs, logDir, logFiles, null); + logFiles = BackupClientUtil.getFiles(fs, oldLogDir, logFiles, null); + return logFiles; + } + + /** + * Get list of all WAL files (WALs and archive) + * @param c - configuration + * @return list of WAL files + * @throws IOException exception + */ + public static List getListOfWALFiles(Configuration c, PathFilter filter) + throws IOException { + Path rootDir = FSUtils.getRootDir(c); + Path logDir = new Path(rootDir, HConstants.HREGION_LOGDIR_NAME); + Path oldLogDir = new Path(rootDir, HConstants.HREGION_OLDLOGDIR_NAME); + List logFiles = new ArrayList(); + + FileSystem fs = FileSystem.get(c); + logFiles = BackupClientUtil.getFiles(fs, logDir, logFiles, filter); + logFiles = BackupClientUtil.getFiles(fs, oldLogDir, logFiles, filter); + return logFiles; + } + + /** + * Get list of all old WAL files (WALs and archive) + * @param c - configuration + * @param hostTimestampMap - host timestamp map + * @return list of WAL files + * @throws IOException exception + */ + public static List getWALFilesOlderThan(final Configuration c, + final HashMap hostTimestampMap) throws IOException { + Path rootDir = FSUtils.getRootDir(c); + Path logDir = new Path(rootDir, HConstants.HREGION_LOGDIR_NAME); + Path oldLogDir = new Path(rootDir, HConstants.HREGION_OLDLOGDIR_NAME); + List logFiles = new ArrayList(); + + PathFilter filter = new PathFilter() { + + @Override + public boolean accept(Path p) { + try { + if (AbstractFSWALProvider.isMetaFile(p)) { + return false; + } + String host = parseHostNameFromLogFile(p); + if(host == null) { + return false; + } + Long oldTimestamp = hostTimestampMap.get(host); + Long currentLogTS = BackupClientUtil.getCreationTime(p); + return currentLogTS <= oldTimestamp; + } catch (Exception e) { + LOG.warn("Can not parse"+ p, e); + return false; + } + } + }; + FileSystem fs = FileSystem.get(c); + logFiles = BackupClientUtil.getFiles(fs, logDir, logFiles, filter); + logFiles = BackupClientUtil.getFiles(fs, oldLogDir, logFiles, filter); + return logFiles; + } + + public static String join(TableName[] names) { + StringBuilder sb = new StringBuilder(); + String sep = BackupRestoreConstants.TABLENAME_DELIMITER_IN_COMMAND; + for (TableName s : names) { + sb.append(sep).append(s.getNameAsString()); + } + return sb.toString(); + } + + public static TableName[] parseTableNames(String tables) { + if (tables == null) { + return null; + } + String[] tableArray = tables.split(BackupRestoreConstants.TABLENAME_DELIMITER_IN_COMMAND); + + TableName[] ret = new TableName[tableArray.length]; + for (int i = 0; i < 
tableArray.length; i++) { + ret[i] = TableName.valueOf(tableArray[i]); + } + return ret; + } + + public static void cleanupBackupData(BackupInfo context, Configuration conf) + throws IOException + { + cleanupHLogDir(context, conf); + cleanupTargetDir(context, conf); + } + + /** + * Clean up directories which are generated when DistCp copying hlogs. + * @throws IOException + */ + private static void cleanupHLogDir(BackupInfo backupContext, Configuration conf) + throws IOException { + + String logDir = backupContext.getHLogTargetDir(); + if (logDir == null) { + LOG.warn("No log directory specified for " + backupContext.getBackupId()); + return; + } + + Path rootPath = new Path(logDir).getParent(); + FileSystem fs = FileSystem.get(rootPath.toUri(), conf); + FileStatus[] files = FSUtils.listStatus(fs, rootPath); + if (files == null) { + return; + } + for (FileStatus file : files) { + LOG.debug("Delete log files: " + file.getPath().getName()); + if(!FSUtils.delete(fs, file.getPath(), true)) { + LOG.warn("Could not delete files in "+ file.getPath()); + }; + } + } + + /** + * Clean up the data at target directory + */ + private static void cleanupTargetDir(BackupInfo backupContext, Configuration conf) { + try { + // clean up the data at target directory + LOG.debug("Trying to cleanup up target dir : " + backupContext.getBackupId()); + String targetDir = backupContext.getTargetRootDir(); + if (targetDir == null) { + LOG.warn("No target directory specified for " + backupContext.getBackupId()); + return; + } + + FileSystem outputFs = + FileSystem.get(new Path(backupContext.getTargetRootDir()).toUri(), conf); + + for (TableName table : backupContext.getTables()) { + Path targetDirPath = + new Path(HBackupFileSystem.getTableBackupDir(backupContext.getTargetRootDir(), + backupContext.getBackupId(), table)); + if (outputFs.delete(targetDirPath, true)) { + LOG.info("Cleaning up backup data at " + targetDirPath.toString() + " done."); + } else { + LOG.info("No data has been found in " + targetDirPath.toString() + "."); + } + + Path tableDir = targetDirPath.getParent(); + FileStatus[] backups = FSUtils.listStatus(outputFs, tableDir); + if (backups == null || backups.length == 0) { + if(outputFs.delete(tableDir, true)){ + LOG.debug(tableDir.toString() + " is empty, remove it."); + } else { + LOG.warn("Could not delete "+ tableDir); + } + } + } + + } catch (IOException e1) { + LOG.error("Cleaning up backup data of " + backupContext.getBackupId() + " at " + + backupContext.getTargetRootDir() + " failed due to " + e1.getMessage() + "."); + } + } + + +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/util/RestoreServerUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/util/RestoreServerUtil.java new file mode 100644 index 0000000..d1c5309 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/util/RestoreServerUtil.java @@ -0,0 +1,618 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.backup.util; + +import java.io.FileNotFoundException; +import java.io.IOException; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.TreeMap; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.FileUtil; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.backup.BackupRestoreServerFactory; +import org.apache.hadoop.hbase.backup.HBackupFileSystem; +import org.apache.hadoop.hbase.backup.IncrementalRestoreService; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; +import org.apache.hadoop.hbase.client.Admin; +import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.client.ConnectionFactory; +import org.apache.hadoop.hbase.client.HBaseAdmin; +import org.apache.hadoop.hbase.io.HFileLink; +import org.apache.hadoop.hbase.io.hfile.CacheConfig; +import org.apache.hadoop.hbase.io.hfile.HFile; +import org.apache.hadoop.hbase.mapreduce.LoadIncrementalHFiles; +import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription; +import org.apache.hadoop.hbase.regionserver.HRegionFileSystem; +import org.apache.hadoop.hbase.regionserver.HStore; +import org.apache.hadoop.hbase.regionserver.StoreFileInfo; +import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils; +import org.apache.hadoop.hbase.snapshot.SnapshotManifest; +import org.apache.hadoop.hbase.util.Bytes; + +/** + * A collection for methods used by multiple classes to restore HBase tables. + */ +@InterfaceAudience.Private +@InterfaceStability.Evolving +public class RestoreServerUtil { + + public static final Log LOG = LogFactory.getLog(RestoreServerUtil.class); + + private final String[] ignoreDirs = { "recovered.edits" }; + + protected Configuration conf = null; + + protected Path backupRootPath; + + protected String backupId; + + protected FileSystem fs; + private final String RESTORE_TMP_PATH = "/tmp"; + private final Path restoreTmpPath; + + // store table name and snapshot dir mapping + private final HashMap snapshotMap = new HashMap<>(); + + public RestoreServerUtil(Configuration conf, final Path backupRootPath, final String backupId) + throws IOException { + this.conf = conf; + this.backupRootPath = backupRootPath; + this.backupId = backupId; + this.fs = backupRootPath.getFileSystem(conf); + this.restoreTmpPath = new Path(conf.get("hbase.fs.tmp.dir") != null? 
+ conf.get("hbase.fs.tmp.dir"): RESTORE_TMP_PATH, + "restore"); + } + + /** + * return value represent path for: + * ".../user/biadmin/backup1/default/t1_dn/backup_1396650096738/archive/data/default/t1_dn" + * @param tabelName table name + * @return path to table archive + * @throws IOException exception + */ + Path getTableArchivePath(TableName tableName) + throws IOException { + Path baseDir = new Path(HBackupFileSystem.getTableBackupPath(tableName, backupRootPath, + backupId), HConstants.HFILE_ARCHIVE_DIRECTORY); + Path dataDir = new Path(baseDir, HConstants.BASE_NAMESPACE_DIR); + Path archivePath = new Path(dataDir, tableName.getNamespaceAsString()); + Path tableArchivePath = + new Path(archivePath, tableName.getQualifierAsString()); + if (!fs.exists(tableArchivePath) || !fs.getFileStatus(tableArchivePath).isDirectory()) { + LOG.debug("Folder tableArchivePath: " + tableArchivePath.toString() + " does not exists"); + tableArchivePath = null; // empty table has no archive + } + return tableArchivePath; + } + + /** + * Gets region list + * @param tableName table name + * @return RegionList region list + * @throws FileNotFoundException exception + * @throws IOException exception + */ + ArrayList getRegionList(TableName tableName) + throws FileNotFoundException, IOException { + Path tableArchivePath = this.getTableArchivePath(tableName); + ArrayList regionDirList = new ArrayList(); + FileStatus[] children = fs.listStatus(tableArchivePath); + for (FileStatus childStatus : children) { + // here child refer to each region(Name) + Path child = childStatus.getPath(); + regionDirList.add(child); + } + return regionDirList; + } + + /** + * During incremental backup operation. Call WalPlayer to replay WAL in backup image Currently + * tableNames and newTablesNames only contain single table, will be expanded to multiple tables in + * the future + * @param logDirs : incremental backup folders, which contains WAL + * @param tableNames : source tableNames(table names were backuped) + * @param newTableNames : target tableNames(table names to be restored to) + * @throws IOException exception + */ + public void incrementalRestoreTable(Path[] logDirs, + TableName[] tableNames, TableName[] newTableNames) throws IOException { + + if (tableNames.length != newTableNames.length) { + throw new IOException("Number of source tables and target tables does not match!"); + } + + // for incremental backup image, expect the table already created either by user or previous + // full backup. Here, check that all new tables exists + try (Connection conn = ConnectionFactory.createConnection(conf); + Admin admin = conn.getAdmin()) { + for (TableName tableName : newTableNames) { + if (!admin.tableExists(tableName)) { + admin.close(); + throw new IOException("HBase table " + tableName + + " does not exist. Create the table first, e.g. 
by restoring a full backup."); + } + } + IncrementalRestoreService restoreService = + BackupRestoreServerFactory.getIncrementalRestoreService(conf); + + restoreService.run(logDirs, tableNames, newTableNames); + } + } + + public void fullRestoreTable(Path tableBackupPath, TableName tableName, TableName newTableName, + boolean converted, boolean truncateIfExists) throws IOException { + restoreTableAndCreate(tableName, newTableName, tableBackupPath, converted, truncateIfExists); + } + + /** + * return value represent path for: + * ".../user/biadmin/backup1/default/t1_dn/backup_1396650096738/.hbase-snapshot" + * @param backupRootPath backup root path + * @param tableName table name + * @param backupId backup Id + * @return path for snapshot + */ + static Path getTableSnapshotPath(Path backupRootPath, TableName tableName, + String backupId) { + return new Path(HBackupFileSystem.getTableBackupPath(tableName, backupRootPath, backupId), + HConstants.SNAPSHOT_DIR_NAME); + } + + /** + * return value represent path for: + * "..../default/t1_dn/backup_1396650096738/.hbase-snapshot/snapshot_1396650097621_default_t1_dn" + * this path contains .snapshotinfo, .tabledesc (0.96 and 0.98) this path contains .snapshotinfo, + * .data.manifest (trunk) + * @param tableName table name + * @return path to table info + * @throws FileNotFoundException exception + * @throws IOException exception + */ + Path getTableInfoPath(TableName tableName) + throws FileNotFoundException, IOException { + Path tableSnapShotPath = getTableSnapshotPath(backupRootPath, tableName, backupId); + Path tableInfoPath = null; + + // can't build the path directly as the timestamp values are different + FileStatus[] snapshots = fs.listStatus(tableSnapShotPath); + for (FileStatus snapshot : snapshots) { + tableInfoPath = snapshot.getPath(); + // SnapshotManifest.DATA_MANIFEST_NAME = "data.manifest"; + if (tableInfoPath.getName().endsWith("data.manifest")) { + break; + } + } + return tableInfoPath; + } + + /** + * @param tableName is the table backed up + * @return {@link HTableDescriptor} saved in backup image of the table + */ + HTableDescriptor getTableDesc(TableName tableName) + throws FileNotFoundException, IOException { + Path tableInfoPath = this.getTableInfoPath(tableName); + SnapshotDescription desc = SnapshotDescriptionUtils.readSnapshotInfo(fs, tableInfoPath); + SnapshotManifest manifest = SnapshotManifest.open(conf, fs, tableInfoPath, desc); + HTableDescriptor tableDescriptor = manifest.getTableDescriptor(); + if (!tableDescriptor.getTableName().equals(tableName)) { + LOG.error("couldn't find Table Desc for table: " + tableName + " under tableInfoPath: " + + tableInfoPath.toString()); + LOG.error("tableDescriptor.getNameAsString() = " + tableDescriptor.getNameAsString()); + throw new FileNotFoundException("couldn't find Table Desc for table: " + tableName + + " under tableInfoPath: " + tableInfoPath.toString()); + } + return tableDescriptor; + } + + /** + * Duplicate the backup image if it's on local cluster + * @see HStore#bulkLoadHFile(String, long) + * @see HRegionFileSystem#bulkLoadStoreFile(String familyName, Path srcPath, long seqNum) + * @param tableArchivePath archive path + * @return the new tableArchivePath + * @throws IOException exception + */ + Path checkLocalAndBackup(Path tableArchivePath) throws IOException { + // Move the file if it's on local cluster + boolean isCopyNeeded = false; + + FileSystem srcFs = tableArchivePath.getFileSystem(conf); + FileSystem desFs = FileSystem.get(conf); + if 
(tableArchivePath.getName().startsWith("/")) { + isCopyNeeded = true; + } else { + // This should match what is done in @see HRegionFileSystem#bulkLoadStoreFile(String, Path, + // long) + if (srcFs.getUri().equals(desFs.getUri())) { + LOG.debug("cluster hold the backup image: " + srcFs.getUri() + "; local cluster node: " + + desFs.getUri()); + isCopyNeeded = true; + } + } + if (isCopyNeeded) { + LOG.debug("File " + tableArchivePath + " on local cluster, back it up before restore"); + if (desFs.exists(restoreTmpPath)) { + try { + desFs.delete(restoreTmpPath, true); + } catch (IOException e) { + LOG.debug("Failed to delete path: " + restoreTmpPath + + ", need to check whether restore target DFS cluster is healthy"); + } + } + FileUtil.copy(srcFs, tableArchivePath, desFs, restoreTmpPath, false, conf); + LOG.debug("Copied to temporary path on local cluster: " + restoreTmpPath); + tableArchivePath = restoreTmpPath; + } + return tableArchivePath; + } + + private void restoreTableAndCreate(TableName tableName, TableName newTableName, + Path tableBackupPath, boolean converted, boolean truncateIfExists) throws IOException { + if (newTableName == null) { + newTableName = tableName; + } + + FileSystem fileSys = tableBackupPath.getFileSystem(this.conf); + + // get table descriptor first + HTableDescriptor tableDescriptor = null; + + Path tableSnapshotPath = getTableSnapshotPath(backupRootPath, tableName, backupId); + + if (fileSys.exists(tableSnapshotPath)) { + // snapshot path exist means the backup path is in HDFS + // check whether snapshot dir already recorded for target table + if (snapshotMap.get(tableName) != null) { + SnapshotDescription desc = + SnapshotDescriptionUtils.readSnapshotInfo(fileSys, tableSnapshotPath); + SnapshotManifest manifest = SnapshotManifest.open(conf, fileSys, tableSnapshotPath, desc); + tableDescriptor = manifest.getTableDescriptor(); + } else { + tableDescriptor = getTableDesc(tableName); + snapshotMap.put(tableName, getTableInfoPath(tableName)); + } + if (tableDescriptor == null) { + LOG.debug("Found no table descriptor in the snapshot dir, previous schema would be lost"); + } + } else if (converted) { + // first check if this is a converted backup image + LOG.error("convert will be supported in a future jira"); + } + + Path tableArchivePath = getTableArchivePath(tableName); + if (tableArchivePath == null) { + if (tableDescriptor != null) { + // find table descriptor but no archive dir means the table is empty, create table and exit + if(LOG.isDebugEnabled()) { + LOG.debug("find table descriptor but no archive dir for table " + tableName + + ", will only create table"); + } + tableDescriptor.setName(newTableName); + checkAndCreateTable(tableBackupPath, tableName, newTableName, null, + tableDescriptor, truncateIfExists); + return; + } else { + throw new IllegalStateException("Cannot restore hbase table because directory '" + + " tableArchivePath is null."); + } + } + + if (tableDescriptor == null) { + tableDescriptor = new HTableDescriptor(newTableName); + } else { + tableDescriptor.setName(newTableName); + } + + if (!converted) { + // record all region dirs: + // load all files in dir + try { + ArrayList regionPathList = getRegionList(tableName); + + // should only try to create the table with all region informations, so we could pre-split + // the regions in fine grain + checkAndCreateTable(tableBackupPath, tableName, newTableName, regionPathList, + tableDescriptor, truncateIfExists); + if (tableArchivePath != null) { + // start real restore through bulkload + // if 
the backup target is on local cluster, special action needed + Path tempTableArchivePath = checkLocalAndBackup(tableArchivePath); + if (tempTableArchivePath.equals(tableArchivePath)) { + if(LOG.isDebugEnabled()) { + LOG.debug("TableArchivePath for bulkload using existPath: " + tableArchivePath); + } + } else { + regionPathList = getRegionList(tempTableArchivePath); // point to the tempDir + if(LOG.isDebugEnabled()) { + LOG.debug("TableArchivePath for bulkload using tempPath: " + tempTableArchivePath); + } + } + + LoadIncrementalHFiles loader = createLoader(tempTableArchivePath, false); + for (Path regionPath : regionPathList) { + String regionName = regionPath.toString(); + if(LOG.isDebugEnabled()) { + LOG.debug("Restoring HFiles from directory " + regionName); + } + String[] args = { regionName, newTableName.getNameAsString()}; + loader.run(args); + } + } + // we do not recovered edits + } catch (Exception e) { + throw new IllegalStateException("Cannot restore hbase table", e); + } + } else { + LOG.debug("convert will be supported in a future jira"); + } + } + + /** + * Gets region list + * @param tableArchivePath table archive path + * @return RegionList region list + * @throws FileNotFoundException exception + * @throws IOException exception + */ + ArrayList getRegionList(Path tableArchivePath) throws FileNotFoundException, + IOException { + ArrayList regionDirList = new ArrayList(); + FileStatus[] children = fs.listStatus(tableArchivePath); + for (FileStatus childStatus : children) { + // here child refer to each region(Name) + Path child = childStatus.getPath(); + regionDirList.add(child); + } + return regionDirList; + } + + /** + * Counts the number of files in all subdirectories of an HBase table, i.e. HFiles. + * @param regionPath Path to an HBase table directory + * @return the number of files all directories + * @throws IOException exception + */ + int getNumberOfFilesInDir(Path regionPath) throws IOException { + int result = 0; + + if (!fs.exists(regionPath) || !fs.getFileStatus(regionPath).isDirectory()) { + throw new IllegalStateException("Cannot restore hbase table because directory '" + + regionPath.toString() + "' is not a directory."); + } + + FileStatus[] tableDirContent = fs.listStatus(regionPath); + for (FileStatus subDirStatus : tableDirContent) { + FileStatus[] colFamilies = fs.listStatus(subDirStatus.getPath()); + for (FileStatus colFamilyStatus : colFamilies) { + FileStatus[] colFamilyContent = fs.listStatus(colFamilyStatus.getPath()); + result += colFamilyContent.length; + } + } + return result; + } + + /** + * Counts the number of files in all subdirectories of an HBase tables, i.e. HFiles. And finds the + * maximum number of files in one HBase table. + * @param tableArchivePath archive path + * @return the maximum number of files found in 1 HBase table + * @throws IOException exception + */ + int getMaxNumberOfFilesInSubDir(Path tableArchivePath) throws IOException { + int result = 1; + ArrayList regionPathList = getRegionList(tableArchivePath); + // tableArchivePath = this.getTableArchivePath(tableName); + + if (regionPathList == null || regionPathList.size() == 0) { + throw new IllegalStateException("Cannot restore hbase table because directory '" + + tableArchivePath + "' is not a directory."); + } + + for (Path regionPath : regionPathList) { + result = Math.max(result, getNumberOfFilesInDir(regionPath)); + } + return result; + } + + /** + * Create a {@link LoadIncrementalHFiles} instance to be used to restore the HFiles of a full + * backup. 
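+ * The hbase.rpc.timeout value is scaled with the number of HFiles to load (roughly one minute
+ * per file) so that large bulk loads do not time out, and the per-region-per-family file limit
+ * is raised because snapshot-based backup images can easily exceed the default of 32 files.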
+ * @return the {@link LoadIncrementalHFiles} instance + * @throws IOException exception + */ + private LoadIncrementalHFiles createLoader(Path tableArchivePath, boolean multipleTables) + throws IOException { + // set configuration for restore: + // LoadIncrementalHFile needs more time + // hbase.rpc.timeout 600000 + // calculates + Integer milliSecInMin = 60000; + Integer previousMillis = this.conf.getInt("hbase.rpc.timeout", 0); + Integer numberOfFilesInDir = + multipleTables ? getMaxNumberOfFilesInSubDir(tableArchivePath) : + getNumberOfFilesInDir(tableArchivePath); + Integer calculatedMillis = numberOfFilesInDir * milliSecInMin; // 1 minute per file + Integer resultMillis = Math.max(calculatedMillis, previousMillis); + if (resultMillis > previousMillis) { + LOG.info("Setting configuration for restore with LoadIncrementalHFile: " + + "hbase.rpc.timeout to " + calculatedMillis / milliSecInMin + + " minutes, to handle the number of files in backup " + tableArchivePath); + this.conf.setInt("hbase.rpc.timeout", resultMillis); + } + + // By default, it is 32 and loader will fail if # of files in any region exceed this + // limit. Bad for snapshot restore. + this.conf.setInt(LoadIncrementalHFiles.MAX_FILES_PER_REGION_PER_FAMILY, Integer.MAX_VALUE); + LoadIncrementalHFiles loader = null; + try { + loader = new LoadIncrementalHFiles(this.conf); + } catch (Exception e1) { + throw new IOException(e1); + } + return loader; + } + + /** + * Calculate region boundaries and add all the column families to the table descriptor + * @param regionDirList region dir list + * @return a set of keys to store the boundaries + */ + byte[][] generateBoundaryKeys(ArrayList regionDirList) + throws FileNotFoundException, IOException { + TreeMap map = new TreeMap(Bytes.BYTES_COMPARATOR); + // Build a set of keys to store the boundaries + byte[][] keys = null; + // calculate region boundaries and add all the column families to the table descriptor + for (Path regionDir : regionDirList) { + LOG.debug("Parsing region dir: " + regionDir); + Path hfofDir = regionDir; + + if (!fs.exists(hfofDir)) { + LOG.warn("HFileOutputFormat dir " + hfofDir + " not found"); + } + + FileStatus[] familyDirStatuses = fs.listStatus(hfofDir); + if (familyDirStatuses == null) { + throw new IOException("No families found in " + hfofDir); + } + + for (FileStatus stat : familyDirStatuses) { + if (!stat.isDirectory()) { + LOG.warn("Skipping non-directory " + stat.getPath()); + continue; + } + boolean isIgnore = false; + String pathName = stat.getPath().getName(); + for (String ignore : ignoreDirs) { + if (pathName.contains(ignore)) { + LOG.warn("Skipping non-family directory" + pathName); + isIgnore = true; + break; + } + } + if (isIgnore) { + continue; + } + Path familyDir = stat.getPath(); + LOG.debug("Parsing family dir [" + familyDir.toString() + " in region [" + regionDir + "]"); + // Skip _logs, etc + if (familyDir.getName().startsWith("_") || familyDir.getName().startsWith(".")) { + continue; + } + + // start to parse hfile inside one family dir + Path[] hfiles = FileUtil.stat2Paths(fs.listStatus(familyDir)); + for (Path hfile : hfiles) { + if (hfile.getName().startsWith("_") || hfile.getName().startsWith(".") + || StoreFileInfo.isReference(hfile.getName()) + || HFileLink.isHFileLink(hfile.getName())) { + continue; + } + HFile.Reader reader = HFile.createReader(fs, hfile, new CacheConfig(conf), conf); + final byte[] first, last; + try { + reader.loadFileInfo(); + first = reader.getFirstRowKey(); + last = reader.getLastRowKey(); + 
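+ // Count how many HFiles start (+1) or end (-1) at each observed row key; these counts are
+ // passed to LoadIncrementalHFiles.inferBoundaries() below to derive split keys for
+ // pre-creating the regions of the target table.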
LOG.debug("Trying to figure out region boundaries hfile=" + hfile + " first=" + + Bytes.toStringBinary(first) + " last=" + Bytes.toStringBinary(last)); + + // To eventually infer start key-end key boundaries + Integer value = map.containsKey(first) ? (Integer) map.get(first) : 0; + map.put(first, value + 1); + value = map.containsKey(last) ? (Integer) map.get(last) : 0; + map.put(last, value - 1); + } finally { + reader.close(); + } + } + } + } + keys = LoadIncrementalHFiles.inferBoundaries(map); + return keys; + } + + /** + * Prepare the table for bulkload, most codes copied from + * {@link LoadIncrementalHFiles#createTable(String, String)} + * @param tableBackupPath path + * @param tableName table name + * @param targetTableName target table name + * @param regionDirList region directory list + * @param htd table descriptor + * @throws IOException exception + */ + private void checkAndCreateTable(Path tableBackupPath, TableName tableName, + TableName targetTableName, ArrayList regionDirList, + HTableDescriptor htd, boolean truncateIfExists) + throws IOException { + HBaseAdmin hbadmin = null; + Connection conn = null; + try { + conn = ConnectionFactory.createConnection(conf); + hbadmin = (HBaseAdmin) conn.getAdmin(); + boolean createNew = false; + if (hbadmin.tableExists(targetTableName)) { + if(truncateIfExists) { + LOG.info("Truncating exising target table '" + targetTableName + + "', preserving region splits"); + hbadmin.disableTable(targetTableName); + hbadmin.truncateTable(targetTableName, true); + } else{ + LOG.info("Using exising target table '" + targetTableName + "'"); + } + } else { + createNew = true; + } + if(createNew){ + LOG.info("Creating target table '" + targetTableName + "'"); + // if no region directory given, create the table and return + if (regionDirList == null || regionDirList.size() == 0) { + hbadmin.createTable(htd); + return; + } + byte[][] keys = generateBoundaryKeys(regionDirList); + // create table using table descriptor and region boundaries + hbadmin.createTable(htd, keys); + } + } catch (Exception e) { + throw new IOException(e); + } finally { + if (hbadmin != null) { + hbadmin.close(); + } + if(conn != null){ + conn.close(); + } + } + } + +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/LogUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/LogUtils.java new file mode 100644 index 0000000..26f261c --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/LogUtils.java @@ -0,0 +1,43 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.util; + +import org.apache.commons.logging.Log; +import org.apache.log4j.Level; +import org.apache.log4j.Logger; + +public final class LogUtils { + + private LogUtils() { + } + /** + * Disables Zk- and HBase client logging + * @param log + */ + public static void disableUselessLoggers(Log log) { + // disable zookeeper log to avoid it mess up command output + Logger zkLogger = Logger.getLogger("org.apache.zookeeper"); + zkLogger.setLevel(Level.OFF); + // disable hbase zookeeper tool log to avoid it mess up command output + Logger hbaseZkLogger = Logger.getLogger("org.apache.hadoop.hbase.zookeeper"); + hbaseZkLogger.setLevel(Level.OFF); + // disable hbase client log to avoid it mess up command output + Logger hbaseClientLogger = Logger.getLogger("org.apache.hadoop.hbase.client"); + hbaseClientLogger.setLevel(Level.OFF); + } +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupAdmin.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupAdmin.java new file mode 100644 index 0000000..0ec8b5d --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupAdmin.java @@ -0,0 +1,186 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hbase.backup; + +import static org.junit.Assert.assertTrue; + +import java.util.List; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.BackupAdmin; +import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.client.ConnectionFactory; +import org.apache.hadoop.hbase.client.HBaseAdmin; +import org.apache.hadoop.hbase.client.HTable; +import org.apache.hadoop.hbase.client.Put; +import org.apache.hadoop.hbase.testclassification.LargeTests; +import org.apache.hadoop.hbase.util.Bytes; +import org.hamcrest.CoreMatchers; +import org.junit.Assert; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +import com.google.common.collect.Lists; + +@Category(LargeTests.class) +public class TestBackupAdmin extends TestBackupBase { + private static final Log LOG = LogFactory.getLog(TestBackupAdmin.class); + //implement all test cases in 1 test since incremental backup/restore has dependencies + @Test + public void TestIncBackupRestoreWithAdminAPI() throws Exception { + // #1 - create full backup for all tables + LOG.info("create full backup image for all tables"); + + List tables = Lists.newArrayList(table1, table2, table3, table4); + HBaseAdmin admin = null; + BackupAdmin backupAdmin = null; + Connection conn = ConnectionFactory.createConnection(conf1); + admin = (HBaseAdmin) conn.getAdmin(); + backupAdmin = admin.getBackupAdmin(); + BackupRequest request = new BackupRequest(); + request.setBackupType(BackupType.FULL).setTableList(tables).setTargetRootDir(BACKUP_ROOT_DIR); + String backupIdFull = backupAdmin.backupTables(request); + + assertTrue(checkSucceeded(backupIdFull)); + + // #2 - insert some data to table + HTable t1 = (HTable) conn.getTable(table1); + Put p1; + for (int i = 0; i < NB_ROWS_IN_BATCH; i++) { + p1 = new Put(Bytes.toBytes("row-t1" + i)); + p1.addColumn(famName, qualName, Bytes.toBytes("val" + i)); + t1.put(p1); + } + + Assert.assertThat(TEST_UTIL.countRows(t1), CoreMatchers.equalTo(NB_ROWS_IN_BATCH * 2)); + t1.close(); + + HTable t2 = (HTable) conn.getTable(table2); + Put p2; + for (int i = 0; i < 5; i++) { + p2 = new Put(Bytes.toBytes("row-t2" + i)); + p2.addColumn(famName, qualName, Bytes.toBytes("val" + i)); + t2.put(p2); + } + + Assert.assertThat(TEST_UTIL.countRows(t2), CoreMatchers.equalTo(NB_ROWS_IN_BATCH + 5)); + t2.close(); + + // #3 - incremental backup for multiple tables + tables = Lists.newArrayList(table1, table2, table3); + request = new BackupRequest(); + request.setBackupType(BackupType.INCREMENTAL).setTableList(tables) + .setTargetRootDir(BACKUP_ROOT_DIR); + String backupIdIncMultiple = backupAdmin.backupTables(request); + assertTrue(checkSucceeded(backupIdIncMultiple)); + + // #4 - restore full backup for all tables, without overwrite + TableName[] tablesRestoreFull = + new TableName[] { table1, table2, table3, table4 }; + + TableName[] tablesMapFull = + new TableName[] { table1_restore, table2_restore, table3_restore, table4_restore }; + + RestoreRequest restoreRequest = new RestoreRequest(); + restoreRequest.setBackupRootDir(BACKUP_ROOT_DIR).setBackupId(backupIdFull). + setCheck(false).setOverwrite(false). 
+ setFromTables(tablesRestoreFull).setToTables(tablesMapFull); + + backupAdmin.restore(restoreRequest); + + // #5.1 - check tables for full restore + + assertTrue(admin.tableExists(table1_restore)); + assertTrue(admin.tableExists(table2_restore)); + assertTrue(admin.tableExists(table3_restore)); + assertTrue(admin.tableExists(table4_restore)); + + + // #5.2 - checking row count of tables for full restore + HTable hTable = (HTable) conn.getTable(table1_restore); + Assert.assertThat(TEST_UTIL.countRows(hTable), CoreMatchers.equalTo(NB_ROWS_IN_BATCH)); + hTable.close(); + + hTable = (HTable) conn.getTable(table2_restore); + Assert.assertThat(TEST_UTIL.countRows(hTable), CoreMatchers.equalTo(NB_ROWS_IN_BATCH)); + hTable.close(); + + hTable = (HTable) conn.getTable(table3_restore); + Assert.assertThat(TEST_UTIL.countRows(hTable), CoreMatchers.equalTo(0)); + hTable.close(); + + hTable = (HTable) conn.getTable(table4_restore); + Assert.assertThat(TEST_UTIL.countRows(hTable), CoreMatchers.equalTo(0)); + hTable.close(); + + // #6 - restore incremental backup for multiple tables, with overwrite + TableName[] tablesRestoreIncMultiple = + new TableName[] { table1, table2, table3 }; + TableName[] tablesMapIncMultiple = + new TableName[] { table1_restore, table2_restore, table3_restore }; + + restoreRequest = new RestoreRequest(); + restoreRequest.setBackupRootDir(BACKUP_ROOT_DIR).setBackupId(backupIdIncMultiple). + setCheck(false).setOverwrite(true). + setFromTables(tablesRestoreIncMultiple).setToTables(tablesMapIncMultiple); + + backupAdmin.restore(restoreRequest); + + hTable = (HTable) conn.getTable(table1_restore); + Assert.assertThat(TEST_UTIL.countRows(hTable), CoreMatchers.equalTo(NB_ROWS_IN_BATCH * 2)); + hTable.close(); + + hTable = (HTable) conn.getTable(table2_restore); + Assert.assertThat(TEST_UTIL.countRows(hTable), CoreMatchers.equalTo(NB_ROWS_IN_BATCH + 5)); + hTable.close(); + + hTable = (HTable) conn.getTable(table3_restore); + Assert.assertThat(TEST_UTIL.countRows(hTable), CoreMatchers.equalTo(0)); + hTable.close(); + + // #7 - incremental backup for single, empty table + + tables = toList(table4.getNameAsString()); + request = new BackupRequest(); + request.setBackupType(BackupType.INCREMENTAL).setTableList(tables) + .setTargetRootDir(BACKUP_ROOT_DIR); + String backupIdIncEmpty = admin.getBackupAdmin().backupTables(request); + + + // #8 - restore incremental backup for single empty table, with overwrite + TableName[] tablesRestoreIncEmpty = new TableName[] { table4 }; + TableName[] tablesMapIncEmpty = new TableName[] { table4_restore }; + + restoreRequest = new RestoreRequest(); + restoreRequest.setBackupRootDir(BACKUP_ROOT_DIR).setBackupId(backupIdIncEmpty). + setCheck(false).setOverwrite(true). + setFromTables(tablesRestoreIncEmpty).setToTables(tablesMapIncEmpty); + + backupAdmin.restore(restoreRequest); + + hTable = (HTable) conn.getTable(table4_restore); + Assert.assertThat(TEST_UTIL.countRows(hTable), CoreMatchers.equalTo(0)); + hTable.close(); + admin.close(); + conn.close(); + } + +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java new file mode 100644 index 0000000..efa45b6 --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java @@ -0,0 +1,292 @@ +/* + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.backup; + + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.LocatedFileStatus; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.RemoteIterator; +import org.apache.hadoop.hbase.HBaseConfiguration; +import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.HColumnDescriptor; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.NamespaceDescriptor; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.backup.BackupInfo.BackupState; +import org.apache.hadoop.hbase.backup.impl.BackupSystemTable; +import org.apache.hadoop.hbase.client.Admin; +import org.apache.hadoop.hbase.client.BackupAdmin; +import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.client.ConnectionFactory; +import org.apache.hadoop.hbase.client.HBaseAdmin; +import org.apache.hadoop.hbase.client.HTable; +import org.apache.hadoop.hbase.client.Put; +import org.apache.hadoop.hbase.client.Table; +import org.apache.hadoop.hbase.snapshot.SnapshotTestingUtils; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster; +import org.junit.AfterClass; +import org.junit.BeforeClass; + + +/** + * This class is only a base for other integration-level backup tests. Do not add tests here. 
+ * TestBackupSmallTests is where tests that don't require bringing machines up/down should go.
+ * All other tests should have their own classes and extend this one.
+ */
+public class TestBackupBase {
+
+  private static final Log LOG = LogFactory.getLog(TestBackupBase.class);
+
+  protected static Configuration conf1;
+  protected static Configuration conf2;
+
+  protected static HBaseTestingUtility TEST_UTIL;
+  protected static HBaseTestingUtility TEST_UTIL2;
+  protected static TableName table1 = TableName.valueOf("table1");
+  protected static TableName table2 = TableName.valueOf("table2");
+  protected static TableName table3 = TableName.valueOf("table3");
+  protected static TableName table4 = TableName.valueOf("table4");
+
+  protected static TableName table1_restore = TableName.valueOf("ns1:table1_restore");
+  protected static TableName table2_restore = TableName.valueOf("ns2:table2_restore");
+  protected static TableName table3_restore = TableName.valueOf("ns3:table3_restore");
+  protected static TableName table4_restore = TableName.valueOf("ns4:table4_restore");
+
+  protected static final int NB_ROWS_IN_BATCH = 999;
+  protected static final byte[] qualName = Bytes.toBytes("q1");
+  protected static final byte[] famName = Bytes.toBytes("f");
+
+  protected static String BACKUP_ROOT_DIR = "/backupUT";
+  protected static String BACKUP_REMOTE_ROOT_DIR = "/backupUT";
+
+  protected static final String BACKUP_ZNODE = "/backup/hbase";
+  protected static final String BACKUP_SUCCEED_NODE = "complete";
+  protected static final String BACKUP_FAILED_NODE = "failed";
+
+  /**
+   * @throws java.lang.Exception
+   */
+  @BeforeClass
+  public static void setUpBeforeClass() throws Exception {
+    TEST_UTIL = new HBaseTestingUtility();
+    conf1 = TEST_UTIL.getConfiguration();
+    conf1.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/1");
+    // Set MultiWAL (with 2 default WAL files per RS)
+    //conf1.set(WAL_PROVIDER, "multiwal");
+    TEST_UTIL.startMiniZKCluster();
+    MiniZooKeeperCluster miniZK = TEST_UTIL.getZkCluster();
+
+    conf2 = HBaseConfiguration.create(conf1);
+    conf2.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/2");
+    TEST_UTIL2 = new HBaseTestingUtility(conf2);
+    TEST_UTIL2.setZkCluster(miniZK);
+    TEST_UTIL.startMiniCluster();
+    TEST_UTIL2.startMiniCluster();
+    conf1 = TEST_UTIL.getConfiguration();
+
+    TEST_UTIL.startMiniMapReduceCluster();
+    BACKUP_ROOT_DIR = TEST_UTIL.getConfiguration().get("fs.defaultFS") + "/backupUT";
+    LOG.info("ROOTDIR " + BACKUP_ROOT_DIR);
+    BACKUP_REMOTE_ROOT_DIR = TEST_UTIL2.getConfiguration().get("fs.defaultFS") + "/backupUT";
+    LOG.info("REMOTE ROOTDIR " + BACKUP_REMOTE_ROOT_DIR);
+    waitForSystemTable();
+    createTables();
+  }
+
+  public static void waitForSystemTable() throws Exception {
+    try (Admin admin = TEST_UTIL.getAdmin()) {
+      while (!admin.tableExists(BackupSystemTable.getTableName())
+          || !admin.isTableAvailable(BackupSystemTable.getTableName())) {
+        Thread.sleep(1000);
+      }
+    }
+    LOG.debug("backup table exists and available");
+  }
+
+  /**
+   * @throws java.lang.Exception
+   */
+  @AfterClass
+  public static void tearDownAfterClass() throws Exception {
+    SnapshotTestingUtils.deleteAllSnapshots(TEST_UTIL.getHBaseAdmin());
+    SnapshotTestingUtils.deleteArchiveDirectory(TEST_UTIL);
+    TEST_UTIL2.shutdownMiniCluster();
+    TEST_UTIL.shutdownMiniCluster();
+    TEST_UTIL.shutdownMiniMapReduceCluster();
+  }
+
+  protected String backupTables(BackupType type, List<TableName> tables, String path)
+      throws IOException {
+    Connection conn = null;
+    HBaseAdmin admin = null;
+    BackupAdmin badmin = null;
+    String
backupId; + try { + conn = ConnectionFactory.createConnection(conf1); + admin = (HBaseAdmin) conn.getAdmin(); + BackupRequest request = new BackupRequest(); + request.setBackupType(type).setTableList(tables).setTargetRootDir(path); + badmin = admin.getBackupAdmin(); + backupId = badmin.backupTables(request); + } finally { + if(badmin != null){ + badmin.close(); + } + if (admin != null) { + admin.close(); + } + if (conn != null) { + conn.close(); + } + } + return backupId; + } + + protected String fullTableBackup(List tables) throws IOException { + return backupTables(BackupType.FULL, tables, BACKUP_ROOT_DIR); + } + + protected String incrementalTableBackup(List tables) throws IOException { + return backupTables(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR); + } + + protected static void loadTable(Table table) throws Exception { + + Put p; // 100 + 1 row to t1_syncup + for (int i = 0; i < NB_ROWS_IN_BATCH; i++) { + p = new Put(Bytes.toBytes("row" + i)); + p.addColumn(famName, qualName, Bytes.toBytes("val" + i)); + table.put(p); + } + } + + protected static void createTables() throws Exception { + + long tid = System.currentTimeMillis(); + table1 = TableName.valueOf("ns1:test-" + tid); + HBaseAdmin ha = TEST_UTIL.getHBaseAdmin(); + + // Create namespaces + NamespaceDescriptor desc1 = NamespaceDescriptor.create("ns1").build(); + NamespaceDescriptor desc2 = NamespaceDescriptor.create("ns2").build(); + NamespaceDescriptor desc3 = NamespaceDescriptor.create("ns3").build(); + NamespaceDescriptor desc4 = NamespaceDescriptor.create("ns4").build(); + + ha.createNamespace(desc1); + ha.createNamespace(desc2); + ha.createNamespace(desc3); + ha.createNamespace(desc4); + + + HTableDescriptor desc = new HTableDescriptor(table1); + HColumnDescriptor fam = new HColumnDescriptor(famName); + desc.addFamily(fam); + ha.createTable(desc); + Connection conn = ConnectionFactory.createConnection(conf1); + Table table = (HTable) conn.getTable(table1); + loadTable(table); + table.close(); + table2 = TableName.valueOf("ns2:test-" + tid + 1); + desc = new HTableDescriptor(table2); + desc.addFamily(fam); + ha.createTable(desc); + table = (HTable) conn.getTable(table2); + loadTable(table); + table.close(); + table3 = TableName.valueOf("ns3:test-" + tid + 2); + table = TEST_UTIL.createTable(table3, famName); + table.close(); + table4 = TableName.valueOf("ns4:test-" + tid + 3); + table = TEST_UTIL.createTable(table4, famName); + table.close(); + ha.close(); + conn.close(); + } + + protected boolean checkSucceeded(String backupId) throws IOException { + BackupInfo status = getBackupContext(backupId); + if (status == null) return false; + return status.getState() == BackupState.COMPLETE; + } + + protected boolean checkFailed(String backupId) throws IOException { + BackupInfo status = getBackupContext(backupId); + if (status == null) return false; + return status.getState() == BackupState.FAILED; + } + + private BackupInfo getBackupContext(String backupId) throws IOException { + try (BackupSystemTable table = new BackupSystemTable(TEST_UTIL.getConnection())) { + BackupInfo status = table.readBackupInfo(backupId); + return status; + } + } + + protected BackupAdmin getBackupAdmin() throws IOException { + return TEST_UTIL.getAdmin().getBackupAdmin(); + } + + /** + * Get restore request. 
+   *
+   */
+  public RestoreRequest createRestoreRequest(
+      String backupRootDir,
+      String backupId, boolean check, TableName[] fromTables,
+      TableName[] toTables, boolean isOverwrite) {
+    RestoreRequest request = new RestoreRequest();
+    request.setBackupRootDir(backupRootDir).setBackupId(backupId).setCheck(check).
+      setFromTables(fromTables).setToTables(toTables).setOverwrite(isOverwrite);
+    return request;
+  }
+
+  /**
+   * Helper method: converts table name strings into a list of TableName
+   */
+  protected List<TableName> toList(String... args) {
+    List<TableName> ret = new ArrayList<>();
+    for (int i = 0; i < args.length; i++) {
+      ret.add(TableName.valueOf(args[i]));
+    }
+    return ret;
+  }
+
+  protected void dumpBackupDir() throws IOException {
+    // Dump backup root dir
+    FileSystem fs = FileSystem.get(conf1);
+    RemoteIterator<LocatedFileStatus> it = fs.listFiles(new Path(BACKUP_ROOT_DIR), true);
+    while (it.hasNext()) {
+      LOG.debug("DDEBUG: " + it.next().getPath());
+    }
+  }
+}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBoundaryTests.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBoundaryTests.java
new file mode 100644
index 0000000..62c47d6
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBoundaryTests.java
@@ -0,0 +1,97 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.backup;
+
+import java.util.List;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.DoNotRetryIOException;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.testclassification.LargeTests;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import com.google.common.collect.Lists;
+
+@Category(LargeTests.class)
+public class TestBackupBoundaryTests extends TestBackupBase {
+
+  private static final Log LOG = LogFactory.getLog(TestBackupBoundaryTests.class);
+
+  /**
+   * Verify that full backup is created on a single empty table correctly.
+   * @throws Exception
+   */
+  @Test
+  public void testFullBackupSingleEmpty() throws Exception {
+    LOG.info("create full backup image on single table");
+    List<TableName> tables = Lists.newArrayList(table3);
+    LOG.info("Finished Backup " + fullTableBackup(tables));
+  }
+
+  /**
+   * Verify that full backup is created on multiple empty tables correctly.
+   * @throws Exception
+   */
+  @Test
+  public void testFullBackupMultipleEmpty() throws Exception {
+    LOG.info("create full backup image on multiple empty tables");
+    List<TableName> tables = Lists.newArrayList(table3, table4);
+    fullTableBackup(tables);
+  }
+
+  /**
+   * Verify that full backup fails on a single table that does not exist.
+ * @throws Exception + */ + @Test(expected = DoNotRetryIOException.class) + public void testFullBackupSingleDNE() throws Exception { + + LOG.info("test full backup fails on a single table that does not exist"); + List tables = toList("tabledne"); + fullTableBackup(tables); + } + + /** + * Verify that full backup fails on multiple tables that do not exist. + * @throws Exception + */ + @Test(expected = DoNotRetryIOException.class) + public void testFullBackupMultipleDNE() throws Exception { + + LOG.info("test full backup fails on multiple tables that do not exist"); + List tables = toList("table1dne", "table2dne"); + fullTableBackup(tables); + } + + /** + * Verify that full backup fails on tableset containing real and fake tables. + * @throws Exception + */ + @Test(expected = DoNotRetryIOException.class) + public void testFullBackupMixExistAndDNE() throws Exception { + LOG.info("create full backup fails on tableset containing real and fake table"); + + List tables = toList(table1.getNameAsString(), "tabledne"); + fullTableBackup(tables); + } +} \ No newline at end of file diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDelete.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDelete.java new file mode 100644 index 0000000..d3c451d --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDelete.java @@ -0,0 +1,103 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.backup; + +import static org.junit.Assert.assertTrue; + +import java.io.ByteArrayOutputStream; +import java.io.PrintStream; +import java.util.List; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.backup.impl.BackupSystemTable; +import org.apache.hadoop.hbase.testclassification.LargeTests; +import org.apache.hadoop.util.ToolRunner; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +import com.google.common.collect.Lists; + +@Category(LargeTests.class) +public class TestBackupDelete extends TestBackupBase { + + private static final Log LOG = LogFactory.getLog(TestBackupDelete.class); + + /** + * Verify that full backup is created on a single table with data correctly. 
Verify that history + * works as expected + * @throws Exception + */ + @Test + public void testBackupDelete() throws Exception { + LOG.info("test backup delete on a single table with data"); + List tableList = Lists.newArrayList(table1); + String backupId = fullTableBackup(tableList); + assertTrue(checkSucceeded(backupId)); + LOG.info("backup complete"); + String[] backupIds = new String[] { backupId }; + BackupSystemTable table = new BackupSystemTable(TEST_UTIL.getConnection()); + BackupInfo info = table.readBackupInfo(backupId); + Path path = new Path(info.getTargetRootDir(), backupId); + FileSystem fs = FileSystem.get(path.toUri(), conf1); + assertTrue(fs.exists(path)); + int deleted = getBackupAdmin().deleteBackups(backupIds); + + assertTrue(!fs.exists(path)); + assertTrue(fs.exists(new Path(info.getTargetRootDir()))); + assertTrue(1 == deleted); + table.close(); + LOG.info("delete_backup"); + } + + /** + * Verify that full backup is created on a single table with data correctly. Verify that history + * works as expected + * @throws Exception + */ + @Test + public void testBackupDeleteCommand() throws Exception { + LOG.info("test backup delete on a single table with data: command-line"); + List tableList = Lists.newArrayList(table1); + String backupId = fullTableBackup(tableList); + assertTrue(checkSucceeded(backupId)); + LOG.info("backup complete"); + ByteArrayOutputStream baos = new ByteArrayOutputStream(); + System.setOut(new PrintStream(baos)); + + String[] args = new String[]{"delete", backupId }; + // Run backup + + try{ + int ret = ToolRunner.run(conf1, new BackupDriver(), args); + assertTrue(ret == 0); + } catch(Exception e){ + LOG.error("failed", e); + } + LOG.info("delete_backup"); + String output = baos.toString(); + LOG.info(baos.toString()); + assertTrue(output.indexOf("Deleted 1 backups") >= 0); + } + +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDeleteRestore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDeleteRestore.java new file mode 100644 index 0000000..2e04f1a --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDeleteRestore.java @@ -0,0 +1,70 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more contributor license + * agreements. See the NOTICE file distributed with this work for additional information regarding + * copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. You may obtain a + * copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable + * law or agreed to in writing, software distributed under the License is distributed on an "AS IS" + * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License + * for the specific language governing permissions and limitations under the License. 
+ */
+
+package org.apache.hadoop.hbase.backup;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+import java.util.List;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.BackupAdmin;
+import org.apache.hadoop.hbase.client.Delete;
+import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import com.google.common.collect.Lists;
+
+@Category(MediumTests.class)
+public class TestBackupDeleteRestore extends TestBackupBase {
+
+  private static final Log LOG = LogFactory.getLog(TestBackupDeleteRestore.class);
+
+  /**
+   * Verify that the sequence load data - backup - delete some data - restore
+   * works as expected: the deleted data is restored.
+   * @throws Exception
+   */
+  @Test
+  public void testBackupDeleteRestore() throws Exception {
+
+    LOG.info("test backup, delete and restore on a single table with data");
+
+    List<TableName> tables = Lists.newArrayList(table1);
+    String backupId = fullTableBackup(tables);
+    assertTrue(checkSucceeded(backupId));
+    LOG.info("backup complete");
+    int numRows = TEST_UTIL.countRows(table1);
+    HBaseAdmin hba = TEST_UTIL.getHBaseAdmin();
+    // delete row
+    try (Table table = TEST_UTIL.getConnection().getTable(table1)) {
+      Delete delete = new Delete("row0".getBytes());
+      table.delete(delete);
+      hba.flush(table1);
+    }
+
+    TableName[] tableset = new TableName[] { table1 };
+    TableName[] tablemap = null; // new TableName[] { table1_restore };
+    BackupAdmin client = getBackupAdmin();
+    client.restore(createRestoreRequest(BACKUP_ROOT_DIR, backupId, false, tableset,
+      tablemap, true));
+
+    int numRowsAfterRestore = TEST_UTIL.countRows(table1);
+    assertEquals(numRows, numRowsAfterRestore);
+    hba.close();
+  }
+}
\ No newline at end of file
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDescribe.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDescribe.java
new file mode 100644
index 0000000..af377c3
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDescribe.java
@@ -0,0 +1,106 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +package org.apache.hadoop.hbase.backup; + +import static org.junit.Assert.assertNotEquals; +import static org.junit.Assert.assertTrue; + +import java.io.ByteArrayOutputStream; +import java.io.PrintStream; +import java.util.List; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.backup.BackupInfo.BackupState; +import org.apache.hadoop.hbase.backup.impl.BackupSystemTable; +import org.apache.hadoop.hbase.testclassification.LargeTests; +import org.apache.hadoop.util.ToolRunner; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +import com.google.common.collect.Lists; + +@Category(LargeTests.class) +public class TestBackupDescribe extends TestBackupBase { + + private static final Log LOG = LogFactory.getLog(TestBackupDescribe.class); + + /** + * Verify that full backup is created on a single table with data correctly. Verify that describe + * works as expected + * @throws Exception + */ + @Test + public void testBackupDescribe() throws Exception { + + LOG.info("test backup describe on a single table with data"); + + List tableList = Lists.newArrayList(table1); + String backupId = fullTableBackup(tableList); + + LOG.info("backup complete"); + assertTrue(checkSucceeded(backupId)); + + + BackupInfo info = getBackupAdmin().getBackupInfo(backupId); + assertTrue(info.getState() == BackupState.COMPLETE); + + } + + @Test + public void testBackupSetCommandWithNonExistentTable() throws Exception { + String[] args = new String[]{"set", "add", "some_set", "table" }; + // Run backup + int ret = ToolRunner.run(conf1, new BackupDriver(), args); + assertNotEquals(ret, 0); + } + + @Test + public void testBackupDescribeCommand() throws Exception { + + LOG.info("test backup describe on a single table with data: command-line"); + + List tableList = Lists.newArrayList(table1); + String backupId = fullTableBackup(tableList); + + LOG.info("backup complete"); + assertTrue(checkSucceeded(backupId)); + + ByteArrayOutputStream baos = new ByteArrayOutputStream(); + System.setOut(new PrintStream(baos)); + + String[] args = new String[]{"describe", backupId }; + // Run backup + int ret = ToolRunner.run(conf1, new BackupDriver(), args); + assertTrue(ret == 0); + String response = baos.toString(); + assertTrue(response.indexOf(backupId) > 0); + assertTrue(response.indexOf("COMPLETE") > 0); + + BackupSystemTable table = new BackupSystemTable(TEST_UTIL.getConnection()); + BackupInfo status = table.readBackupInfo(backupId); + String desc = status.getShortDescription(); + table.close(); + assertTrue(response.indexOf(desc) >= 0); + + } + + +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupLogCleaner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupLogCleaner.java new file mode 100644 index 0000000..50295b6 --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupLogCleaner.java @@ -0,0 +1,159 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.backup; + +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.LocatedFileStatus; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.RemoteIterator; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.backup.impl.BackupSystemTable; +import org.apache.hadoop.hbase.backup.master.BackupLogCleaner; +import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.client.ConnectionFactory; +import org.apache.hadoop.hbase.client.HTable; +import org.apache.hadoop.hbase.client.Put; +import org.apache.hadoop.hbase.testclassification.LargeTests; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.FSUtils; +import org.apache.hadoop.hbase.wal.AbstractFSWALProvider; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +import com.google.common.collect.Iterables; +import com.google.common.collect.Lists; + +@Category(LargeTests.class) +public class TestBackupLogCleaner extends TestBackupBase { + private static final Log LOG = LogFactory.getLog(TestBackupLogCleaner.class); + + // implements all test cases in 1 test since incremental full backup/ + // incremental backup has dependencies + @Test + public void testBackupLogCleaner() throws Exception { + + // #1 - create full backup for all tables + LOG.info("create full backup image for all tables"); + + List tableSetFullList = Lists.newArrayList(table1, table2, table3, table4); + + try (BackupSystemTable systemTable = new BackupSystemTable(TEST_UTIL.getConnection())) { + // Verify that we have no backup sessions yet + assertFalse(systemTable.hasBackupSessions()); + + List walFiles = getListOfWALFiles(TEST_UTIL.getConfiguration()); + List swalFiles = convert(walFiles); + BackupLogCleaner cleaner = new BackupLogCleaner(); + cleaner.setConf(TEST_UTIL.getConfiguration()); + + Iterable deletable = cleaner.getDeletableFiles(walFiles); + int size = Iterables.size(deletable); + + // We can delete all files because we do not have yet recorded backup sessions + assertTrue(size == walFiles.size()); + + systemTable.addWALFiles(swalFiles, "backup", "root"); + String backupIdFull = fullTableBackup(tableSetFullList); + assertTrue(checkSucceeded(backupIdFull)); + // Check one more time + deletable = cleaner.getDeletableFiles(walFiles); + // We can delete wal files because they were saved into hbase:backup table + size = Iterables.size(deletable); + assertTrue(size == walFiles.size()); + + List newWalFiles = getListOfWALFiles(TEST_UTIL.getConfiguration()); + LOG.debug("WAL list after full backup"); + convert(newWalFiles); + + // New list of wal files is greater than the previous one, + // because new wal per RS 
have been opened after full backup + assertTrue(walFiles.size() < newWalFiles.size()); + Connection conn = ConnectionFactory.createConnection(conf1); + // #2 - insert some data to table + HTable t1 = (HTable) conn.getTable(table1); + Put p1; + for (int i = 0; i < NB_ROWS_IN_BATCH; i++) { + p1 = new Put(Bytes.toBytes("row-t1" + i)); + p1.addColumn(famName, qualName, Bytes.toBytes("val" + i)); + t1.put(p1); + } + + t1.close(); + + HTable t2 = (HTable) conn.getTable(table2); + Put p2; + for (int i = 0; i < 5; i++) { + p2 = new Put(Bytes.toBytes("row-t2" + i)); + p2.addColumn(famName, qualName, Bytes.toBytes("val" + i)); + t2.put(p2); + } + + t2.close(); + + // #3 - incremental backup for multiple tables + + List tableSetIncList = Lists.newArrayList(table1, table2, table3); + String backupIdIncMultiple = backupTables(BackupType.INCREMENTAL, tableSetIncList, + BACKUP_ROOT_DIR); + assertTrue(checkSucceeded(backupIdIncMultiple)); + deletable = cleaner.getDeletableFiles(newWalFiles); + + assertTrue(Iterables.size(deletable) == newWalFiles.size()); + + conn.close(); + } + } + + private List convert(List walFiles) { + List result = new ArrayList(); + for (FileStatus fs : walFiles) { + LOG.debug("+++WAL: " + fs.getPath().toString()); + result.add(fs.getPath().toString()); + } + return result; + } + + private List getListOfWALFiles(Configuration c) throws IOException { + Path logRoot = new Path(FSUtils.getRootDir(c), HConstants.HREGION_LOGDIR_NAME); + FileSystem fs = FileSystem.get(c); + RemoteIterator it = fs.listFiles(logRoot, true); + List logFiles = new ArrayList(); + while (it.hasNext()) { + LocatedFileStatus lfs = it.next(); + if (lfs.isFile() && !AbstractFSWALProvider.isMetaFile(lfs.getPath())) { + logFiles.add(lfs); + LOG.info(lfs); + } + } + return logFiles; + } + +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupShowHistory.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupShowHistory.java new file mode 100644 index 0000000..a7d2750 --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupShowHistory.java @@ -0,0 +1,92 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hbase.backup; + +import static org.junit.Assert.assertTrue; + +import java.io.ByteArrayOutputStream; +import java.io.PrintStream; +import java.util.List; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.testclassification.LargeTests; +import org.apache.hadoop.util.ToolRunner; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +import com.google.common.collect.Lists; + +@Category(LargeTests.class) +public class TestBackupShowHistory extends TestBackupBase { + + private static final Log LOG = LogFactory.getLog(TestBackupShowHistory.class); + + /** + * Verify that full backup is created on a single table with data correctly. Verify that history + * works as expected + * @throws Exception + */ + @Test + public void testBackupHistory() throws Exception { + + LOG.info("test backup history on a single table with data"); + + List tableList = Lists.newArrayList(table1); + String backupId = fullTableBackup(tableList); + assertTrue(checkSucceeded(backupId)); + LOG.info("backup complete"); + + List history = getBackupAdmin().getHistory(10); + assertTrue(history.size() > 0); + boolean success = false; + for(BackupInfo info: history){ + if(info.getBackupId().equals(backupId)){ + success = true; break; + } + } + assertTrue(success); + LOG.info("show_history"); + + } + + @Test + public void testBackupHistoryCommand() throws Exception { + + LOG.info("test backup history on a single table with data: command-line"); + + List tableList = Lists.newArrayList(table1); + String backupId = fullTableBackup(tableList); + assertTrue(checkSucceeded(backupId)); + LOG.info("backup complete"); + + ByteArrayOutputStream baos = new ByteArrayOutputStream(); + System.setOut(new PrintStream(baos)); + + String[] args = new String[]{"history", "-n", "10" }; + // Run backup + int ret = ToolRunner.run(conf1, new BackupDriver(), args); + assertTrue(ret == 0); + LOG.info("show_history"); + String output = baos.toString(); + LOG.info(baos.toString()); + assertTrue(output.indexOf(backupId) > 0); + } +} \ No newline at end of file diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupStatusProgress.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupStatusProgress.java new file mode 100644 index 0000000..be4019b --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupStatusProgress.java @@ -0,0 +1,98 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package org.apache.hadoop.hbase.backup;
+
+import static org.junit.Assert.assertTrue;
+
+import java.io.ByteArrayOutputStream;
+import java.io.PrintStream;
+import java.util.List;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.backup.BackupInfo.BackupState;
+import org.apache.hadoop.hbase.testclassification.LargeTests;
+import org.apache.hadoop.util.ToolRunner;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import com.google.common.collect.Lists;
+
+@Category(LargeTests.class)
+public class TestBackupStatusProgress extends TestBackupBase {
+
+  private static final Log LOG = LogFactory.getLog(TestBackupStatusProgress.class);
+
+  /**
+   * Verify that full backup is created on a single table with data correctly
+   * and that its status and progress can be queried.
+   * @throws Exception
+   */
+  @Test
+  public void testBackupStatusProgress() throws Exception {
+
+    LOG.info("test backup status/progress on a single table with data");
+
+    List<TableName> tableList = Lists.newArrayList(table1);
+    String backupId = fullTableBackup(tableList);
+    LOG.info("backup complete");
+    assertTrue(checkSucceeded(backupId));
+
+    BackupInfo info = getBackupAdmin().getBackupInfo(backupId);
+    assertTrue(info.getState() == BackupState.COMPLETE);
+    int p = getBackupAdmin().getProgress(backupId);
+    LOG.debug(info.getShortDescription());
+    assertTrue(p > 0);
+  }
+
+  @Test
+  public void testBackupStatusProgressCommand() throws Exception {
+
+    LOG.info("test backup status/progress on a single table with data: command-line");
+
+    List<TableName> tableList = Lists.newArrayList(table1);
+    String backupId = fullTableBackup(tableList);
+    LOG.info("backup complete");
+    assertTrue(checkSucceeded(backupId));
+    ByteArrayOutputStream baos = new ByteArrayOutputStream();
+    System.setOut(new PrintStream(baos));
+
+    String[] args = new String[] { "describe", backupId };
+    int ret = ToolRunner.run(conf1, new BackupDriver(), args);
+    assertTrue(ret == 0);
+    String response = baos.toString();
+    assertTrue(response.indexOf(backupId) > 0);
+    assertTrue(response.indexOf("COMPLETE") > 0);
+
+    baos = new ByteArrayOutputStream();
+    System.setOut(new PrintStream(baos));
+
+    args = new String[] { "progress", backupId };
+    ret = ToolRunner.run(conf1, new BackupDriver(), args);
+    assertTrue(ret == 0);
+    response = baos.toString();
+    assertTrue(response.indexOf(backupId) >= 0);
+    assertTrue(response.indexOf("progress") > 0);
+    assertTrue(response.indexOf("100") > 0);
+  }
+}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupSystemTable.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupSystemTable.java
new file mode 100644
index 0000000..90eac49
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupSystemTable.java
@@ -0,0 +1,526 @@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.backup; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.Iterator; +import java.util.List; +import java.util.Set; +import java.util.TreeSet; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.MiniHBaseCluster; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.backup.BackupInfo.BackupState; +import org.apache.hadoop.hbase.backup.impl.BackupSystemTable; +import org.apache.hadoop.hbase.client.Admin; +import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.testclassification.MediumTests; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +/** + * Test cases for hbase:backup API + * + */ +@Category(MediumTests.class) +public class TestBackupSystemTable { + + private static final HBaseTestingUtility UTIL = new HBaseTestingUtility(); + protected static Configuration conf = UTIL.getConfiguration(); + protected static MiniHBaseCluster cluster; + protected static Connection conn; + protected BackupSystemTable table; + + @BeforeClass + public static void setUp() throws Exception { + cluster = UTIL.startMiniCluster(); + conn = UTIL.getConnection(); + waitForSystemTable(); + } + + static void waitForSystemTable() throws Exception + { + try(Admin admin = UTIL.getAdmin();) { + while (!admin.tableExists(BackupSystemTable.getTableName()) + || !admin.isTableAvailable(BackupSystemTable.getTableName())) { + Thread.sleep(1000); + } + } + } + + @Before + public void before() throws IOException { + table = new BackupSystemTable(conn); + } + + @After + public void after() { + if (table != null) { + table.close(); + } + + } + + @Test + public void testUpdateReadDeleteBackupStatus() throws IOException { + BackupInfo ctx = createBackupContext(); + table.updateBackupInfo(ctx); + BackupInfo readCtx = table.readBackupInfo(ctx.getBackupId()); + assertTrue(compare(ctx, readCtx)); + // try fake backup id + readCtx = table.readBackupInfo("fake"); + assertNull(readCtx); + // delete backup context + table.deleteBackupInfo(ctx.getBackupId()); + readCtx = table.readBackupInfo(ctx.getBackupId()); + assertNull(readCtx); + cleanBackupTable(); + } + + @Test + public void testWriteReadBackupStartCode() throws IOException { + Long code = 100L; + table.writeBackupStartCode(code, "root"); + String readCode = table.readBackupStartCode("root"); + assertEquals(code, new Long(Long.parseLong(readCode))); + cleanBackupTable(); + } + + private void cleanBackupTable() throws IOException { + Admin admin = UTIL.getHBaseAdmin(); + admin.disableTable(TableName.BACKUP_TABLE_NAME); + 
admin.truncateTable(TableName.BACKUP_TABLE_NAME, true); + if (admin.isTableDisabled(TableName.BACKUP_TABLE_NAME)) { + admin.enableTable(TableName.BACKUP_TABLE_NAME); + } + } + + @Test + public void testBackupHistory() throws IOException { + int n = 10; + List list = createBackupContextList(n); + + // Load data + for (BackupInfo bc : list) { + // Make sure we set right status + bc.setState(BackupState.COMPLETE); + table.updateBackupInfo(bc); + } + + // Reverse list for comparison + Collections.reverse(list); + ArrayList history = table.getBackupHistory(); + assertTrue(history.size() == n); + + for (int i = 0; i < n; i++) { + BackupInfo ctx = list.get(i); + BackupInfo data = history.get(i); + assertTrue(compare(ctx, data)); + } + + cleanBackupTable(); + + } + + @Test + public void testBackupDelete() throws IOException { + + try (BackupSystemTable table = new BackupSystemTable(conn)) { + + int n = 10; + List list = createBackupContextList(n); + + // Load data + for (BackupInfo bc : list) { + // Make sure we set right status + bc.setState(BackupState.COMPLETE); + table.updateBackupInfo(bc); + } + + // Verify exists + for (BackupInfo bc : list) { + assertNotNull(table.readBackupInfo(bc.getBackupId())); + } + + // Delete all + for (BackupInfo bc : list) { + table.deleteBackupInfo(bc.getBackupId()); + } + + // Verify do not exists + for (BackupInfo bc : list) { + assertNull(table.readBackupInfo(bc.getBackupId())); + } + + cleanBackupTable(); + } + + } + + + + @Test + public void testRegionServerLastLogRollResults() throws IOException { + String[] servers = new String[] { "server1", "server2", "server3" }; + Long[] timestamps = new Long[] { 100L, 102L, 107L }; + + for (int i = 0; i < servers.length; i++) { + table.writeRegionServerLastLogRollResult(servers[i], timestamps[i], "root"); + } + + HashMap result = table.readRegionServerLastLogRollResult("root"); + assertTrue(servers.length == result.size()); + Set keys = result.keySet(); + String[] keysAsArray = new String[keys.size()]; + keys.toArray(keysAsArray); + Arrays.sort(keysAsArray); + + for (int i = 0; i < keysAsArray.length; i++) { + assertEquals(keysAsArray[i], servers[i]); + Long ts1 = timestamps[i]; + Long ts2 = result.get(keysAsArray[i]); + assertEquals(ts1, ts2); + } + + cleanBackupTable(); + } + + @Test + public void testIncrementalBackupTableSet() throws IOException { + TreeSet tables1 = new TreeSet<>(); + + tables1.add(TableName.valueOf("t1")); + tables1.add(TableName.valueOf("t2")); + tables1.add(TableName.valueOf("t3")); + + TreeSet tables2 = new TreeSet<>(); + + tables2.add(TableName.valueOf("t3")); + tables2.add(TableName.valueOf("t4")); + tables2.add(TableName.valueOf("t5")); + + table.addIncrementalBackupTableSet(tables1, "root"); + BackupSystemTable table = new BackupSystemTable(conn); + TreeSet res1 = (TreeSet) + table.getIncrementalBackupTableSet("root"); + assertTrue(tables1.size() == res1.size()); + Iterator desc1 = tables1.descendingIterator(); + Iterator desc2 = res1.descendingIterator(); + while (desc1.hasNext()) { + assertEquals(desc1.next(), desc2.next()); + } + + table.addIncrementalBackupTableSet(tables2, "root"); + TreeSet res2 = (TreeSet) + table.getIncrementalBackupTableSet("root"); + assertTrue((tables2.size() + tables1.size() - 1) == res2.size()); + + tables1.addAll(tables2); + + desc1 = tables1.descendingIterator(); + desc2 = res2.descendingIterator(); + + while (desc1.hasNext()) { + assertEquals(desc1.next(), desc2.next()); + } + cleanBackupTable(); + + } + + @Test + public void 
testRegionServerLogTimestampMap() throws IOException { + TreeSet tables = new TreeSet<>(); + + tables.add(TableName.valueOf("t1")); + tables.add(TableName.valueOf("t2")); + tables.add(TableName.valueOf("t3")); + + HashMap rsTimestampMap = new HashMap(); + + rsTimestampMap.put("rs1", 100L); + rsTimestampMap.put("rs2", 101L); + rsTimestampMap.put("rs3", 103L); + + table.writeRegionServerLogTimestamp(tables, rsTimestampMap, "root"); + + HashMap> result = table.readLogTimestampMap("root"); + + assertTrue(tables.size() == result.size()); + + for (TableName t : tables) { + HashMap rstm = result.get(t); + assertNotNull(rstm); + assertEquals(rstm.get("rs1"), new Long(100L)); + assertEquals(rstm.get("rs2"), new Long(101L)); + assertEquals(rstm.get("rs3"), new Long(103L)); + } + + Set tables1 = new TreeSet<>(); + + tables1.add(TableName.valueOf("t3")); + tables1.add(TableName.valueOf("t4")); + tables1.add(TableName.valueOf("t5")); + + HashMap rsTimestampMap1 = new HashMap(); + + rsTimestampMap1.put("rs1", 200L); + rsTimestampMap1.put("rs2", 201L); + rsTimestampMap1.put("rs3", 203L); + + table.writeRegionServerLogTimestamp(tables1, rsTimestampMap1, "root"); + + result = table.readLogTimestampMap("root"); + + assertTrue(5 == result.size()); + + for (TableName t : tables) { + HashMap rstm = result.get(t); + assertNotNull(rstm); + if (t.equals(TableName.valueOf("t3")) == false) { + assertEquals(rstm.get("rs1"), new Long(100L)); + assertEquals(rstm.get("rs2"), new Long(101L)); + assertEquals(rstm.get("rs3"), new Long(103L)); + } else { + assertEquals(rstm.get("rs1"), new Long(200L)); + assertEquals(rstm.get("rs2"), new Long(201L)); + assertEquals(rstm.get("rs3"), new Long(203L)); + } + } + + for (TableName t : tables1) { + HashMap rstm = result.get(t); + assertNotNull(rstm); + assertEquals(rstm.get("rs1"), new Long(200L)); + assertEquals(rstm.get("rs2"), new Long(201L)); + assertEquals(rstm.get("rs3"), new Long(203L)); + } + + cleanBackupTable(); + + } + + @Test + public void testAddWALFiles() throws IOException { + List files = + Arrays.asList("hdfs://server/WALs/srv1,101,15555/srv1,101,15555.default.1", + "hdfs://server/WALs/srv2,102,16666/srv2,102,16666.default.2", + "hdfs://server/WALs/srv3,103,17777/srv3,103,17777.default.3"); + String newFile = "hdfs://server/WALs/srv1,101,15555/srv1,101,15555.default.5"; + + table.addWALFiles(files, "backup", "root"); + + assertTrue(table.isWALFileDeletable(files.get(0))); + assertTrue(table.isWALFileDeletable(files.get(1))); + assertTrue(table.isWALFileDeletable(files.get(2))); + assertFalse(table.isWALFileDeletable(newFile)); + + cleanBackupTable(); + } + + + /** + * Backup set tests + */ + + @Test + public void testBackupSetAddNotExists() throws IOException { + try (BackupSystemTable table = new BackupSystemTable(conn)) { + + String[] tables = new String[] { "table1", "table2", "table3" }; + String setName = "name"; + table.addToBackupSet(setName, tables); + List tnames = table.describeBackupSet(setName); + assertTrue(tnames != null); + assertTrue(tnames.size() == tables.length); + for (int i = 0; i < tnames.size(); i++) { + assertTrue(tnames.get(i).getNameAsString().equals(tables[i])); + } + cleanBackupTable(); + } + + } + + @Test + public void testBackupSetAddExists() throws IOException { + try (BackupSystemTable table = new BackupSystemTable(conn)) { + + String[] tables = new String[] { "table1", "table2", "table3" }; + String setName = "name"; + table.addToBackupSet(setName, tables); + String[] addTables = new String[] { "table4", "table5", "table6" }; + 
table.addToBackupSet(setName, addTables); + + List tnames = table.describeBackupSet(setName); + assertTrue(tnames != null); + assertTrue(tnames.size() == tables.length + addTables.length); + for (int i = 0; i < tnames.size(); i++) { + assertTrue(tnames.get(i).getNameAsString().equals("table" + (i + 1))); + } + cleanBackupTable(); + } + } + + @Test + public void testBackupSetAddExistsIntersects() throws IOException { + try (BackupSystemTable table = new BackupSystemTable(conn)) { + + String[] tables = new String[] { "table1", "table2", "table3" }; + String setName = "name"; + table.addToBackupSet(setName, tables); + String[] addTables = new String[] { "table3", "table4", "table5", "table6" }; + table.addToBackupSet(setName, addTables); + + List tnames = table.describeBackupSet(setName); + assertTrue(tnames != null); + assertTrue(tnames.size()== tables.length + addTables.length - 1); + for (int i = 0; i < tnames.size(); i++) { + assertTrue(tnames.get(i).getNameAsString().equals("table" + (i + 1))); + } + cleanBackupTable(); + } + } + + @Test + public void testBackupSetRemoveSomeNotExists() throws IOException { + try (BackupSystemTable table = new BackupSystemTable(conn)) { + + String[] tables = new String[] { "table1", "table2", "table3", "table4" }; + String setName = "name"; + table.addToBackupSet(setName, tables); + String[] removeTables = new String[] { "table4", "table5", "table6" }; + table.removeFromBackupSet(setName, removeTables); + + List tnames = table.describeBackupSet(setName); + assertTrue(tnames != null); + assertTrue(tnames.size() == tables.length - 1); + for (int i = 0; i < tnames.size(); i++) { + assertTrue(tnames.get(i).getNameAsString().equals("table" + (i + 1))); + } + cleanBackupTable(); + } + } + + @Test + public void testBackupSetRemove() throws IOException { + try (BackupSystemTable table = new BackupSystemTable(conn)) { + + String[] tables = new String[] { "table1", "table2", "table3", "table4" }; + String setName = "name"; + table.addToBackupSet(setName, tables); + String[] removeTables = new String[] { "table4", "table3" }; + table.removeFromBackupSet(setName, removeTables); + + List tnames = table.describeBackupSet(setName); + assertTrue(tnames != null); + assertTrue(tnames.size() == tables.length - 2); + for (int i = 0; i < tnames.size(); i++) { + assertTrue(tnames.get(i).getNameAsString().equals("table" + (i + 1))); + } + cleanBackupTable(); + } + } + + @Test + public void testBackupSetDelete() throws IOException { + try (BackupSystemTable table = new BackupSystemTable(conn)) { + + String[] tables = new String[] { "table1", "table2", "table3", "table4" }; + String setName = "name"; + table.addToBackupSet(setName, tables); + table.deleteBackupSet(setName); + + List tnames = table.describeBackupSet(setName); + assertTrue(tnames == null); + cleanBackupTable(); + } + } + + @Test + public void testBackupSetList() throws IOException { + try (BackupSystemTable table = new BackupSystemTable(conn)) { + + String[] tables = new String[] { "table1", "table2", "table3", "table4" }; + String setName1 = "name1"; + String setName2 = "name2"; + table.addToBackupSet(setName1, tables); + table.addToBackupSet(setName2, tables); + + List list = table.listBackupSets(); + + assertTrue(list.size() == 2); + assertTrue(list.get(0).equals(setName1)); + assertTrue(list.get(1).equals(setName2)); + + cleanBackupTable(); + } + } + + + private boolean compare(BackupInfo one, BackupInfo two) { + return one.getBackupId().equals(two.getBackupId()) && one.getType().equals(two.getType()) + && 
one.getTargetRootDir().equals(two.getTargetRootDir()) + && one.getStartTs() == two.getStartTs() && one.getEndTs() == two.getEndTs(); + } + + private BackupInfo createBackupContext() { + + BackupInfo ctxt = + new BackupInfo("backup_" + System.nanoTime(), BackupType.FULL, + new TableName[] { + TableName.valueOf("t1"), TableName.valueOf("t2"), TableName.valueOf("t3") }, + "/hbase/backup"); + ctxt.setStartTs(System.currentTimeMillis()); + ctxt.setEndTs(System.currentTimeMillis() + 1); + return ctxt; + } + + private List createBackupContextList(int size) { + List list = new ArrayList(); + for (int i = 0; i < size; i++) { + list.add(createBackupContext()); + try { + Thread.sleep(10); + } catch (InterruptedException e) { + e.printStackTrace(); + } + } + return list; + } + + @AfterClass + public static void tearDown() throws IOException { + if (cluster != null) cluster.shutdown(); + } +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestFullBackup.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestFullBackup.java new file mode 100644 index 0000000..3d37d07 --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestFullBackup.java @@ -0,0 +1,141 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.backup; + +import static org.junit.Assert.assertTrue; + +import java.util.ArrayList; +import java.util.List; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.backup.impl.BackupSystemTable; +import org.apache.hadoop.hbase.testclassification.LargeTests; +import org.apache.hadoop.util.ToolRunner; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +import com.google.common.collect.Lists; + +@Category(LargeTests.class) +public class TestFullBackup extends TestBackupBase { + + private static final Log LOG = LogFactory.getLog(TestFullBackup.class); + + /** + * Verify that full backup is created on a single table with data correctly. + * @throws Exception + */ + @Test + public void testFullBackupSingle() throws Exception { + LOG.info("test full backup on a single table with data"); + List tables = Lists.newArrayList(table1); + String backupId = fullTableBackup(tables); + assertTrue(checkSucceeded(backupId)); + LOG.info("backup complete for " + backupId); + } + + /** + * Verify that full backup is created on a single table with data correctly. 
+ * @throws Exception + */ + @Test + public void testFullBackupSingleCommand() throws Exception { + LOG.info("test full backup on a single table with data: command-line"); + try(BackupSystemTable table = new BackupSystemTable(TEST_UTIL.getConnection())) { + int before = table.getBackupHistory().size(); + String[] args = new String[]{"create", "full", BACKUP_ROOT_DIR, table1.getNameAsString() }; + // Run backup + int ret = ToolRunner.run(conf1, new BackupDriver(), args); + assertTrue(ret == 0); + ArrayList backups = table.getBackupHistory(); + int after = table.getBackupHistory().size(); + assertTrue(after == before +1); + for(BackupInfo data : backups){ + String backupId = data.getBackupId(); + assertTrue(checkSucceeded(backupId)); + } + } + LOG.info("backup complete"); + } + + + /** + * Verify that full backup is created on multiple tables correctly. + * @throws Exception + */ + @Test + public void testFullBackupMultiple() throws Exception { + LOG.info("create full backup image on multiple tables with data"); + List tables = Lists.newArrayList(table1, table1); + String backupId = fullTableBackup(tables); + assertTrue(checkSucceeded(backupId)); + } + + @Test + public void testFullBackupMultipleCommand() throws Exception { + LOG.info("test full backup on a multiple tables with data: command-line"); + try(BackupSystemTable table = new BackupSystemTable(TEST_UTIL.getConnection())) { + int before = table.getBackupHistory().size(); + String[] args = new String[]{"create", "full", BACKUP_ROOT_DIR, + table1.getNameAsString() +","+ table2.getNameAsString() }; + // Run backup + int ret = ToolRunner.run(conf1, new BackupDriver(), args); + assertTrue(ret == 0); + ArrayList backups = table.getBackupHistory(); + int after = table.getBackupHistory().size(); + assertTrue(after == before +1); + for(BackupInfo data : backups){ + String backupId = data.getBackupId(); + assertTrue(checkSucceeded(backupId)); + } + } + LOG.info("backup complete"); + } + /** + * Verify that full backup is created on all tables correctly. + * @throws Exception + */ + @Test + public void testFullBackupAll() throws Exception { + LOG.info("create full backup image on all tables"); + String backupId = fullTableBackup(null); + assertTrue(checkSucceeded(backupId)); + + } + + @Test + public void testFullBackupAllCommand() throws Exception { + LOG.info("create full backup image on all tables: command-line"); + try(BackupSystemTable table = new BackupSystemTable(TEST_UTIL.getConnection())) { + int before = table.getBackupHistory().size(); + String[] args = new String[]{"create", "full", BACKUP_ROOT_DIR }; + // Run backup + int ret = ToolRunner.run(conf1, new BackupDriver(), args); + assertTrue(ret == 0); + ArrayList backups = table.getBackupHistory(); + int after = table.getBackupHistory().size(); + assertTrue(after == before +1); + for(BackupInfo data : backups){ + String backupId = data.getBackupId(); + assertTrue(checkSucceeded(backupId)); + } + } + } +} \ No newline at end of file diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestFullBackupSet.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestFullBackupSet.java new file mode 100644 index 0000000..a8fa7de --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestFullBackupSet.java @@ -0,0 +1,103 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.backup; + +import static org.junit.Assert.*; + +import java.util.ArrayList; +import java.util.List; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.backup.impl.BackupSystemTable; +import org.apache.hadoop.hbase.client.HBaseAdmin; +import org.apache.hadoop.hbase.testclassification.LargeTests; +import org.apache.hadoop.util.ToolRunner; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +@Category(LargeTests.class) +public class TestFullBackupSet extends TestBackupBase { + + private static final Log LOG = LogFactory.getLog(TestFullBackupSet.class); + + + /** + * Verify that full backup is created on a single table with data correctly. + * @throws Exception + */ + @Test + public void testFullBackupSetExist() throws Exception { + + LOG.info("Test full backup, backup set exists"); + + //Create set + try (BackupSystemTable table = new BackupSystemTable(TEST_UTIL.getConnection())) { + String name = "name"; + table.addToBackupSet(name, new String[] { table1.getNameAsString() }); + List names = table.describeBackupSet(name); + + assertNotNull(names); + assertTrue(names.size() == 1); + assertTrue(names.get(0).equals(table1)); + + String[] args = new String[] { "create", "full", BACKUP_ROOT_DIR, "-set", name }; + // Run backup + int ret = ToolRunner.run(conf1, new BackupDriver(), args); + assertTrue(ret == 0); + ArrayList backups = table.getBackupHistory(); + assertTrue(backups.size() == 1); + String backupId = backups.get(0).getBackupId(); + assertTrue(checkSucceeded(backupId)); + + LOG.info("backup complete"); + + // Restore from set into other table + args = new String[]{BACKUP_ROOT_DIR, backupId, + "-set", name, table1_restore.getNameAsString(), "-overwrite" }; + // Run backup + ret = ToolRunner.run(conf1, new RestoreDriver(), args); + assertTrue(ret == 0); + HBaseAdmin hba = TEST_UTIL.getHBaseAdmin(); + assertTrue(hba.tableExists(table1_restore)); + // Verify number of rows in both tables + assertEquals(TEST_UTIL.countRows(table1), TEST_UTIL.countRows(table1_restore)); + TEST_UTIL.deleteTable(table1_restore); + LOG.info("restore into other table is complete"); + hba.close(); + + + } + + } + + @Test + public void testFullBackupSetDoesNotExist() throws Exception { + + LOG.info("TFBSE test full backup, backup set does not exist"); + String name = "name1"; + String[] args = new String[]{"create", "full", BACKUP_ROOT_DIR, "-set", name }; + // Run backup + int ret = ToolRunner.run(conf1, new BackupDriver(), args); + assertTrue(ret != 0); + + } + +} \ No newline at end of file diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestFullBackupSetRestoreSet.java 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestFullBackupSetRestoreSet.java new file mode 100644 index 0000000..35e84b1 --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestFullBackupSetRestoreSet.java @@ -0,0 +1,127 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.backup; + +import static org.junit.Assert.*; + +import java.util.ArrayList; +import java.util.List; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.backup.impl.BackupSystemTable; +import org.apache.hadoop.hbase.client.HBaseAdmin; +import org.apache.hadoop.hbase.testclassification.LargeTests; +import org.apache.hadoop.util.ToolRunner; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +@Category(LargeTests.class) +public class TestFullBackupSetRestoreSet extends TestBackupBase { + + private static final Log LOG = LogFactory.getLog(TestFullBackupSetRestoreSet.class); + + @Test + public void testFullRestoreSetToOtherTable() throws Exception { + + LOG.info("Test full restore set"); + + // Create set + try (BackupSystemTable table = new BackupSystemTable(TEST_UTIL.getConnection())) { + String name = "name"; + table.addToBackupSet(name, new String[] { table1.getNameAsString() }); + List names = table.describeBackupSet(name); + + assertNotNull(names); + assertTrue(names.size() == 1); + assertTrue(names.get(0).equals(table1)); + + String[] args = new String[] { "create", "full", BACKUP_ROOT_DIR, "-set", name }; + // Run backup + int ret = ToolRunner.run(conf1, new BackupDriver(), args); + assertTrue(ret == 0); + ArrayList backups = table.getBackupHistory(); + assertTrue(backups.size() == 1); + String backupId = backups.get(0).getBackupId(); + assertTrue(checkSucceeded(backupId)); + + LOG.info("backup complete"); + + // Restore from set into other table + args = + new String[] { BACKUP_ROOT_DIR, backupId, "-set", name, table1_restore.getNameAsString(), + "-overwrite" }; + // Run backup + ret = ToolRunner.run(conf1, new RestoreDriver(), args); + assertTrue(ret == 0); + HBaseAdmin hba = TEST_UTIL.getHBaseAdmin(); + assertTrue(hba.tableExists(table1_restore)); + // Verify number of rows in both tables + assertEquals(TEST_UTIL.countRows(table1), TEST_UTIL.countRows(table1_restore)); + TEST_UTIL.deleteTable(table1_restore); + LOG.info("restore into other table is complete"); + hba.close(); + } + } + + @Test + public void testFullRestoreSetToSameTable() throws Exception { + + LOG.info("Test full restore set to same table"); + + // Create set + try (BackupSystemTable table = new BackupSystemTable(TEST_UTIL.getConnection())) { + String name = "name1"; + 
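+ // Register table1 under its own backup set before taking the full backup; the restore below is driven by the "-set" option rather than an explicit table list.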
table.addToBackupSet(name, new String[] { table1.getNameAsString() }); + List names = table.describeBackupSet(name); + + assertNotNull(names); + assertTrue(names.size() == 1); + assertTrue(names.get(0).equals(table1)); + + String[] args = new String[] { "create", "full", BACKUP_ROOT_DIR, "-set", name }; + // Run backup + int ret = ToolRunner.run(conf1, new BackupDriver(), args); + assertTrue(ret == 0); + ArrayList backups = table.getBackupHistory(); + String backupId = backups.get(0).getBackupId(); + assertTrue(checkSucceeded(backupId)); + + LOG.info("backup complete"); + int count = TEST_UTIL.countRows(table1); + TEST_UTIL.deleteTable(table1); + + // Restore from set into other table + args = new String[] { BACKUP_ROOT_DIR, backupId, "-set", name, "-overwrite" }; + // Run backup + ret = ToolRunner.run(conf1, new RestoreDriver(), args); + assertTrue(ret == 0); + HBaseAdmin hba = TEST_UTIL.getHBaseAdmin(); + assertTrue(hba.tableExists(table1)); + // Verify number of rows in both tables + assertEquals(count, TEST_UTIL.countRows(table1)); + LOG.info("restore into same table is complete"); + hba.close(); + + } + + } + +} \ No newline at end of file diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestFullRestore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestFullRestore.java new file mode 100644 index 0000000..0eddbfe --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestFullRestore.java @@ -0,0 +1,321 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more contributor license + * agreements. See the NOTICE file distributed with this work for additional information regarding + * copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. You may obtain a + * copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable + * law or agreed to in writing, software distributed under the License is distributed on an "AS IS" + * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License + * for the specific language governing permissions and limitations under the License. 
+ */ + +package org.apache.hadoop.hbase.backup; + +import static org.junit.Assert.assertTrue; + +import java.io.IOException; +import java.util.List; + +import org.apache.commons.lang.StringUtils; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.BackupAdmin; +import org.apache.hadoop.hbase.client.HBaseAdmin; +import org.apache.hadoop.hbase.testclassification.LargeTests; +import org.apache.hadoop.util.ToolRunner; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +import com.google.common.collect.Lists; + +@Category(LargeTests.class) +public class TestFullRestore extends TestBackupBase { + + private static final Log LOG = LogFactory.getLog(TestFullRestore.class); + + /** + * Verify that a single table is restored to a new table + * @throws Exception + */ + @Test + public void testFullRestoreSingle() throws Exception { + + LOG.info("test full restore on a single table empty table"); + + List tables = Lists.newArrayList(table1); + String backupId = fullTableBackup(tables); + assertTrue(checkSucceeded(backupId)); + + LOG.info("backup complete"); + + TableName[] tableset = new TableName[] { table1 }; + TableName[] tablemap = new TableName[] { table1_restore }; + BackupAdmin client = getBackupAdmin(); + client.restore(createRestoreRequest(BACKUP_ROOT_DIR, backupId, false, tableset, tablemap, false)); + HBaseAdmin hba = TEST_UTIL.getHBaseAdmin(); + assertTrue(hba.tableExists(table1_restore)); + TEST_UTIL.deleteTable(table1_restore); + hba.close(); + } + + + @Test + public void testFullRestoreSingleCommand() throws Exception { + + LOG.info("test full restore on a single table empty table: command-line"); + + List tables = Lists.newArrayList(table1); + String backupId = fullTableBackup(tables); + LOG.info("backup complete"); + assertTrue(checkSucceeded(backupId)); + //restore [tableMapping] + String[] args = new String[]{BACKUP_ROOT_DIR, backupId, + table1.getNameAsString(), table1_restore.getNameAsString() }; + // Run backup + int ret = ToolRunner.run(conf1, new RestoreDriver(), args); + + assertTrue(ret==0); + HBaseAdmin hba = TEST_UTIL.getHBaseAdmin(); + assertTrue(hba.tableExists(table1_restore)); + TEST_UTIL.deleteTable(table1_restore); + hba.close(); + } + + /** + * Verify that multiple tables are restored to new tables. + * @throws Exception + */ + @Test + public void testFullRestoreMultiple() throws Exception { + LOG.info("create full backup image on multiple tables"); + List tables = Lists.newArrayList(table2, table3); + String backupId = fullTableBackup(tables); + assertTrue(checkSucceeded(backupId)); + + TableName[] restore_tableset = new TableName[] { table2, table3 }; + TableName[] tablemap = new TableName[] { table2_restore, table3_restore }; + BackupAdmin client = getBackupAdmin(); + client.restore(createRestoreRequest(BACKUP_ROOT_DIR, backupId, false, + restore_tableset, tablemap, false)); + HBaseAdmin hba = TEST_UTIL.getHBaseAdmin(); + assertTrue(hba.tableExists(table2_restore)); + assertTrue(hba.tableExists(table3_restore)); + TEST_UTIL.deleteTable(table2_restore); + TEST_UTIL.deleteTable(table3_restore); + hba.close(); + } + + /** + * Verify that multiple tables are restored to new tables. 
+ * @throws Exception + */ + @Test + public void testFullRestoreMultipleCommand() throws Exception { + LOG.info("create full backup image on multiple tables: command-line"); + List tables = Lists.newArrayList(table2, table3); + String backupId = fullTableBackup(tables); + assertTrue(checkSucceeded(backupId)); + + TableName[] restore_tableset = new TableName[] { table2, table3 }; + TableName[] tablemap = new TableName[] { table2_restore, table3_restore }; + + + //restore [tableMapping] + String[] args = new String[]{BACKUP_ROOT_DIR, backupId, + StringUtils.join(restore_tableset, ","), + StringUtils.join(tablemap, ",") }; + // Run backup + int ret = ToolRunner.run(conf1, new RestoreDriver(), args); + + assertTrue(ret==0); + HBaseAdmin hba = TEST_UTIL.getHBaseAdmin(); + assertTrue(hba.tableExists(table2_restore)); + assertTrue(hba.tableExists(table3_restore)); + TEST_UTIL.deleteTable(table2_restore); + TEST_UTIL.deleteTable(table3_restore); + hba.close(); + } + + + /** + * Verify that a single table is restored using overwrite + * @throws Exception + */ + @Test + public void testFullRestoreSingleOverwrite() throws Exception { + + LOG.info("test full restore on a single table empty table"); + List tables = Lists.newArrayList(table1); + String backupId = fullTableBackup(tables); + assertTrue(checkSucceeded(backupId)); + + LOG.info("backup complete"); + + TableName[] tableset = new TableName[] { table1 }; + BackupAdmin client = getBackupAdmin(); + client.restore(createRestoreRequest(BACKUP_ROOT_DIR, backupId, false, tableset, null, + true)); + } + + /** + * Verify that a single table is restored using overwrite + * @throws Exception + */ + @Test + public void testFullRestoreSingleOverwriteCommand() throws Exception { + + LOG.info("test full restore on a single table empty table: command-line"); + List tables = Lists.newArrayList(table1); + String backupId = fullTableBackup(tables); + assertTrue(checkSucceeded(backupId)); + LOG.info("backup complete"); + TableName[] tableset = new TableName[] { table1 }; + //restore [tableMapping] + String[] args = new String[]{BACKUP_ROOT_DIR, backupId, + StringUtils.join(tableset, ","), "-overwrite" }; + // Run restore + int ret = ToolRunner.run(conf1, new RestoreDriver(), args); + assertTrue(ret==0); + + HBaseAdmin hba = TEST_UTIL.getHBaseAdmin(); + assertTrue(hba.tableExists(table1)); + hba.close(); + + } + + /** + * Verify that multiple tables are restored to new tables using overwrite. + * @throws Exception + */ + @Test + public void testFullRestoreMultipleOverwrite() throws Exception { + LOG.info("create full backup image on multiple tables"); + + List tables = Lists.newArrayList(table2, table3); + String backupId = fullTableBackup(tables); + assertTrue(checkSucceeded(backupId)); + + TableName[] restore_tableset = new TableName[] { table2, table3 }; + BackupAdmin client = getBackupAdmin(); + client.restore(createRestoreRequest(BACKUP_ROOT_DIR, backupId, + false, restore_tableset, null, true)); + } + + /** + * Verify that multiple tables are restored to new tables using overwrite. 
+ * @throws Exception + */ + @Test + public void testFullRestoreMultipleOverwriteCommand() throws Exception { + LOG.info("create full backup image on multiple tables: command-line"); + + List tables = Lists.newArrayList(table2, table3); + String backupId = fullTableBackup(tables); + assertTrue(checkSucceeded(backupId)); + + TableName[] restore_tableset = new TableName[] { table2, table3 }; + //restore [tableMapping] + String[] args = new String[]{BACKUP_ROOT_DIR, backupId, + StringUtils.join(restore_tableset, ","), "-overwrite" }; + // Run backup + int ret = ToolRunner.run(conf1, new RestoreDriver(), args); + + assertTrue(ret==0); + HBaseAdmin hba = TEST_UTIL.getHBaseAdmin(); + assertTrue(hba.tableExists(table2)); + assertTrue(hba.tableExists(table3)); + hba.close(); + } + + /** + * Verify that restore fails on a single table that does not exist. + * @throws Exception + */ + @Test(expected = IOException.class) + public void testFullRestoreSingleDNE() throws Exception { + + LOG.info("test restore fails on a single table that does not exist"); + List tables = Lists.newArrayList(table1); + String backupId = fullTableBackup(tables); + assertTrue(checkSucceeded(backupId)); + + LOG.info("backup complete"); + + TableName[] tableset = new TableName[] { TableName.valueOf("faketable") }; + TableName[] tablemap = new TableName[] { table1_restore }; + BackupAdmin client = getBackupAdmin(); + client.restore(createRestoreRequest(BACKUP_ROOT_DIR, backupId, false, tableset, tablemap, + false)); + } + + + /** + * Verify that restore fails on a single table that does not exist. + * @throws Exception + */ + @Test + public void testFullRestoreSingleDNECommand() throws Exception { + + LOG.info("test restore fails on a single table that does not exist: command-line"); + List tables = Lists.newArrayList(table1); + String backupId = fullTableBackup(tables); + assertTrue(checkSucceeded(backupId)); + + LOG.info("backup complete"); + + TableName[] tableset = new TableName[] { TableName.valueOf("faketable") }; + TableName[] tablemap = new TableName[] { table1_restore }; + String[] args = new String[]{BACKUP_ROOT_DIR, backupId, + StringUtils.join(tableset, ","), + StringUtils.join(tablemap, ",") }; + // Run restore + int ret = ToolRunner.run(conf1, new RestoreDriver(), args); + assertTrue(ret != 0); + + } + /** + * Verify that restore fails on multiple tables that do not exist. + * @throws Exception + */ + @Test(expected = IOException.class) + public void testFullRestoreMultipleDNE() throws Exception { + + LOG.info("test restore fails on multiple tables that do not exist"); + + List tables = Lists.newArrayList(table2, table3); + String backupId = fullTableBackup(tables); + assertTrue(checkSucceeded(backupId)); + + TableName[] restore_tableset + = new TableName[] { TableName.valueOf("faketable1"), TableName.valueOf("faketable2") }; + TableName[] tablemap = new TableName[] { table2_restore, table3_restore }; + BackupAdmin client = getBackupAdmin(); + client.restore(createRestoreRequest(BACKUP_ROOT_DIR, backupId, false, + restore_tableset, tablemap, false)); + } + + /** + * Verify that restore fails on multiple tables that do not exist. 
+ * @throws Exception + */ + @Test + public void testFullRestoreMultipleDNECommand() throws Exception { + + LOG.info("test restore fails on multiple tables that do not exist: command-line"); + + List tables = Lists.newArrayList(table2, table3); + String backupId = fullTableBackup(tables); + assertTrue(checkSucceeded(backupId)); + + TableName[] restore_tableset + = new TableName[] { TableName.valueOf("faketable1"), TableName.valueOf("faketable2") }; + TableName[] tablemap = new TableName[] { table2_restore, table3_restore }; + String[] args = new String[]{BACKUP_ROOT_DIR, backupId, + StringUtils.join(restore_tableset, ","), + StringUtils.join(tablemap, ",") }; + // Run restore + int ret = ToolRunner.run(conf1, new RestoreDriver(), args); + assertTrue(ret != 0); + } +} \ No newline at end of file diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackup.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackup.java new file mode 100644 index 0000000..0f35026 --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackup.java @@ -0,0 +1,176 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hbase.backup; + +import static org.junit.Assert.assertTrue; + +import java.util.List; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.BackupAdmin; +import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.client.ConnectionFactory; +import org.apache.hadoop.hbase.client.HBaseAdmin; +import org.apache.hadoop.hbase.client.HTable; +import org.apache.hadoop.hbase.client.Put; +import org.apache.hadoop.hbase.testclassification.LargeTests; +import org.apache.hadoop.hbase.util.Bytes; +import org.hamcrest.CoreMatchers; +import org.junit.Assert; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +import com.google.common.collect.Lists; + +@Category(LargeTests.class) +public class TestIncrementalBackup extends TestBackupBase { + private static final Log LOG = LogFactory.getLog(TestIncrementalBackup.class); + //implement all test cases in 1 test since incremental backup/restore has dependencies + @Test + public void TestIncBackupRestore() throws Exception { + // #1 - create full backup for all tables + LOG.info("create full backup image for all tables"); + + List tables = Lists.newArrayList(table1, table2, table3, table4); + HBaseAdmin admin = null; + Connection conn = ConnectionFactory.createConnection(conf1); + admin = (HBaseAdmin) conn.getAdmin(); + + BackupRequest request = new BackupRequest(); + request.setBackupType(BackupType.FULL).setTableList(tables).setTargetRootDir(BACKUP_ROOT_DIR); + String backupIdFull = admin.getBackupAdmin().backupTables(request); + + assertTrue(checkSucceeded(backupIdFull)); + + // #2 - insert some data to table + HTable t1 = (HTable) conn.getTable(table1); + Put p1; + for (int i = 0; i < NB_ROWS_IN_BATCH; i++) { + p1 = new Put(Bytes.toBytes("row-t1" + i)); + p1.addColumn(famName, qualName, Bytes.toBytes("val" + i)); + t1.put(p1); + } + + Assert.assertThat(TEST_UTIL.countRows(t1), CoreMatchers.equalTo(NB_ROWS_IN_BATCH * 2)); + t1.close(); + + HTable t2 = (HTable) conn.getTable(table2); + Put p2; + for (int i = 0; i < 5; i++) { + p2 = new Put(Bytes.toBytes("row-t2" + i)); + p2.addColumn(famName, qualName, Bytes.toBytes("val" + i)); + t2.put(p2); + } + + Assert.assertThat(TEST_UTIL.countRows(t2), CoreMatchers.equalTo(NB_ROWS_IN_BATCH + 5)); + t2.close(); + + // #3 - incremental backup for multiple tables + tables = Lists.newArrayList(table1, table2, table3); + request = new BackupRequest(); + request.setBackupType(BackupType.INCREMENTAL).setTableList(tables) + .setTargetRootDir(BACKUP_ROOT_DIR); + String backupIdIncMultiple = admin.getBackupAdmin().backupTables(request); + assertTrue(checkSucceeded(backupIdIncMultiple)); + + // #4 - restore full backup for all tables, without overwrite + TableName[] tablesRestoreFull = + new TableName[] { table1, table2, table3, table4 }; + + TableName[] tablesMapFull = + new TableName[] { table1_restore, table2_restore, table3_restore, table4_restore }; + + BackupAdmin client = getBackupAdmin(); + client.restore(createRestoreRequest(BACKUP_ROOT_DIR, backupIdFull, false, + tablesRestoreFull, + tablesMapFull, false)); + + // #5.1 - check tables for full restore + HBaseAdmin hAdmin = TEST_UTIL.getHBaseAdmin(); + assertTrue(hAdmin.tableExists(table1_restore)); + assertTrue(hAdmin.tableExists(table2_restore)); + assertTrue(hAdmin.tableExists(table3_restore)); + assertTrue(hAdmin.tableExists(table4_restore)); + + hAdmin.close(); + + // 
#5.2 - checking row count of tables for full restore + HTable hTable = (HTable) conn.getTable(table1_restore); + Assert.assertThat(TEST_UTIL.countRows(hTable), CoreMatchers.equalTo(NB_ROWS_IN_BATCH)); + hTable.close(); + + hTable = (HTable) conn.getTable(table2_restore); + Assert.assertThat(TEST_UTIL.countRows(hTable), CoreMatchers.equalTo(NB_ROWS_IN_BATCH)); + hTable.close(); + + hTable = (HTable) conn.getTable(table3_restore); + Assert.assertThat(TEST_UTIL.countRows(hTable), CoreMatchers.equalTo(0)); + hTable.close(); + + hTable = (HTable) conn.getTable(table4_restore); + Assert.assertThat(TEST_UTIL.countRows(hTable), CoreMatchers.equalTo(0)); + hTable.close(); + + // #6 - restore incremental backup for multiple tables, with overwrite + TableName[] tablesRestoreIncMultiple = + new TableName[] { table1, table2, table3 }; + TableName[] tablesMapIncMultiple = + new TableName[] { table1_restore, table2_restore, table3_restore }; + client.restore(createRestoreRequest(BACKUP_ROOT_DIR, backupIdIncMultiple, false, + tablesRestoreIncMultiple, tablesMapIncMultiple, true)); + + hTable = (HTable) conn.getTable(table1_restore); + Assert.assertThat(TEST_UTIL.countRows(hTable), CoreMatchers.equalTo(NB_ROWS_IN_BATCH * 2)); + hTable.close(); + + hTable = (HTable) conn.getTable(table2_restore); + Assert.assertThat(TEST_UTIL.countRows(hTable), CoreMatchers.equalTo(NB_ROWS_IN_BATCH + 5)); + hTable.close(); + + hTable = (HTable) conn.getTable(table3_restore); + Assert.assertThat(TEST_UTIL.countRows(hTable), CoreMatchers.equalTo(0)); + hTable.close(); + + // #7 - incremental backup for single, empty table + + tables = toList(table4.getNameAsString()); + request = new BackupRequest(); + request.setBackupType(BackupType.INCREMENTAL).setTableList(tables) + .setTargetRootDir(BACKUP_ROOT_DIR); + String backupIdIncEmpty = admin.getBackupAdmin().backupTables(request); + + + // #8 - restore incremental backup for single empty table, with overwrite + TableName[] tablesRestoreIncEmpty = new TableName[] { table4 }; + TableName[] tablesMapIncEmpty = new TableName[] { table4_restore }; + + client.restore(createRestoreRequest(BACKUP_ROOT_DIR, backupIdIncEmpty, false, + tablesRestoreIncEmpty, + tablesMapIncEmpty, true)); + + hTable = (HTable) conn.getTable(table4_restore); + Assert.assertThat(TEST_UTIL.countRows(hTable), CoreMatchers.equalTo(0)); + hTable.close(); + admin.close(); + conn.close(); + } + +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupDeleteTable.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupDeleteTable.java new file mode 100644 index 0000000..ad11548 --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupDeleteTable.java @@ -0,0 +1,139 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.backup; + +import static org.junit.Assert.assertTrue; + +import java.util.List; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.BackupAdmin; +import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.client.ConnectionFactory; +import org.apache.hadoop.hbase.client.HBaseAdmin; +import org.apache.hadoop.hbase.client.HTable; +import org.apache.hadoop.hbase.client.Put; +import org.apache.hadoop.hbase.testclassification.LargeTests; +import org.apache.hadoop.hbase.util.Bytes; +import org.hamcrest.CoreMatchers; +import org.junit.Assert; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +import com.google.common.collect.Lists; + +/** + * + * 1. Create table t1, t2 + * 2. Load data to t1, t2 + * 3 Full backup t1, t2 + * 4 Delete t2 + * 5 Load data to t1 + * 6 Incremental backup t1 + */ +@Category(LargeTests.class) +public class TestIncrementalBackupDeleteTable extends TestBackupBase { + private static final Log LOG = LogFactory.getLog(TestIncrementalBackupDeleteTable.class); + //implement all test cases in 1 test since incremental backup/restore has dependencies + @Test + public void TestIncBackupDeleteTable() throws Exception { + // #1 - create full backup for all tables + LOG.info("create full backup image for all tables"); + + List tables = Lists.newArrayList(table1, table2); + HBaseAdmin admin = null; + Connection conn = ConnectionFactory.createConnection(conf1); + admin = (HBaseAdmin) conn.getAdmin(); + + BackupRequest request = new BackupRequest(); + request.setBackupType(BackupType.FULL).setTableList(tables).setTargetRootDir(BACKUP_ROOT_DIR); + String backupIdFull = admin.getBackupAdmin().backupTables(request); + + assertTrue(checkSucceeded(backupIdFull)); + + // #2 - insert some data to table table1 + HTable t1 = (HTable) conn.getTable(table1); + Put p1; + for (int i = 0; i < NB_ROWS_IN_BATCH; i++) { + p1 = new Put(Bytes.toBytes("row-t1" + i)); + p1.addColumn(famName, qualName, Bytes.toBytes("val" + i)); + t1.put(p1); + } + + Assert.assertThat(TEST_UTIL.countRows(t1), CoreMatchers.equalTo(NB_ROWS_IN_BATCH * 2)); + t1.close(); + + // Delete table table2 + admin.disableTable(table2); + admin.deleteTable(table2); + + // #3 - incremental backup for table1 + tables = Lists.newArrayList(table1); + request = new BackupRequest(); + request.setBackupType(BackupType.INCREMENTAL).setTableList(tables) + .setTargetRootDir(BACKUP_ROOT_DIR); + String backupIdIncMultiple = admin.getBackupAdmin().backupTables(request); + assertTrue(checkSucceeded(backupIdIncMultiple)); + + // #4 - restore full backup for all tables, without overwrite + TableName[] tablesRestoreFull = + new TableName[] { table1, table2}; + + TableName[] tablesMapFull = + new TableName[] { table1_restore, table2_restore }; + + BackupAdmin client = getBackupAdmin(); + client.restore(createRestoreRequest(BACKUP_ROOT_DIR, backupIdFull, false, + tablesRestoreFull, + tablesMapFull, false)); + + // #5.1 - check tables for full restore + HBaseAdmin hAdmin = TEST_UTIL.getHBaseAdmin(); + assertTrue(hAdmin.tableExists(table1_restore)); + assertTrue(hAdmin.tableExists(table2_restore)); + + + // #5.2 - checking row count of tables for full restore + HTable hTable = (HTable) conn.getTable(table1_restore); + 
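+ // The full restore reflects the state captured at full-backup time: only the initial NB_ROWS_IN_BATCH rows, not the batch loaded after the full backup.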
Assert.assertThat(TEST_UTIL.countRows(hTable), CoreMatchers.equalTo(NB_ROWS_IN_BATCH)); + hTable.close(); + + hTable = (HTable) conn.getTable(table2_restore); + Assert.assertThat(TEST_UTIL.countRows(hTable), CoreMatchers.equalTo(NB_ROWS_IN_BATCH)); + hTable.close(); + + + // #6 - restore incremental backup for table1 + TableName[] tablesRestoreIncMultiple = + new TableName[] { table1 }; + TableName[] tablesMapIncMultiple = + new TableName[] { table1_restore }; + client.restore(createRestoreRequest(BACKUP_ROOT_DIR, backupIdIncMultiple, false, + tablesRestoreIncMultiple, tablesMapIncMultiple, true)); + + hTable = (HTable) conn.getTable(table1_restore); + Assert.assertThat(TEST_UTIL.countRows(hTable), CoreMatchers.equalTo(NB_ROWS_IN_BATCH * 2)); + hTable.close(); + admin.close(); + conn.close(); + } + +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupNoDataLoss.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupNoDataLoss.java new file mode 100644 index 0000000..c3ad7d4 --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupNoDataLoss.java @@ -0,0 +1,124 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hbase.backup; + +import static org.junit.Assert.assertTrue; + +import java.util.List; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.BackupAdmin; +import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.client.ConnectionFactory; +import org.apache.hadoop.hbase.client.HTable; +import org.apache.hadoop.hbase.client.Put; +import org.apache.hadoop.hbase.testclassification.LargeTests; +import org.apache.hadoop.hbase.util.Bytes; +import org.hamcrest.CoreMatchers; +import org.junit.Assert; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +import com.google.common.collect.Lists; + +@Category(LargeTests.class) +public class TestIncrementalBackupNoDataLoss extends TestBackupBase { + private static final Log LOG = LogFactory.getLog(TestIncrementalBackupNoDataLoss.class); + + // implement all test cases in 1 test since incremental backup/restore has dependencies + @Test + public void TestIncBackupRestore() throws Exception { + + // #1 - create full backup for all tables + LOG.info("create full backup image for all tables"); + List tables = Lists.newArrayList(table1, table2); + String backupIdFull = fullTableBackup(tables); + assertTrue(checkSucceeded(backupIdFull)); + Connection conn = ConnectionFactory.createConnection(conf1); + // #2 - insert some data to table + HTable t1 = (HTable) conn.getTable(table1); + Put p1; + for (int i = 0; i < NB_ROWS_IN_BATCH; i++) { + p1 = new Put(Bytes.toBytes("row-t1" + i)); + p1.addColumn(famName, qualName, Bytes.toBytes("val" + i)); + t1.put(p1); + } + + Assert.assertThat(TEST_UTIL.countRows(t1), CoreMatchers.equalTo(NB_ROWS_IN_BATCH * 2)); + t1.close(); + + HTable t2 = (HTable) conn.getTable(table2); + Put p2; + for (int i = 0; i < 5; i++) { + p2 = new Put(Bytes.toBytes("row-t2" + i)); + p2.addColumn(famName, qualName, Bytes.toBytes("val" + i)); + t2.put(p2); + } + + Assert.assertThat(TEST_UTIL.countRows(t2), CoreMatchers.equalTo(NB_ROWS_IN_BATCH + 5)); + t2.close(); + + // #3 - incremental backup for table1 + + tables = Lists.newArrayList(table1); + String backupIdInc1 = incrementalTableBackup(tables); + assertTrue(checkSucceeded(backupIdInc1)); + + // #4 - incremental backup for table2 + + tables = Lists.newArrayList(table2); + String backupIdInc2 = incrementalTableBackup(tables); + assertTrue(checkSucceeded(backupIdInc2)); + // #5 - restore incremental backup for table1 + TableName[] tablesRestoreInc1 = new TableName[] { table1 }; + TableName[] tablesMapInc1 = new TableName[] { table1_restore }; + + if (TEST_UTIL.getAdmin().tableExists(table1_restore)) { + TEST_UTIL.deleteTable(table1_restore); + } + if (TEST_UTIL.getAdmin().tableExists(table2_restore)) { + TEST_UTIL.deleteTable(table2_restore); + } + + BackupAdmin client = getBackupAdmin(); + client.restore(createRestoreRequest(BACKUP_ROOT_DIR, backupIdInc1, false, tablesRestoreInc1, + tablesMapInc1, false)); + + HTable hTable = (HTable) conn.getTable(table1_restore); + Assert.assertThat(TEST_UTIL.countRows(hTable), CoreMatchers.equalTo(NB_ROWS_IN_BATCH * 2)); + hTable.close(); + + // #5 - restore incremental backup for table2 + + TableName[] tablesRestoreInc2 = new TableName[] { table2 }; + TableName[] tablesMapInc2 = new TableName[] { table2_restore }; + + client.restore(createRestoreRequest(BACKUP_ROOT_DIR, backupIdInc2, false, tablesRestoreInc2, + tablesMapInc2, false)); + + hTable = (HTable) 
conn.getTable(table2_restore); + Assert.assertThat(TEST_UTIL.countRows(hTable), CoreMatchers.equalTo(NB_ROWS_IN_BATCH + 5)); + hTable.close(); + + conn.close(); + } + +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestRemoteBackup.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestRemoteBackup.java new file mode 100644 index 0000000..e29a4a6 --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestRemoteBackup.java @@ -0,0 +1,45 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more contributor license + * agreements. See the NOTICE file distributed with this work for additional information regarding + * copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. You may obtain a + * copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable + * law or agreed to in writing, software distributed under the License is distributed on an "AS IS" + * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License + * for the specific language governing permissions and limitations under the License. + */ + +package org.apache.hadoop.hbase.backup; + +import static org.junit.Assert.assertTrue; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.testclassification.LargeTests; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +import com.google.common.collect.Lists; + +@Category(LargeTests.class) +public class TestRemoteBackup extends TestBackupBase { + + private static final Log LOG = LogFactory.getLog(TestRemoteBackup.class); + + /** + * Verify that a remote full backup is created on a single table with data correctly. + * @throws Exception + */ + @Test + public void testFullBackupRemote() throws Exception { + + LOG.info("test remote full backup on a single table"); + + String backupId = backupTables(BackupType.FULL, + Lists.newArrayList(table1), BACKUP_REMOTE_ROOT_DIR); + assertTrue(checkSucceeded(backupId)); + + LOG.info("backup complete " + backupId); + } + +} \ No newline at end of file diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestRemoteRestore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestRemoteRestore.java new file mode 100644 index 0000000..1be2aa2 --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestRemoteRestore.java @@ -0,0 +1,50 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more contributor license + * agreements. See the NOTICE file distributed with this work for additional information regarding + * copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. You may obtain a + * copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable + * law or agreed to in writing, software distributed under the License is distributed on an "AS IS" + * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License + * for the specific language governing permissions and limitations under the License. 
+ */ + +package org.apache.hadoop.hbase.backup; + +import static org.junit.Assert.assertTrue; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.HBaseAdmin; +import org.apache.hadoop.hbase.testclassification.LargeTests; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +@Category(LargeTests.class) +public class TestRemoteRestore extends TestBackupBase { + + private static final Log LOG = LogFactory.getLog(TestRemoteRestore.class); + + /** + * Verify that a remote restore on a single table is successful. + * @throws Exception + */ + @Test + public void testFullRestoreRemote() throws Exception { + + LOG.info("test remote full backup on a single table"); + String backupId = backupTables(BackupType.FULL, toList(table1.getNameAsString()), + BACKUP_REMOTE_ROOT_DIR); + LOG.info("backup complete"); + TableName[] tableset = new TableName[] { table1 }; + TableName[] tablemap = new TableName[] { table1_restore }; + getBackupAdmin().restore(createRestoreRequest(BACKUP_REMOTE_ROOT_DIR, backupId, false, tableset, + tablemap, false)); + HBaseAdmin hba = TEST_UTIL.getHBaseAdmin(); + assertTrue(hba.tableExists(table1_restore)); + TEST_UTIL.deleteTable(table1_restore); + hba.close(); + } + +} \ No newline at end of file diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestRestoreBoundaryTests.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestRestoreBoundaryTests.java new file mode 100644 index 0000000..d2308d1 --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestRestoreBoundaryTests.java @@ -0,0 +1,77 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hbase.backup; + +import static org.junit.Assert.assertTrue; + +import java.util.List; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.HBaseAdmin; +import org.apache.hadoop.hbase.testclassification.LargeTests; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +@Category(LargeTests.class) +public class TestRestoreBoundaryTests extends TestBackupBase { + + private static final Log LOG = LogFactory.getLog(TestRestoreBoundaryTests.class); + + /** + * Verify that a single empty table is restored to a new table + * @throws Exception + */ + @Test + public void testFullRestoreSingleEmpty() throws Exception { + LOG.info("test full restore on a single table empty table"); + String backupId = fullTableBackup(toList(table1.getNameAsString())); + LOG.info("backup complete"); + TableName[] tableset = new TableName[] { table1 }; + TableName[] tablemap = new TableName[] { table1_restore }; + getBackupAdmin().restore(createRestoreRequest(BACKUP_ROOT_DIR, backupId, false, tableset, tablemap, + false)); + HBaseAdmin hba = TEST_UTIL.getHBaseAdmin(); + assertTrue(hba.tableExists(table1_restore)); + TEST_UTIL.deleteTable(table1_restore); + } + + /** + * Verify that multiple tables are restored to new tables. + * @throws Exception + */ + @Test + public void testFullRestoreMultipleEmpty() throws Exception { + LOG.info("create full backup image on multiple tables"); + + List tables = toList(table2.getNameAsString(), table3.getNameAsString()); + String backupId = fullTableBackup(tables); + TableName[] restore_tableset = new TableName[] { table2, table3}; + TableName[] tablemap = new TableName[] { table2_restore, table3_restore }; + getBackupAdmin().restore(createRestoreRequest(BACKUP_ROOT_DIR, backupId, false, restore_tableset, + tablemap, + false)); + HBaseAdmin hba = TEST_UTIL.getHBaseAdmin(); + assertTrue(hba.tableExists(table2_restore)); + assertTrue(hba.tableExists(table3_restore)); + TEST_UTIL.deleteTable(table2_restore); + TEST_UTIL.deleteTable(table3_restore); + } +} \ No newline at end of file
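
The tests above drive the backup/restore feature through two entry points: the programmatic BackupAdmin obtained via HBaseAdmin.getBackupAdmin(), and the command-line BackupDriver/RestoreDriver run through ToolRunner with arguments of the form {"create", "full", <backup_root>, <tables>} for backup and {<backup_root>, <backupId>, <tables>, <tableMapping>} plus the optional "-set" and "-overwrite" flags for restore. The sketch below is not part of the patch; it is a minimal illustration, assuming a running cluster reachable through the default configuration and an existing table "t1", of how a client might combine the two paths the same way the tests do: a full backup taken with BackupRequest and BackupAdmin.backupTables(), followed by a restore into a new table via RestoreDriver. The class name, table names, and backup root directory are illustrative stand-ins for the test fixtures (table1, table1_restore, BACKUP_ROOT_DIR).

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.backup.BackupRequest;
import org.apache.hadoop.hbase.backup.BackupType;
import org.apache.hadoop.hbase.backup.RestoreDriver;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.util.ToolRunner;

import com.google.common.collect.Lists;

public class BackupRestoreExample {                      // illustrative class, not part of the patch
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    String backupRootDir = "/backupUT";                  // stand-in for BACKUP_ROOT_DIR
    TableName source = TableName.valueOf("t1");          // stand-in for table1
    TableName target = TableName.valueOf("t1_restore");  // stand-in for table1_restore

    try (Connection conn = ConnectionFactory.createConnection(conf)) {
      HBaseAdmin admin = (HBaseAdmin) conn.getAdmin();

      // Full backup through the client API, as in TestIncrementalBackup#TestIncBackupRestore
      BackupRequest request = new BackupRequest();
      request.setBackupType(BackupType.FULL)
          .setTableList(Lists.newArrayList(source))
          .setTargetRootDir(backupRootDir);
      String backupId = admin.getBackupAdmin().backupTables(request);

      // Restore into a different table through the command-line driver,
      // as in TestFullRestore#testFullRestoreSingleCommand
      String[] restoreArgs = new String[] { backupRootDir, backupId,
          source.getNameAsString(), target.getNameAsString() };
      int ret = ToolRunner.run(conf, new RestoreDriver(), restoreArgs);
      if (ret != 0) {
        throw new IllegalStateException("restore failed, exit code " + ret);
      }
      admin.close();
    }
  }
}

An incremental backup follows the same pattern with BackupType.INCREMENTAL and requires a prior full backup of the same tables, which is why TestIncrementalBackup keeps all of its steps in a single test method.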