diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/backup/BackupRestoreClientFactory.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/backup/BackupRestoreClientFactory.java deleted file mode 100644 index b60ab21..0000000 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/backup/BackupRestoreClientFactory.java +++ /dev/null @@ -1,55 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase.backup; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.classification.InterfaceAudience; -import org.apache.hadoop.hbase.classification.InterfaceStability; -import org.apache.hadoop.util.ReflectionUtils; - -@InterfaceAudience.Private -@InterfaceStability.Evolving -public final class BackupRestoreClientFactory { - private static final Log LOG = LogFactory.getLog(BackupRestoreClientFactory.class); - - private BackupRestoreClientFactory(){ - throw new AssertionError("Instantiating utility class..."); - } - - - /** - * Gets restore client implementation - * @param conf - configuration - * @return backup client - */ - public static RestoreClient getRestoreClient(Configuration conf) { - try{ - Class cls = - conf.getClassByName("org.apache.hadoop.hbase.backup.impl.RestoreClientImpl"); - - RestoreClient client = (RestoreClient) ReflectionUtils.newInstance(cls, conf); - client.setConf(conf); - return client; - } catch(Exception e){ - LOG.error("Can not instantiate RestoreClient. Make sure you have hbase-server jar in CLASSPATH", e); - } - return null; - } -} diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/backup/RestoreClient.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/backup/RestoreClient.java deleted file mode 100644 index 07f573e..0000000 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/backup/RestoreClient.java +++ /dev/null @@ -1,48 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.hbase.backup; - -import java.io.IOException; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.classification.InterfaceAudience; -import org.apache.hadoop.hbase.classification.InterfaceStability; - -@InterfaceAudience.Private -@InterfaceStability.Evolving -public interface RestoreClient { - - public void setConf(Configuration conf); - - /** - * Restore operation. - * @param backupRootDir The root dir for backup image - * @param backupId The backup id for image to be restored - * @param check True if only do dependency check - * @param sTableArray The array of tables to be restored - * @param tTableArray The array of mapping tables to restore to - * @param isOverwrite True then do restore overwrite if target table exists, otherwise fail the - * request if target table exists - * @throws IOException if any failure during restore - */ - public void restore( - String backupRootDir, - String backupId, boolean check, TableName[] sTableArray, - TableName[] tTableArray, boolean isOverwrite) throws IOException; -} diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/backup/RestoreRequest.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/backup/RestoreRequest.java index c78f0bc..7490d20 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/backup/RestoreRequest.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/backup/RestoreRequest.java @@ -21,22 +21,21 @@ import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.classification.InterfaceStability; -@InterfaceAudience.Public -@InterfaceStability.Evolving - /** * POJO class for restore request */ +@InterfaceAudience.Public +@InterfaceStability.Evolving public class RestoreRequest { - - private String backupRootDir; - private String backupId; + + private String backupRootDir; + private String backupId; private boolean check = false; private TableName[] fromTables; private TableName[] toTables; private boolean overwrite = false; - - public RestoreRequest(){ + + public RestoreRequest() { } public String getBackupRootDir() { @@ -73,7 +72,6 @@ public class RestoreRequest { public RestoreRequest setFromTables(TableName[] fromTables) { this.fromTables = fromTables; return this; - } public TableName[] getToTables() { @@ -93,6 +91,4 @@ public class RestoreRequest { this.overwrite = overwrite; return this; } - - } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java index f46c855..37b38a5 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java @@ -38,7 +38,7 @@ import org.apache.hadoop.hbase.TableExistsException; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableNotFoundException; import org.apache.hadoop.hbase.backup.BackupRequest; -import org.apache.hadoop.hbase.backup.BackupType; +import org.apache.hadoop.hbase.backup.RestoreRequest; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.classification.InterfaceStability; import org.apache.hadoop.hbase.client.security.SecurityCapability; @@ -916,6 +916,20 @@ public interface Admin extends Abortable, Closeable { /** + * Restore operation. Asynchronous version. 
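For illustration only, not part of the patch: the hunks above and below replace the reflection-loaded RestoreClient with a RestoreRequest POJO that is handed directly to Admin. A minimal client-side sketch, assuming an open cluster Connection, made-up backup ids and paths, and that RestoreRequest's remaining fluent setters (setBackupRootDir, setBackupId, setCheck, setToTables, setOverwrite) follow the same pattern as the setFromTables shown above:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.backup.RestoreRequest;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class RestoreUsageSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          RestoreRequest request = new RestoreRequest()
              .setBackupRootDir("hdfs://nn:8020/backup")  // assumed setter, mirrors setFromTables
              .setBackupId("backup_1463074759000")        // assumed setter; example id
              .setCheck(false)                            // assumed setter: dependency check only
              .setFromTables(new TableName[] { TableName.valueOf("t1") })
              .setToTables(new TableName[] { TableName.valueOf("t1_restored") })  // assumed setter
              .setOverwrite(false);                       // assumed setter
          // Synchronous entry point added to Admin in this patch; per the HBaseAdmin hunk
          // later on, it wraps restoreTablesAsync() and waits up to
          // hbase.client.restore.wait.timeout.sec (default 24h).
          admin.restoreTables(request);
        }
      }
    }

The asynchronous variant, restoreTablesAsync(), returns a Future the caller can wait on; the BackupAdmin.restore()/restoreAsync() methods added below simply delegate to these two Admin methods.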
+ * @param request RestoreRequest instance + * @throws IOException + */ + public Future restoreTablesAsync(final RestoreRequest request) throws IOException; + + /** + * Restore operation. Synchronous version. + * @param request RestoreRequest instance + * @throws IOException + */ + public void restoreTables(final RestoreRequest userRequest) throws IOException; + + /** * Modify an existing table, more IRB friendly version. Asynchronous operation. This means that * it may be a while before your schema change is updated across all of the table. * You can use Future.get(long, TimeUnit) to wait on the operation to complete. diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BackupAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BackupAdmin.java index 4134cc8..b19dea1 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BackupAdmin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BackupAdmin.java @@ -73,6 +73,21 @@ public interface BackupAdmin extends Closeable{ public Future backupTablesAsync(final BackupRequest userRequest) throws IOException; /** + * Restore backup + * @param request - restore request + * @throws IOException exception + */ + public void restore(RestoreRequest request) throws IOException; + + /** + * Restore backup + * @param request - restore request + * @return Future which client can wait on + * @throws IOException exception + */ + public Future restoreAsync(RestoreRequest request) throws IOException; + + /** * Describe backup image command * @param backupId - backup id * @return backup info @@ -156,12 +171,4 @@ public interface BackupAdmin extends Closeable{ * @throws IOException exception */ public void removeFromBackupSet(String name, String[] tables) throws IOException; - - /** - * Restore backup - * @param request - restore request - * @throws IOException exception - */ - public void restore(RestoreRequest request) throws IOException; - } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java index c245ef4..1ac43f9 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java @@ -1408,6 +1408,13 @@ class ConnectionImplementation implements ClusterConnection, Closeable { } @Override + public MasterProtos.RestoreTablesResponse restoreTables( + RpcController controller, + MasterProtos.RestoreTablesRequest request) throws ServiceException { + return stub.restoreTables(controller, request); + } + + @Override public MasterProtos.AddColumnResponse addColumn( RpcController controller, MasterProtos.AddColumnRequest request) throws ServiceException { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java index 8ba26ba..2b1dcd1 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java @@ -62,6 +62,7 @@ import org.apache.hadoop.hbase.TableNotFoundException; import org.apache.hadoop.hbase.UnknownRegionException; import org.apache.hadoop.hbase.ZooKeeperConnectionException; import org.apache.hadoop.hbase.backup.BackupRequest; +import org.apache.hadoop.hbase.backup.RestoreRequest; import 
org.apache.hadoop.hbase.backup.util.BackupClientUtil; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.classification.InterfaceStability; @@ -148,6 +149,8 @@ import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyTableRespon import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveRegionRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotResponse; +import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesResponse; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetNormalizerRunningRequest; @@ -218,6 +221,7 @@ public class HBaseAdmin implements Admin { private final int retryLongerMultiplier; private final int syncWaitTimeout; private final long backupWaitTimeout; + private final long restoreWaitTimeout; private boolean aborted; private int operationTimeout; @@ -246,6 +250,8 @@ public class HBaseAdmin implements Admin { "hbase.client.sync.wait.timeout.msec", 10 * 60000); // 10min this.backupWaitTimeout = this.conf.getInt( "hbase.client.backup.wait.timeout.sec", 24 * 3600); // 24 h + this.restoreWaitTimeout = this.conf.getInt( + "hbase.client.restore.wait.timeout.sec", 24 * 3600); // 24 h this.rpcCallerFactory = RpcRetryingCallerFactory.instantiate(this.conf); this.ng = this.connection.getNonceGenerator(); @@ -1630,6 +1636,50 @@ public class HBaseAdmin implements Admin { } } + /** + * Restore operation. + * @param request RestoreRequest instance + * @throws IOException + */ + @Override + public Future restoreTablesAsync(final RestoreRequest userRequest) throws IOException { + RestoreTablesResponse response = executeCallable( + new MasterCallable(getConnection()) { + @Override + public RestoreTablesResponse call(int callTimeout) throws ServiceException { + try { + RestoreTablesRequest request = RequestConverter.buildRestoreTablesRequest( + userRequest.getBackupRootDir(), userRequest.getBackupId(), + userRequest.isCheck(), userRequest.getFromTables(), userRequest.getToTables(), + userRequest.isOverwrite(), ng.getNonceGroup(), ng.newNonce()); + return master.restoreTables(null, request); + } catch (IOException ioe) { + throw new ServiceException(ioe); + } + } + }); + return new TableRestoreFuture(this, TableName.BACKUP_TABLE_NAME, response); + } + + @Override + public void restoreTables(final RestoreRequest userRequest) throws IOException { + get(restoreTablesAsync(userRequest), + restoreWaitTimeout, TimeUnit.SECONDS); + } + + private static class TableRestoreFuture extends TableFuture { + public TableRestoreFuture(final HBaseAdmin admin, final TableName tableName, + final RestoreTablesResponse response) { + super(admin, tableName, + (response != null) ? 
response.getProcId() : null); + } + + @Override + public String getOperationType() { + return "RESTORE"; + } + } + @Override public Future modifyTable(final TableName tableName, final HTableDescriptor htd) throws IOException { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseBackupAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseBackupAdmin.java index a67d3d2..8e08fd0 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseBackupAdmin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseBackupAdmin.java @@ -36,9 +36,7 @@ import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.backup.BackupInfo; import org.apache.hadoop.hbase.backup.BackupInfo.BackupState; import org.apache.hadoop.hbase.backup.BackupRequest; -import org.apache.hadoop.hbase.backup.BackupRestoreClientFactory; import org.apache.hadoop.hbase.backup.BackupType; -import org.apache.hadoop.hbase.backup.RestoreClient; import org.apache.hadoop.hbase.backup.RestoreRequest; import org.apache.hadoop.hbase.backup.impl.BackupSystemTable; import org.apache.hadoop.hbase.backup.util.BackupClientUtil; @@ -407,10 +405,12 @@ public class HBaseBackupAdmin implements BackupAdmin { @Override public void restore(RestoreRequest request) throws IOException { - RestoreClient client = BackupRestoreClientFactory.getRestoreClient(admin.getConfiguration()); - client.restore(request.getBackupRootDir(), request.getBackupId(), request.isCheck(), - request.getFromTables(), request.getToTables(), request.isOverwrite()); + admin.restoreTables(request); + } + @Override + public Future restoreAsync(RestoreRequest request) throws IOException { + return admin.restoreTablesAsync(request); } @Override diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/RequestConverter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/RequestConverter.java index 5babfb8..eba01ce 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/RequestConverter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/RequestConverter.java @@ -105,6 +105,7 @@ import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyTableReques import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveRegionRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.NormalizeRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.OfflineRegionRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RunCatalogScanRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetNormalizerRunningRequest; @@ -1286,6 +1287,29 @@ public final class RequestConverter { return builder.build(); } + public static RestoreTablesRequest buildRestoreTablesRequest(String backupRootDir, + String backupId, boolean check, TableName[] sTableList, + TableName[] tTableList, boolean isOverwrite, final long nonceGroup, final long nonce) + throws IOException { + RestoreTablesRequest.Builder builder = RestoreTablesRequest.newBuilder(); + builder.setBackupId(backupId).setBackupRootDir(backupRootDir); + builder.setDependencyCheckOnly(check).setOverwrite(isOverwrite); + if (sTableList != null) { + for (TableName table : sTableList) { + builder.addTables(ProtobufUtil.toProtoTableName(table)); + } + } else { + throw new 
IOException("Source table list shouldn't be empty"); + } + if (tTableList != null) { + for (TableName table : tTableList) { + builder.addTargetTables(ProtobufUtil.toProtoTableName(table)); + } + } + builder.setNonceGroup(nonceGroup).setNonce(nonce); + return builder.build(); + } + /** * Creates a protocol buffer GetSchemaAlterStatusRequest * diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java index b5b6b4c..8b383c7 100644 --- a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java +++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java @@ -90,6 +90,88 @@ public final class MasterProtos { // @@protoc_insertion_point(enum_scope:hbase.pb.MasterSwitchType) } + /** + * Protobuf enum {@code hbase.pb.RestoreTablesState} + */ + public enum RestoreTablesState + implements com.google.protobuf.ProtocolMessageEnum { + /** + * VALIDATION = 1; + */ + VALIDATION(0, 1), + /** + * RESTORE_IMAGES = 2; + */ + RESTORE_IMAGES(1, 2), + ; + + /** + * VALIDATION = 1; + */ + public static final int VALIDATION_VALUE = 1; + /** + * RESTORE_IMAGES = 2; + */ + public static final int RESTORE_IMAGES_VALUE = 2; + + + public final int getNumber() { return value; } + + public static RestoreTablesState valueOf(int value) { + switch (value) { + case 1: return VALIDATION; + case 2: return RESTORE_IMAGES; + default: return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap + internalGetValueMap() { + return internalValueMap; + } + private static com.google.protobuf.Internal.EnumLiteMap + internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public RestoreTablesState findValueByNumber(int number) { + return RestoreTablesState.valueOf(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor + getValueDescriptor() { + return getDescriptor().getValues().get(index); + } + public final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptorForType() { + return getDescriptor(); + } + public static final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.getDescriptor().getEnumTypes().get(1); + } + + private static final RestoreTablesState[] VALUES = values(); + + public static RestoreTablesState valueOf( + com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "EnumValueDescriptor is not for this type."); + } + return VALUES[desc.getIndex()]; + } + + private final int index; + private final int value; + + private RestoreTablesState(int index, int value) { + this.index = index; + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:hbase.pb.RestoreTablesState) + } + public interface AddColumnRequestOrBuilder extends com.google.protobuf.MessageOrBuilder { @@ -60998,220 +61080,2464 @@ public final class MasterProtos { // @@protoc_insertion_point(class_scope:hbase.pb.BackupTablesResponse) } - /** - * Protobuf service {@code hbase.pb.MasterService} - */ - public static abstract class MasterService - implements com.google.protobuf.Service { - protected MasterService() {} + public interface RestoreTablesRequestOrBuilder + extends com.google.protobuf.MessageOrBuilder { - public interface Interface { - /** - * rpc 
GetSchemaAlterStatus(.hbase.pb.GetSchemaAlterStatusRequest) returns (.hbase.pb.GetSchemaAlterStatusResponse); - * - *
-       ** Used by the client to get the number of regions that have received the updated schema 
-       * 
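Referring back to the RequestConverter hunk earlier in this patch: buildRestoreTablesRequest() is the step that turns the client-side arguments into the RestoreTablesRequest protobuf defined below, and it is what HBaseAdmin.restoreTablesAsync() calls before invoking master.restoreTables(). A standalone sketch of that conversion, with made-up paths, zero placeholder nonces (HBaseAdmin obtains real ones from the connection's NonceGenerator), and assuming a context that can throw IOException:

    // Uses RequestConverter and MasterProtos.RestoreTablesRequest as imported in the hunk above.
    TableName[] from = { TableName.valueOf("t1"), TableName.valueOf("ns:t2") };
    TableName[] to   = { TableName.valueOf("t1_restored"), TableName.valueOf("ns:t2_restored") };
    long nonceGroup = 0L, nonce = 0L;              // placeholders for this sketch
    RestoreTablesRequest proto = RequestConverter.buildRestoreTablesRequest(
        "hdfs://nn:8020/backup",                   // backup_root_dir (required field)
        "backup_1463074759000",                    // backup_id (required field)
        false,                                     // dependency check only
        from,                                      // source tables; a null array makes the helper throw IOException
        to,                                        // target tables; may be null
        false,                                     // overwrite
        nonceGroup, nonce);
    // The resulting message carries one hbase.pb.TableName entry per source and target table.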
- */ - public abstract void getSchemaAlterStatus( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetSchemaAlterStatusRequest request, - com.google.protobuf.RpcCallback done); + // required string backup_id = 1; + /** + * required string backup_id = 1; + */ + boolean hasBackupId(); + /** + * required string backup_id = 1; + */ + java.lang.String getBackupId(); + /** + * required string backup_id = 1; + */ + com.google.protobuf.ByteString + getBackupIdBytes(); - /** - * rpc GetTableDescriptors(.hbase.pb.GetTableDescriptorsRequest) returns (.hbase.pb.GetTableDescriptorsResponse); - * - *
-       ** Get list of TableDescriptors for requested tables. 
-       * 
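The RestoreTablesState enum added near the top of MasterProtos (VALIDATION = 1, RESTORE_IMAGES = 2) presumably enumerates the steps of the master-side restore procedure; its generated mapping is the usual protobuf enum boilerplate. A quick sketch of how that mapping behaves, per the valueOf() switch shown above:

    RestoreTablesState s = RestoreTablesState.valueOf(2);
    // s == RestoreTablesState.RESTORE_IMAGES and s.getNumber() == 2;
    // an unrecognized wire value, e.g. valueOf(3), returns null.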
- */ - public abstract void getTableDescriptors( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableDescriptorsRequest request, - com.google.protobuf.RpcCallback done); + // repeated .hbase.pb.TableName tables = 2; + /** + * repeated .hbase.pb.TableName tables = 2; + */ + java.util.List + getTablesList(); + /** + * repeated .hbase.pb.TableName tables = 2; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTables(int index); + /** + * repeated .hbase.pb.TableName tables = 2; + */ + int getTablesCount(); + /** + * repeated .hbase.pb.TableName tables = 2; + */ + java.util.List + getTablesOrBuilderList(); + /** + * repeated .hbase.pb.TableName tables = 2; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTablesOrBuilder( + int index); - /** - * rpc GetTableNames(.hbase.pb.GetTableNamesRequest) returns (.hbase.pb.GetTableNamesResponse); - * - *
-       ** Get the list of table names. 
-       * 
- */ - public abstract void getTableNames( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableNamesRequest request, - com.google.protobuf.RpcCallback done); + // repeated .hbase.pb.TableName target_tables = 3; + /** + * repeated .hbase.pb.TableName target_tables = 3; + */ + java.util.List + getTargetTablesList(); + /** + * repeated .hbase.pb.TableName target_tables = 3; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTargetTables(int index); + /** + * repeated .hbase.pb.TableName target_tables = 3; + */ + int getTargetTablesCount(); + /** + * repeated .hbase.pb.TableName target_tables = 3; + */ + java.util.List + getTargetTablesOrBuilderList(); + /** + * repeated .hbase.pb.TableName target_tables = 3; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTargetTablesOrBuilder( + int index); - /** - * rpc GetClusterStatus(.hbase.pb.GetClusterStatusRequest) returns (.hbase.pb.GetClusterStatusResponse); - * - *
-       ** Return cluster status. 
-       * 
- */ - public abstract void getClusterStatus( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusRequest request, - com.google.protobuf.RpcCallback done); + // required string backup_root_dir = 4; + /** + * required string backup_root_dir = 4; + */ + boolean hasBackupRootDir(); + /** + * required string backup_root_dir = 4; + */ + java.lang.String getBackupRootDir(); + /** + * required string backup_root_dir = 4; + */ + com.google.protobuf.ByteString + getBackupRootDirBytes(); - /** - * rpc IsMasterRunning(.hbase.pb.IsMasterRunningRequest) returns (.hbase.pb.IsMasterRunningResponse); - * - *
-       ** return true if master is available 
-       * 
- */ - public abstract void isMasterRunning( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest request, - com.google.protobuf.RpcCallback done); + // optional bool dependency_check_only = 5; + /** + * optional bool dependency_check_only = 5; + */ + boolean hasDependencyCheckOnly(); + /** + * optional bool dependency_check_only = 5; + */ + boolean getDependencyCheckOnly(); - /** - * rpc AddColumn(.hbase.pb.AddColumnRequest) returns (.hbase.pb.AddColumnResponse); - * - *
-       ** Adds a column to the specified table. 
-       * 
- */ - public abstract void addColumn( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddColumnRequest request, - com.google.protobuf.RpcCallback done); + // optional bool overwrite = 6; + /** + * optional bool overwrite = 6; + */ + boolean hasOverwrite(); + /** + * optional bool overwrite = 6; + */ + boolean getOverwrite(); - /** - * rpc DeleteColumn(.hbase.pb.DeleteColumnRequest) returns (.hbase.pb.DeleteColumnResponse); - * - *
-       ** Deletes a column from the specified table. Table must be disabled. 
-       * 
- */ - public abstract void deleteColumn( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteColumnRequest request, - com.google.protobuf.RpcCallback done); + // optional uint64 nonce_group = 7 [default = 0]; + /** + * optional uint64 nonce_group = 7 [default = 0]; + */ + boolean hasNonceGroup(); + /** + * optional uint64 nonce_group = 7 [default = 0]; + */ + long getNonceGroup(); - /** - * rpc ModifyColumn(.hbase.pb.ModifyColumnRequest) returns (.hbase.pb.ModifyColumnResponse); - * - *
-       ** Modifies an existing column on the specified table. 
-       * 
- */ - public abstract void modifyColumn( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyColumnRequest request, - com.google.protobuf.RpcCallback done); + // optional uint64 nonce = 8 [default = 0]; + /** + * optional uint64 nonce = 8 [default = 0]; + */ + boolean hasNonce(); + /** + * optional uint64 nonce = 8 [default = 0]; + */ + long getNonce(); + } + /** + * Protobuf type {@code hbase.pb.RestoreTablesRequest} + */ + public static final class RestoreTablesRequest extends + com.google.protobuf.GeneratedMessage + implements RestoreTablesRequestOrBuilder { + // Use RestoreTablesRequest.newBuilder() to construct. + private RestoreTablesRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private RestoreTablesRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } - /** - * rpc MoveRegion(.hbase.pb.MoveRegionRequest) returns (.hbase.pb.MoveRegionResponse); - * - *
-       ** Move the region region to the destination server. 
-       * 
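On the receiving side, the accessors declared in the RestoreTablesRequestOrBuilder interface above are what a master-side handler would use to unpack the call. A rough sketch only, with request standing for an incoming RestoreTablesRequest and assuming the existing ProtobufUtil.toTableName() counterpart of the toProtoTableName() conversion used in RequestConverter; the actual master-side implementation is not part of this section:

    // Hypothetical unpacking inside a master RPC handler.
    String backupId   = request.getBackupId();        // required field 1
    String rootDir    = request.getBackupRootDir();   // required field 4
    boolean checkOnly = request.hasDependencyCheckOnly() && request.getDependencyCheckOnly();
    boolean overwrite = request.hasOverwrite() && request.getOverwrite();
    TableName[] fromTables = new TableName[request.getTablesCount()];
    for (int i = 0; i < fromTables.length; i++) {
      fromTables[i] = ProtobufUtil.toTableName(request.getTables(i));      // assumed existing helper
    }
    TableName[] toTables = new TableName[request.getTargetTablesCount()];
    for (int i = 0; i < toTables.length; i++) {
      toTables[i] = ProtobufUtil.toTableName(request.getTargetTables(i));  // assumed existing helper
    }
    // backupId, rootDir, checkOnly, overwrite and the two TableName arrays now mirror
    // the fields of the original client-side RestoreRequest.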
- */ - public abstract void moveRegion( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveRegionRequest request, - com.google.protobuf.RpcCallback done); + private static final RestoreTablesRequest defaultInstance; + public static RestoreTablesRequest getDefaultInstance() { + return defaultInstance; + } - /** - * rpc DispatchMergingRegions(.hbase.pb.DispatchMergingRegionsRequest) returns (.hbase.pb.DispatchMergingRegionsResponse); - * - *
-       ** Master dispatch merging the regions 
-       * 
- */ - public abstract void dispatchMergingRegions( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DispatchMergingRegionsRequest request, - com.google.protobuf.RpcCallback done); + public RestoreTablesRequest getDefaultInstanceForType() { + return defaultInstance; + } - /** - * rpc AssignRegion(.hbase.pb.AssignRegionRequest) returns (.hbase.pb.AssignRegionResponse); - * - *
-       ** Assign a region to a server chosen at random. 
-       * 
- */ - public abstract void assignRegion( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AssignRegionRequest request, - com.google.protobuf.RpcCallback done); + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private RestoreTablesRequest( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + bitField0_ |= 0x00000001; + backupId_ = input.readBytes(); + break; + } + case 18: { + if (!((mutable_bitField0_ & 0x00000002) == 0x00000002)) { + tables_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000002; + } + tables_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.PARSER, extensionRegistry)); + break; + } + case 26: { + if (!((mutable_bitField0_ & 0x00000004) == 0x00000004)) { + targetTables_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000004; + } + targetTables_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.PARSER, extensionRegistry)); + break; + } + case 34: { + bitField0_ |= 0x00000002; + backupRootDir_ = input.readBytes(); + break; + } + case 40: { + bitField0_ |= 0x00000004; + dependencyCheckOnly_ = input.readBool(); + break; + } + case 48: { + bitField0_ |= 0x00000008; + overwrite_ = input.readBool(); + break; + } + case 56: { + bitField0_ |= 0x00000010; + nonceGroup_ = input.readUInt64(); + break; + } + case 64: { + bitField0_ |= 0x00000020; + nonce_ = input.readUInt64(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000002) == 0x00000002)) { + tables_ = java.util.Collections.unmodifiableList(tables_); + } + if (((mutable_bitField0_ & 0x00000004) == 0x00000004)) { + targetTables_ = java.util.Collections.unmodifiableList(targetTables_); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_RestoreTablesRequest_descriptor; + } - /** - * rpc UnassignRegion(.hbase.pb.UnassignRegionRequest) returns (.hbase.pb.UnassignRegionResponse); - * - *
-       **
-       * Unassign a region from current hosting regionserver.  Region will then be
-       * assigned to a regionserver chosen at random.  Region could be reassigned
-       * back to the same server.  Use MoveRegion if you want
-       * to control the region movement.
-       * 
- */ - public abstract void unassignRegion( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.UnassignRegionRequest request, - com.google.protobuf.RpcCallback done); + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_RestoreTablesRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesRequest.Builder.class); + } - /** - * rpc OfflineRegion(.hbase.pb.OfflineRegionRequest) returns (.hbase.pb.OfflineRegionResponse); - * - *
-       **
-       * Offline a region from the assignment manager's in-memory state.  The
-       * region should be in a closed state and there will be no attempt to
-       * automatically reassign the region as in unassign.   This is a special
-       * method, and should only be used by experts or hbck.
-       * 
- */ - public abstract void offlineRegion( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.OfflineRegionRequest request, - com.google.protobuf.RpcCallback done); + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public RestoreTablesRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new RestoreTablesRequest(input, extensionRegistry); + } + }; - /** - * rpc DeleteTable(.hbase.pb.DeleteTableRequest) returns (.hbase.pb.DeleteTableResponse); - * - *
-       ** Deletes a table 
-       * 
- */ - public abstract void deleteTable( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteTableRequest request, - com.google.protobuf.RpcCallback done); + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } - /** - * rpc truncateTable(.hbase.pb.TruncateTableRequest) returns (.hbase.pb.TruncateTableResponse); - * - *
-       ** Truncate a table 
-       * 
- */ - public abstract void truncateTable( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.TruncateTableRequest request, - com.google.protobuf.RpcCallback done); + private int bitField0_; + // required string backup_id = 1; + public static final int BACKUP_ID_FIELD_NUMBER = 1; + private java.lang.Object backupId_; + /** + * required string backup_id = 1; + */ + public boolean hasBackupId() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string backup_id = 1; + */ + public java.lang.String getBackupId() { + java.lang.Object ref = backupId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + backupId_ = s; + } + return s; + } + } + /** + * required string backup_id = 1; + */ + public com.google.protobuf.ByteString + getBackupIdBytes() { + java.lang.Object ref = backupId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + backupId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } - /** - * rpc EnableTable(.hbase.pb.EnableTableRequest) returns (.hbase.pb.EnableTableResponse); - * - *
-       ** Puts the table on-line (only needed if table has been previously taken offline) 
-       * 
- */ - public abstract void enableTable( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableTableRequest request, - com.google.protobuf.RpcCallback done); + // repeated .hbase.pb.TableName tables = 2; + public static final int TABLES_FIELD_NUMBER = 2; + private java.util.List tables_; + /** + * repeated .hbase.pb.TableName tables = 2; + */ + public java.util.List getTablesList() { + return tables_; + } + /** + * repeated .hbase.pb.TableName tables = 2; + */ + public java.util.List + getTablesOrBuilderList() { + return tables_; + } + /** + * repeated .hbase.pb.TableName tables = 2; + */ + public int getTablesCount() { + return tables_.size(); + } + /** + * repeated .hbase.pb.TableName tables = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTables(int index) { + return tables_.get(index); + } + /** + * repeated .hbase.pb.TableName tables = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTablesOrBuilder( + int index) { + return tables_.get(index); + } - /** - * rpc DisableTable(.hbase.pb.DisableTableRequest) returns (.hbase.pb.DisableTableResponse); - * - *
-       ** Take table offline 
-       * 
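The rest of the generated RestoreTablesRequest class, shown below, is standard protobuf plumbing: isInitialized() enforces the two required fields, writeTo()/getSerializedSize() handle serialization, and PARSER/parseFrom() handle deserialization. A small round-trip sketch, assuming the usual GeneratedMessage toByteArray() helper, made-up field values, and a context that can throw InvalidProtocolBufferException:

    RestoreTablesRequest original = RestoreTablesRequest.newBuilder()
        .setBackupId("backup_1463074759000")         // required; build() throws if missing
        .setBackupRootDir("hdfs://nn:8020/backup")   // required
        .setOverwrite(true)
        .build();
    byte[] wire = original.toByteArray();            // assumed standard protobuf helper
    RestoreTablesRequest copy = RestoreTablesRequest.parseFrom(wire);
    // copy.equals(original) is true; hasNonce() is false on both sides since the
    // optional nonce fields were never set.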
+ // repeated .hbase.pb.TableName target_tables = 3; + public static final int TARGET_TABLES_FIELD_NUMBER = 3; + private java.util.List targetTables_; + /** + * repeated .hbase.pb.TableName target_tables = 3; + */ + public java.util.List getTargetTablesList() { + return targetTables_; + } + /** + * repeated .hbase.pb.TableName target_tables = 3; + */ + public java.util.List + getTargetTablesOrBuilderList() { + return targetTables_; + } + /** + * repeated .hbase.pb.TableName target_tables = 3; + */ + public int getTargetTablesCount() { + return targetTables_.size(); + } + /** + * repeated .hbase.pb.TableName target_tables = 3; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTargetTables(int index) { + return targetTables_.get(index); + } + /** + * repeated .hbase.pb.TableName target_tables = 3; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTargetTablesOrBuilder( + int index) { + return targetTables_.get(index); + } + + // required string backup_root_dir = 4; + public static final int BACKUP_ROOT_DIR_FIELD_NUMBER = 4; + private java.lang.Object backupRootDir_; + /** + * required string backup_root_dir = 4; + */ + public boolean hasBackupRootDir() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required string backup_root_dir = 4; + */ + public java.lang.String getBackupRootDir() { + java.lang.Object ref = backupRootDir_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + backupRootDir_ = s; + } + return s; + } + } + /** + * required string backup_root_dir = 4; + */ + public com.google.protobuf.ByteString + getBackupRootDirBytes() { + java.lang.Object ref = backupRootDir_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + backupRootDir_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + // optional bool dependency_check_only = 5; + public static final int DEPENDENCY_CHECK_ONLY_FIELD_NUMBER = 5; + private boolean dependencyCheckOnly_; + /** + * optional bool dependency_check_only = 5; + */ + public boolean hasDependencyCheckOnly() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * optional bool dependency_check_only = 5; + */ + public boolean getDependencyCheckOnly() { + return dependencyCheckOnly_; + } + + // optional bool overwrite = 6; + public static final int OVERWRITE_FIELD_NUMBER = 6; + private boolean overwrite_; + /** + * optional bool overwrite = 6; + */ + public boolean hasOverwrite() { + return ((bitField0_ & 0x00000008) == 0x00000008); + } + /** + * optional bool overwrite = 6; + */ + public boolean getOverwrite() { + return overwrite_; + } + + // optional uint64 nonce_group = 7 [default = 0]; + public static final int NONCE_GROUP_FIELD_NUMBER = 7; + private long nonceGroup_; + /** + * optional uint64 nonce_group = 7 [default = 0]; + */ + public boolean hasNonceGroup() { + return ((bitField0_ & 0x00000010) == 0x00000010); + } + /** + * optional uint64 nonce_group = 7 [default = 0]; + */ + public long getNonceGroup() { + return nonceGroup_; + } + + // optional uint64 nonce = 8 [default = 0]; + public static final int NONCE_FIELD_NUMBER = 8; + private long nonce_; + /** + * optional uint64 nonce = 8 [default = 0]; + */ + public boolean hasNonce() { + 
return ((bitField0_ & 0x00000020) == 0x00000020); + } + /** + * optional uint64 nonce = 8 [default = 0]; + */ + public long getNonce() { + return nonce_; + } + + private void initFields() { + backupId_ = ""; + tables_ = java.util.Collections.emptyList(); + targetTables_ = java.util.Collections.emptyList(); + backupRootDir_ = ""; + dependencyCheckOnly_ = false; + overwrite_ = false; + nonceGroup_ = 0L; + nonce_ = 0L; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasBackupId()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasBackupRootDir()) { + memoizedIsInitialized = 0; + return false; + } + for (int i = 0; i < getTablesCount(); i++) { + if (!getTables(i).isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + for (int i = 0; i < getTargetTablesCount(); i++) { + if (!getTargetTables(i).isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(1, getBackupIdBytes()); + } + for (int i = 0; i < tables_.size(); i++) { + output.writeMessage(2, tables_.get(i)); + } + for (int i = 0; i < targetTables_.size(); i++) { + output.writeMessage(3, targetTables_.get(i)); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeBytes(4, getBackupRootDirBytes()); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + output.writeBool(5, dependencyCheckOnly_); + } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + output.writeBool(6, overwrite_); + } + if (((bitField0_ & 0x00000010) == 0x00000010)) { + output.writeUInt64(7, nonceGroup_); + } + if (((bitField0_ & 0x00000020) == 0x00000020)) { + output.writeUInt64(8, nonce_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(1, getBackupIdBytes()); + } + for (int i = 0; i < tables_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(2, tables_.get(i)); + } + for (int i = 0; i < targetTables_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(3, targetTables_.get(i)); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(4, getBackupRootDirBytes()); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + size += com.google.protobuf.CodedOutputStream + .computeBoolSize(5, dependencyCheckOnly_); + } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + size += com.google.protobuf.CodedOutputStream + .computeBoolSize(6, overwrite_); + } + if (((bitField0_ & 0x00000010) == 0x00000010)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt64Size(7, nonceGroup_); + } + if (((bitField0_ & 0x00000020) == 0x00000020)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt64Size(8, nonce_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object 
writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesRequest)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesRequest other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesRequest) obj; + + boolean result = true; + result = result && (hasBackupId() == other.hasBackupId()); + if (hasBackupId()) { + result = result && getBackupId() + .equals(other.getBackupId()); + } + result = result && getTablesList() + .equals(other.getTablesList()); + result = result && getTargetTablesList() + .equals(other.getTargetTablesList()); + result = result && (hasBackupRootDir() == other.hasBackupRootDir()); + if (hasBackupRootDir()) { + result = result && getBackupRootDir() + .equals(other.getBackupRootDir()); + } + result = result && (hasDependencyCheckOnly() == other.hasDependencyCheckOnly()); + if (hasDependencyCheckOnly()) { + result = result && (getDependencyCheckOnly() + == other.getDependencyCheckOnly()); + } + result = result && (hasOverwrite() == other.hasOverwrite()); + if (hasOverwrite()) { + result = result && (getOverwrite() + == other.getOverwrite()); + } + result = result && (hasNonceGroup() == other.hasNonceGroup()); + if (hasNonceGroup()) { + result = result && (getNonceGroup() + == other.getNonceGroup()); + } + result = result && (hasNonce() == other.hasNonce()); + if (hasNonce()) { + result = result && (getNonce() + == other.getNonce()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasBackupId()) { + hash = (37 * hash) + BACKUP_ID_FIELD_NUMBER; + hash = (53 * hash) + getBackupId().hashCode(); + } + if (getTablesCount() > 0) { + hash = (37 * hash) + TABLES_FIELD_NUMBER; + hash = (53 * hash) + getTablesList().hashCode(); + } + if (getTargetTablesCount() > 0) { + hash = (37 * hash) + TARGET_TABLES_FIELD_NUMBER; + hash = (53 * hash) + getTargetTablesList().hashCode(); + } + if (hasBackupRootDir()) { + hash = (37 * hash) + BACKUP_ROOT_DIR_FIELD_NUMBER; + hash = (53 * hash) + getBackupRootDir().hashCode(); + } + if (hasDependencyCheckOnly()) { + hash = (37 * hash) + DEPENDENCY_CHECK_ONLY_FIELD_NUMBER; + hash = (53 * hash) + hashBoolean(getDependencyCheckOnly()); + } + if (hasOverwrite()) { + hash = (37 * hash) + OVERWRITE_FIELD_NUMBER; + hash = (53 * hash) + hashBoolean(getOverwrite()); + } + if (hasNonceGroup()) { + hash = (37 * hash) + NONCE_GROUP_FIELD_NUMBER; + hash = (53 * hash) + hashLong(getNonceGroup()); + } + if (hasNonce()) { + hash = (37 * hash) + NONCE_FIELD_NUMBER; + hash = (53 * hash) + hashLong(getNonce()); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesRequest parseFrom( + 
com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesRequest parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesRequest parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesRequest parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesRequest parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesRequest parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesRequest prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code hbase.pb.RestoreTablesRequest} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_RestoreTablesRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_RestoreTablesRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesRequest.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getTablesFieldBuilder(); + getTargetTablesFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + backupId_ = ""; + bitField0_ = (bitField0_ & ~0x00000001); + if (tablesBuilder_ == null) { + tables_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000002); + } else { + tablesBuilder_.clear(); + } + if (targetTablesBuilder_ == null) { + targetTables_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000004); + } else { + targetTablesBuilder_.clear(); + } + backupRootDir_ = ""; + bitField0_ = (bitField0_ & ~0x00000008); + dependencyCheckOnly_ = false; + bitField0_ = (bitField0_ & ~0x00000010); + overwrite_ = false; + bitField0_ = (bitField0_ & ~0x00000020); + nonceGroup_ = 0L; + bitField0_ = (bitField0_ & ~0x00000040); + nonce_ = 0L; + bitField0_ = (bitField0_ & ~0x00000080); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_RestoreTablesRequest_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesRequest getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesRequest.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesRequest build() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesRequest buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesRequest result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesRequest(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.backupId_ = backupId_; + if (tablesBuilder_ == null) { + if (((bitField0_ & 0x00000002) == 0x00000002)) { + tables_ = java.util.Collections.unmodifiableList(tables_); + bitField0_ = (bitField0_ & ~0x00000002); + } + result.tables_ = tables_; + } else { + result.tables_ = tablesBuilder_.build(); + } + if (targetTablesBuilder_ == null) { + if (((bitField0_ & 0x00000004) == 0x00000004)) { + targetTables_ = java.util.Collections.unmodifiableList(targetTables_); + bitField0_ = (bitField0_ & ~0x00000004); + } + result.targetTables_ = targetTables_; + } else { + result.targetTables_ = targetTablesBuilder_.build(); + } + if (((from_bitField0_ & 0x00000008) == 0x00000008)) { + to_bitField0_ |= 0x00000002; + } + result.backupRootDir_ = 
backupRootDir_; + if (((from_bitField0_ & 0x00000010) == 0x00000010)) { + to_bitField0_ |= 0x00000004; + } + result.dependencyCheckOnly_ = dependencyCheckOnly_; + if (((from_bitField0_ & 0x00000020) == 0x00000020)) { + to_bitField0_ |= 0x00000008; + } + result.overwrite_ = overwrite_; + if (((from_bitField0_ & 0x00000040) == 0x00000040)) { + to_bitField0_ |= 0x00000010; + } + result.nonceGroup_ = nonceGroup_; + if (((from_bitField0_ & 0x00000080) == 0x00000080)) { + to_bitField0_ |= 0x00000020; + } + result.nonce_ = nonce_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesRequest) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesRequest)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesRequest other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesRequest.getDefaultInstance()) return this; + if (other.hasBackupId()) { + bitField0_ |= 0x00000001; + backupId_ = other.backupId_; + onChanged(); + } + if (tablesBuilder_ == null) { + if (!other.tables_.isEmpty()) { + if (tables_.isEmpty()) { + tables_ = other.tables_; + bitField0_ = (bitField0_ & ~0x00000002); + } else { + ensureTablesIsMutable(); + tables_.addAll(other.tables_); + } + onChanged(); + } + } else { + if (!other.tables_.isEmpty()) { + if (tablesBuilder_.isEmpty()) { + tablesBuilder_.dispose(); + tablesBuilder_ = null; + tables_ = other.tables_; + bitField0_ = (bitField0_ & ~0x00000002); + tablesBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? + getTablesFieldBuilder() : null; + } else { + tablesBuilder_.addAllMessages(other.tables_); + } + } + } + if (targetTablesBuilder_ == null) { + if (!other.targetTables_.isEmpty()) { + if (targetTables_.isEmpty()) { + targetTables_ = other.targetTables_; + bitField0_ = (bitField0_ & ~0x00000004); + } else { + ensureTargetTablesIsMutable(); + targetTables_.addAll(other.targetTables_); + } + onChanged(); + } + } else { + if (!other.targetTables_.isEmpty()) { + if (targetTablesBuilder_.isEmpty()) { + targetTablesBuilder_.dispose(); + targetTablesBuilder_ = null; + targetTables_ = other.targetTables_; + bitField0_ = (bitField0_ & ~0x00000004); + targetTablesBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? 
+ getTargetTablesFieldBuilder() : null; + } else { + targetTablesBuilder_.addAllMessages(other.targetTables_); + } + } + } + if (other.hasBackupRootDir()) { + bitField0_ |= 0x00000008; + backupRootDir_ = other.backupRootDir_; + onChanged(); + } + if (other.hasDependencyCheckOnly()) { + setDependencyCheckOnly(other.getDependencyCheckOnly()); + } + if (other.hasOverwrite()) { + setOverwrite(other.getOverwrite()); + } + if (other.hasNonceGroup()) { + setNonceGroup(other.getNonceGroup()); + } + if (other.hasNonce()) { + setNonce(other.getNonce()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasBackupId()) { + + return false; + } + if (!hasBackupRootDir()) { + + return false; + } + for (int i = 0; i < getTablesCount(); i++) { + if (!getTables(i).isInitialized()) { + + return false; + } + } + for (int i = 0; i < getTargetTablesCount(); i++) { + if (!getTargetTables(i).isInitialized()) { + + return false; + } + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesRequest parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesRequest) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required string backup_id = 1; + private java.lang.Object backupId_ = ""; + /** + * required string backup_id = 1; + */ + public boolean hasBackupId() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string backup_id = 1; + */ + public java.lang.String getBackupId() { + java.lang.Object ref = backupId_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + backupId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * required string backup_id = 1; + */ + public com.google.protobuf.ByteString + getBackupIdBytes() { + java.lang.Object ref = backupId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + backupId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * required string backup_id = 1; + */ + public Builder setBackupId( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + backupId_ = value; + onChanged(); + return this; + } + /** + * required string backup_id = 1; + */ + public Builder clearBackupId() { + bitField0_ = (bitField0_ & ~0x00000001); + backupId_ = getDefaultInstance().getBackupId(); + onChanged(); + return this; + } + /** + * required string backup_id = 1; + */ + public Builder setBackupIdBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + backupId_ = value; + onChanged(); + return this; + } + + // repeated .hbase.pb.TableName tables = 2; + private java.util.List tables_ = + java.util.Collections.emptyList(); + private void ensureTablesIsMutable() { + if (!((bitField0_ & 0x00000002) == 
0x00000002)) { + tables_ = new java.util.ArrayList(tables_); + bitField0_ |= 0x00000002; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> tablesBuilder_; + + /** + * repeated .hbase.pb.TableName tables = 2; + */ + public java.util.List getTablesList() { + if (tablesBuilder_ == null) { + return java.util.Collections.unmodifiableList(tables_); + } else { + return tablesBuilder_.getMessageList(); + } + } + /** + * repeated .hbase.pb.TableName tables = 2; + */ + public int getTablesCount() { + if (tablesBuilder_ == null) { + return tables_.size(); + } else { + return tablesBuilder_.getCount(); + } + } + /** + * repeated .hbase.pb.TableName tables = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTables(int index) { + if (tablesBuilder_ == null) { + return tables_.get(index); + } else { + return tablesBuilder_.getMessage(index); + } + } + /** + * repeated .hbase.pb.TableName tables = 2; + */ + public Builder setTables( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { + if (tablesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureTablesIsMutable(); + tables_.set(index, value); + onChanged(); + } else { + tablesBuilder_.setMessage(index, value); + } + return this; + } + /** + * repeated .hbase.pb.TableName tables = 2; + */ + public Builder setTables( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) { + if (tablesBuilder_ == null) { + ensureTablesIsMutable(); + tables_.set(index, builderForValue.build()); + onChanged(); + } else { + tablesBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.TableName tables = 2; + */ + public Builder addTables(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { + if (tablesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureTablesIsMutable(); + tables_.add(value); + onChanged(); + } else { + tablesBuilder_.addMessage(value); + } + return this; + } + /** + * repeated .hbase.pb.TableName tables = 2; + */ + public Builder addTables( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { + if (tablesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureTablesIsMutable(); + tables_.add(index, value); + onChanged(); + } else { + tablesBuilder_.addMessage(index, value); + } + return this; + } + /** + * repeated .hbase.pb.TableName tables = 2; + */ + public Builder addTables( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) { + if (tablesBuilder_ == null) { + ensureTablesIsMutable(); + tables_.add(builderForValue.build()); + onChanged(); + } else { + tablesBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.TableName tables = 2; + */ + public Builder addTables( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) { + if (tablesBuilder_ == null) { + ensureTablesIsMutable(); + tables_.add(index, builderForValue.build()); + onChanged(); + } else { + tablesBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.TableName 
tables = 2; + */ + public Builder addAllTables( + java.lang.Iterable values) { + if (tablesBuilder_ == null) { + ensureTablesIsMutable(); + super.addAll(values, tables_); + onChanged(); + } else { + tablesBuilder_.addAllMessages(values); + } + return this; + } + /** + * repeated .hbase.pb.TableName tables = 2; + */ + public Builder clearTables() { + if (tablesBuilder_ == null) { + tables_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + } else { + tablesBuilder_.clear(); + } + return this; + } + /** + * repeated .hbase.pb.TableName tables = 2; + */ + public Builder removeTables(int index) { + if (tablesBuilder_ == null) { + ensureTablesIsMutable(); + tables_.remove(index); + onChanged(); + } else { + tablesBuilder_.remove(index); + } + return this; + } + /** + * repeated .hbase.pb.TableName tables = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder getTablesBuilder( + int index) { + return getTablesFieldBuilder().getBuilder(index); + } + /** + * repeated .hbase.pb.TableName tables = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTablesOrBuilder( + int index) { + if (tablesBuilder_ == null) { + return tables_.get(index); } else { + return tablesBuilder_.getMessageOrBuilder(index); + } + } + /** + * repeated .hbase.pb.TableName tables = 2; + */ + public java.util.List + getTablesOrBuilderList() { + if (tablesBuilder_ != null) { + return tablesBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(tables_); + } + } + /** + * repeated .hbase.pb.TableName tables = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder addTablesBuilder() { + return getTablesFieldBuilder().addBuilder( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance()); + } + /** + * repeated .hbase.pb.TableName tables = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder addTablesBuilder( + int index) { + return getTablesFieldBuilder().addBuilder( + index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance()); + } + /** + * repeated .hbase.pb.TableName tables = 2; + */ + public java.util.List + getTablesBuilderList() { + return getTablesFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> + getTablesFieldBuilder() { + if (tablesBuilder_ == null) { + tablesBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder>( + tables_, + ((bitField0_ & 0x00000002) == 0x00000002), + getParentForChildren(), + isClean()); + tables_ = null; + } + return tablesBuilder_; + } + + // repeated .hbase.pb.TableName target_tables = 3; + private java.util.List targetTables_ = + java.util.Collections.emptyList(); + private void ensureTargetTablesIsMutable() { + if (!((bitField0_ & 0x00000004) == 0x00000004)) { + targetTables_ = new java.util.ArrayList(targetTables_); + bitField0_ |= 0x00000004; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> targetTablesBuilder_; + + /** + * repeated .hbase.pb.TableName target_tables = 3; + */ + public java.util.List getTargetTablesList() { + if (targetTablesBuilder_ == null) { + return java.util.Collections.unmodifiableList(targetTables_); + } else { + return targetTablesBuilder_.getMessageList(); + } + } + /** + * repeated .hbase.pb.TableName target_tables = 3; + */ + public int getTargetTablesCount() { + if (targetTablesBuilder_ == null) { + return targetTables_.size(); + } else { + return targetTablesBuilder_.getCount(); + } + } + /** + * repeated .hbase.pb.TableName target_tables = 3; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTargetTables(int index) { + if (targetTablesBuilder_ == null) { + return targetTables_.get(index); + } else { + return targetTablesBuilder_.getMessage(index); + } + } + /** + * repeated .hbase.pb.TableName target_tables = 3; + */ + public Builder setTargetTables( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { + if (targetTablesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureTargetTablesIsMutable(); + targetTables_.set(index, value); + onChanged(); + } else { + targetTablesBuilder_.setMessage(index, value); + } + return this; + } + /** + * repeated .hbase.pb.TableName target_tables = 3; + */ + public Builder setTargetTables( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) { + if (targetTablesBuilder_ == null) { + ensureTargetTablesIsMutable(); + targetTables_.set(index, builderForValue.build()); + onChanged(); + } else { + targetTablesBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.TableName target_tables = 3; + */ + public Builder addTargetTables(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { + if (targetTablesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureTargetTablesIsMutable(); + targetTables_.add(value); + onChanged(); + } else { + targetTablesBuilder_.addMessage(value); + } + return this; + } + /** + * repeated .hbase.pb.TableName target_tables = 3; + */ + public Builder addTargetTables( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { + if (targetTablesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureTargetTablesIsMutable(); + targetTables_.add(index, value); + onChanged(); + } else { + targetTablesBuilder_.addMessage(index, value); + } + return this; + } + /** + * repeated .hbase.pb.TableName target_tables = 3; + */ + public Builder addTargetTables( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) { + if (targetTablesBuilder_ == null) { + ensureTargetTablesIsMutable(); + targetTables_.add(builderForValue.build()); + onChanged(); + } else { + targetTablesBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.TableName target_tables = 3; + */ + public Builder addTargetTables( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) { + if (targetTablesBuilder_ == null) { + ensureTargetTablesIsMutable(); + targetTables_.add(index, 
builderForValue.build()); + onChanged(); + } else { + targetTablesBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.TableName target_tables = 3; + */ + public Builder addAllTargetTables( + java.lang.Iterable values) { + if (targetTablesBuilder_ == null) { + ensureTargetTablesIsMutable(); + super.addAll(values, targetTables_); + onChanged(); + } else { + targetTablesBuilder_.addAllMessages(values); + } + return this; + } + /** + * repeated .hbase.pb.TableName target_tables = 3; + */ + public Builder clearTargetTables() { + if (targetTablesBuilder_ == null) { + targetTables_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000004); + onChanged(); + } else { + targetTablesBuilder_.clear(); + } + return this; + } + /** + * repeated .hbase.pb.TableName target_tables = 3; + */ + public Builder removeTargetTables(int index) { + if (targetTablesBuilder_ == null) { + ensureTargetTablesIsMutable(); + targetTables_.remove(index); + onChanged(); + } else { + targetTablesBuilder_.remove(index); + } + return this; + } + /** + * repeated .hbase.pb.TableName target_tables = 3; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder getTargetTablesBuilder( + int index) { + return getTargetTablesFieldBuilder().getBuilder(index); + } + /** + * repeated .hbase.pb.TableName target_tables = 3; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTargetTablesOrBuilder( + int index) { + if (targetTablesBuilder_ == null) { + return targetTables_.get(index); } else { + return targetTablesBuilder_.getMessageOrBuilder(index); + } + } + /** + * repeated .hbase.pb.TableName target_tables = 3; + */ + public java.util.List + getTargetTablesOrBuilderList() { + if (targetTablesBuilder_ != null) { + return targetTablesBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(targetTables_); + } + } + /** + * repeated .hbase.pb.TableName target_tables = 3; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder addTargetTablesBuilder() { + return getTargetTablesFieldBuilder().addBuilder( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance()); + } + /** + * repeated .hbase.pb.TableName target_tables = 3; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder addTargetTablesBuilder( + int index) { + return getTargetTablesFieldBuilder().addBuilder( + index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance()); + } + /** + * repeated .hbase.pb.TableName target_tables = 3; + */ + public java.util.List + getTargetTablesBuilderList() { + return getTargetTablesFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> + getTargetTablesFieldBuilder() { + if (targetTablesBuilder_ == null) { + targetTablesBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder>( + targetTables_, + ((bitField0_ & 0x00000004) == 0x00000004), + getParentForChildren(), + isClean()); + 
targetTables_ = null; + } + return targetTablesBuilder_; + } + + // required string backup_root_dir = 4; + private java.lang.Object backupRootDir_ = ""; + /** + * required string backup_root_dir = 4; + */ + public boolean hasBackupRootDir() { + return ((bitField0_ & 0x00000008) == 0x00000008); + } + /** + * required string backup_root_dir = 4; + */ + public java.lang.String getBackupRootDir() { + java.lang.Object ref = backupRootDir_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + backupRootDir_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * required string backup_root_dir = 4; + */ + public com.google.protobuf.ByteString + getBackupRootDirBytes() { + java.lang.Object ref = backupRootDir_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + backupRootDir_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * required string backup_root_dir = 4; + */ + public Builder setBackupRootDir( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000008; + backupRootDir_ = value; + onChanged(); + return this; + } + /** + * required string backup_root_dir = 4; + */ + public Builder clearBackupRootDir() { + bitField0_ = (bitField0_ & ~0x00000008); + backupRootDir_ = getDefaultInstance().getBackupRootDir(); + onChanged(); + return this; + } + /** + * required string backup_root_dir = 4; + */ + public Builder setBackupRootDirBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000008; + backupRootDir_ = value; + onChanged(); + return this; + } + + // optional bool dependency_check_only = 5; + private boolean dependencyCheckOnly_ ; + /** + * optional bool dependency_check_only = 5; + */ + public boolean hasDependencyCheckOnly() { + return ((bitField0_ & 0x00000010) == 0x00000010); + } + /** + * optional bool dependency_check_only = 5; + */ + public boolean getDependencyCheckOnly() { + return dependencyCheckOnly_; + } + /** + * optional bool dependency_check_only = 5; + */ + public Builder setDependencyCheckOnly(boolean value) { + bitField0_ |= 0x00000010; + dependencyCheckOnly_ = value; + onChanged(); + return this; + } + /** + * optional bool dependency_check_only = 5; + */ + public Builder clearDependencyCheckOnly() { + bitField0_ = (bitField0_ & ~0x00000010); + dependencyCheckOnly_ = false; + onChanged(); + return this; + } + + // optional bool overwrite = 6; + private boolean overwrite_ ; + /** + * optional bool overwrite = 6; + */ + public boolean hasOverwrite() { + return ((bitField0_ & 0x00000020) == 0x00000020); + } + /** + * optional bool overwrite = 6; + */ + public boolean getOverwrite() { + return overwrite_; + } + /** + * optional bool overwrite = 6; + */ + public Builder setOverwrite(boolean value) { + bitField0_ |= 0x00000020; + overwrite_ = value; + onChanged(); + return this; + } + /** + * optional bool overwrite = 6; + */ + public Builder clearOverwrite() { + bitField0_ = (bitField0_ & ~0x00000020); + overwrite_ = false; + onChanged(); + return this; + } + + // optional uint64 nonce_group = 7 [default = 0]; + private long nonceGroup_ ; + /** + * optional uint64 nonce_group = 7 [default = 0]; + */ + public boolean hasNonceGroup() { + return ((bitField0_ & 0x00000040) == 0x00000040); + } + /** + * optional uint64 
nonce_group = 7 [default = 0]; + */ + public long getNonceGroup() { + return nonceGroup_; + } + /** + * optional uint64 nonce_group = 7 [default = 0]; + */ + public Builder setNonceGroup(long value) { + bitField0_ |= 0x00000040; + nonceGroup_ = value; + onChanged(); + return this; + } + /** + * optional uint64 nonce_group = 7 [default = 0]; + */ + public Builder clearNonceGroup() { + bitField0_ = (bitField0_ & ~0x00000040); + nonceGroup_ = 0L; + onChanged(); + return this; + } + + // optional uint64 nonce = 8 [default = 0]; + private long nonce_ ; + /** + * optional uint64 nonce = 8 [default = 0]; + */ + public boolean hasNonce() { + return ((bitField0_ & 0x00000080) == 0x00000080); + } + /** + * optional uint64 nonce = 8 [default = 0]; + */ + public long getNonce() { + return nonce_; + } + /** + * optional uint64 nonce = 8 [default = 0]; + */ + public Builder setNonce(long value) { + bitField0_ |= 0x00000080; + nonce_ = value; + onChanged(); + return this; + } + /** + * optional uint64 nonce = 8 [default = 0]; + */ + public Builder clearNonce() { + bitField0_ = (bitField0_ & ~0x00000080); + nonce_ = 0L; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:hbase.pb.RestoreTablesRequest) + } + + static { + defaultInstance = new RestoreTablesRequest(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:hbase.pb.RestoreTablesRequest) + } + + public interface RestoreTablesResponseOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // optional uint64 proc_id = 1; + /** + * optional uint64 proc_id = 1; + */ + boolean hasProcId(); + /** + * optional uint64 proc_id = 1; + */ + long getProcId(); + } + /** + * Protobuf type {@code hbase.pb.RestoreTablesResponse} + */ + public static final class RestoreTablesResponse extends + com.google.protobuf.GeneratedMessage + implements RestoreTablesResponseOrBuilder { + // Use RestoreTablesResponse.newBuilder() to construct. 
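For orientation, a minimal client-side sketch (not part of the generated file in this patch) of how the RestoreTablesRequest builder introduced above might be populated. The backup id, backup root dir, and table names below are hypothetical placeholders, and ProtobufUtil.toProtoTableName is assumed to be the existing HBase helper for converting a TableName into its protobuf form; only the builder methods themselves come from the generated code shown above.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesRequest;

final class RestoreRequestSketch {
  // Assembles a RestoreTablesRequest with the generated builder; all literal values
  // here are hypothetical examples, not values taken from this patch.
  static RestoreTablesRequest example() {
    return RestoreTablesRequest.newBuilder()
        .setBackupId("backup_1463000000000")                                          // required string backup_id = 1
        .addTables(ProtobufUtil.toProtoTableName(TableName.valueOf("t1")))            // repeated TableName tables = 2
        .addTargetTables(ProtobufUtil.toProtoTableName(TableName.valueOf("t1_new")))  // repeated TableName target_tables = 3
        .setBackupRootDir("hdfs://nn/backup")                                         // required string backup_root_dir = 4
        .setOverwrite(true)                                                           // optional bool overwrite = 6
        .build();
  }
}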
+ private RestoreTablesResponse(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private RestoreTablesResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final RestoreTablesResponse defaultInstance; + public static RestoreTablesResponse getDefaultInstance() { + return defaultInstance; + } + + public RestoreTablesResponse getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private RestoreTablesResponse( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 8: { + bitField0_ |= 0x00000001; + procId_ = input.readUInt64(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_RestoreTablesResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_RestoreTablesResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesResponse.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public RestoreTablesResponse parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new RestoreTablesResponse(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // optional uint64 proc_id = 1; + public static final int PROC_ID_FIELD_NUMBER = 1; + private long procId_; + /** + * optional uint64 proc_id = 1; + */ + public boolean hasProcId() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional uint64 proc_id = 1; + */ + public long getProcId() { + return procId_; + } + + private void initFields() { + procId_ = 0L; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 
1; + + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeUInt64(1, procId_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt64Size(1, procId_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesResponse)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesResponse other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesResponse) obj; + + boolean result = true; + result = result && (hasProcId() == other.hasProcId()); + if (hasProcId()) { + result = result && (getProcId() + == other.getProcId()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasProcId()) { + hash = (37 * hash) + PROC_ID_FIELD_NUMBER; + hash = (53 * hash) + hashLong(getProcId()); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesResponse parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesResponse parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesResponse parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesResponse parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesResponse parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesResponse parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + 
return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesResponse parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesResponse parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesResponse parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesResponse parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesResponse prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code hbase.pb.RestoreTablesResponse} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_RestoreTablesResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_RestoreTablesResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesResponse.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesResponse.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + procId_ = 0L; + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_RestoreTablesResponse_descriptor; + } + + public 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesResponse getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesResponse.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesResponse build() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesResponse buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesResponse result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesResponse(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.procId_ = procId_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesResponse) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesResponse)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesResponse other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesResponse.getDefaultInstance()) return this; + if (other.hasProcId()) { + setProcId(other.getProcId()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesResponse parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesResponse) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // optional uint64 proc_id = 1; + private long procId_ ; + /** + * optional uint64 proc_id = 1; + */ + public boolean hasProcId() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional uint64 proc_id = 1; + */ + public long getProcId() { + return procId_; + } + /** + * optional uint64 proc_id = 1; + */ + public Builder setProcId(long value) { + bitField0_ |= 0x00000001; + procId_ = value; + onChanged(); + return this; + } + /** + * optional uint64 proc_id = 1; + */ + public Builder clearProcId() { + bitField0_ = (bitField0_ & ~0x00000001); + procId_ = 0L; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:hbase.pb.RestoreTablesResponse) + } + + static { + defaultInstance = new RestoreTablesResponse(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:hbase.pb.RestoreTablesResponse) + } + + /** + * Protobuf service {@code hbase.pb.MasterService} + */ + public static abstract class MasterService + implements 
com.google.protobuf.Service { + protected MasterService() {} + + public interface Interface { + /** + * rpc GetSchemaAlterStatus(.hbase.pb.GetSchemaAlterStatusRequest) returns (.hbase.pb.GetSchemaAlterStatusResponse); + * + *
+       ** Used by the client to get the number of regions that have received the updated schema 
+       * 
+ */ + public abstract void getSchemaAlterStatus( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetSchemaAlterStatusRequest request, + com.google.protobuf.RpcCallback done); + + /** + * rpc GetTableDescriptors(.hbase.pb.GetTableDescriptorsRequest) returns (.hbase.pb.GetTableDescriptorsResponse); + * + *
+       ** Get list of TableDescriptors for requested tables. 
+       * 
+ */ + public abstract void getTableDescriptors( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableDescriptorsRequest request, + com.google.protobuf.RpcCallback done); + + /** + * rpc GetTableNames(.hbase.pb.GetTableNamesRequest) returns (.hbase.pb.GetTableNamesResponse); + * + *
+       ** Get the list of table names. 
+       * 
+ */ + public abstract void getTableNames( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableNamesRequest request, + com.google.protobuf.RpcCallback done); + + /** + * rpc GetClusterStatus(.hbase.pb.GetClusterStatusRequest) returns (.hbase.pb.GetClusterStatusResponse); + * + *
+       ** Return cluster status. 
+       * 
+ */ + public abstract void getClusterStatus( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusRequest request, + com.google.protobuf.RpcCallback done); + + /** + * rpc IsMasterRunning(.hbase.pb.IsMasterRunningRequest) returns (.hbase.pb.IsMasterRunningResponse); + * + *
+       ** return true if master is available 
+       * 
+ */ + public abstract void isMasterRunning( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest request, + com.google.protobuf.RpcCallback done); + + /** + * rpc AddColumn(.hbase.pb.AddColumnRequest) returns (.hbase.pb.AddColumnResponse); + * + *
+       ** Adds a column to the specified table. 
+       * 
+ */ + public abstract void addColumn( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddColumnRequest request, + com.google.protobuf.RpcCallback done); + + /** + * rpc DeleteColumn(.hbase.pb.DeleteColumnRequest) returns (.hbase.pb.DeleteColumnResponse); + * + *
+       ** Deletes a column from the specified table. Table must be disabled. 
+       * 
+ */ + public abstract void deleteColumn( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteColumnRequest request, + com.google.protobuf.RpcCallback done); + + /** + * rpc ModifyColumn(.hbase.pb.ModifyColumnRequest) returns (.hbase.pb.ModifyColumnResponse); + * + *
+       ** Modifies an existing column on the specified table. 
+       * 
+ */ + public abstract void modifyColumn( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyColumnRequest request, + com.google.protobuf.RpcCallback done); + + /** + * rpc MoveRegion(.hbase.pb.MoveRegionRequest) returns (.hbase.pb.MoveRegionResponse); + * + *
+       ** Move the region region to the destination server. 
+       * 
+ */ + public abstract void moveRegion( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveRegionRequest request, + com.google.protobuf.RpcCallback done); + + /** + * rpc DispatchMergingRegions(.hbase.pb.DispatchMergingRegionsRequest) returns (.hbase.pb.DispatchMergingRegionsResponse); + * + *
+       ** Master dispatch merging the regions 
+       * 
+ */ + public abstract void dispatchMergingRegions( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DispatchMergingRegionsRequest request, + com.google.protobuf.RpcCallback done); + + /** + * rpc AssignRegion(.hbase.pb.AssignRegionRequest) returns (.hbase.pb.AssignRegionResponse); + * + *
+       ** Assign a region to a server chosen at random. 
+       * 
+ */ + public abstract void assignRegion( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AssignRegionRequest request, + com.google.protobuf.RpcCallback done); + + /** + * rpc UnassignRegion(.hbase.pb.UnassignRegionRequest) returns (.hbase.pb.UnassignRegionResponse); + * + *
+       **
+       * Unassign a region from current hosting regionserver.  Region will then be
+       * assigned to a regionserver chosen at random.  Region could be reassigned
+       * back to the same server.  Use MoveRegion if you want
+       * to control the region movement.
+       * 
+ */ + public abstract void unassignRegion( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.UnassignRegionRequest request, + com.google.protobuf.RpcCallback done); + + /** + * rpc OfflineRegion(.hbase.pb.OfflineRegionRequest) returns (.hbase.pb.OfflineRegionResponse); + * + *
+       **
+       * Offline a region from the assignment manager's in-memory state.  The
+       * region should be in a closed state and there will be no attempt to
+       * automatically reassign the region as in unassign.   This is a special
+       * method, and should only be used by experts or hbck.
+       * 
+ */ + public abstract void offlineRegion( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.OfflineRegionRequest request, + com.google.protobuf.RpcCallback done); + + /** + * rpc DeleteTable(.hbase.pb.DeleteTableRequest) returns (.hbase.pb.DeleteTableResponse); + * + *
+       ** Deletes a table 
+       * 
+ */ + public abstract void deleteTable( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteTableRequest request, + com.google.protobuf.RpcCallback done); + + /** + * rpc truncateTable(.hbase.pb.TruncateTableRequest) returns (.hbase.pb.TruncateTableResponse); + * + *
+       ** Truncate a table 
+       * 
+ */ + public abstract void truncateTable( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.TruncateTableRequest request, + com.google.protobuf.RpcCallback done); + + /** + * rpc EnableTable(.hbase.pb.EnableTableRequest) returns (.hbase.pb.EnableTableResponse); + * + *
+       ** Puts the table on-line (only needed if table has been previously taken offline) 
+       * 
+ */ + public abstract void enableTable( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableTableRequest request, + com.google.protobuf.RpcCallback done); + + /** + * rpc DisableTable(.hbase.pb.DisableTableRequest) returns (.hbase.pb.DisableTableResponse); + * + *
+       ** Take table offline 
+       * 
*/ public abstract void disableTable( com.google.protobuf.RpcController controller, @@ -61731,6 +64057,18 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest request, com.google.protobuf.RpcCallback done); + /** + * rpc restoreTables(.hbase.pb.RestoreTablesRequest) returns (.hbase.pb.RestoreTablesResponse); + * + *
+       ** restore table set 
+       * 
+ */ + public abstract void restoreTables( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesRequest request, + com.google.protobuf.RpcCallback done); + } public static com.google.protobuf.Service newReflectiveService( @@ -62200,6 +64538,14 @@ public final class MasterProtos { impl.backupTables(controller, request, done); } + @java.lang.Override + public void restoreTables( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesRequest request, + com.google.protobuf.RpcCallback done) { + impl.restoreTables(controller, request, done); + } + }; } @@ -62338,6 +64684,8 @@ public final class MasterProtos { return impl.listProcedures(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest)request); case 57: return impl.backupTables(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest)request); + case 58: + return impl.restoreTables(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesRequest)request); default: throw new java.lang.AssertionError("Can't get here."); } @@ -62468,6 +64816,8 @@ public final class MasterProtos { return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest.getDefaultInstance(); case 57: return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest.getDefaultInstance(); + case 58: + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesRequest.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); } @@ -62598,6 +64948,8 @@ public final class MasterProtos { return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse.getDefaultInstance(); case 57: return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse.getDefaultInstance(); + case 58: + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesResponse.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); } @@ -63331,6 +65683,18 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest request, com.google.protobuf.RpcCallback done); + /** + * rpc restoreTables(.hbase.pb.RestoreTablesRequest) returns (.hbase.pb.RestoreTablesResponse); + * + *
+     ** restore table set 
+     * 
+ */ + public abstract void restoreTables( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesRequest request, + com.google.protobuf.RpcCallback done); + public static final com.google.protobuf.Descriptors.ServiceDescriptor getDescriptor() { @@ -63643,6 +66007,11 @@ public final class MasterProtos { com.google.protobuf.RpcUtil.specializeCallback( done)); return; + case 58: + this.restoreTables(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesRequest)request, + com.google.protobuf.RpcUtil.specializeCallback( + done)); + return; default: throw new java.lang.AssertionError("Can't get here."); } @@ -63773,6 +66142,8 @@ public final class MasterProtos { return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest.getDefaultInstance(); case 57: return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest.getDefaultInstance(); + case 58: + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesRequest.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); } @@ -63903,6 +66274,8 @@ public final class MasterProtos { return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse.getDefaultInstance(); case 57: return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse.getDefaultInstance(); + case 58: + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesResponse.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); } @@ -64793,6 +67166,21 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse.getDefaultInstance())); } + + public void restoreTables( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesRequest request, + com.google.protobuf.RpcCallback done) { + channel.callMethod( + getDescriptor().getMethods().get(58), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesResponse.getDefaultInstance(), + com.google.protobuf.RpcUtil.generalizeCallback( + done, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesResponse.class, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesResponse.getDefaultInstance())); + } } public static BlockingInterface newBlockingStub( @@ -65090,6 +67478,11 @@ public final class MasterProtos { com.google.protobuf.RpcController controller, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest request) throws com.google.protobuf.ServiceException; + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesResponse restoreTables( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesRequest request) + throws com.google.protobuf.ServiceException; } private static final class BlockingStub implements BlockingInterface { @@ -65794,6 +68187,18 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse.getDefaultInstance()); } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesResponse restoreTables( + com.google.protobuf.RpcController controller, + 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesRequest request) + throws com.google.protobuf.ServiceException { + return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesResponse) channel.callBlockingMethod( + getDescriptor().getMethods().get(58), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesResponse.getDefaultInstance()); + } + } // @@protoc_insertion_point(class_scope:hbase.pb.MasterService) @@ -66354,6 +68759,16 @@ public final class MasterProtos { private static com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hbase_pb_BackupTablesResponse_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_hbase_pb_RestoreTablesRequest_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_hbase_pb_RestoreTablesRequest_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_hbase_pb_RestoreTablesResponse_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_hbase_pb_RestoreTablesResponse_fieldAccessorTable; public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { @@ -66560,142 +68975,152 @@ public final class MasterProtos { "bleName\022\027\n\017target_root_dir\030\003 \002(\t\022\017\n\007work" + "ers\030\004 \001(\003\022\021\n\tbandwidth\030\005 \001(\003\":\n\024BackupTa" + "blesResponse\022\017\n\007proc_id\030\001 \001(\004\022\021\n\tbackup_" + - "id\030\002 \001(\t*(\n\020MasterSwitchType\022\t\n\005SPLIT\020\000\022" + - "\t\n\005MERGE\020\0012\242)\n\rMasterService\022e\n\024GetSchem" + - "aAlterStatus\022%.hbase.pb.GetSchemaAlterSt", - "atusRequest\032&.hbase.pb.GetSchemaAlterSta" + - "tusResponse\022b\n\023GetTableDescriptors\022$.hba" + - "se.pb.GetTableDescriptorsRequest\032%.hbase" + - ".pb.GetTableDescriptorsResponse\022P\n\rGetTa" + - "bleNames\022\036.hbase.pb.GetTableNamesRequest" + - "\032\037.hbase.pb.GetTableNamesResponse\022Y\n\020Get" + - "ClusterStatus\022!.hbase.pb.GetClusterStatu" + - "sRequest\032\".hbase.pb.GetClusterStatusResp" + - "onse\022V\n\017IsMasterRunning\022 .hbase.pb.IsMas" + - "terRunningRequest\032!.hbase.pb.IsMasterRun", - "ningResponse\022D\n\tAddColumn\022\032.hbase.pb.Add" + - "ColumnRequest\032\033.hbase.pb.AddColumnRespon" + - "se\022M\n\014DeleteColumn\022\035.hbase.pb.DeleteColu" + - "mnRequest\032\036.hbase.pb.DeleteColumnRespons" + - "e\022M\n\014ModifyColumn\022\035.hbase.pb.ModifyColum" + - "nRequest\032\036.hbase.pb.ModifyColumnResponse" + - "\022G\n\nMoveRegion\022\033.hbase.pb.MoveRegionRequ" + - "est\032\034.hbase.pb.MoveRegionResponse\022k\n\026Dis" + - "patchMergingRegions\022\'.hbase.pb.DispatchM" + - "ergingRegionsRequest\032(.hbase.pb.Dispatch", - "MergingRegionsResponse\022M\n\014AssignRegion\022\035" + - ".hbase.pb.AssignRegionRequest\032\036.hbase.pb" + - ".AssignRegionResponse\022S\n\016UnassignRegion\022" + - "\037.hbase.pb.UnassignRegionRequest\032 .hbase" + - ".pb.UnassignRegionResponse\022P\n\rOfflineReg" + - "ion\022\036.hbase.pb.OfflineRegionRequest\032\037.hb" + - "ase.pb.OfflineRegionResponse\022J\n\013DeleteTa" + - "ble\022\034.hbase.pb.DeleteTableRequest\032\035.hbas" + - "e.pb.DeleteTableResponse\022P\n\rtruncateTabl" + - "e\022\036.hbase.pb.TruncateTableRequest\032\037.hbas", - "e.pb.TruncateTableResponse\022J\n\013EnableTabl" + - "e\022\034.hbase.pb.EnableTableRequest\032\035.hbase." 
+ - "pb.EnableTableResponse\022M\n\014DisableTable\022\035" + - ".hbase.pb.DisableTableRequest\032\036.hbase.pb" + - ".DisableTableResponse\022J\n\013ModifyTable\022\034.h" + - "base.pb.ModifyTableRequest\032\035.hbase.pb.Mo" + - "difyTableResponse\022J\n\013CreateTable\022\034.hbase" + - ".pb.CreateTableRequest\032\035.hbase.pb.Create" + - "TableResponse\022A\n\010Shutdown\022\031.hbase.pb.Shu" + - "tdownRequest\032\032.hbase.pb.ShutdownResponse", - "\022G\n\nStopMaster\022\033.hbase.pb.StopMasterRequ" + - "est\032\034.hbase.pb.StopMasterResponse\022>\n\007Bal" + - "ance\022\030.hbase.pb.BalanceRequest\032\031.hbase.p" + - "b.BalanceResponse\022_\n\022SetBalancerRunning\022" + - "#.hbase.pb.SetBalancerRunningRequest\032$.h" + - "base.pb.SetBalancerRunningResponse\022\\\n\021Is" + - "BalancerEnabled\022\".hbase.pb.IsBalancerEna" + - "bledRequest\032#.hbase.pb.IsBalancerEnabled" + - "Response\022k\n\026SetSplitOrMergeEnabled\022\'.hba" + - "se.pb.SetSplitOrMergeEnabledRequest\032(.hb", - "ase.pb.SetSplitOrMergeEnabledResponse\022h\n" + - "\025IsSplitOrMergeEnabled\022&.hbase.pb.IsSpli" + - "tOrMergeEnabledRequest\032\'.hbase.pb.IsSpli" + - "tOrMergeEnabledResponse\022D\n\tNormalize\022\032.h" + - "base.pb.NormalizeRequest\032\033.hbase.pb.Norm" + - "alizeResponse\022e\n\024SetNormalizerRunning\022%." + - "hbase.pb.SetNormalizerRunningRequest\032&.h" + - "base.pb.SetNormalizerRunningResponse\022b\n\023" + - "IsNormalizerEnabled\022$.hbase.pb.IsNormali" + - "zerEnabledRequest\032%.hbase.pb.IsNormalize", - "rEnabledResponse\022S\n\016RunCatalogScan\022\037.hba" + - "se.pb.RunCatalogScanRequest\032 .hbase.pb.R" + - "unCatalogScanResponse\022e\n\024EnableCatalogJa" + - "nitor\022%.hbase.pb.EnableCatalogJanitorReq" + - "uest\032&.hbase.pb.EnableCatalogJanitorResp" + - "onse\022n\n\027IsCatalogJanitorEnabled\022(.hbase." + - "pb.IsCatalogJanitorEnabledRequest\032).hbas" + - "e.pb.IsCatalogJanitorEnabledResponse\022^\n\021" + - "ExecMasterService\022#.hbase.pb.Coprocessor" + - "ServiceRequest\032$.hbase.pb.CoprocessorSer", - "viceResponse\022A\n\010Snapshot\022\031.hbase.pb.Snap" + - "shotRequest\032\032.hbase.pb.SnapshotResponse\022" + - "h\n\025GetCompletedSnapshots\022&.hbase.pb.GetC" + - "ompletedSnapshotsRequest\032\'.hbase.pb.GetC" + - "ompletedSnapshotsResponse\022S\n\016DeleteSnaps" + - "hot\022\037.hbase.pb.DeleteSnapshotRequest\032 .h" + - "base.pb.DeleteSnapshotResponse\022S\n\016IsSnap" + - "shotDone\022\037.hbase.pb.IsSnapshotDoneReques" + - "t\032 .hbase.pb.IsSnapshotDoneResponse\022V\n\017R" + - "estoreSnapshot\022 .hbase.pb.RestoreSnapsho", - "tRequest\032!.hbase.pb.RestoreSnapshotRespo" + - "nse\022h\n\025IsRestoreSnapshotDone\022&.hbase.pb." + - "IsRestoreSnapshotDoneRequest\032\'.hbase.pb." + - "IsRestoreSnapshotDoneResponse\022P\n\rExecPro" + - "cedure\022\036.hbase.pb.ExecProcedureRequest\032\037" + - ".hbase.pb.ExecProcedureResponse\022W\n\024ExecP" + - "rocedureWithRet\022\036.hbase.pb.ExecProcedure" + - "Request\032\037.hbase.pb.ExecProcedureResponse" + - "\022V\n\017IsProcedureDone\022 .hbase.pb.IsProcedu" + - "reDoneRequest\032!.hbase.pb.IsProcedureDone", - "Response\022V\n\017ModifyNamespace\022 .hbase.pb.M" + - "odifyNamespaceRequest\032!.hbase.pb.ModifyN" + - "amespaceResponse\022V\n\017CreateNamespace\022 .hb" + - "ase.pb.CreateNamespaceRequest\032!.hbase.pb" + - ".CreateNamespaceResponse\022V\n\017DeleteNamesp" + - "ace\022 .hbase.pb.DeleteNamespaceRequest\032!." 
+ - "hbase.pb.DeleteNamespaceResponse\022k\n\026GetN" + - "amespaceDescriptor\022\'.hbase.pb.GetNamespa" + - "ceDescriptorRequest\032(.hbase.pb.GetNamesp" + - "aceDescriptorResponse\022q\n\030ListNamespaceDe", - "scriptors\022).hbase.pb.ListNamespaceDescri" + - "ptorsRequest\032*.hbase.pb.ListNamespaceDes" + - "criptorsResponse\022\206\001\n\037ListTableDescriptor" + - "sByNamespace\0220.hbase.pb.ListTableDescrip" + - "torsByNamespaceRequest\0321.hbase.pb.ListTa" + - "bleDescriptorsByNamespaceResponse\022t\n\031Lis" + - "tTableNamesByNamespace\022*.hbase.pb.ListTa" + - "bleNamesByNamespaceRequest\032+.hbase.pb.Li" + - "stTableNamesByNamespaceResponse\022P\n\rGetTa" + - "bleState\022\036.hbase.pb.GetTableStateRequest", - "\032\037.hbase.pb.GetTableStateResponse\022A\n\010Set" + - "Quota\022\031.hbase.pb.SetQuotaRequest\032\032.hbase" + - ".pb.SetQuotaResponse\022x\n\037getLastMajorComp" + - "actionTimestamp\022).hbase.pb.MajorCompacti" + - "onTimestampRequest\032*.hbase.pb.MajorCompa" + - "ctionTimestampResponse\022\212\001\n(getLastMajorC" + - "ompactionTimestampForRegion\0222.hbase.pb.M" + - "ajorCompactionTimestampForRegionRequest\032" + - "*.hbase.pb.MajorCompactionTimestampRespo" + - "nse\022_\n\022getProcedureResult\022#.hbase.pb.Get", - "ProcedureResultRequest\032$.hbase.pb.GetPro" + - "cedureResultResponse\022h\n\027getSecurityCapab" + - "ilities\022%.hbase.pb.SecurityCapabilitiesR" + - "equest\032&.hbase.pb.SecurityCapabilitiesRe" + - "sponse\022S\n\016AbortProcedure\022\037.hbase.pb.Abor" + - "tProcedureRequest\032 .hbase.pb.AbortProced" + - "ureResponse\022S\n\016ListProcedures\022\037.hbase.pb" + - ".ListProceduresRequest\032 .hbase.pb.ListPr" + - "oceduresResponse\022M\n\014backupTables\022\035.hbase" + - ".pb.BackupTablesRequest\032\036.hbase.pb.Backu", - "pTablesResponseBB\n*org.apache.hadoop.hba" + - "se.protobuf.generatedB\014MasterProtosH\001\210\001\001" + - "\240\001\001" + "id\030\002 \001(\t\"\357\001\n\024RestoreTablesRequest\022\021\n\tbac" + + "kup_id\030\001 \002(\t\022#\n\006tables\030\002 \003(\0132\023.hbase.pb." + + "TableName\022*\n\rtarget_tables\030\003 \003(\0132\023.hbase", + ".pb.TableName\022\027\n\017backup_root_dir\030\004 \002(\t\022\035" + + "\n\025dependency_check_only\030\005 \001(\010\022\021\n\toverwri" + + "te\030\006 \001(\010\022\026\n\013nonce_group\030\007 \001(\004:\0010\022\020\n\005nonc" + + "e\030\010 \001(\004:\0010\"(\n\025RestoreTablesResponse\022\017\n\007p" + + "roc_id\030\001 \001(\004*(\n\020MasterSwitchType\022\t\n\005SPLI" + + "T\020\000\022\t\n\005MERGE\020\001*8\n\022RestoreTablesState\022\016\n\n" + + "VALIDATION\020\001\022\022\n\016RESTORE_IMAGES\020\0022\364)\n\rMas" + + "terService\022e\n\024GetSchemaAlterStatus\022%.hba" + + "se.pb.GetSchemaAlterStatusRequest\032&.hbas" + + "e.pb.GetSchemaAlterStatusResponse\022b\n\023Get", + "TableDescriptors\022$.hbase.pb.GetTableDesc" + + "riptorsRequest\032%.hbase.pb.GetTableDescri" + + "ptorsResponse\022P\n\rGetTableNames\022\036.hbase.p" + + "b.GetTableNamesRequest\032\037.hbase.pb.GetTab" + + "leNamesResponse\022Y\n\020GetClusterStatus\022!.hb" + + "ase.pb.GetClusterStatusRequest\032\".hbase.p" + + "b.GetClusterStatusResponse\022V\n\017IsMasterRu" + + "nning\022 .hbase.pb.IsMasterRunningRequest\032" + + "!.hbase.pb.IsMasterRunningResponse\022D\n\tAd" + + "dColumn\022\032.hbase.pb.AddColumnRequest\032\033.hb", + "ase.pb.AddColumnResponse\022M\n\014DeleteColumn" + + "\022\035.hbase.pb.DeleteColumnRequest\032\036.hbase." 
+ + "pb.DeleteColumnResponse\022M\n\014ModifyColumn\022" + + "\035.hbase.pb.ModifyColumnRequest\032\036.hbase.p" + + "b.ModifyColumnResponse\022G\n\nMoveRegion\022\033.h" + + "base.pb.MoveRegionRequest\032\034.hbase.pb.Mov" + + "eRegionResponse\022k\n\026DispatchMergingRegion" + + "s\022\'.hbase.pb.DispatchMergingRegionsReque" + + "st\032(.hbase.pb.DispatchMergingRegionsResp" + + "onse\022M\n\014AssignRegion\022\035.hbase.pb.AssignRe", + "gionRequest\032\036.hbase.pb.AssignRegionRespo" + + "nse\022S\n\016UnassignRegion\022\037.hbase.pb.Unassig" + + "nRegionRequest\032 .hbase.pb.UnassignRegion" + + "Response\022P\n\rOfflineRegion\022\036.hbase.pb.Off" + + "lineRegionRequest\032\037.hbase.pb.OfflineRegi" + + "onResponse\022J\n\013DeleteTable\022\034.hbase.pb.Del" + + "eteTableRequest\032\035.hbase.pb.DeleteTableRe" + + "sponse\022P\n\rtruncateTable\022\036.hbase.pb.Trunc" + + "ateTableRequest\032\037.hbase.pb.TruncateTable" + + "Response\022J\n\013EnableTable\022\034.hbase.pb.Enabl", + "eTableRequest\032\035.hbase.pb.EnableTableResp" + + "onse\022M\n\014DisableTable\022\035.hbase.pb.DisableT" + + "ableRequest\032\036.hbase.pb.DisableTableRespo" + + "nse\022J\n\013ModifyTable\022\034.hbase.pb.ModifyTabl" + + "eRequest\032\035.hbase.pb.ModifyTableResponse\022" + + "J\n\013CreateTable\022\034.hbase.pb.CreateTableReq" + + "uest\032\035.hbase.pb.CreateTableResponse\022A\n\010S" + + "hutdown\022\031.hbase.pb.ShutdownRequest\032\032.hba" + + "se.pb.ShutdownResponse\022G\n\nStopMaster\022\033.h" + + "base.pb.StopMasterRequest\032\034.hbase.pb.Sto", + "pMasterResponse\022>\n\007Balance\022\030.hbase.pb.Ba" + + "lanceRequest\032\031.hbase.pb.BalanceResponse\022" + + "_\n\022SetBalancerRunning\022#.hbase.pb.SetBala" + + "ncerRunningRequest\032$.hbase.pb.SetBalance" + + "rRunningResponse\022\\\n\021IsBalancerEnabled\022\"." 
+ + "hbase.pb.IsBalancerEnabledRequest\032#.hbas" + + "e.pb.IsBalancerEnabledResponse\022k\n\026SetSpl" + + "itOrMergeEnabled\022\'.hbase.pb.SetSplitOrMe" + + "rgeEnabledRequest\032(.hbase.pb.SetSplitOrM" + + "ergeEnabledResponse\022h\n\025IsSplitOrMergeEna", + "bled\022&.hbase.pb.IsSplitOrMergeEnabledReq" + + "uest\032\'.hbase.pb.IsSplitOrMergeEnabledRes" + + "ponse\022D\n\tNormalize\022\032.hbase.pb.NormalizeR" + + "equest\032\033.hbase.pb.NormalizeResponse\022e\n\024S" + + "etNormalizerRunning\022%.hbase.pb.SetNormal" + + "izerRunningRequest\032&.hbase.pb.SetNormali" + + "zerRunningResponse\022b\n\023IsNormalizerEnable" + + "d\022$.hbase.pb.IsNormalizerEnabledRequest\032" + + "%.hbase.pb.IsNormalizerEnabledResponse\022S" + + "\n\016RunCatalogScan\022\037.hbase.pb.RunCatalogSc", + "anRequest\032 .hbase.pb.RunCatalogScanRespo" + + "nse\022e\n\024EnableCatalogJanitor\022%.hbase.pb.E" + + "nableCatalogJanitorRequest\032&.hbase.pb.En" + + "ableCatalogJanitorResponse\022n\n\027IsCatalogJ" + + "anitorEnabled\022(.hbase.pb.IsCatalogJanito" + + "rEnabledRequest\032).hbase.pb.IsCatalogJani" + + "torEnabledResponse\022^\n\021ExecMasterService\022" + + "#.hbase.pb.CoprocessorServiceRequest\032$.h" + + "base.pb.CoprocessorServiceResponse\022A\n\010Sn" + + "apshot\022\031.hbase.pb.SnapshotRequest\032\032.hbas", + "e.pb.SnapshotResponse\022h\n\025GetCompletedSna" + + "pshots\022&.hbase.pb.GetCompletedSnapshotsR" + + "equest\032\'.hbase.pb.GetCompletedSnapshotsR" + + "esponse\022S\n\016DeleteSnapshot\022\037.hbase.pb.Del" + + "eteSnapshotRequest\032 .hbase.pb.DeleteSnap" + + "shotResponse\022S\n\016IsSnapshotDone\022\037.hbase.p" + + "b.IsSnapshotDoneRequest\032 .hbase.pb.IsSna" + + "pshotDoneResponse\022V\n\017RestoreSnapshot\022 .h" + + "base.pb.RestoreSnapshotRequest\032!.hbase.p" + + "b.RestoreSnapshotResponse\022h\n\025IsRestoreSn", + "apshotDone\022&.hbase.pb.IsRestoreSnapshotD" + + "oneRequest\032\'.hbase.pb.IsRestoreSnapshotD" + + "oneResponse\022P\n\rExecProcedure\022\036.hbase.pb." + + "ExecProcedureRequest\032\037.hbase.pb.ExecProc" + + "edureResponse\022W\n\024ExecProcedureWithRet\022\036." + + "hbase.pb.ExecProcedureRequest\032\037.hbase.pb" + + ".ExecProcedureResponse\022V\n\017IsProcedureDon" + + "e\022 .hbase.pb.IsProcedureDoneRequest\032!.hb" + + "ase.pb.IsProcedureDoneResponse\022V\n\017Modify" + + "Namespace\022 .hbase.pb.ModifyNamespaceRequ", + "est\032!.hbase.pb.ModifyNamespaceResponse\022V" + + "\n\017CreateNamespace\022 .hbase.pb.CreateNames" + + "paceRequest\032!.hbase.pb.CreateNamespaceRe" + + "sponse\022V\n\017DeleteNamespace\022 .hbase.pb.Del" + + "eteNamespaceRequest\032!.hbase.pb.DeleteNam" + + "espaceResponse\022k\n\026GetNamespaceDescriptor" + + "\022\'.hbase.pb.GetNamespaceDescriptorReques" + + "t\032(.hbase.pb.GetNamespaceDescriptorRespo" + + "nse\022q\n\030ListNamespaceDescriptors\022).hbase." 
+ + "pb.ListNamespaceDescriptorsRequest\032*.hba", + "se.pb.ListNamespaceDescriptorsResponse\022\206" + + "\001\n\037ListTableDescriptorsByNamespace\0220.hba" + + "se.pb.ListTableDescriptorsByNamespaceReq" + + "uest\0321.hbase.pb.ListTableDescriptorsByNa" + + "mespaceResponse\022t\n\031ListTableNamesByNames" + + "pace\022*.hbase.pb.ListTableNamesByNamespac" + + "eRequest\032+.hbase.pb.ListTableNamesByName" + + "spaceResponse\022P\n\rGetTableState\022\036.hbase.p" + + "b.GetTableStateRequest\032\037.hbase.pb.GetTab" + + "leStateResponse\022A\n\010SetQuota\022\031.hbase.pb.S", + "etQuotaRequest\032\032.hbase.pb.SetQuotaRespon" + + "se\022x\n\037getLastMajorCompactionTimestamp\022)." + + "hbase.pb.MajorCompactionTimestampRequest" + + "\032*.hbase.pb.MajorCompactionTimestampResp" + + "onse\022\212\001\n(getLastMajorCompactionTimestamp" + + "ForRegion\0222.hbase.pb.MajorCompactionTime" + + "stampForRegionRequest\032*.hbase.pb.MajorCo" + + "mpactionTimestampResponse\022_\n\022getProcedur" + + "eResult\022#.hbase.pb.GetProcedureResultReq" + + "uest\032$.hbase.pb.GetProcedureResultRespon", + "se\022h\n\027getSecurityCapabilities\022%.hbase.pb" + + ".SecurityCapabilitiesRequest\032&.hbase.pb." + + "SecurityCapabilitiesResponse\022S\n\016AbortPro" + + "cedure\022\037.hbase.pb.AbortProcedureRequest\032" + + " .hbase.pb.AbortProcedureResponse\022S\n\016Lis" + + "tProcedures\022\037.hbase.pb.ListProceduresReq" + + "uest\032 .hbase.pb.ListProceduresResponse\022M" + + "\n\014backupTables\022\035.hbase.pb.BackupTablesRe" + + "quest\032\036.hbase.pb.BackupTablesResponse\022P\n" + + "\rrestoreTables\022\036.hbase.pb.RestoreTablesR", + "equest\032\037.hbase.pb.RestoreTablesResponseB" + + "B\n*org.apache.hadoop.hbase.protobuf.gene" + + "ratedB\014MasterProtosH\001\210\001\001\240\001\001" }; com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { @@ -67368,6 +69793,18 @@ public final class MasterProtos { com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_BackupTablesResponse_descriptor, new java.lang.String[] { "ProcId", "BackupId", }); + internal_static_hbase_pb_RestoreTablesRequest_descriptor = + getDescriptor().getMessageTypes().get(111); + internal_static_hbase_pb_RestoreTablesRequest_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_hbase_pb_RestoreTablesRequest_descriptor, + new java.lang.String[] { "BackupId", "Tables", "TargetTables", "BackupRootDir", "DependencyCheckOnly", "Overwrite", "NonceGroup", "Nonce", }); + internal_static_hbase_pb_RestoreTablesResponse_descriptor = + getDescriptor().getMessageTypes().get(112); + internal_static_hbase_pb_RestoreTablesResponse_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_hbase_pb_RestoreTablesResponse_descriptor, + new java.lang.String[] { "ProcId", }); return null; } }; diff --git a/hbase-protocol/src/main/protobuf/Master.proto b/hbase-protocol/src/main/protobuf/Master.proto index 6431c73..66a11b4 100644 --- a/hbase-protocol/src/main/protobuf/Master.proto +++ b/hbase-protocol/src/main/protobuf/Master.proto @@ -554,6 +554,26 @@ message BackupTablesResponse { optional string backup_id = 2; } +enum RestoreTablesState { + VALIDATION = 1; + RESTORE_IMAGES = 2; +} + +message RestoreTablesRequest { + required string backup_id = 1; + repeated TableName tables = 2; + repeated TableName target_tables = 3; + required string 
backup_root_dir = 4; + optional bool dependency_check_only = 5; + optional bool overwrite = 6; + optional uint64 nonce_group = 7 [default = 0]; + optional uint64 nonce = 8 [default = 0]; +} + +message RestoreTablesResponse { + optional uint64 proc_id = 1; +} + service MasterService { /** Used by the client to get the number of regions that have received the updated schema */ rpc GetSchemaAlterStatus(GetSchemaAlterStatusRequest) @@ -832,4 +852,8 @@ service MasterService { /** backup table set */ rpc backupTables(BackupTablesRequest) returns(BackupTablesResponse); + + /** restore table set */ + rpc restoreTables(RestoreTablesRequest) + returns(RestoreTablesResponse); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/RestoreDriver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/RestoreDriver.java index c66ac6e..3fd0c33 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/RestoreDriver.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/RestoreDriver.java @@ -30,6 +30,8 @@ import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.backup.impl.BackupRestoreConstants; import org.apache.hadoop.hbase.backup.impl.BackupSystemTable; import org.apache.hadoop.hbase.backup.util.BackupServerUtil; +import org.apache.hadoop.hbase.backup.util.RestoreServerUtil; +import org.apache.hadoop.hbase.client.BackupAdmin; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.util.AbstractHBaseTool; @@ -152,11 +154,10 @@ public class RestoreDriver extends AbstractHBaseTool { return -4; } - - RestoreClient client = BackupRestoreClientFactory.getRestoreClient(getConf()); - try{ - client.restore(backupRootDir, backupId, check, sTableArray, - tTableArray, isOverwrite); + try (final Connection conn = ConnectionFactory.createConnection(conf); + BackupAdmin client = conn.getAdmin().getBackupAdmin();) { + client.restore(RestoreServerUtil.createRestoreRequest(backupRootDir, backupId, check, + sTableArray, tTableArray, isOverwrite)); } catch (Exception e){ e.printStackTrace(); return -5; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/RestoreClientImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/RestoreClientImpl.java deleted file mode 100644 index 7f23ce0..0000000 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/RestoreClientImpl.java +++ /dev/null @@ -1,300 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.hbase.backup.impl; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.HashMap; -import java.util.Iterator; -import java.util.List; -import java.util.Map.Entry; -import java.util.TreeSet; - -import org.apache.commons.lang.StringUtils; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.backup.BackupType; -import org.apache.hadoop.hbase.backup.HBackupFileSystem; -import org.apache.hadoop.hbase.backup.RestoreClient; -import org.apache.hadoop.hbase.backup.impl.BackupManifest.BackupImage; -import org.apache.hadoop.hbase.backup.util.BackupClientUtil; -import org.apache.hadoop.hbase.backup.util.RestoreServerUtil; -import org.apache.hadoop.hbase.classification.InterfaceAudience; -import org.apache.hadoop.hbase.classification.InterfaceStability; -import org.apache.hadoop.hbase.client.Admin; -import org.apache.hadoop.hbase.client.Connection; -import org.apache.hadoop.hbase.client.ConnectionFactory; - -/** - * The main class which interprets the given arguments and trigger restore operation. - */ -@InterfaceAudience.Public -@InterfaceStability.Evolving -public final class RestoreClientImpl implements RestoreClient { - - private static final Log LOG = LogFactory.getLog(RestoreClientImpl.class); - private Configuration conf; - - public RestoreClientImpl() { - } - - @Override - public void setConf(Configuration conf) { - this.conf = conf; - } - - - - /** - * Restore operation. Stage 1: validate backupManifest, and check target tables - * @param backupRootDir The root dir for backup image - * @param backupId The backup id for image to be restored - * @param check True if only do dependency check - * @param sTableArray The array of tables to be restored - * @param tTableArray The array of mapping tables to restore to - * @param isOverwrite True then do restore overwrite if target table exists, otherwise fail the - * request if target table exists - * @throws IOException if any failure during restore - */ - @Override - public void restore(String backupRootDir, - String backupId, boolean check, TableName[] sTableArray, - TableName[] tTableArray, boolean isOverwrite) throws IOException { - - HashMap backupManifestMap = new HashMap<>(); - // check and load backup image manifest for the tables - Path rootPath = new Path(backupRootDir); - HBackupFileSystem.checkImageManifestExist(backupManifestMap, sTableArray, conf, rootPath, - backupId); - try { - // Check and validate the backup image and its dependencies - if (check) { - if (validate(backupManifestMap)) { - LOG.info("Checking backup images: ok"); - } else { - String errMsg = "Some dependencies are missing for restore"; - LOG.error(errMsg); - throw new IOException(errMsg); - } - } - - if (tTableArray == null) { - tTableArray = sTableArray; - } - // check the target tables - checkTargetTables(tTableArray, isOverwrite); - // start restore process - restoreStage(backupManifestMap, sTableArray, tTableArray, isOverwrite); - LOG.info("Restore for " + Arrays.asList(sTableArray) + " are successful!"); - } catch (IOException e) { - LOG.error("ERROR: restore failed with error: " + e.getMessage()); - throw e; - } - - } - - private boolean validate(HashMap backupManifestMap) - throws IOException { - boolean isValid = true; - - for (Entry manifestEntry : backupManifestMap.entrySet()) { - TableName 
table = manifestEntry.getKey(); - TreeSet imageSet = new TreeSet(); - - ArrayList depList = manifestEntry.getValue().getDependentListByTable(table); - if (depList != null && !depList.isEmpty()) { - imageSet.addAll(depList); - } - - LOG.info("Dependent image(s) from old to new:"); - for (BackupImage image : imageSet) { - String imageDir = - HBackupFileSystem.getTableBackupDir(image.getRootDir(), image.getBackupId(), table); - if (!BackupClientUtil.checkPathExist(imageDir, conf)) { - LOG.error("ERROR: backup image does not exist: " + imageDir); - isValid = false; - break; - } - // TODO More validation? - LOG.info("Backup image: " + image.getBackupId() + " for '" + table + "' is available"); - } - } - - return isValid; - } - - /** - * Validate target Tables - * @param tTableArray: target tables - * @param isOverwrite overwrite existing table - * @throws IOException exception - */ - private void checkTargetTables(TableName[] tTableArray, boolean isOverwrite) - throws IOException { - ArrayList existTableList = new ArrayList<>(); - ArrayList disabledTableList = new ArrayList<>(); - - // check if the tables already exist - try(Connection conn = ConnectionFactory.createConnection(conf); - Admin admin = conn.getAdmin()) { - for (TableName tableName : tTableArray) { - if (admin.tableExists(tableName)) { - existTableList.add(tableName); - if (admin.isTableDisabled(tableName)) { - disabledTableList.add(tableName); - } - } else { - LOG.info("HBase table " + tableName - + " does not exist. It will be created during restore process"); - } - } - } - - if (existTableList.size() > 0) { - if (!isOverwrite) { - LOG.error("Existing table found in the restore target, please add \"-overwrite\" " - + "option in the command if you mean to restore to these existing tables"); - LOG.info("Existing table list in restore target: " + existTableList); - throw new IOException("Existing table found in target while no \"-overwrite\" " - + "option found"); - } else { - if (disabledTableList.size() > 0) { - LOG.error("Found offline table in the restore target, " - + "please enable them before restore with \"-overwrite\" option"); - LOG.info("Offline table list in restore target: " + disabledTableList); - throw new IOException( - "Found offline table in the target when restore with \"-overwrite\" option"); - } - } - } - } - - /** - * Restore operation. Stage 2: resolved Backup Image dependency - * @param backupManifestMap : tableName, Manifest - * @param sTableArray The array of tables to be restored - * @param tTableArray The array of mapping tables to restore to - * @return set of BackupImages restored - * @throws IOException exception - */ - private void restoreStage(HashMap backupManifestMap, - TableName[] sTableArray, TableName[] tTableArray, boolean isOverwrite) throws IOException { - TreeSet restoreImageSet = new TreeSet(); - boolean truncateIfExists = isOverwrite; - try { - for (int i = 0; i < sTableArray.length; i++) { - TableName table = sTableArray[i]; - BackupManifest manifest = backupManifestMap.get(table); - // Get the image list of this backup for restore in time order from old - // to new. - List list = new ArrayList(); - list.add(manifest.getBackupImage()); - List depList = manifest.getDependentListByTable(table); - list.addAll(depList); - TreeSet restoreList = new TreeSet(list); - LOG.debug("need to clear merged Image. 
to be implemented in future jira"); - restoreImages(restoreList.iterator(), table, tTableArray[i], truncateIfExists); - restoreImageSet.addAll(restoreList); - - if (restoreImageSet != null && !restoreImageSet.isEmpty()) { - LOG.info("Restore includes the following image(s):"); - for (BackupImage image : restoreImageSet) { - LOG.info("Backup: " - + image.getBackupId() - + " " - + HBackupFileSystem.getTableBackupDir(image.getRootDir(), image.getBackupId(), - table)); - } - } - } - } catch (Exception e) { - LOG.error("Failed", e); - throw new IOException(e); - } - LOG.debug("restoreStage finished"); - - } - - /** - * Restore operation handle each backupImage in iterator - * @param it: backupImage iterator - ascending - * @param sTable: table to be restored - * @param tTable: table to be restored to - * @throws IOException exception - */ - private void restoreImages(Iterator it, TableName sTable, - TableName tTable, boolean truncateIfExists) - throws IOException { - - // First image MUST be image of a FULL backup - BackupImage image = it.next(); - - String rootDir = image.getRootDir(); - String backupId = image.getBackupId(); - Path backupRoot = new Path(rootDir); - - // We need hFS only for full restore (see the code) - RestoreServerUtil restoreTool = new RestoreServerUtil(conf, backupRoot, backupId); - BackupManifest manifest = HBackupFileSystem.getManifest(sTable, conf, backupRoot, backupId); - - Path tableBackupPath = HBackupFileSystem.getTableBackupPath(sTable, backupRoot, backupId); - - // TODO: convert feature will be provided in a future JIRA - boolean converted = false; - String lastIncrBackupId = null; - List logDirList = null; - - // Scan incremental backups - if (it.hasNext()) { - // obtain the backupId for most recent incremental - logDirList = new ArrayList(); - while (it.hasNext()) { - BackupImage im = it.next(); - String logBackupDir = HBackupFileSystem.getLogBackupDir(im.getRootDir(), im.getBackupId()); - logDirList.add(logBackupDir); - lastIncrBackupId = im.getBackupId(); - } - } - if (manifest.getType() == BackupType.FULL || converted) { - LOG.info("Restoring '" + sTable + "' to '" + tTable + "' from " - + (converted ? 
"converted" : "full") + " backup image " + tableBackupPath.toString()); - restoreTool.fullRestoreTable(tableBackupPath, sTable, tTable, - converted, truncateIfExists, lastIncrBackupId); - } else { // incremental Backup - throw new IOException("Unexpected backup type " + image.getType()); - } - - // The rest one are incremental - if (logDirList != null) { - String logDirs = StringUtils.join(logDirList, ","); - LOG.info("Restoring '" + sTable + "' to '" + tTable - + "' from log dirs: " + logDirs); - String[] sarr = new String[logDirList.size()]; - logDirList.toArray(sarr); - Path[] paths = org.apache.hadoop.util.StringUtils.stringToPath(sarr); - restoreTool.incrementalRestoreTable(tableBackupPath, paths, new TableName[] { sTable }, - new TableName[] { tTable }, lastIncrBackupId); - } - LOG.info(sTable + " has been successfully restored to " + tTable); - } -} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/util/RestoreServerUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/util/RestoreServerUtil.java index 620d622..e179a73 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/util/RestoreServerUtil.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/util/RestoreServerUtil.java @@ -25,7 +25,9 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.HashMap; import java.util.List; +import java.util.Map.Entry; import java.util.TreeMap; +import java.util.TreeSet; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; @@ -42,6 +44,9 @@ import org.apache.hadoop.hbase.backup.BackupInfo; import org.apache.hadoop.hbase.backup.BackupRestoreServerFactory; import org.apache.hadoop.hbase.backup.HBackupFileSystem; import org.apache.hadoop.hbase.backup.IncrementalRestoreService; +import org.apache.hadoop.hbase.backup.RestoreRequest; +import org.apache.hadoop.hbase.backup.impl.BackupManifest; +import org.apache.hadoop.hbase.backup.impl.BackupManifest.BackupImage; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.classification.InterfaceStability; import org.apache.hadoop.hbase.client.Admin; @@ -705,4 +710,46 @@ public class RestoreServerUtil { throw new IOException(e); } } + + public static boolean validate(HashMap backupManifestMap, + Configuration conf) throws IOException { + boolean isValid = true; + + for (Entry manifestEntry : backupManifestMap.entrySet()) { + TableName table = manifestEntry.getKey(); + TreeSet imageSet = new TreeSet(); + + ArrayList depList = manifestEntry.getValue().getDependentListByTable(table); + if (depList != null && !depList.isEmpty()) { + imageSet.addAll(depList); + } + + LOG.info("Dependent image(s) from old to new:"); + for (BackupImage image : imageSet) { + String imageDir = + HBackupFileSystem.getTableBackupDir(image.getRootDir(), image.getBackupId(), table); + if (!BackupClientUtil.checkPathExist(imageDir, conf)) { + LOG.error("ERROR: backup image does not exist: " + imageDir); + isValid = false; + break; + } + LOG.info("Backup image: " + image.getBackupId() + " for '" + table + "' is available"); + } + } + return isValid; + } + + /** + * Create restore request. 
+ * + */ + public static RestoreRequest createRestoreRequest( + String backupRootDir, + String backupId, boolean check, TableName[] fromTables, + TableName[] toTables, boolean isOverwrite) { + RestoreRequest request = new RestoreRequest(); + request.setBackupRootDir(backupRootDir).setBackupId(backupId).setCheck(check) + .setFromTables(fromTables).setToTables(toTables).setOverwrite(isOverwrite); + return request; + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java index bbd7107..b932d03 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java @@ -30,6 +30,7 @@ import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.Comparator; +import java.util.HashMap; import java.util.HashSet; import java.util.Iterator; import java.util.List; @@ -79,10 +80,13 @@ import org.apache.hadoop.hbase.UnknownRegionException; import org.apache.hadoop.hbase.backup.BackupType; import org.apache.hadoop.hbase.backup.HBackupFileSystem; import org.apache.hadoop.hbase.backup.impl.BackupManager; +import org.apache.hadoop.hbase.backup.impl.BackupManifest; import org.apache.hadoop.hbase.backup.impl.BackupRestoreConstants; import org.apache.hadoop.hbase.backup.impl.BackupSystemTable; +import org.apache.hadoop.hbase.backup.impl.RestoreTablesProcedure; import org.apache.hadoop.hbase.backup.master.FullTableBackupProcedure; import org.apache.hadoop.hbase.backup.master.IncrementalTableBackupProcedure; +import org.apache.hadoop.hbase.backup.util.RestoreServerUtil; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.RegionReplicaUtil; @@ -2695,6 +2699,35 @@ public class HMaster extends HRegionServer implements MasterServices { return tableList; } + @Override + public long restoreTables(String backupRootDir, + String backupId, boolean check, List sTableList, + List tTableList, boolean isOverwrite) throws IOException { + if (check) { + HashMap backupManifestMap = new HashMap<>(); + // check and load backup image manifest for the tables + Path rootPath = new Path(backupRootDir); + HBackupFileSystem.checkImageManifestExist(backupManifestMap, + sTableList.toArray(new TableName[sTableList.size()]), + conf, rootPath, backupId); + + // Check and validate the backup image and its dependencies + if (check) { + if (RestoreServerUtil.validate(backupManifestMap, conf)) { + LOG.info("Checking backup images: ok"); + } else { + String errMsg = "Some dependencies are missing for restore"; + LOG.error(errMsg); + throw new IOException(errMsg); + } + } + } + long procId = this.procedureExecutor.submitProcedure( + new RestoreTablesProcedure(procedureExecutor.getEnvironment(), backupRootDir, backupId, + sTableList, tTableList, isOverwrite)); + return procId; + } + /** * Returns the list of table descriptors that match the specified request * @param namespace the namespace to query, or null if querying for all diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java index 930bfd6..1e51193 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java @@ -1072,6 +1072,27 @@ 
public class MasterRpcServices extends RSRpcServices } @Override + public MasterProtos.RestoreTablesResponse restoreTables( + RpcController controller, + MasterProtos.RestoreTablesRequest request) throws ServiceException { + try { + RestoreTablesResponse.Builder response = RestoreTablesResponse.newBuilder(); + List tablesList = new ArrayList<>(request.getTablesList().size()); + for (HBaseProtos.TableName table : request.getTablesList()) { + tablesList.add(ProtobufUtil.toTableName(table)); + } + List targetTablesList = new ArrayList<>(request.getTargetTablesList().size()); + for (HBaseProtos.TableName table : request.getTargetTablesList()) { + targetTablesList.add(ProtobufUtil.toTableName(table)); + } + long procId = master.restoreTables(request.getBackupRootDir(), request.getBackupId(), + request.getDependencyCheckOnly(), tablesList, targetTablesList, request.getOverwrite()); + return response.setProcId(procId).build(); + } catch (IOException e) { + throw new ServiceException(e); + } + } + @Override public ListTableDescriptorsByNamespaceResponse listTableDescriptorsByNamespace(RpcController c, ListTableDescriptorsByNamespaceRequest request) throws ServiceException { try { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java index 3557bb4..f5667b4 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java @@ -198,6 +198,13 @@ public interface MasterServices extends Server { final int workers, final long bandwidth) throws IOException; + /* + * Restore table set + */ + public long restoreTables(String backupRootDir, + String backupId, boolean check, List sTableList, + List tTableList, boolean isOverwrite) throws IOException; + /** * Enable an existing table * @param tableName The table name diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TableProcedureInterface.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TableProcedureInterface.java index 56983a9..5356d2d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TableProcedureInterface.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TableProcedureInterface.java @@ -30,7 +30,7 @@ import org.apache.hadoop.hbase.classification.InterfaceStability; @InterfaceStability.Evolving public interface TableProcedureInterface { public enum TableOperationType { - CREATE, DELETE, DISABLE, EDIT, ENABLE, READ, BACKUP, + CREATE, DELETE, DISABLE, EDIT, ENABLE, READ, BACKUP, RESTORE, }; /** diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java index d617b52..ff5e739 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java @@ -266,21 +266,7 @@ public class TestBackupBase { protected BackupAdmin getBackupAdmin() throws IOException { return TEST_UTIL.getAdmin().getBackupAdmin(); } - - /** - * Get restore request. 
- * - */ - public RestoreRequest createRestoreRequest( - String backupRootDir, - String backupId, boolean check, TableName[] fromTables, - TableName[] toTables, boolean isOverwrite) { - RestoreRequest request = new RestoreRequest(); - request.setBackupRootDir(backupRootDir).setBackupId(backupId).setCheck(check). - setFromTables(fromTables).setToTables(toTables).setOverwrite(isOverwrite); - return request; -} - + /** * Helper method */ diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDeleteRestore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDeleteRestore.java index 2e04f1a..abdf3c7 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDeleteRestore.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDeleteRestore.java @@ -19,6 +19,7 @@ import java.util.List; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.backup.util.RestoreServerUtil; import org.apache.hadoop.hbase.client.BackupAdmin; import org.apache.hadoop.hbase.client.Delete; import org.apache.hadoop.hbase.client.HBaseAdmin; @@ -55,16 +56,16 @@ public class TestBackupDeleteRestore extends TestBackupBase { Delete delete = new Delete("row0".getBytes()); table.delete(delete); hba.flush(table1); - } - + } + TableName[] tableset = new TableName[] { table1 }; TableName[] tablemap = null;//new TableName[] { table1_restore }; BackupAdmin client = getBackupAdmin(); - client.restore(createRestoreRequest(BACKUP_ROOT_DIR, backupId, false, tableset, tablemap, true)); - - + client.restore(RestoreServerUtil.createRestoreRequest(BACKUP_ROOT_DIR, backupId, false, + tableset, tablemap, true)); + int numRowsAfterRestore = TEST_UTIL.countRows(table1); assertEquals( numRows, numRowsAfterRestore); hba.close(); - } + } } \ No newline at end of file diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestFullRestore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestFullRestore.java index 0eddbfe..639aea4 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestFullRestore.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestFullRestore.java @@ -20,6 +20,7 @@ import org.apache.commons.lang.StringUtils; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.backup.util.RestoreServerUtil; import org.apache.hadoop.hbase.client.BackupAdmin; import org.apache.hadoop.hbase.client.HBaseAdmin; import org.apache.hadoop.hbase.testclassification.LargeTests; @@ -34,6 +35,31 @@ public class TestFullRestore extends TestBackupBase { private static final Log LOG = LogFactory.getLog(TestFullRestore.class); + /* + void restoreTables(String backupRootDir, + String backupId, boolean check, boolean autoRestore, List sTable, + List tTable, boolean isOverwrite) throws IOException { + Connection conn = null; + HBaseAdmin admin = null; + RestoreRequest request = new RestoreRequest(); + request.setAutoRestore(autoRestore).setDependencyCheckOnly(check).setOverwrite(isOverwrite); + request.setBackupId(backupId).setBackupRootDir(backupRootDir); + request.setTableList(sTable).setTargetTableList(tTable); + try { + conn = ConnectionFactory.createConnection(conf1); + admin = (HBaseAdmin) conn.getAdmin(); + admin.restoreTablesSync(request); + } finally { + if (admin != null) { + admin.close(); 
+ } + if (conn != null) { + conn.close(); + } + } + } + */ + /** * Verify that a single table is restored to a new table * @throws Exception @@ -52,7 +78,8 @@ public class TestFullRestore extends TestBackupBase { TableName[] tableset = new TableName[] { table1 }; TableName[] tablemap = new TableName[] { table1_restore }; BackupAdmin client = getBackupAdmin(); - client.restore(createRestoreRequest(BACKUP_ROOT_DIR, backupId, false, tableset, tablemap, false)); + client.restore(RestoreServerUtil.createRestoreRequest(BACKUP_ROOT_DIR, backupId, false, + tableset, tablemap, false)); HBaseAdmin hba = TEST_UTIL.getHBaseAdmin(); assertTrue(hba.tableExists(table1_restore)); TEST_UTIL.deleteTable(table1_restore); @@ -96,7 +123,7 @@ public class TestFullRestore extends TestBackupBase { TableName[] restore_tableset = new TableName[] { table2, table3 }; TableName[] tablemap = new TableName[] { table2_restore, table3_restore }; BackupAdmin client = getBackupAdmin(); - client.restore(createRestoreRequest(BACKUP_ROOT_DIR, backupId, false, + client.restore(RestoreServerUtil.createRestoreRequest(BACKUP_ROOT_DIR, backupId, false, restore_tableset, tablemap, false)); HBaseAdmin hba = TEST_UTIL.getHBaseAdmin(); assertTrue(hba.tableExists(table2_restore)); @@ -154,8 +181,8 @@ public class TestFullRestore extends TestBackupBase { TableName[] tableset = new TableName[] { table1 }; BackupAdmin client = getBackupAdmin(); - client.restore(createRestoreRequest(BACKUP_ROOT_DIR, backupId, false, tableset, null, - true)); + client.restore(RestoreServerUtil.createRestoreRequest(BACKUP_ROOT_DIR, backupId, false, + tableset, null, true)); } /** @@ -198,7 +225,7 @@ public class TestFullRestore extends TestBackupBase { TableName[] restore_tableset = new TableName[] { table2, table3 }; BackupAdmin client = getBackupAdmin(); - client.restore(createRestoreRequest(BACKUP_ROOT_DIR, backupId, + client.restore(RestoreServerUtil.createRestoreRequest(BACKUP_ROOT_DIR, backupId, false, restore_tableset, null, true)); } @@ -245,8 +272,8 @@ public class TestFullRestore extends TestBackupBase { TableName[] tableset = new TableName[] { TableName.valueOf("faketable") }; TableName[] tablemap = new TableName[] { table1_restore }; BackupAdmin client = getBackupAdmin(); - client.restore(createRestoreRequest(BACKUP_ROOT_DIR, backupId, false, tableset, tablemap, - false)); + client.restore(RestoreServerUtil.createRestoreRequest(BACKUP_ROOT_DIR, backupId, false, + tableset, tablemap, false)); } @@ -291,7 +318,7 @@ public class TestFullRestore extends TestBackupBase { = new TableName[] { TableName.valueOf("faketable1"), TableName.valueOf("faketable2") }; TableName[] tablemap = new TableName[] { table2_restore, table3_restore }; BackupAdmin client = getBackupAdmin(); - client.restore(createRestoreRequest(BACKUP_ROOT_DIR, backupId, false, + client.restore(RestoreServerUtil.createRestoreRequest(BACKUP_ROOT_DIR, backupId, false, restore_tableset, tablemap, false)); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackup.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackup.java index ae37b4c..2251c74 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackup.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackup.java @@ -30,6 +30,7 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HColumnDescriptor; import 
org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.backup.util.RestoreServerUtil; import org.apache.hadoop.hbase.client.BackupAdmin; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; @@ -142,7 +143,7 @@ public class TestIncrementalBackup extends TestBackupBase { BackupAdmin client = getBackupAdmin(); LOG.debug("Restoring full " + backupIdFull); - client.restore(createRestoreRequest(BACKUP_ROOT_DIR, backupIdFull, false, + client.restore(RestoreServerUtil.createRestoreRequest(BACKUP_ROOT_DIR, backupIdFull, false, tablesRestoreFull, tablesMapFull, false)); @@ -168,8 +169,8 @@ public class TestIncrementalBackup extends TestBackupBase { new TableName[] { table1, table2 }; TableName[] tablesMapIncMultiple = new TableName[] { table1_restore, table2_restore }; - client.restore(createRestoreRequest(BACKUP_ROOT_DIR, backupIdIncMultiple2, false, - tablesRestoreIncMultiple, tablesMapIncMultiple, true)); + client.restore(RestoreServerUtil.createRestoreRequest(BACKUP_ROOT_DIR, backupIdIncMultiple2, + false, tablesRestoreIncMultiple, tablesMapIncMultiple, true)); hTable = (HTable) conn.getTable(table1_restore); LOG.debug("After incremental restore: " + hTable.getTableDescriptor()); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupDeleteTable.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupDeleteTable.java index ad11548..a7c0713 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupDeleteTable.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupDeleteTable.java @@ -25,6 +25,7 @@ import java.util.List; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.backup.util.RestoreServerUtil; import org.apache.hadoop.hbase.client.BackupAdmin; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; @@ -101,7 +102,7 @@ public class TestIncrementalBackupDeleteTable extends TestBackupBase { new TableName[] { table1_restore, table2_restore }; BackupAdmin client = getBackupAdmin(); - client.restore(createRestoreRequest(BACKUP_ROOT_DIR, backupIdFull, false, + client.restore(RestoreServerUtil.createRestoreRequest(BACKUP_ROOT_DIR, backupIdFull, false, tablesRestoreFull, tablesMapFull, false)); @@ -126,8 +127,8 @@ public class TestIncrementalBackupDeleteTable extends TestBackupBase { new TableName[] { table1 }; TableName[] tablesMapIncMultiple = new TableName[] { table1_restore }; - client.restore(createRestoreRequest(BACKUP_ROOT_DIR, backupIdIncMultiple, false, - tablesRestoreIncMultiple, tablesMapIncMultiple, true)); + client.restore(RestoreServerUtil.createRestoreRequest(BACKUP_ROOT_DIR, backupIdIncMultiple, + false, tablesRestoreIncMultiple, tablesMapIncMultiple, true)); hTable = (HTable) conn.getTable(table1_restore); Assert.assertThat(TEST_UTIL.countRows(hTable), CoreMatchers.equalTo(NB_ROWS_IN_BATCH * 2)); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestRemoteBackup.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestRemoteBackup.java index 84fad8c..cc351e1 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestRemoteBackup.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestRemoteBackup.java @@ -21,6 +21,7 @@ import 
org.apache.commons.logging.LogFactory; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.backup.util.RestoreServerUtil; import org.apache.hadoop.hbase.client.BackupAdmin; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; @@ -100,8 +101,8 @@ public class TestRemoteBackup extends TestBackupBase { new TableName[] { table1_restore }; BackupAdmin client = getBackupAdmin(); - client.restore(createRestoreRequest(BACKUP_REMOTE_ROOT_DIR, backupId, false, tablesRestoreFull, - tablesMapFull, false)); + client.restore(RestoreServerUtil.createRestoreRequest(BACKUP_REMOTE_ROOT_DIR, backupId, false, + tablesRestoreFull, tablesMapFull, false)); // check tables for full restore HBaseAdmin hAdmin = TEST_UTIL.getHBaseAdmin(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestRemoteRestore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestRemoteRestore.java index 1be2aa2..988d07c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestRemoteRestore.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestRemoteRestore.java @@ -16,6 +16,7 @@ import static org.junit.Assert.assertTrue; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.backup.util.RestoreServerUtil; import org.apache.hadoop.hbase.client.HBaseAdmin; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.junit.Test; @@ -39,8 +40,8 @@ public class TestRemoteRestore extends TestBackupBase { LOG.info("backup complete"); TableName[] tableset = new TableName[] { table1 }; TableName[] tablemap = new TableName[] { table1_restore }; - getBackupAdmin().restore(createRestoreRequest(BACKUP_REMOTE_ROOT_DIR, backupId, false, tableset, - tablemap, false)); + getBackupAdmin().restore(RestoreServerUtil.createRestoreRequest(BACKUP_REMOTE_ROOT_DIR, + backupId, false, tableset, tablemap, false)); HBaseAdmin hba = TEST_UTIL.getHBaseAdmin(); assertTrue(hba.tableExists(table1_restore)); TEST_UTIL.deleteTable(table1_restore); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestRestoreBoundaryTests.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestRestoreBoundaryTests.java index d2308d1..e20f2cc 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestRestoreBoundaryTests.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestRestoreBoundaryTests.java @@ -25,6 +25,7 @@ import java.util.List; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.backup.util.RestoreServerUtil; import org.apache.hadoop.hbase.client.HBaseAdmin; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.junit.Test; @@ -46,8 +47,8 @@ public class TestRestoreBoundaryTests extends TestBackupBase { LOG.info("backup complete"); TableName[] tableset = new TableName[] { table1 }; TableName[] tablemap = new TableName[] { table1_restore }; - getBackupAdmin().restore(createRestoreRequest(BACKUP_ROOT_DIR, backupId, false, tableset, tablemap, - false)); + getBackupAdmin().restore(RestoreServerUtil.createRestoreRequest(BACKUP_ROOT_DIR, backupId, + false, tableset, tablemap, false)); HBaseAdmin hba = TEST_UTIL.getHBaseAdmin(); 
assertTrue(hba.tableExists(table1_restore)); TEST_UTIL.deleteTable(table1_restore); @@ -65,13 +66,13 @@ public class TestRestoreBoundaryTests extends TestBackupBase { String backupId = fullTableBackup(tables); TableName[] restore_tableset = new TableName[] { table2, table3}; TableName[] tablemap = new TableName[] { table2_restore, table3_restore }; - getBackupAdmin().restore(createRestoreRequest(BACKUP_ROOT_DIR, backupId, false, restore_tableset, - tablemap, - false)); + getBackupAdmin().restore(RestoreServerUtil.createRestoreRequest(BACKUP_ROOT_DIR, backupId, + false, restore_tableset, tablemap, false)); HBaseAdmin hba = TEST_UTIL.getHBaseAdmin(); assertTrue(hba.tableExists(table2_restore)); assertTrue(hba.tableExists(table3_restore)); TEST_UTIL.deleteTable(table2_restore); TEST_UTIL.deleteTable(table3_restore); + hba.close(); } } \ No newline at end of file diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java index d3d5357..a1cee3c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java @@ -440,6 +440,12 @@ public class TestCatalogJanitor { return null; } + @Override + public long restoreTables(String backupRootDir, + String backupId, boolean check, List sTableList, + List tTableList, boolean isOverwrite) throws IOException { + return -1; + } @Override public List listTableDescriptorsByNamespace(String name) throws IOException { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/RestoreTablesProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/RestoreTablesProcedure.java new file mode 100644 index 0000000..4ec232b --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/RestoreTablesProcedure.java @@ -0,0 +1,398 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hbase.backup.impl; + +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.Iterator; +import java.util.List; +import java.util.TreeSet; +import java.util.concurrent.atomic.AtomicBoolean; + +import org.apache.commons.lang.StringUtils; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.MetaTableAccessor; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.backup.BackupType; +import org.apache.hadoop.hbase.backup.HBackupFileSystem; +import org.apache.hadoop.hbase.backup.impl.BackupManifest.BackupImage; +import org.apache.hadoop.hbase.backup.util.RestoreServerUtil; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.client.Admin; +import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.client.ConnectionFactory; +import org.apache.hadoop.hbase.client.HConnection; +import org.apache.hadoop.hbase.client.TableState; +import org.apache.hadoop.hbase.master.TableStateManager; +import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv; +import org.apache.hadoop.hbase.master.procedure.TableProcedureInterface; +import org.apache.hadoop.hbase.procedure2.StateMachineProcedure; +import org.apache.hadoop.hbase.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos; +import org.apache.hadoop.hbase.protobuf.generated.MasterProtos; +import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesState; +import org.apache.hadoop.security.UserGroupInformation; + +@InterfaceAudience.Private +public class RestoreTablesProcedure + extends StateMachineProcedure + implements TableProcedureInterface { + private static final Log LOG = LogFactory.getLog(RestoreTablesProcedure.class); + + private final AtomicBoolean aborted = new AtomicBoolean(false); + private Configuration conf; + private String backupId; + private List sTableList; + private List tTableList; + private String targetRootDir; + private boolean isOverwrite; + + public RestoreTablesProcedure() { + // Required by the Procedure framework to create the procedure on replay + } + + public RestoreTablesProcedure(final MasterProcedureEnv env, + final String targetRootDir, String backupId, List sTableList, + List tTableList, boolean isOverwrite) throws IOException { + this.targetRootDir = targetRootDir; + this.backupId = backupId; + this.sTableList = sTableList; + this.tTableList = tTableList; + if (tTableList == null || tTableList.isEmpty()) { + this.tTableList = sTableList; + } + this.isOverwrite = isOverwrite; + this.setOwner(env.getRequestUser().getUGI().getShortUserName()); + } + + @Override + public byte[] getResult() { + return null; + } + + /** + * Validate target Tables + * @param conn connection + * @param mgr table state manager + * @param tTableArray: target tables + * @param isOverwrite overwrite existing table + * @throws IOException exception + */ + private void checkTargetTables(Connection conn, TableStateManager mgr, TableName[] tTableArray, + boolean isOverwrite) + throws IOException { + ArrayList existTableList = new ArrayList<>(); + ArrayList disabledTableList = new ArrayList<>(); + + // check if the tables already exist + for (TableName tableName : tTableArray) { + if (MetaTableAccessor.tableExists(conn, 
tableName)) { + existTableList.add(tableName); + if (mgr.isTableState(tableName, TableState.State.DISABLED, TableState.State.DISABLING)) { + disabledTableList.add(tableName); + } + } else { + LOG.info("HBase table " + tableName + + " does not exist. It will be created during restore process"); + } + } + + if (existTableList.size() > 0) { + if (!isOverwrite) { + LOG.error("Existing table (" + existTableList + ") found in the restore target, please add " + + "\"-overwrite\" option in the command if you mean to restore to these existing tables"); + throw new IOException("Existing table found in target while no \"-overwrite\" " + + "option found"); + } else { + if (disabledTableList.size() > 0) { + LOG.error("Found offline table in the restore target, " + + "please enable them before restore with \"-overwrite\" option"); + LOG.info("Offline table list in restore target: " + disabledTableList); + throw new IOException( + "Found offline table in the target when restore with \"-overwrite\" option"); + } + } + } + } + + /** + * Restore operation handle each backupImage in iterator + * @param it: backupImage iterator - ascending + * @param sTable: table to be restored + * @param tTable: table to be restored to + * @throws IOException exception + */ + private void restoreImages(Iterator it, TableName sTable, + TableName tTable, boolean truncateIfExists) + throws IOException { + + // First image MUST be image of a FULL backup + BackupImage image = it.next(); + + String rootDir = image.getRootDir(); + String backupId = image.getBackupId(); + Path backupRoot = new Path(rootDir); + + // We need hFS only for full restore (see the code) + RestoreServerUtil restoreTool = new RestoreServerUtil(conf, backupRoot, backupId); + BackupManifest manifest = HBackupFileSystem.getManifest(sTable, conf, backupRoot, backupId); + + Path tableBackupPath = HBackupFileSystem.getTableBackupPath(sTable, backupRoot, backupId); + + // TODO: convert feature will be provided in a future JIRA + boolean converted = false; + String lastIncrBackupId = null; + List logDirList = null; + + // Scan incremental backups + if (it.hasNext()) { + // obtain the backupId for most recent incremental + logDirList = new ArrayList(); + while (it.hasNext()) { + BackupImage im = it.next(); + String logBackupDir = HBackupFileSystem.getLogBackupDir(im.getRootDir(), im.getBackupId()); + logDirList.add(logBackupDir); + lastIncrBackupId = im.getBackupId(); + } + } + if (manifest.getType() == BackupType.FULL || converted) { + LOG.info("Restoring '" + sTable + "' to '" + tTable + "' from " + + (converted ? "converted" : "full") + " backup image " + tableBackupPath.toString()); + restoreTool.fullRestoreTable(tableBackupPath, sTable, tTable, + converted, truncateIfExists, lastIncrBackupId); + } else { // incremental Backup + throw new IOException("Unexpected backup type " + image.getType()); + } + + // The rest one are incremental + if (logDirList != null) { + String logDirs = StringUtils.join(logDirList, ","); + LOG.info("Restoring '" + sTable + "' to '" + tTable + + "' from log dirs: " + logDirs); + String[] sarr = new String[logDirList.size()]; + logDirList.toArray(sarr); + Path[] paths = org.apache.hadoop.util.StringUtils.stringToPath(sarr); + restoreTool.incrementalRestoreTable(tableBackupPath, paths, new TableName[] { sTable }, + new TableName[] { tTable }, lastIncrBackupId); + } + LOG.info(sTable + " has been successfully restored to " + tTable); + } + + /** + * Restore operation. 
+   * Restore operation, stage 2: resolve the backup image dependencies and restore each table.
+   * @param backupManifestMap map of table name to backup manifest
+   * @param sTableArray the array of tables to be restored
+   * @param tTableArray the array of mapping tables to restore to
+   * @param isOverwrite whether existing target tables should be overwritten
+   * @throws IOException exception
+   */
+  private void restoreStage(HashMap<TableName, BackupManifest> backupManifestMap,
+      TableName[] sTableArray, TableName[] tTableArray, boolean isOverwrite) throws IOException {
+    TreeSet<BackupImage> restoreImageSet = new TreeSet<BackupImage>();
+    boolean truncateIfExists = isOverwrite;
+    try {
+      for (int i = 0; i < sTableArray.length; i++) {
+        TableName table = sTableArray[i];
+        BackupManifest manifest = backupManifestMap.get(table);
+        // Get the image list of this backup for restore in time order from old
+        // to new.
+        List<BackupImage> list = new ArrayList<BackupImage>();
+        list.add(manifest.getBackupImage());
+        List<BackupImage> depList = manifest.getDependentListByTable(table);
+        list.addAll(depList);
+        TreeSet<BackupImage> restoreList = new TreeSet<BackupImage>(list);
+        LOG.debug("need to clear merged Image. to be implemented in future jira");
+        restoreImages(restoreList.iterator(), table, tTableArray[i], truncateIfExists);
+        restoreImageSet.addAll(restoreList);
+
+        if (restoreImageSet != null && !restoreImageSet.isEmpty()) {
+          LOG.info("Restore includes the following image(s):");
+          for (BackupImage image : restoreImageSet) {
+            LOG.info("Backup: " + image.getBackupId() + " "
+                + HBackupFileSystem.getTableBackupDir(image.getRootDir(), image.getBackupId(),
+                  table));
+          }
+        }
+      }
+    } catch (Exception e) {
+      LOG.error("Failed", e);
+      throw new IOException(e);
+    }
+    LOG.debug("restoreStage finished");
+  }
+
+  @Override
+  protected Flow executeFromState(final MasterProcedureEnv env, final RestoreTablesState state)
+      throws InterruptedException {
+    if (conf == null) {
+      conf = env.getMasterConfiguration();
+    }
+    if (LOG.isTraceEnabled()) {
+      LOG.trace(this + " execute state=" + state);
+    }
+    Connection conn = env.getMasterServices().getClusterConnection();
+    TableName[] tTableArray = tTableList.toArray(new TableName[tTableList.size()]);
+    try (Admin admin = conn.getAdmin()) {
+      switch (state) {
+        case VALIDATION:
+          // check the target tables
+          checkTargetTables(env.getMasterServices().getConnection(),
+            env.getMasterServices().getTableStateManager(), tTableArray, isOverwrite);
+          setNextState(RestoreTablesState.RESTORE_IMAGES);
+          break;
+        case RESTORE_IMAGES:
+          TableName[] sTableArray = sTableList.toArray(new TableName[sTableList.size()]);
+          HashMap<TableName, BackupManifest> backupManifestMap = new HashMap<>();
+          // check and load backup image manifest for the tables
+          Path rootPath = new Path(targetRootDir);
+          HBackupFileSystem.checkImageManifestExist(backupManifestMap, sTableArray, conf, rootPath,
+            backupId);
+          restoreStage(backupManifestMap, sTableArray, tTableArray, isOverwrite);
+          return Flow.NO_MORE_STATE;
+        default:
+          throw new UnsupportedOperationException("unhandled state=" + state);
+      }
+    } catch (IOException e) {
+      setFailure("restore-table", e);
+    }
+    return Flow.HAS_MORE_STATE;
+  }
+
+  @Override
+  protected void rollbackState(final MasterProcedureEnv env, final RestoreTablesState state)
+      throws IOException {
+  }
+
+  @Override
+  protected RestoreTablesState getState(final int stateId) {
+    return RestoreTablesState.valueOf(stateId);
+  }
+
+  @Override
+  protected int getStateId(final RestoreTablesState state) {
+    return state.getNumber();
+  }
+
+  @Override
+  protected RestoreTablesState getInitialState() {
+    return RestoreTablesState.VALIDATION;
+  }
+
+  @Override
+  protected void setNextState(final RestoreTablesState state) {
+    if (aborted.get()) {
+      setAbortFailure("restore-table", "abort requested");
+    } else {
+      super.setNextState(state);
+    }
+  }
+
+  @Override
+  public boolean abort(final MasterProcedureEnv env) {
+    aborted.set(true);
+    return true;
+  }
+
+  @Override
+  public void toStringClassDetails(StringBuilder sb) {
+    sb.append(getClass().getSimpleName());
+    sb.append(" (targetRootDir=");
+    sb.append(targetRootDir);
+    sb.append(" isOverwrite=");
+    sb.append(isOverwrite);
+    sb.append(" backupId=");
+    sb.append(backupId);
+    sb.append(")");
+  }
+
+  MasterProtos.RestoreTablesRequest toRestoreTables() {
+    MasterProtos.RestoreTablesRequest.Builder bldr =
+        MasterProtos.RestoreTablesRequest.newBuilder();
+    bldr.setOverwrite(isOverwrite).setBackupId(backupId);
+    bldr.setBackupRootDir(targetRootDir);
+    for (TableName table : sTableList) {
+      bldr.addTables(ProtobufUtil.toProtoTableName(table));
+    }
+    for (TableName table : tTableList) {
+      bldr.addTargetTables(ProtobufUtil.toProtoTableName(table));
+    }
+    return bldr.build();
+  }
+
+  @Override
+  public void serializeStateData(final OutputStream stream) throws IOException {
+    super.serializeStateData(stream);
+
+    MasterProtos.RestoreTablesRequest restoreTables = toRestoreTables();
+    restoreTables.writeDelimitedTo(stream);
+  }
+
+  @Override
+  public void deserializeStateData(final InputStream stream) throws IOException {
+    super.deserializeStateData(stream);
+
+    MasterProtos.RestoreTablesRequest proto =
+        MasterProtos.RestoreTablesRequest.parseDelimitedFrom(stream);
+    backupId = proto.getBackupId();
+    targetRootDir = proto.getBackupRootDir();
+    isOverwrite = proto.getOverwrite();
+    sTableList = new ArrayList<>(proto.getTablesList().size());
+    for (HBaseProtos.TableName table : proto.getTablesList()) {
+      sTableList.add(ProtobufUtil.toTableName(table));
+    }
+    tTableList = new ArrayList<>(proto.getTargetTablesList().size());
+    for (HBaseProtos.TableName table : proto.getTargetTablesList()) {
+      tTableList.add(ProtobufUtil.toTableName(table));
+    }
+  }
+
+  @Override
+  public TableName getTableName() {
+    return TableName.BACKUP_TABLE_NAME;
+  }
+
+  @Override
+  public TableOperationType getTableOperationType() {
+    return TableOperationType.RESTORE;
+  }
+
+  @Override
+  protected boolean acquireLock(final MasterProcedureEnv env) {
+    if (env.waitInitialized(this)) {
+      return false;
+    }
+    return env.getProcedureQueue().tryAcquireTableExclusiveLock(this, TableName.BACKUP_TABLE_NAME);
+  }
+
+  @Override
+  protected void releaseLock(final MasterProcedureEnv env) {
+    env.getProcedureQueue().releaseTableExclusiveLock(this, TableName.BACKUP_TABLE_NAME);
+  }
+}
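
For reference, a minimal sketch (not part of the patch) of how a master-side caller might submit this procedure and obtain its id. The MasterServices handle, table names, backup id and backup root dir below are hypothetical placeholders; only the RestoreTablesProcedure constructor shape comes from the code above.

import java.util.Arrays;
import java.util.List;

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.backup.impl.RestoreTablesProcedure;
import org.apache.hadoop.hbase.master.MasterServices;
import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;

public class RestoreSubmitSketch {
  /** Submits a RestoreTablesProcedure and returns the procedure id (illustrative only). */
  public static long submitRestore(MasterServices master) throws Exception {
    ProcedureExecutor<MasterProcedureEnv> procExec = master.getMasterProcedureExecutor();
    List<TableName> fromTables = Arrays.asList(TableName.valueOf("t1"));        // source tables
    List<TableName> toTables = Arrays.asList(TableName.valueOf("t1_restored")); // mapped targets
    // Placeholder root dir and backup id; in practice these come from the client's RestoreRequest.
    return procExec.submitProcedure(new RestoreTablesProcedure(procExec.getEnvironment(),
        "hdfs://namenode:8020/backup-root", "backup_1462388458633", fromTables, toTables, false));
  }
}

Because the procedure writes its request out in serializeStateData and reads it back in deserializeStateData, an in-flight restore can be replayed after a master restart; a caller only needs to track the returned procedure id.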