diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/backup/BackupRestoreClientFactory.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/backup/BackupRestoreClientFactory.java
deleted file mode 100644
index b60ab21..0000000
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/backup/BackupRestoreClientFactory.java
+++ /dev/null
@@ -1,55 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.backup;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.classification.InterfaceStability;
-import org.apache.hadoop.util.ReflectionUtils;
-
-@InterfaceAudience.Private
-@InterfaceStability.Evolving
-public final class BackupRestoreClientFactory {
-  private static final Log LOG = LogFactory.getLog(BackupRestoreClientFactory.class);
-
-  private BackupRestoreClientFactory(){
-    throw new AssertionError("Instantiating utility class...");
-  }
-
-
-  /**
-   * Gets restore client implementation
-   * @param conf - configuration
-   * @return backup client
-   */
-  public static RestoreClient getRestoreClient(Configuration conf) {
-    try{
-      Class cls =
-          conf.getClassByName("org.apache.hadoop.hbase.backup.impl.RestoreClientImpl");
-
-      RestoreClient client = (RestoreClient) ReflectionUtils.newInstance(cls, conf);
-      client.setConf(conf);
-      return client;
-    } catch(Exception e){
-      LOG.error("Can not instantiate RestoreClient. Make sure you have hbase-server jar in CLASSPATH", e);
-    }
-    return null;
-  }
-}
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/backup/RestoreClient.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/backup/RestoreClient.java
deleted file mode 100644
index 07f573e..0000000
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/backup/RestoreClient.java
+++ /dev/null
@@ -1,48 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.backup;
-
-import java.io.IOException;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.classification.InterfaceStability;
-
-@InterfaceAudience.Private
-@InterfaceStability.Evolving
-public interface RestoreClient {
-
-  public void setConf(Configuration conf);
-
-  /**
-   * Restore operation.
-   * @param backupRootDir The root dir for backup image
-   * @param backupId The backup id for image to be restored
-   * @param check True if only do dependency check
-   * @param sTableArray The array of tables to be restored
-   * @param tTableArray The array of mapping tables to restore to
-   * @param isOverwrite True then do restore overwrite if target table exists, otherwise fail the
-   *          request if target table exists
-   * @throws IOException if any failure during restore
-   */
-  public void restore(
-      String backupRootDir,
-      String backupId, boolean check, TableName[] sTableArray,
-      TableName[] tTableArray, boolean isOverwrite) throws IOException;
-}
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/backup/RestoreRequest.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/backup/RestoreRequest.java
index c78f0bc..2e1807b 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/backup/RestoreRequest.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/backup/RestoreRequest.java
@@ -21,21 +21,20 @@ import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.classification.InterfaceStability;
 
-@InterfaceAudience.Public
-@InterfaceStability.Evolving
-
 /**
  * POJO class for restore request
  */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
 public class RestoreRequest {
-
+
   private String backupRootDir;
   private String backupId;
   private boolean check = false;
   private TableName[] fromTables;
   private TableName[] toTables;
   private boolean overwrite = false;
-
+
   public RestoreRequest(){
   }
@@ -73,7 +72,6 @@ public class RestoreRequest {
   public RestoreRequest setFromTables(TableName[] fromTables) {
     this.fromTables = fromTables;
     return this;
-
   }
 
   public TableName[] getToTables() {
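For orientation, the fluent setters collapse into call chains like the one below. This is a hypothetical sketch, not part of the patch: setter names other than setFromTables/setToTables are assumed to mirror the setFromTables pattern shown in this hunk, and the path and table names are illustrative.

// Hypothetical usage; setBackupRootDir/setBackupId/setCheck/setOverwrite
// are assumed from the surrounding class, values are made up.
RestoreRequest request = new RestoreRequest()
    .setBackupRootDir("hdfs://backup-ns/hbase-backup")            // where backup images live
    .setBackupId("backup_1463067800000")                          // image to restore
    .setCheck(false)                                              // full restore, not a dependency check
    .setFromTables(new TableName[] { TableName.valueOf("t1") })
    .setToTables(new TableName[] { TableName.valueOf("t1_restored") })
    .setOverwrite(false);                                         // fail if the target table exists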
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
index f46c855..45d89f6 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
@@ -38,7 +38,7 @@ import org.apache.hadoop.hbase.TableExistsException;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.TableNotFoundException;
 import org.apache.hadoop.hbase.backup.BackupRequest;
-import org.apache.hadoop.hbase.backup.BackupType;
+import org.apache.hadoop.hbase.backup.RestoreRequest;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.classification.InterfaceStability;
 import org.apache.hadoop.hbase.client.security.SecurityCapability;
@@ -916,6 +916,20 @@ public interface Admin extends Abortable, Closeable {
 
   /**
+   * Restore operation. Asynchronous version.
+   * @param request RestoreRequest instance
+   * @throws IOException
+   */
+  public Future<Void> restoreTablesAsync(final RestoreRequest request) throws IOException;
+
+  /**
+   * Restore operation. Synchronous version.
+   * @param userRequest RestoreRequest instance
+   * @throws IOException
+   */
+  public void restoreTables(final RestoreRequest userRequest) throws IOException;
+
+  /**
    * Modify an existing table, more IRB friendly version. Asynchronous operation. This means that
    * it may be a while before your schema change is updated across all of the table.
    * You can use Future.get(long, TimeUnit) to wait on the operation to complete.
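For context, a minimal client-side sketch of the two new Admin entry points. The connection boilerplate is standard HBase client code; the request is assumed to be built as in the earlier sketch, and the timeout is the caller's choice.

// Hypothetical caller code, not part of this patch.
void restoreExample(Configuration conf, RestoreRequest request) throws Exception {
  try (Connection conn = ConnectionFactory.createConnection(conf);
       Admin admin = conn.getAdmin()) {
    // Synchronous: submits the restore and blocks until the master-side
    // procedure completes (bounded by the admin sync wait timeout).
    admin.restoreTables(request);

    // Asynchronous: returns a procedure-backed Future the caller can wait on.
    Future<Void> f = admin.restoreTablesAsync(request);
    f.get(30, TimeUnit.MINUTES);
  }
}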
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BackupAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BackupAdmin.java
index 4134cc8..a7aa1be 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BackupAdmin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BackupAdmin.java
@@ -71,6 +71,21 @@ public interface BackupAdmin extends Closeable{
    * @return the backup Id future
    */
   public Future<String> backupTablesAsync(final BackupRequest userRequest) throws IOException;
+
+  /**
+   * Restore backup
+   * @param request - restore request
+   * @throws IOException exception
+   */
+  public void restore(RestoreRequest request) throws IOException;
+
+  /**
+   * Restore backup
+   * @param request - restore request
+   * @return Future which client can wait on
+   * @throws IOException exception
+   */
+  public Future<Void> restoreAsync(RestoreRequest request) throws IOException;
 
   /**
    * Describe backup image command
@@ -156,12 +171,4 @@ public interface BackupAdmin extends Closeable{
    * @throws IOException exception
    */
   public void removeFromBackupSet(String name, String[] tables) throws IOException;
-
-  /**
-   * Restore backup
-   * @param request - restore request
-   * @throws IOException exception
-   */
-  public void restore(RestoreRequest request) throws IOException;
-
 }
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
index c245ef4..1ac43f9 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
@@ -1408,6 +1408,13 @@ class ConnectionImplementation implements ClusterConnection, Closeable {
     }
 
     @Override
+    public MasterProtos.RestoreTablesResponse restoreTables(
+        RpcController controller,
+        MasterProtos.RestoreTablesRequest request) throws ServiceException {
+      return stub.restoreTables(controller, request);
+    }
+
+    @Override
     public MasterProtos.AddColumnResponse addColumn(
         RpcController controller,
         MasterProtos.AddColumnRequest request) throws ServiceException {
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
index 8ba26ba..098404e 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
@@ -62,6 +62,7 @@ import org.apache.hadoop.hbase.TableNotFoundException;
 import org.apache.hadoop.hbase.UnknownRegionException;
 import org.apache.hadoop.hbase.ZooKeeperConnectionException;
 import org.apache.hadoop.hbase.backup.BackupRequest;
+import org.apache.hadoop.hbase.backup.RestoreRequest;
 import org.apache.hadoop.hbase.backup.util.BackupClientUtil;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.classification.InterfaceStability;
@@ -148,6 +149,8 @@ import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyTableResponse;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveRegionRequest;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotRequest;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotResponse;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesResponse;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningRequest;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetNormalizerRunningRequest;
@@ -1630,6 +1633,60 @@ public class HBaseAdmin implements Admin {
     }
   }
 
+  /**
+   * Restore operation.
+   * @param userRequest RestoreRequest instance
+   * @throws IOException
+   */
+  @Override
+  public Future<Void> restoreTablesAsync(final RestoreRequest userRequest) throws IOException {
+    RestoreTablesResponse response = executeCallable(
+      new MasterCallable<RestoreTablesResponse>(getConnection()) {
+        @Override
+        public RestoreTablesResponse call(int callTimeout) throws ServiceException {
+          RestoreTablesRequest request = RequestConverter.buildRestoreTablesRequest(
+            userRequest.getBackupRootDir(), userRequest.getBackupId(),
+            userRequest.isCheck(), userRequest.getFromTables(), userRequest.getToTables(),
+            userRequest.isOverwrite());
+          return master.restoreTables(null, request);
+        }
+      });
+    return new TableRestoreFuture(this, TableName.BACKUP_TABLE_NAME, response);
+  }
+
+  @Override
+  public void restoreTables(final RestoreRequest userRequest) throws IOException {
+    get(restoreTablesAsync(userRequest),
+      syncWaitTimeout, TimeUnit.MILLISECONDS);
+  }
+
+  private static class TableRestoreFuture extends TableFuture<Void> {
+    public TableRestoreFuture(final HBaseAdmin admin, final TableName tableName,
+        final RestoreTablesResponse response) {
+      super(admin, tableName,
+        (response != null && response.hasProcId()) ? response.getProcId() : null);
+    }
+
+    @Override
+    public String getOperationType() {
+      return "RESTORE";
+    }
+
+    @Override
+    protected Void convertResult(final GetProcedureResultResponse response) throws IOException {
+      if (response.hasException()) {
+        throw ForeignExceptionUtil.toIOException(response.getException());
+      }
+      return null;
+    }
+
+    @Override
+    protected Void postOperationResult(final Void result,
+        final long deadlineTs) throws IOException, TimeoutException {
+      return result;
+    }
+  }
+
   @Override
   public Future modifyTable(final TableName tableName, final HTableDescriptor htd)
       throws IOException {
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseBackupAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseBackupAdmin.java
index a67d3d2..8e08fd0 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseBackupAdmin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseBackupAdmin.java
@@ -36,9 +36,7 @@ import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.backup.BackupInfo;
 import org.apache.hadoop.hbase.backup.BackupInfo.BackupState;
 import org.apache.hadoop.hbase.backup.BackupRequest;
-import org.apache.hadoop.hbase.backup.BackupRestoreClientFactory;
 import org.apache.hadoop.hbase.backup.BackupType;
-import org.apache.hadoop.hbase.backup.RestoreClient;
 import org.apache.hadoop.hbase.backup.RestoreRequest;
 import org.apache.hadoop.hbase.backup.impl.BackupSystemTable;
 import org.apache.hadoop.hbase.backup.util.BackupClientUtil;
@@ -407,10 +405,12 @@ public class HBaseBackupAdmin implements BackupAdmin {
 
   @Override
   public void restore(RestoreRequest request) throws IOException {
-    RestoreClient client = BackupRestoreClientFactory.getRestoreClient(admin.getConfiguration());
-    client.restore(request.getBackupRootDir(), request.getBackupId(), request.isCheck(),
-      request.getFromTables(), request.getToTables(), request.isOverwrite());
+    admin.restoreTables(request);
+  }
 
+  @Override
+  public Future<Void> restoreAsync(RestoreRequest request) throws IOException {
+    return admin.restoreTablesAsync(request);
   }
 
   @Override
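The RequestConverter change that follows assembles the wire-level RestoreTablesRequest from these values. Spelled out for a single source/target table pair, the builder calls it makes look like this (values are illustrative; the method names mirror the converter body in the next hunk):

// Equivalent of RequestConverter.buildRestoreTablesRequest(...) for one table.
RestoreTablesRequest req = RestoreTablesRequest.newBuilder()
    .setBackupId("backup_1463067800000")
    .setBackupRootDir("hdfs://backup-ns/hbase-backup")
    .setDependencyCheckOnly(false)   // the "check" flag: validate dependencies only
    .setOverwrite(false)
    .addTables(ProtobufUtil.toProtoTableName(TableName.valueOf("t1")))
    .addTargetTables(ProtobufUtil.toProtoTableName(TableName.valueOf("t1_restored")))
    .build();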
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/RequestConverter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/RequestConverter.java
index 5babfb8..1acc1c9 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/RequestConverter.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/RequestConverter.java
@@ -105,6 +105,7 @@ import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyTableRequest;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveRegionRequest;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.NormalizeRequest;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.OfflineRegionRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesRequest;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RunCatalogScanRequest;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningRequest;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetNormalizerRunningRequest;
@@ -1286,6 +1287,25 @@ public final class RequestConverter {
     return builder.build();
   }
 
+  public static RestoreTablesRequest buildRestoreTablesRequest(String backupRootDir,
+      String backupId, boolean check, TableName[] sTableList,
+      TableName[] tTableList, boolean isOverwrite) {
+    RestoreTablesRequest.Builder
builder = RestoreTablesRequest.newBuilder(); + builder.setBackupId(backupId).setBackupRootDir(backupRootDir); + builder.setDependencyCheckOnly(check).setOverwrite(isOverwrite); + if (sTableList != null) { + for (TableName table : sTableList) { + builder.addTables(ProtobufUtil.toProtoTableName(table)); + } + } + if (tTableList != null) { + for (TableName table : tTableList) { + builder.addTargetTables(ProtobufUtil.toProtoTableName(table)); + } + } + return builder.build(); + } + /** * Creates a protocol buffer GetSchemaAlterStatusRequest * diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java index b5b6b4c..d4dd46f 100644 --- a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java +++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java @@ -90,6 +90,88 @@ public final class MasterProtos { // @@protoc_insertion_point(enum_scope:hbase.pb.MasterSwitchType) } + /** + * Protobuf enum {@code hbase.pb.RestoreTablesState} + */ + public enum RestoreTablesState + implements com.google.protobuf.ProtocolMessageEnum { + /** + * VALIDATION = 1; + */ + VALIDATION(0, 1), + /** + * RESTORE_IMAGES = 2; + */ + RESTORE_IMAGES(1, 2), + ; + + /** + * VALIDATION = 1; + */ + public static final int VALIDATION_VALUE = 1; + /** + * RESTORE_IMAGES = 2; + */ + public static final int RESTORE_IMAGES_VALUE = 2; + + + public final int getNumber() { return value; } + + public static RestoreTablesState valueOf(int value) { + switch (value) { + case 1: return VALIDATION; + case 2: return RESTORE_IMAGES; + default: return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap + internalGetValueMap() { + return internalValueMap; + } + private static com.google.protobuf.Internal.EnumLiteMap + internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public RestoreTablesState findValueByNumber(int number) { + return RestoreTablesState.valueOf(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor + getValueDescriptor() { + return getDescriptor().getValues().get(index); + } + public final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptorForType() { + return getDescriptor(); + } + public static final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.getDescriptor().getEnumTypes().get(1); + } + + private static final RestoreTablesState[] VALUES = values(); + + public static RestoreTablesState valueOf( + com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "EnumValueDescriptor is not for this type."); + } + return VALUES[desc.getIndex()]; + } + + private final int index; + private final int value; + + private RestoreTablesState(int index, int value) { + this.index = index; + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:hbase.pb.RestoreTablesState) + } + public interface AddColumnRequestOrBuilder extends com.google.protobuf.MessageOrBuilder { @@ -59468,20 +59550,1901 @@ public final class MasterProtos { return tables_.get(index); } - // required string target_root_dir = 3; - public static final int TARGET_ROOT_DIR_FIELD_NUMBER = 3; - private java.lang.Object targetRootDir_; + // required string target_root_dir = 3; + public static final int 
TARGET_ROOT_DIR_FIELD_NUMBER = 3; + private java.lang.Object targetRootDir_; + /** + * required string target_root_dir = 3; + */ + public boolean hasTargetRootDir() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required string target_root_dir = 3; + */ + public java.lang.String getTargetRootDir() { + java.lang.Object ref = targetRootDir_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + targetRootDir_ = s; + } + return s; + } + } + /** + * required string target_root_dir = 3; + */ + public com.google.protobuf.ByteString + getTargetRootDirBytes() { + java.lang.Object ref = targetRootDir_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + targetRootDir_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + // optional int64 workers = 4; + public static final int WORKERS_FIELD_NUMBER = 4; + private long workers_; + /** + * optional int64 workers = 4; + */ + public boolean hasWorkers() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * optional int64 workers = 4; + */ + public long getWorkers() { + return workers_; + } + + // optional int64 bandwidth = 5; + public static final int BANDWIDTH_FIELD_NUMBER = 5; + private long bandwidth_; + /** + * optional int64 bandwidth = 5; + */ + public boolean hasBandwidth() { + return ((bitField0_ & 0x00000008) == 0x00000008); + } + /** + * optional int64 bandwidth = 5; + */ + public long getBandwidth() { + return bandwidth_; + } + + private void initFields() { + type_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType.FULL; + tables_ = java.util.Collections.emptyList(); + targetRootDir_ = ""; + workers_ = 0L; + bandwidth_ = 0L; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasType()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasTargetRootDir()) { + memoizedIsInitialized = 0; + return false; + } + for (int i = 0; i < getTablesCount(); i++) { + if (!getTables(i).isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeEnum(1, type_.getNumber()); + } + for (int i = 0; i < tables_.size(); i++) { + output.writeMessage(2, tables_.get(i)); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeBytes(3, getTargetRootDirBytes()); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + output.writeInt64(4, workers_); + } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + output.writeInt64(5, bandwidth_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeEnumSize(1, type_.getNumber()); + } + for (int i = 0; i < tables_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(2, 
tables_.get(i)); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(3, getTargetRootDirBytes()); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + size += com.google.protobuf.CodedOutputStream + .computeInt64Size(4, workers_); + } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + size += com.google.protobuf.CodedOutputStream + .computeInt64Size(5, bandwidth_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest) obj; + + boolean result = true; + result = result && (hasType() == other.hasType()); + if (hasType()) { + result = result && + (getType() == other.getType()); + } + result = result && getTablesList() + .equals(other.getTablesList()); + result = result && (hasTargetRootDir() == other.hasTargetRootDir()); + if (hasTargetRootDir()) { + result = result && getTargetRootDir() + .equals(other.getTargetRootDir()); + } + result = result && (hasWorkers() == other.hasWorkers()); + if (hasWorkers()) { + result = result && (getWorkers() + == other.getWorkers()); + } + result = result && (hasBandwidth() == other.hasBandwidth()); + if (hasBandwidth()) { + result = result && (getBandwidth() + == other.getBandwidth()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasType()) { + hash = (37 * hash) + TYPE_FIELD_NUMBER; + hash = (53 * hash) + hashEnum(getType()); + } + if (getTablesCount() > 0) { + hash = (37 * hash) + TABLES_FIELD_NUMBER; + hash = (53 * hash) + getTablesList().hashCode(); + } + if (hasTargetRootDir()) { + hash = (37 * hash) + TARGET_ROOT_DIR_FIELD_NUMBER; + hash = (53 * hash) + getTargetRootDir().hashCode(); + } + if (hasWorkers()) { + hash = (37 * hash) + WORKERS_FIELD_NUMBER; + hash = (53 * hash) + hashLong(getWorkers()); + } + if (hasBandwidth()) { + hash = (37 * hash) + BANDWIDTH_FIELD_NUMBER; + hash = (53 * hash) + hashLong(getBandwidth()); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code hbase.pb.BackupTablesRequest} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_BackupTablesRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_BackupTablesRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest.Builder.class); + } + + // Construct using 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getTablesFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + type_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType.FULL; + bitField0_ = (bitField0_ & ~0x00000001); + if (tablesBuilder_ == null) { + tables_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000002); + } else { + tablesBuilder_.clear(); + } + targetRootDir_ = ""; + bitField0_ = (bitField0_ & ~0x00000004); + workers_ = 0L; + bitField0_ = (bitField0_ & ~0x00000008); + bandwidth_ = 0L; + bitField0_ = (bitField0_ & ~0x00000010); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_BackupTablesRequest_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest build() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.type_ = type_; + if (tablesBuilder_ == null) { + if (((bitField0_ & 0x00000002) == 0x00000002)) { + tables_ = java.util.Collections.unmodifiableList(tables_); + bitField0_ = (bitField0_ & ~0x00000002); + } + result.tables_ = tables_; + } else { + result.tables_ = tablesBuilder_.build(); + } + if (((from_bitField0_ & 0x00000004) == 0x00000004)) { + to_bitField0_ |= 0x00000002; + } + result.targetRootDir_ = targetRootDir_; + if (((from_bitField0_ & 0x00000008) == 0x00000008)) { + to_bitField0_ |= 0x00000004; + } + result.workers_ = workers_; + if (((from_bitField0_ & 0x00000010) == 0x00000010)) { + to_bitField0_ |= 0x00000008; + } + result.bandwidth_ = bandwidth_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest other) { + if (other == 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest.getDefaultInstance()) return this; + if (other.hasType()) { + setType(other.getType()); + } + if (tablesBuilder_ == null) { + if (!other.tables_.isEmpty()) { + if (tables_.isEmpty()) { + tables_ = other.tables_; + bitField0_ = (bitField0_ & ~0x00000002); + } else { + ensureTablesIsMutable(); + tables_.addAll(other.tables_); + } + onChanged(); + } + } else { + if (!other.tables_.isEmpty()) { + if (tablesBuilder_.isEmpty()) { + tablesBuilder_.dispose(); + tablesBuilder_ = null; + tables_ = other.tables_; + bitField0_ = (bitField0_ & ~0x00000002); + tablesBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? + getTablesFieldBuilder() : null; + } else { + tablesBuilder_.addAllMessages(other.tables_); + } + } + } + if (other.hasTargetRootDir()) { + bitField0_ |= 0x00000004; + targetRootDir_ = other.targetRootDir_; + onChanged(); + } + if (other.hasWorkers()) { + setWorkers(other.getWorkers()); + } + if (other.hasBandwidth()) { + setBandwidth(other.getBandwidth()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasType()) { + + return false; + } + if (!hasTargetRootDir()) { + + return false; + } + for (int i = 0; i < getTablesCount(); i++) { + if (!getTables(i).isInitialized()) { + + return false; + } + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required .hbase.pb.BackupType type = 1; + private org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType type_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType.FULL; + /** + * required .hbase.pb.BackupType type = 1; + */ + public boolean hasType() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .hbase.pb.BackupType type = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType getType() { + return type_; + } + /** + * required .hbase.pb.BackupType type = 1; + */ + public Builder setType(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + type_ = value; + onChanged(); + return this; + } + /** + * required .hbase.pb.BackupType type = 1; + */ + public Builder clearType() { + bitField0_ = (bitField0_ & ~0x00000001); + type_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType.FULL; + onChanged(); + return this; + } + + // repeated .hbase.pb.TableName tables = 2; + private java.util.List tables_ = + java.util.Collections.emptyList(); + private void ensureTablesIsMutable() { + if (!((bitField0_ & 0x00000002) == 0x00000002)) { + tables_ = new java.util.ArrayList(tables_); + bitField0_ |= 0x00000002; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> tablesBuilder_; + + /** + * repeated .hbase.pb.TableName tables = 2; + */ + public java.util.List getTablesList() { + if (tablesBuilder_ == null) { + return java.util.Collections.unmodifiableList(tables_); + } else { + return tablesBuilder_.getMessageList(); + } + } + /** + * repeated .hbase.pb.TableName tables = 2; + */ + public int getTablesCount() { + if (tablesBuilder_ == null) { + return tables_.size(); + } else { + return tablesBuilder_.getCount(); + } + } + /** + * repeated .hbase.pb.TableName tables = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTables(int index) { + if (tablesBuilder_ == null) { + return tables_.get(index); + } else { + return tablesBuilder_.getMessage(index); + } + } + /** + * repeated .hbase.pb.TableName tables = 2; + */ + public Builder setTables( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { + if (tablesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureTablesIsMutable(); + tables_.set(index, value); + onChanged(); + } else { + tablesBuilder_.setMessage(index, value); + } + return this; + } + /** + * repeated .hbase.pb.TableName tables = 2; + */ + public Builder setTables( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) { + if (tablesBuilder_ == null) { + ensureTablesIsMutable(); + tables_.set(index, builderForValue.build()); + onChanged(); + } else { + tablesBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.TableName tables = 2; + */ + public Builder addTables(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { + if (tablesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureTablesIsMutable(); + tables_.add(value); + onChanged(); + } else { + tablesBuilder_.addMessage(value); + } + return this; + } + /** + * repeated .hbase.pb.TableName tables = 2; + */ + public Builder addTables( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { + if (tablesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureTablesIsMutable(); + tables_.add(index, value); + onChanged(); + } else { + tablesBuilder_.addMessage(index, value); + } + return this; + } + /** + * repeated .hbase.pb.TableName tables = 2; + */ + public Builder addTables( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) { + if (tablesBuilder_ == null) { + ensureTablesIsMutable(); + tables_.add(builderForValue.build()); + onChanged(); + } else { + tablesBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.TableName tables = 2; + */ + public Builder addTables( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) { + if (tablesBuilder_ == null) { + ensureTablesIsMutable(); + tables_.add(index, builderForValue.build()); + onChanged(); + } else { + tablesBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.TableName tables = 2; + */ + public Builder addAllTables( + java.lang.Iterable values) { + if (tablesBuilder_ == null) { + ensureTablesIsMutable(); + 
super.addAll(values, tables_); + onChanged(); + } else { + tablesBuilder_.addAllMessages(values); + } + return this; + } + /** + * repeated .hbase.pb.TableName tables = 2; + */ + public Builder clearTables() { + if (tablesBuilder_ == null) { + tables_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + } else { + tablesBuilder_.clear(); + } + return this; + } + /** + * repeated .hbase.pb.TableName tables = 2; + */ + public Builder removeTables(int index) { + if (tablesBuilder_ == null) { + ensureTablesIsMutable(); + tables_.remove(index); + onChanged(); + } else { + tablesBuilder_.remove(index); + } + return this; + } + /** + * repeated .hbase.pb.TableName tables = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder getTablesBuilder( + int index) { + return getTablesFieldBuilder().getBuilder(index); + } + /** + * repeated .hbase.pb.TableName tables = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTablesOrBuilder( + int index) { + if (tablesBuilder_ == null) { + return tables_.get(index); } else { + return tablesBuilder_.getMessageOrBuilder(index); + } + } + /** + * repeated .hbase.pb.TableName tables = 2; + */ + public java.util.List + getTablesOrBuilderList() { + if (tablesBuilder_ != null) { + return tablesBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(tables_); + } + } + /** + * repeated .hbase.pb.TableName tables = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder addTablesBuilder() { + return getTablesFieldBuilder().addBuilder( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance()); + } + /** + * repeated .hbase.pb.TableName tables = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder addTablesBuilder( + int index) { + return getTablesFieldBuilder().addBuilder( + index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance()); + } + /** + * repeated .hbase.pb.TableName tables = 2; + */ + public java.util.List + getTablesBuilderList() { + return getTablesFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> + getTablesFieldBuilder() { + if (tablesBuilder_ == null) { + tablesBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder>( + tables_, + ((bitField0_ & 0x00000002) == 0x00000002), + getParentForChildren(), + isClean()); + tables_ = null; + } + return tablesBuilder_; + } + + // required string target_root_dir = 3; + private java.lang.Object targetRootDir_ = ""; + /** + * required string target_root_dir = 3; + */ + public boolean hasTargetRootDir() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * required string target_root_dir = 3; + */ + public java.lang.String getTargetRootDir() { + java.lang.Object ref = targetRootDir_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + targetRootDir_ = s; + return s; + } else { 
+ return (java.lang.String) ref; + } + } + /** + * required string target_root_dir = 3; + */ + public com.google.protobuf.ByteString + getTargetRootDirBytes() { + java.lang.Object ref = targetRootDir_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + targetRootDir_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * required string target_root_dir = 3; + */ + public Builder setTargetRootDir( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000004; + targetRootDir_ = value; + onChanged(); + return this; + } + /** + * required string target_root_dir = 3; + */ + public Builder clearTargetRootDir() { + bitField0_ = (bitField0_ & ~0x00000004); + targetRootDir_ = getDefaultInstance().getTargetRootDir(); + onChanged(); + return this; + } + /** + * required string target_root_dir = 3; + */ + public Builder setTargetRootDirBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000004; + targetRootDir_ = value; + onChanged(); + return this; + } + + // optional int64 workers = 4; + private long workers_ ; + /** + * optional int64 workers = 4; + */ + public boolean hasWorkers() { + return ((bitField0_ & 0x00000008) == 0x00000008); + } + /** + * optional int64 workers = 4; + */ + public long getWorkers() { + return workers_; + } + /** + * optional int64 workers = 4; + */ + public Builder setWorkers(long value) { + bitField0_ |= 0x00000008; + workers_ = value; + onChanged(); + return this; + } + /** + * optional int64 workers = 4; + */ + public Builder clearWorkers() { + bitField0_ = (bitField0_ & ~0x00000008); + workers_ = 0L; + onChanged(); + return this; + } + + // optional int64 bandwidth = 5; + private long bandwidth_ ; + /** + * optional int64 bandwidth = 5; + */ + public boolean hasBandwidth() { + return ((bitField0_ & 0x00000010) == 0x00000010); + } + /** + * optional int64 bandwidth = 5; + */ + public long getBandwidth() { + return bandwidth_; + } + /** + * optional int64 bandwidth = 5; + */ + public Builder setBandwidth(long value) { + bitField0_ |= 0x00000010; + bandwidth_ = value; + onChanged(); + return this; + } + /** + * optional int64 bandwidth = 5; + */ + public Builder clearBandwidth() { + bitField0_ = (bitField0_ & ~0x00000010); + bandwidth_ = 0L; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:hbase.pb.BackupTablesRequest) + } + + static { + defaultInstance = new BackupTablesRequest(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:hbase.pb.BackupTablesRequest) + } + + public interface BackupTablesResponseOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // optional uint64 proc_id = 1; + /** + * optional uint64 proc_id = 1; + */ + boolean hasProcId(); + /** + * optional uint64 proc_id = 1; + */ + long getProcId(); + + // optional string backup_id = 2; + /** + * optional string backup_id = 2; + */ + boolean hasBackupId(); + /** + * optional string backup_id = 2; + */ + java.lang.String getBackupId(); + /** + * optional string backup_id = 2; + */ + com.google.protobuf.ByteString + getBackupIdBytes(); + } + /** + * Protobuf type {@code hbase.pb.BackupTablesResponse} + */ + public static final class BackupTablesResponse extends + com.google.protobuf.GeneratedMessage + implements BackupTablesResponseOrBuilder { + // Use 
BackupTablesResponse.newBuilder() to construct. + private BackupTablesResponse(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private BackupTablesResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final BackupTablesResponse defaultInstance; + public static BackupTablesResponse getDefaultInstance() { + return defaultInstance; + } + + public BackupTablesResponse getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private BackupTablesResponse( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 8: { + bitField0_ |= 0x00000001; + procId_ = input.readUInt64(); + break; + } + case 18: { + bitField0_ |= 0x00000002; + backupId_ = input.readBytes(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_BackupTablesResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_BackupTablesResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public BackupTablesResponse parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new BackupTablesResponse(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // optional uint64 proc_id = 1; + public static final int PROC_ID_FIELD_NUMBER = 1; + private long procId_; + /** + * optional uint64 proc_id = 1; + */ + public boolean hasProcId() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional uint64 proc_id = 1; + */ + public long getProcId() { + return procId_; + } + + // optional string backup_id = 2; + public static final int BACKUP_ID_FIELD_NUMBER = 2; + private 
java.lang.Object backupId_; + /** + * optional string backup_id = 2; + */ + public boolean hasBackupId() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * optional string backup_id = 2; + */ + public java.lang.String getBackupId() { + java.lang.Object ref = backupId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + backupId_ = s; + } + return s; + } + } + /** + * optional string backup_id = 2; + */ + public com.google.protobuf.ByteString + getBackupIdBytes() { + java.lang.Object ref = backupId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + backupId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private void initFields() { + procId_ = 0L; + backupId_ = ""; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeUInt64(1, procId_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeBytes(2, getBackupIdBytes()); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt64Size(1, procId_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(2, getBackupIdBytes()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse) obj; + + boolean result = true; + result = result && (hasProcId() == other.hasProcId()); + if (hasProcId()) { + result = result && (getProcId() + == other.getProcId()); + } + result = result && (hasBackupId() == other.hasBackupId()); + if (hasBackupId()) { + result = result && getBackupId() + .equals(other.getBackupId()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasProcId()) { + hash = (37 * hash) + PROC_ID_FIELD_NUMBER; + hash = (53 * hash) + hashLong(getProcId()); + } + if 
(hasBackupId()) { + hash = (37 * hash) + BACKUP_ID_FIELD_NUMBER; + hash = (53 * hash) + getBackupId().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code hbase.pb.BackupTablesResponse} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_BackupTablesResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_BackupTablesResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + procId_ = 0L; + bitField0_ = (bitField0_ & ~0x00000001); + backupId_ = ""; + bitField0_ = (bitField0_ & ~0x00000002); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_BackupTablesResponse_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse build() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.procId_ = procId_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.backupId_ = backupId_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse.getDefaultInstance()) return this; + if (other.hasProcId()) { + 
setProcId(other.getProcId()); + } + if (other.hasBackupId()) { + bitField0_ |= 0x00000002; + backupId_ = other.backupId_; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // optional uint64 proc_id = 1; + private long procId_ ; + /** + * optional uint64 proc_id = 1; + */ + public boolean hasProcId() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional uint64 proc_id = 1; + */ + public long getProcId() { + return procId_; + } + /** + * optional uint64 proc_id = 1; + */ + public Builder setProcId(long value) { + bitField0_ |= 0x00000001; + procId_ = value; + onChanged(); + return this; + } + /** + * optional uint64 proc_id = 1; + */ + public Builder clearProcId() { + bitField0_ = (bitField0_ & ~0x00000001); + procId_ = 0L; + onChanged(); + return this; + } + + // optional string backup_id = 2; + private java.lang.Object backupId_ = ""; + /** + * optional string backup_id = 2; + */ + public boolean hasBackupId() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * optional string backup_id = 2; + */ + public java.lang.String getBackupId() { + java.lang.Object ref = backupId_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + backupId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * optional string backup_id = 2; + */ + public com.google.protobuf.ByteString + getBackupIdBytes() { + java.lang.Object ref = backupId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + backupId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * optional string backup_id = 2; + */ + public Builder setBackupId( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000002; + backupId_ = value; + onChanged(); + return this; + } + /** + * optional string backup_id = 2; + */ + public Builder clearBackupId() { + bitField0_ = (bitField0_ & ~0x00000002); + backupId_ = getDefaultInstance().getBackupId(); + onChanged(); + return this; + } + /** + * optional string backup_id = 2; + */ + public Builder setBackupIdBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000002; + backupId_ = value; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:hbase.pb.BackupTablesResponse) + } + + static { + defaultInstance = new BackupTablesResponse(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:hbase.pb.BackupTablesResponse) + } + + public interface RestoreTablesRequestOrBuilder + extends 
com.google.protobuf.MessageOrBuilder { + + // required string backup_id = 1; + /** + * required string backup_id = 1; + */ + boolean hasBackupId(); + /** + * required string backup_id = 1; + */ + java.lang.String getBackupId(); + /** + * required string backup_id = 1; + */ + com.google.protobuf.ByteString + getBackupIdBytes(); + + // repeated .hbase.pb.TableName tables = 2; + /** + * repeated .hbase.pb.TableName tables = 2; + */ + java.util.List + getTablesList(); + /** + * repeated .hbase.pb.TableName tables = 2; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTables(int index); + /** + * repeated .hbase.pb.TableName tables = 2; + */ + int getTablesCount(); + /** + * repeated .hbase.pb.TableName tables = 2; + */ + java.util.List + getTablesOrBuilderList(); + /** + * repeated .hbase.pb.TableName tables = 2; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTablesOrBuilder( + int index); + + // repeated .hbase.pb.TableName target_tables = 3; + /** + * repeated .hbase.pb.TableName target_tables = 3; + */ + java.util.List + getTargetTablesList(); + /** + * repeated .hbase.pb.TableName target_tables = 3; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTargetTables(int index); + /** + * repeated .hbase.pb.TableName target_tables = 3; + */ + int getTargetTablesCount(); + /** + * repeated .hbase.pb.TableName target_tables = 3; + */ + java.util.List + getTargetTablesOrBuilderList(); + /** + * repeated .hbase.pb.TableName target_tables = 3; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTargetTablesOrBuilder( + int index); + + // required string backup_root_dir = 4; + /** + * required string backup_root_dir = 4; + */ + boolean hasBackupRootDir(); + /** + * required string backup_root_dir = 4; + */ + java.lang.String getBackupRootDir(); + /** + * required string backup_root_dir = 4; + */ + com.google.protobuf.ByteString + getBackupRootDirBytes(); + + // optional bool dependency_check_only = 5; + /** + * optional bool dependency_check_only = 5; + */ + boolean hasDependencyCheckOnly(); + /** + * optional bool dependency_check_only = 5; + */ + boolean getDependencyCheckOnly(); + + // optional bool overwrite = 6; + /** + * optional bool overwrite = 6; + */ + boolean hasOverwrite(); + /** + * optional bool overwrite = 6; + */ + boolean getOverwrite(); + } + /** + * Protobuf type {@code hbase.pb.RestoreTablesRequest} + */ + public static final class RestoreTablesRequest extends + com.google.protobuf.GeneratedMessage + implements RestoreTablesRequestOrBuilder { + // Use RestoreTablesRequest.newBuilder() to construct. 
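Side note on the generated API above: RestoreTablesRequestOrBuilder pins down the new message layout — required string backup_id = 1, repeated hbase.pb.TableName tables = 2, repeated hbase.pb.TableName target_tables = 3, required string backup_root_dir = 4, optional bool dependency_check_only = 5, optional bool overwrite = 6. A minimal sketch of building such a request through the generated builder follows; the backup id, path and table names are illustrative, and it assumes HBaseProtos.TableName keeps its usual bytes namespace/qualifier fields.

import com.google.protobuf.ByteString;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesRequest;

public class RestoreRequestSketch {
  public static RestoreTablesRequest buildRequest() {
    // Illustrative source/target table names; TableName carries bytes namespace + qualifier.
    HBaseProtos.TableName source = HBaseProtos.TableName.newBuilder()
        .setNamespace(ByteString.copyFromUtf8("default"))
        .setQualifier(ByteString.copyFromUtf8("t1"))
        .build();
    HBaseProtos.TableName target = HBaseProtos.TableName.newBuilder()
        .setNamespace(ByteString.copyFromUtf8("default"))
        .setQualifier(ByteString.copyFromUtf8("t1_restored"))
        .build();
    return RestoreTablesRequest.newBuilder()
        .setBackupId("backup_1473280605871")        // required, field 1 (illustrative id)
        .addTables(source)                          // repeated, field 2
        .addTargetTables(target)                    // repeated, field 3
        .setBackupRootDir("hdfs://nn:8020/backup")  // required, field 4 (illustrative path)
        .setDependencyCheckOnly(false)              // optional, field 5
        .setOverwrite(true)                         // optional, field 6
        .build();
  }
}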
+ private RestoreTablesRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private RestoreTablesRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final RestoreTablesRequest defaultInstance; + public static RestoreTablesRequest getDefaultInstance() { + return defaultInstance; + } + + public RestoreTablesRequest getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private RestoreTablesRequest( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + bitField0_ |= 0x00000001; + backupId_ = input.readBytes(); + break; + } + case 18: { + if (!((mutable_bitField0_ & 0x00000002) == 0x00000002)) { + tables_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000002; + } + tables_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.PARSER, extensionRegistry)); + break; + } + case 26: { + if (!((mutable_bitField0_ & 0x00000004) == 0x00000004)) { + targetTables_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000004; + } + targetTables_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.PARSER, extensionRegistry)); + break; + } + case 34: { + bitField0_ |= 0x00000002; + backupRootDir_ = input.readBytes(); + break; + } + case 40: { + bitField0_ |= 0x00000004; + dependencyCheckOnly_ = input.readBool(); + break; + } + case 48: { + bitField0_ |= 0x00000008; + overwrite_ = input.readBool(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000002) == 0x00000002)) { + tables_ = java.util.Collections.unmodifiableList(tables_); + } + if (((mutable_bitField0_ & 0x00000004) == 0x00000004)) { + targetTables_ = java.util.Collections.unmodifiableList(targetTables_); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_RestoreTablesRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_RestoreTablesRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesRequest.class, 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesRequest.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public RestoreTablesRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new RestoreTablesRequest(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required string backup_id = 1; + public static final int BACKUP_ID_FIELD_NUMBER = 1; + private java.lang.Object backupId_; + /** + * required string backup_id = 1; + */ + public boolean hasBackupId() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string backup_id = 1; + */ + public java.lang.String getBackupId() { + java.lang.Object ref = backupId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + backupId_ = s; + } + return s; + } + } + /** + * required string backup_id = 1; + */ + public com.google.protobuf.ByteString + getBackupIdBytes() { + java.lang.Object ref = backupId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + backupId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + // repeated .hbase.pb.TableName tables = 2; + public static final int TABLES_FIELD_NUMBER = 2; + private java.util.List tables_; + /** + * repeated .hbase.pb.TableName tables = 2; + */ + public java.util.List getTablesList() { + return tables_; + } + /** + * repeated .hbase.pb.TableName tables = 2; + */ + public java.util.List + getTablesOrBuilderList() { + return tables_; + } + /** + * repeated .hbase.pb.TableName tables = 2; + */ + public int getTablesCount() { + return tables_.size(); + } + /** + * repeated .hbase.pb.TableName tables = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTables(int index) { + return tables_.get(index); + } + /** + * repeated .hbase.pb.TableName tables = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTablesOrBuilder( + int index) { + return tables_.get(index); + } + + // repeated .hbase.pb.TableName target_tables = 3; + public static final int TARGET_TABLES_FIELD_NUMBER = 3; + private java.util.List targetTables_; /** - * required string target_root_dir = 3; + * repeated .hbase.pb.TableName target_tables = 3; */ - public boolean hasTargetRootDir() { + public java.util.List getTargetTablesList() { + return targetTables_; + } + /** + * repeated .hbase.pb.TableName target_tables = 3; + */ + public java.util.List + getTargetTablesOrBuilderList() { + return targetTables_; + } + /** + * repeated .hbase.pb.TableName target_tables = 3; + */ + public int getTargetTablesCount() { + return targetTables_.size(); + } + /** + * repeated .hbase.pb.TableName target_tables = 3; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTargetTables(int index) { + return targetTables_.get(index); + } + /** + * repeated .hbase.pb.TableName target_tables = 3; + */ + public 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTargetTablesOrBuilder( + int index) { + return targetTables_.get(index); + } + + // required string backup_root_dir = 4; + public static final int BACKUP_ROOT_DIR_FIELD_NUMBER = 4; + private java.lang.Object backupRootDir_; + /** + * required string backup_root_dir = 4; + */ + public boolean hasBackupRootDir() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** - * required string target_root_dir = 3; + * required string backup_root_dir = 4; */ - public java.lang.String getTargetRootDir() { - java.lang.Object ref = targetRootDir_; + public java.lang.String getBackupRootDir() { + java.lang.Object ref = backupRootDir_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { @@ -59489,77 +61452,78 @@ public final class MasterProtos { (com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { - targetRootDir_ = s; + backupRootDir_ = s; } return s; } } /** - * required string target_root_dir = 3; + * required string backup_root_dir = 4; */ public com.google.protobuf.ByteString - getTargetRootDirBytes() { - java.lang.Object ref = targetRootDir_; + getBackupRootDirBytes() { + java.lang.Object ref = backupRootDir_; if (ref instanceof java.lang.String) { com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); - targetRootDir_ = b; + backupRootDir_ = b; return b; } else { return (com.google.protobuf.ByteString) ref; } } - // optional int64 workers = 4; - public static final int WORKERS_FIELD_NUMBER = 4; - private long workers_; + // optional bool dependency_check_only = 5; + public static final int DEPENDENCY_CHECK_ONLY_FIELD_NUMBER = 5; + private boolean dependencyCheckOnly_; /** - * optional int64 workers = 4; + * optional bool dependency_check_only = 5; */ - public boolean hasWorkers() { + public boolean hasDependencyCheckOnly() { return ((bitField0_ & 0x00000004) == 0x00000004); } /** - * optional int64 workers = 4; + * optional bool dependency_check_only = 5; */ - public long getWorkers() { - return workers_; + public boolean getDependencyCheckOnly() { + return dependencyCheckOnly_; } - // optional int64 bandwidth = 5; - public static final int BANDWIDTH_FIELD_NUMBER = 5; - private long bandwidth_; + // optional bool overwrite = 6; + public static final int OVERWRITE_FIELD_NUMBER = 6; + private boolean overwrite_; /** - * optional int64 bandwidth = 5; + * optional bool overwrite = 6; */ - public boolean hasBandwidth() { + public boolean hasOverwrite() { return ((bitField0_ & 0x00000008) == 0x00000008); } /** - * optional int64 bandwidth = 5; + * optional bool overwrite = 6; */ - public long getBandwidth() { - return bandwidth_; + public boolean getOverwrite() { + return overwrite_; } private void initFields() { - type_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType.FULL; + backupId_ = ""; tables_ = java.util.Collections.emptyList(); - targetRootDir_ = ""; - workers_ = 0L; - bandwidth_ = 0L; + targetTables_ = java.util.Collections.emptyList(); + backupRootDir_ = ""; + dependencyCheckOnly_ = false; + overwrite_ = false; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; - if (!hasType()) { + if (!hasBackupId()) { memoizedIsInitialized = 0; return false; } - if (!hasTargetRootDir()) { + if (!hasBackupRootDir()) { memoizedIsInitialized = 0; return false; } 
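Because backup_id and backup_root_dir are declared required, the generated isInitialized()/build() pair shown here refuses incomplete requests before they ever reach the wire. A small sketch of that behavior, with illustrative values:

import com.google.protobuf.UninitializedMessageException;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesRequest;

public class RequiredFieldsSketch {
  public static void main(String[] args) {
    RestoreTablesRequest.Builder builder = RestoreTablesRequest.newBuilder()
        .setBackupId("backup_1473280605871");      // backup_root_dir deliberately left unset
    System.out.println(builder.isInitialized());   // false: required field 4 is missing
    try {
      builder.build();                             // generated build() rejects incomplete messages
    } catch (UninitializedMessageException expected) {
      System.out.println("rejected: " + expected.getMessage());
    }
    // buildPartial() would hand back the incomplete message without throwing.
    builder.setBackupRootDir("hdfs://nn:8020/backup");
    System.out.println(builder.isInitialized());   // true: both required fields present
  }
}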
@@ -59569,6 +61533,12 @@ public final class MasterProtos { return false; } } + for (int i = 0; i < getTargetTablesCount(); i++) { + if (!getTargetTables(i).isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } memoizedIsInitialized = 1; return true; } @@ -59577,19 +61547,22 @@ public final class MasterProtos { throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeEnum(1, type_.getNumber()); + output.writeBytes(1, getBackupIdBytes()); } for (int i = 0; i < tables_.size(); i++) { output.writeMessage(2, tables_.get(i)); } + for (int i = 0; i < targetTables_.size(); i++) { + output.writeMessage(3, targetTables_.get(i)); + } if (((bitField0_ & 0x00000002) == 0x00000002)) { - output.writeBytes(3, getTargetRootDirBytes()); + output.writeBytes(4, getBackupRootDirBytes()); } if (((bitField0_ & 0x00000004) == 0x00000004)) { - output.writeInt64(4, workers_); + output.writeBool(5, dependencyCheckOnly_); } if (((bitField0_ & 0x00000008) == 0x00000008)) { - output.writeInt64(5, bandwidth_); + output.writeBool(6, overwrite_); } getUnknownFields().writeTo(output); } @@ -59602,23 +61575,27 @@ public final class MasterProtos { size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream - .computeEnumSize(1, type_.getNumber()); + .computeBytesSize(1, getBackupIdBytes()); } for (int i = 0; i < tables_.size(); i++) { size += com.google.protobuf.CodedOutputStream .computeMessageSize(2, tables_.get(i)); } + for (int i = 0; i < targetTables_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(3, targetTables_.get(i)); + } if (((bitField0_ & 0x00000002) == 0x00000002)) { size += com.google.protobuf.CodedOutputStream - .computeBytesSize(3, getTargetRootDirBytes()); + .computeBytesSize(4, getBackupRootDirBytes()); } if (((bitField0_ & 0x00000004) == 0x00000004)) { size += com.google.protobuf.CodedOutputStream - .computeInt64Size(4, workers_); + .computeBoolSize(5, dependencyCheckOnly_); } if (((bitField0_ & 0x00000008) == 0x00000008)) { size += com.google.protobuf.CodedOutputStream - .computeInt64Size(5, bandwidth_); + .computeBoolSize(6, overwrite_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; @@ -59637,33 +61614,35 @@ public final class MasterProtos { if (obj == this) { return true; } - if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest)) { + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesRequest)) { return super.equals(obj); } - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest) obj; + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesRequest other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesRequest) obj; boolean result = true; - result = result && (hasType() == other.hasType()); - if (hasType()) { - result = result && - (getType() == other.getType()); + result = result && (hasBackupId() == other.hasBackupId()); + if (hasBackupId()) { + result = result && getBackupId() + .equals(other.getBackupId()); } result = result && getTablesList() .equals(other.getTablesList()); - result = result && (hasTargetRootDir() == other.hasTargetRootDir()); - if (hasTargetRootDir()) { - result = result && getTargetRootDir() - .equals(other.getTargetRootDir()); - } - result = result && (hasWorkers() == 
other.hasWorkers()); - if (hasWorkers()) { - result = result && (getWorkers() - == other.getWorkers()); - } - result = result && (hasBandwidth() == other.hasBandwidth()); - if (hasBandwidth()) { - result = result && (getBandwidth() - == other.getBandwidth()); + result = result && getTargetTablesList() + .equals(other.getTargetTablesList()); + result = result && (hasBackupRootDir() == other.hasBackupRootDir()); + if (hasBackupRootDir()) { + result = result && getBackupRootDir() + .equals(other.getBackupRootDir()); + } + result = result && (hasDependencyCheckOnly() == other.hasDependencyCheckOnly()); + if (hasDependencyCheckOnly()) { + result = result && (getDependencyCheckOnly() + == other.getDependencyCheckOnly()); + } + result = result && (hasOverwrite() == other.hasOverwrite()); + if (hasOverwrite()) { + result = result && (getOverwrite() + == other.getOverwrite()); } result = result && getUnknownFields().equals(other.getUnknownFields()); @@ -59678,78 +61657,82 @@ public final class MasterProtos { } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasType()) { - hash = (37 * hash) + TYPE_FIELD_NUMBER; - hash = (53 * hash) + hashEnum(getType()); + if (hasBackupId()) { + hash = (37 * hash) + BACKUP_ID_FIELD_NUMBER; + hash = (53 * hash) + getBackupId().hashCode(); } if (getTablesCount() > 0) { hash = (37 * hash) + TABLES_FIELD_NUMBER; hash = (53 * hash) + getTablesList().hashCode(); } - if (hasTargetRootDir()) { - hash = (37 * hash) + TARGET_ROOT_DIR_FIELD_NUMBER; - hash = (53 * hash) + getTargetRootDir().hashCode(); + if (getTargetTablesCount() > 0) { + hash = (37 * hash) + TARGET_TABLES_FIELD_NUMBER; + hash = (53 * hash) + getTargetTablesList().hashCode(); } - if (hasWorkers()) { - hash = (37 * hash) + WORKERS_FIELD_NUMBER; - hash = (53 * hash) + hashLong(getWorkers()); + if (hasBackupRootDir()) { + hash = (37 * hash) + BACKUP_ROOT_DIR_FIELD_NUMBER; + hash = (53 * hash) + getBackupRootDir().hashCode(); } - if (hasBandwidth()) { - hash = (37 * hash) + BANDWIDTH_FIELD_NUMBER; - hash = (53 * hash) + hashLong(getBandwidth()); + if (hasDependencyCheckOnly()) { + hash = (37 * hash) + DEPENDENCY_CHECK_ONLY_FIELD_NUMBER; + hash = (53 * hash) + hashBoolean(getDependencyCheckOnly()); + } + if (hasOverwrite()) { + hash = (37 * hash) + OVERWRITE_FIELD_NUMBER; + hash = (53 * hash) + hashBoolean(getOverwrite()); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesRequest parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesRequest parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest parseFrom(byte[] data) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesRequest parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public 
static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesRequest parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest parseFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesRequest parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesRequest parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest parseDelimitedFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesRequest parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest parseDelimitedFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesRequest parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesRequest parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesRequest parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { @@ -59758,7 +61741,7 @@ public final class MasterProtos { public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest prototype) { + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesRequest prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @@ -59770,24 +61753,24 @@ public final class MasterProtos { return builder; } /** - * Protobuf type {@code hbase.pb.BackupTablesRequest} + * Protobuf type {@code hbase.pb.RestoreTablesRequest} */ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequestOrBuilder { + implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesRequestOrBuilder { public static final 
com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_BackupTablesRequest_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_RestoreTablesRequest_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_BackupTablesRequest_fieldAccessorTable + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_RestoreTablesRequest_fieldAccessorTable .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest.Builder.class); + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesRequest.Builder.class); } - // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest.newBuilder() + // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesRequest.newBuilder() private Builder() { maybeForceBuilderInitialization(); } @@ -59800,6 +61783,7 @@ public final class MasterProtos { private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { getTablesFieldBuilder(); + getTargetTablesFieldBuilder(); } } private static Builder create() { @@ -59808,7 +61792,7 @@ public final class MasterProtos { public Builder clear() { super.clear(); - type_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType.FULL; + backupId_ = ""; bitField0_ = (bitField0_ & ~0x00000001); if (tablesBuilder_ == null) { tables_ = java.util.Collections.emptyList(); @@ -59816,12 +61800,18 @@ public final class MasterProtos { } else { tablesBuilder_.clear(); } - targetRootDir_ = ""; - bitField0_ = (bitField0_ & ~0x00000004); - workers_ = 0L; + if (targetTablesBuilder_ == null) { + targetTables_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000004); + } else { + targetTablesBuilder_.clear(); + } + backupRootDir_ = ""; bitField0_ = (bitField0_ & ~0x00000008); - bandwidth_ = 0L; + dependencyCheckOnly_ = false; bitField0_ = (bitField0_ & ~0x00000010); + overwrite_ = false; + bitField0_ = (bitField0_ & ~0x00000020); return this; } @@ -59831,29 +61821,29 @@ public final class MasterProtos { public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_BackupTablesRequest_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_RestoreTablesRequest_descriptor; } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest getDefaultInstanceForType() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest.getDefaultInstance(); + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesRequest getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesRequest.getDefaultInstance(); } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest build() { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest result = buildPartial(); + public 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesRequest build() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesRequest result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest buildPartial() { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest(this); + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesRequest buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesRequest result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesRequest(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } - result.type_ = type_; + result.backupId_ = backupId_; if (tablesBuilder_ == null) { if (((bitField0_ & 0x00000002) == 0x00000002)) { tables_ = java.util.Collections.unmodifiableList(tables_); @@ -59863,36 +61853,47 @@ public final class MasterProtos { } else { result.tables_ = tablesBuilder_.build(); } - if (((from_bitField0_ & 0x00000004) == 0x00000004)) { - to_bitField0_ |= 0x00000002; + if (targetTablesBuilder_ == null) { + if (((bitField0_ & 0x00000004) == 0x00000004)) { + targetTables_ = java.util.Collections.unmodifiableList(targetTables_); + bitField0_ = (bitField0_ & ~0x00000004); + } + result.targetTables_ = targetTables_; + } else { + result.targetTables_ = targetTablesBuilder_.build(); } - result.targetRootDir_ = targetRootDir_; if (((from_bitField0_ & 0x00000008) == 0x00000008)) { - to_bitField0_ |= 0x00000004; + to_bitField0_ |= 0x00000002; } - result.workers_ = workers_; + result.backupRootDir_ = backupRootDir_; if (((from_bitField0_ & 0x00000010) == 0x00000010)) { + to_bitField0_ |= 0x00000004; + } + result.dependencyCheckOnly_ = dependencyCheckOnly_; + if (((from_bitField0_ & 0x00000020) == 0x00000020)) { to_bitField0_ |= 0x00000008; } - result.bandwidth_ = bandwidth_; + result.overwrite_ = overwrite_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest) { - return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest)other); + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesRequest) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesRequest)other); } else { super.mergeFrom(other); return this; } } - public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest other) { - if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest.getDefaultInstance()) return this; - if (other.hasType()) { - setType(other.getType()); + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesRequest other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesRequest.getDefaultInstance()) return this; + if (other.hasBackupId()) { + bitField0_ |= 0x00000001; + backupId_ = other.backupId_; + onChanged(); } if (tablesBuilder_ == null) { if (!other.tables_.isEmpty()) { @@ -59920,27 +61921,53 @@ public final 
class MasterProtos { } } } - if (other.hasTargetRootDir()) { - bitField0_ |= 0x00000004; - targetRootDir_ = other.targetRootDir_; + if (targetTablesBuilder_ == null) { + if (!other.targetTables_.isEmpty()) { + if (targetTables_.isEmpty()) { + targetTables_ = other.targetTables_; + bitField0_ = (bitField0_ & ~0x00000004); + } else { + ensureTargetTablesIsMutable(); + targetTables_.addAll(other.targetTables_); + } + onChanged(); + } + } else { + if (!other.targetTables_.isEmpty()) { + if (targetTablesBuilder_.isEmpty()) { + targetTablesBuilder_.dispose(); + targetTablesBuilder_ = null; + targetTables_ = other.targetTables_; + bitField0_ = (bitField0_ & ~0x00000004); + targetTablesBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? + getTargetTablesFieldBuilder() : null; + } else { + targetTablesBuilder_.addAllMessages(other.targetTables_); + } + } + } + if (other.hasBackupRootDir()) { + bitField0_ |= 0x00000008; + backupRootDir_ = other.backupRootDir_; onChanged(); } - if (other.hasWorkers()) { - setWorkers(other.getWorkers()); + if (other.hasDependencyCheckOnly()) { + setDependencyCheckOnly(other.getDependencyCheckOnly()); } - if (other.hasBandwidth()) { - setBandwidth(other.getBandwidth()); + if (other.hasOverwrite()) { + setOverwrite(other.getOverwrite()); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { - if (!hasType()) { + if (!hasBackupId()) { return false; } - if (!hasTargetRootDir()) { + if (!hasBackupRootDir()) { return false; } @@ -59950,6 +61977,12 @@ public final class MasterProtos { return false; } } + for (int i = 0; i < getTargetTablesCount(); i++) { + if (!getTargetTables(i).isInitialized()) { + + return false; + } + } return true; } @@ -59957,11 +61990,11 @@ public final class MasterProtos { com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest parsedMessage = null; + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesRequest parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest) e.getUnfinishedMessage(); + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesRequest) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { @@ -59972,38 +62005,76 @@ public final class MasterProtos { } private int bitField0_; - // required .hbase.pb.BackupType type = 1; - private org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType type_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType.FULL; + // required string backup_id = 1; + private java.lang.Object backupId_ = ""; /** - * required .hbase.pb.BackupType type = 1; + * required string backup_id = 1; */ - public boolean hasType() { + public boolean hasBackupId() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** - * required .hbase.pb.BackupType type = 1; + * required string backup_id = 1; */ - public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType getType() { - return type_; + public java.lang.String getBackupId() { + java.lang.Object ref = backupId_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + 
backupId_ = s; + return s; + } else { + return (java.lang.String) ref; + } } /** - * required .hbase.pb.BackupType type = 1; + * required string backup_id = 1; */ - public Builder setType(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType value) { - if (value == null) { - throw new NullPointerException(); + public com.google.protobuf.ByteString + getBackupIdBytes() { + java.lang.Object ref = backupId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + backupId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; } - bitField0_ |= 0x00000001; - type_ = value; + } + /** + * required string backup_id = 1; + */ + public Builder setBackupId( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + backupId_ = value; onChanged(); return this; } /** - * required .hbase.pb.BackupType type = 1; + * required string backup_id = 1; */ - public Builder clearType() { + public Builder clearBackupId() { bitField0_ = (bitField0_ & ~0x00000001); - type_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType.FULL; + backupId_ = getDefaultInstance().getBackupId(); + onChanged(); + return this; + } + /** + * required string backup_id = 1; + */ + public Builder setBackupIdBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + backupId_ = value; onChanged(); return this; } @@ -60248,158 +62319,398 @@ public final class MasterProtos { return tablesBuilder_; } - // required string target_root_dir = 3; - private java.lang.Object targetRootDir_ = ""; + // repeated .hbase.pb.TableName target_tables = 3; + private java.util.List targetTables_ = + java.util.Collections.emptyList(); + private void ensureTargetTablesIsMutable() { + if (!((bitField0_ & 0x00000004) == 0x00000004)) { + targetTables_ = new java.util.ArrayList(targetTables_); + bitField0_ |= 0x00000004; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> targetTablesBuilder_; + /** - * required string target_root_dir = 3; + * repeated .hbase.pb.TableName target_tables = 3; */ - public boolean hasTargetRootDir() { - return ((bitField0_ & 0x00000004) == 0x00000004); + public java.util.List getTargetTablesList() { + if (targetTablesBuilder_ == null) { + return java.util.Collections.unmodifiableList(targetTables_); + } else { + return targetTablesBuilder_.getMessageList(); + } } /** - * required string target_root_dir = 3; + * repeated .hbase.pb.TableName target_tables = 3; */ - public java.lang.String getTargetRootDir() { - java.lang.Object ref = targetRootDir_; + public int getTargetTablesCount() { + if (targetTablesBuilder_ == null) { + return targetTables_.size(); + } else { + return targetTablesBuilder_.getCount(); + } + } + /** + * repeated .hbase.pb.TableName target_tables = 3; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTargetTables(int index) { + if (targetTablesBuilder_ == null) { + return targetTables_.get(index); + } else { + return targetTablesBuilder_.getMessage(index); + } + } + /** + * repeated .hbase.pb.TableName target_tables = 3; + */ + public Builder setTargetTables( + int index, 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { + if (targetTablesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureTargetTablesIsMutable(); + targetTables_.set(index, value); + onChanged(); + } else { + targetTablesBuilder_.setMessage(index, value); + } + return this; + } + /** + * repeated .hbase.pb.TableName target_tables = 3; + */ + public Builder setTargetTables( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) { + if (targetTablesBuilder_ == null) { + ensureTargetTablesIsMutable(); + targetTables_.set(index, builderForValue.build()); + onChanged(); + } else { + targetTablesBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.TableName target_tables = 3; + */ + public Builder addTargetTables(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { + if (targetTablesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureTargetTablesIsMutable(); + targetTables_.add(value); + onChanged(); + } else { + targetTablesBuilder_.addMessage(value); + } + return this; + } + /** + * repeated .hbase.pb.TableName target_tables = 3; + */ + public Builder addTargetTables( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { + if (targetTablesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureTargetTablesIsMutable(); + targetTables_.add(index, value); + onChanged(); + } else { + targetTablesBuilder_.addMessage(index, value); + } + return this; + } + /** + * repeated .hbase.pb.TableName target_tables = 3; + */ + public Builder addTargetTables( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) { + if (targetTablesBuilder_ == null) { + ensureTargetTablesIsMutable(); + targetTables_.add(builderForValue.build()); + onChanged(); + } else { + targetTablesBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.TableName target_tables = 3; + */ + public Builder addTargetTables( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) { + if (targetTablesBuilder_ == null) { + ensureTargetTablesIsMutable(); + targetTables_.add(index, builderForValue.build()); + onChanged(); + } else { + targetTablesBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.TableName target_tables = 3; + */ + public Builder addAllTargetTables( + java.lang.Iterable values) { + if (targetTablesBuilder_ == null) { + ensureTargetTablesIsMutable(); + super.addAll(values, targetTables_); + onChanged(); + } else { + targetTablesBuilder_.addAllMessages(values); + } + return this; + } + /** + * repeated .hbase.pb.TableName target_tables = 3; + */ + public Builder clearTargetTables() { + if (targetTablesBuilder_ == null) { + targetTables_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000004); + onChanged(); + } else { + targetTablesBuilder_.clear(); + } + return this; + } + /** + * repeated .hbase.pb.TableName target_tables = 3; + */ + public Builder removeTargetTables(int index) { + if (targetTablesBuilder_ == null) { + ensureTargetTablesIsMutable(); + targetTables_.remove(index); + onChanged(); + } else { + targetTablesBuilder_.remove(index); + } + return this; + } + /** + * repeated .hbase.pb.TableName target_tables = 3; + */ + public 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder getTargetTablesBuilder( + int index) { + return getTargetTablesFieldBuilder().getBuilder(index); + } + /** + * repeated .hbase.pb.TableName target_tables = 3; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTargetTablesOrBuilder( + int index) { + if (targetTablesBuilder_ == null) { + return targetTables_.get(index); } else { + return targetTablesBuilder_.getMessageOrBuilder(index); + } + } + /** + * repeated .hbase.pb.TableName target_tables = 3; + */ + public java.util.List + getTargetTablesOrBuilderList() { + if (targetTablesBuilder_ != null) { + return targetTablesBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(targetTables_); + } + } + /** + * repeated .hbase.pb.TableName target_tables = 3; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder addTargetTablesBuilder() { + return getTargetTablesFieldBuilder().addBuilder( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance()); + } + /** + * repeated .hbase.pb.TableName target_tables = 3; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder addTargetTablesBuilder( + int index) { + return getTargetTablesFieldBuilder().addBuilder( + index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance()); + } + /** + * repeated .hbase.pb.TableName target_tables = 3; + */ + public java.util.List + getTargetTablesBuilderList() { + return getTargetTablesFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> + getTargetTablesFieldBuilder() { + if (targetTablesBuilder_ == null) { + targetTablesBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder>( + targetTables_, + ((bitField0_ & 0x00000004) == 0x00000004), + getParentForChildren(), + isClean()); + targetTables_ = null; + } + return targetTablesBuilder_; + } + + // required string backup_root_dir = 4; + private java.lang.Object backupRootDir_ = ""; + /** + * required string backup_root_dir = 4; + */ + public boolean hasBackupRootDir() { + return ((bitField0_ & 0x00000008) == 0x00000008); + } + /** + * required string backup_root_dir = 4; + */ + public java.lang.String getBackupRootDir() { + java.lang.Object ref = backupRootDir_; if (!(ref instanceof java.lang.String)) { java.lang.String s = ((com.google.protobuf.ByteString) ref) .toStringUtf8(); - targetRootDir_ = s; + backupRootDir_ = s; return s; } else { return (java.lang.String) ref; } } /** - * required string target_root_dir = 3; + * required string backup_root_dir = 4; */ public com.google.protobuf.ByteString - getTargetRootDirBytes() { - java.lang.Object ref = targetRootDir_; + getBackupRootDirBytes() { + java.lang.Object ref = backupRootDir_; if (ref instanceof String) { com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); - targetRootDir_ = b; + backupRootDir_ = b; return b; } else { return (com.google.protobuf.ByteString) ref; } } /** - * 
required string target_root_dir = 3; + * required string backup_root_dir = 4; */ - public Builder setTargetRootDir( + public Builder setBackupRootDir( java.lang.String value) { if (value == null) { throw new NullPointerException(); } - bitField0_ |= 0x00000004; - targetRootDir_ = value; + bitField0_ |= 0x00000008; + backupRootDir_ = value; onChanged(); return this; } /** - * required string target_root_dir = 3; + * required string backup_root_dir = 4; */ - public Builder clearTargetRootDir() { - bitField0_ = (bitField0_ & ~0x00000004); - targetRootDir_ = getDefaultInstance().getTargetRootDir(); + public Builder clearBackupRootDir() { + bitField0_ = (bitField0_ & ~0x00000008); + backupRootDir_ = getDefaultInstance().getBackupRootDir(); onChanged(); return this; } /** - * required string target_root_dir = 3; + * required string backup_root_dir = 4; */ - public Builder setTargetRootDirBytes( + public Builder setBackupRootDirBytes( com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } - bitField0_ |= 0x00000004; - targetRootDir_ = value; + bitField0_ |= 0x00000008; + backupRootDir_ = value; onChanged(); return this; } - // optional int64 workers = 4; - private long workers_ ; + // optional bool dependency_check_only = 5; + private boolean dependencyCheckOnly_ ; /** - * optional int64 workers = 4; + * optional bool dependency_check_only = 5; */ - public boolean hasWorkers() { - return ((bitField0_ & 0x00000008) == 0x00000008); + public boolean hasDependencyCheckOnly() { + return ((bitField0_ & 0x00000010) == 0x00000010); } /** - * optional int64 workers = 4; + * optional bool dependency_check_only = 5; */ - public long getWorkers() { - return workers_; + public boolean getDependencyCheckOnly() { + return dependencyCheckOnly_; } /** - * optional int64 workers = 4; + * optional bool dependency_check_only = 5; */ - public Builder setWorkers(long value) { - bitField0_ |= 0x00000008; - workers_ = value; + public Builder setDependencyCheckOnly(boolean value) { + bitField0_ |= 0x00000010; + dependencyCheckOnly_ = value; onChanged(); return this; } /** - * optional int64 workers = 4; + * optional bool dependency_check_only = 5; */ - public Builder clearWorkers() { - bitField0_ = (bitField0_ & ~0x00000008); - workers_ = 0L; + public Builder clearDependencyCheckOnly() { + bitField0_ = (bitField0_ & ~0x00000010); + dependencyCheckOnly_ = false; onChanged(); return this; } - // optional int64 bandwidth = 5; - private long bandwidth_ ; + // optional bool overwrite = 6; + private boolean overwrite_ ; /** - * optional int64 bandwidth = 5; + * optional bool overwrite = 6; */ - public boolean hasBandwidth() { - return ((bitField0_ & 0x00000010) == 0x00000010); + public boolean hasOverwrite() { + return ((bitField0_ & 0x00000020) == 0x00000020); } /** - * optional int64 bandwidth = 5; + * optional bool overwrite = 6; */ - public long getBandwidth() { - return bandwidth_; + public boolean getOverwrite() { + return overwrite_; } /** - * optional int64 bandwidth = 5; + * optional bool overwrite = 6; */ - public Builder setBandwidth(long value) { - bitField0_ |= 0x00000010; - bandwidth_ = value; + public Builder setOverwrite(boolean value) { + bitField0_ |= 0x00000020; + overwrite_ = value; onChanged(); return this; } /** - * optional int64 bandwidth = 5; + * optional bool overwrite = 6; */ - public Builder clearBandwidth() { - bitField0_ = (bitField0_ & ~0x00000010); - bandwidth_ = 0L; + public Builder clearOverwrite() { + bitField0_ = (bitField0_ & ~0x00000020); + 
overwrite_ = false; onChanged(); return this; } - // @@protoc_insertion_point(builder_scope:hbase.pb.BackupTablesRequest) + // @@protoc_insertion_point(builder_scope:hbase.pb.RestoreTablesRequest) } static { - defaultInstance = new BackupTablesRequest(true); + defaultInstance = new RestoreTablesRequest(true); defaultInstance.initFields(); } - // @@protoc_insertion_point(class_scope:hbase.pb.BackupTablesRequest) + // @@protoc_insertion_point(class_scope:hbase.pb.RestoreTablesRequest) } - public interface BackupTablesResponseOrBuilder + public interface RestoreTablesResponseOrBuilder extends com.google.protobuf.MessageOrBuilder { // optional uint64 proc_id = 1; @@ -60411,41 +62722,26 @@ public final class MasterProtos { * optional uint64 proc_id = 1; */ long getProcId(); - - // optional string backup_id = 2; - /** - * optional string backup_id = 2; - */ - boolean hasBackupId(); - /** - * optional string backup_id = 2; - */ - java.lang.String getBackupId(); - /** - * optional string backup_id = 2; - */ - com.google.protobuf.ByteString - getBackupIdBytes(); } /** - * Protobuf type {@code hbase.pb.BackupTablesResponse} + * Protobuf type {@code hbase.pb.RestoreTablesResponse} */ - public static final class BackupTablesResponse extends + public static final class RestoreTablesResponse extends com.google.protobuf.GeneratedMessage - implements BackupTablesResponseOrBuilder { - // Use BackupTablesResponse.newBuilder() to construct. - private BackupTablesResponse(com.google.protobuf.GeneratedMessage.Builder builder) { + implements RestoreTablesResponseOrBuilder { + // Use RestoreTablesResponse.newBuilder() to construct. + private RestoreTablesResponse(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } - private BackupTablesResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + private RestoreTablesResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } - private static final BackupTablesResponse defaultInstance; - public static BackupTablesResponse getDefaultInstance() { + private static final RestoreTablesResponse defaultInstance; + public static RestoreTablesResponse getDefaultInstance() { return defaultInstance; } - public BackupTablesResponse getDefaultInstanceForType() { + public RestoreTablesResponse getDefaultInstanceForType() { return defaultInstance; } @@ -60455,7 +62751,7 @@ public final class MasterProtos { getUnknownFields() { return this.unknownFields; } - private BackupTablesResponse( + private RestoreTablesResponse( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { @@ -60483,11 +62779,6 @@ public final class MasterProtos { procId_ = input.readUInt64(); break; } - case 18: { - bitField0_ |= 0x00000002; - backupId_ = input.readBytes(); - break; - } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { @@ -60502,28 +62793,28 @@ public final class MasterProtos { } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_BackupTablesResponse_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_RestoreTablesResponse_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { 
- return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_BackupTablesResponse_fieldAccessorTable + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_RestoreTablesResponse_fieldAccessorTable .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse.Builder.class); + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesResponse.Builder.class); } - public static com.google.protobuf.Parser PARSER = - new com.google.protobuf.AbstractParser() { - public BackupTablesResponse parsePartialFrom( + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public RestoreTablesResponse parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { - return new BackupTablesResponse(input, extensionRegistry); + return new RestoreTablesResponse(input, extensionRegistry); } }; @java.lang.Override - public com.google.protobuf.Parser getParserForType() { + public com.google.protobuf.Parser getParserForType() { return PARSER; } @@ -60544,52 +62835,8 @@ public final class MasterProtos { return procId_; } - // optional string backup_id = 2; - public static final int BACKUP_ID_FIELD_NUMBER = 2; - private java.lang.Object backupId_; - /** - * optional string backup_id = 2; - */ - public boolean hasBackupId() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - /** - * optional string backup_id = 2; - */ - public java.lang.String getBackupId() { - java.lang.Object ref = backupId_; - if (ref instanceof java.lang.String) { - return (java.lang.String) ref; - } else { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - if (bs.isValidUtf8()) { - backupId_ = s; - } - return s; - } - } - /** - * optional string backup_id = 2; - */ - public com.google.protobuf.ByteString - getBackupIdBytes() { - java.lang.Object ref = backupId_; - if (ref instanceof java.lang.String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - backupId_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - private void initFields() { procId_ = 0L; - backupId_ = ""; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { @@ -60606,9 +62853,6 @@ public final class MasterProtos { if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeUInt64(1, procId_); } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - output.writeBytes(2, getBackupIdBytes()); - } getUnknownFields().writeTo(output); } @@ -60622,10 +62866,6 @@ public final class MasterProtos { size += com.google.protobuf.CodedOutputStream .computeUInt64Size(1, procId_); } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(2, getBackupIdBytes()); - } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; @@ -60643,10 +62883,10 @@ public final class MasterProtos { if (obj == this) { return true; } - if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse)) { + if (!(obj instanceof 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesResponse)) { return super.equals(obj); } - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse) obj; + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesResponse other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesResponse) obj; boolean result = true; result = result && (hasProcId() == other.hasProcId()); @@ -60654,11 +62894,6 @@ public final class MasterProtos { result = result && (getProcId() == other.getProcId()); } - result = result && (hasBackupId() == other.hasBackupId()); - if (hasBackupId()) { - result = result && getBackupId() - .equals(other.getBackupId()); - } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; @@ -60676,62 +62911,58 @@ public final class MasterProtos { hash = (37 * hash) + PROC_ID_FIELD_NUMBER; hash = (53 * hash) + hashLong(getProcId()); } - if (hasBackupId()) { - hash = (37 * hash) + BACKUP_ID_FIELD_NUMBER; - hash = (53 * hash) + getBackupId().hashCode(); - } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesResponse parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesResponse parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse parseFrom(byte[] data) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesResponse parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesResponse parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse parseFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesResponse parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesResponse parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse 
parseDelimitedFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesResponse parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse parseDelimitedFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesResponse parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesResponse parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesResponse parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { @@ -60740,7 +62971,7 @@ public final class MasterProtos { public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse prototype) { + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesResponse prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @@ -60752,24 +62983,24 @@ public final class MasterProtos { return builder; } /** - * Protobuf type {@code hbase.pb.BackupTablesResponse} + * Protobuf type {@code hbase.pb.RestoreTablesResponse} */ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponseOrBuilder { + implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesResponseOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_BackupTablesResponse_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_RestoreTablesResponse_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_BackupTablesResponse_fieldAccessorTable + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_RestoreTablesResponse_fieldAccessorTable .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse.Builder.class); + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesResponse.Builder.class); } - // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse.newBuilder() + // 
Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesResponse.newBuilder() private Builder() { maybeForceBuilderInitialization(); } @@ -60791,8 +63022,6 @@ public final class MasterProtos { super.clear(); procId_ = 0L; bitField0_ = (bitField0_ & ~0x00000001); - backupId_ = ""; - bitField0_ = (bitField0_ & ~0x00000002); return this; } @@ -60802,57 +63031,48 @@ public final class MasterProtos { public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_BackupTablesResponse_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_RestoreTablesResponse_descriptor; } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse getDefaultInstanceForType() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse.getDefaultInstance(); + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesResponse getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesResponse.getDefaultInstance(); } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse build() { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse result = buildPartial(); + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesResponse build() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesResponse result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse buildPartial() { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse(this); + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesResponse buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesResponse result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesResponse(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } result.procId_ = procId_; - if (((from_bitField0_ & 0x00000002) == 0x00000002)) { - to_bitField0_ |= 0x00000002; - } - result.backupId_ = backupId_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse) { - return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse)other); + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesResponse) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesResponse)other); } else { super.mergeFrom(other); return this; } } - public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse other) { - if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse.getDefaultInstance()) return this; + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesResponse other) { + if (other == 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesResponse.getDefaultInstance()) return this; if (other.hasProcId()) { setProcId(other.getProcId()); } - if (other.hasBackupId()) { - bitField0_ |= 0x00000002; - backupId_ = other.backupId_; - onChanged(); - } this.mergeUnknownFields(other.getUnknownFields()); return this; } @@ -60865,11 +63085,11 @@ public final class MasterProtos { com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse parsedMessage = null; + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesResponse parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse) e.getUnfinishedMessage(); + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesResponse) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { @@ -60913,89 +63133,15 @@ public final class MasterProtos { return this; } - // optional string backup_id = 2; - private java.lang.Object backupId_ = ""; - /** - * optional string backup_id = 2; - */ - public boolean hasBackupId() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - /** - * optional string backup_id = 2; - */ - public java.lang.String getBackupId() { - java.lang.Object ref = backupId_; - if (!(ref instanceof java.lang.String)) { - java.lang.String s = ((com.google.protobuf.ByteString) ref) - .toStringUtf8(); - backupId_ = s; - return s; - } else { - return (java.lang.String) ref; - } - } - /** - * optional string backup_id = 2; - */ - public com.google.protobuf.ByteString - getBackupIdBytes() { - java.lang.Object ref = backupId_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - backupId_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - /** - * optional string backup_id = 2; - */ - public Builder setBackupId( - java.lang.String value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000002; - backupId_ = value; - onChanged(); - return this; - } - /** - * optional string backup_id = 2; - */ - public Builder clearBackupId() { - bitField0_ = (bitField0_ & ~0x00000002); - backupId_ = getDefaultInstance().getBackupId(); - onChanged(); - return this; - } - /** - * optional string backup_id = 2; - */ - public Builder setBackupIdBytes( - com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000002; - backupId_ = value; - onChanged(); - return this; - } - - // @@protoc_insertion_point(builder_scope:hbase.pb.BackupTablesResponse) + // @@protoc_insertion_point(builder_scope:hbase.pb.RestoreTablesResponse) } static { - defaultInstance = new BackupTablesResponse(true); + defaultInstance = new RestoreTablesResponse(true); defaultInstance.initFields(); } - // @@protoc_insertion_point(class_scope:hbase.pb.BackupTablesResponse) + // @@protoc_insertion_point(class_scope:hbase.pb.RestoreTablesResponse) } /** @@ -61731,6 +63877,18 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest request, com.google.protobuf.RpcCallback done); + /** + * rpc 
restoreTables(.hbase.pb.RestoreTablesRequest) returns (.hbase.pb.RestoreTablesResponse); + * + * <pre>
+       ** restore table set
+       * </pre>
+ */ + public abstract void restoreTables( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesRequest request, + com.google.protobuf.RpcCallback done); + } public static com.google.protobuf.Service newReflectiveService( @@ -62200,6 +64358,14 @@ public final class MasterProtos { impl.backupTables(controller, request, done); } + @java.lang.Override + public void restoreTables( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesRequest request, + com.google.protobuf.RpcCallback done) { + impl.restoreTables(controller, request, done); + } + }; } @@ -62338,6 +64504,8 @@ public final class MasterProtos { return impl.listProcedures(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest)request); case 57: return impl.backupTables(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest)request); + case 58: + return impl.restoreTables(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesRequest)request); default: throw new java.lang.AssertionError("Can't get here."); } @@ -62468,6 +64636,8 @@ public final class MasterProtos { return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest.getDefaultInstance(); case 57: return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest.getDefaultInstance(); + case 58: + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesRequest.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); } @@ -62598,6 +64768,8 @@ public final class MasterProtos { return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse.getDefaultInstance(); case 57: return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse.getDefaultInstance(); + case 58: + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesResponse.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); } @@ -63331,6 +65503,18 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest request, com.google.protobuf.RpcCallback done); + /** + * rpc restoreTables(.hbase.pb.RestoreTablesRequest) returns (.hbase.pb.RestoreTablesResponse); + * + *
+     ** restore table set
+     * </pre>
+ */ + public abstract void restoreTables( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesRequest request, + com.google.protobuf.RpcCallback done); + public static final com.google.protobuf.Descriptors.ServiceDescriptor getDescriptor() { @@ -63643,6 +65827,11 @@ public final class MasterProtos { com.google.protobuf.RpcUtil.specializeCallback( done)); return; + case 58: + this.restoreTables(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesRequest)request, + com.google.protobuf.RpcUtil.specializeCallback( + done)); + return; default: throw new java.lang.AssertionError("Can't get here."); } @@ -63773,6 +65962,8 @@ public final class MasterProtos { return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest.getDefaultInstance(); case 57: return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest.getDefaultInstance(); + case 58: + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesRequest.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); } @@ -63903,6 +66094,8 @@ public final class MasterProtos { return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse.getDefaultInstance(); case 57: return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse.getDefaultInstance(); + case 58: + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesResponse.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); } @@ -64793,6 +66986,21 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse.getDefaultInstance())); } + + public void restoreTables( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesRequest request, + com.google.protobuf.RpcCallback done) { + channel.callMethod( + getDescriptor().getMethods().get(58), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesResponse.getDefaultInstance(), + com.google.protobuf.RpcUtil.generalizeCallback( + done, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesResponse.class, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesResponse.getDefaultInstance())); + } } public static BlockingInterface newBlockingStub( @@ -65090,6 +67298,11 @@ public final class MasterProtos { com.google.protobuf.RpcController controller, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest request) throws com.google.protobuf.ServiceException; + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesResponse restoreTables( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesRequest request) + throws com.google.protobuf.ServiceException; } private static final class BlockingStub implements BlockingInterface { @@ -65794,6 +68007,18 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse.getDefaultInstance()); } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesResponse restoreTables( + com.google.protobuf.RpcController controller, + 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesRequest request) + throws com.google.protobuf.ServiceException { + return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesResponse) channel.callBlockingMethod( + getDescriptor().getMethods().get(58), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesResponse.getDefaultInstance()); + } + } // @@protoc_insertion_point(class_scope:hbase.pb.MasterService) @@ -66354,6 +68579,16 @@ public final class MasterProtos { private static com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hbase_pb_BackupTablesResponse_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_hbase_pb_RestoreTablesRequest_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_hbase_pb_RestoreTablesRequest_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_hbase_pb_RestoreTablesResponse_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_hbase_pb_RestoreTablesResponse_fieldAccessorTable; public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { @@ -66560,142 +68795,151 @@ public final class MasterProtos { "bleName\022\027\n\017target_root_dir\030\003 \002(\t\022\017\n\007work" + "ers\030\004 \001(\003\022\021\n\tbandwidth\030\005 \001(\003\":\n\024BackupTa" + "blesResponse\022\017\n\007proc_id\030\001 \001(\004\022\021\n\tbackup_" + - "id\030\002 \001(\t*(\n\020MasterSwitchType\022\t\n\005SPLIT\020\000\022" + - "\t\n\005MERGE\020\0012\242)\n\rMasterService\022e\n\024GetSchem" + - "aAlterStatus\022%.hbase.pb.GetSchemaAlterSt", - "atusRequest\032&.hbase.pb.GetSchemaAlterSta" + - "tusResponse\022b\n\023GetTableDescriptors\022$.hba" + - "se.pb.GetTableDescriptorsRequest\032%.hbase" + - ".pb.GetTableDescriptorsResponse\022P\n\rGetTa" + - "bleNames\022\036.hbase.pb.GetTableNamesRequest" + - "\032\037.hbase.pb.GetTableNamesResponse\022Y\n\020Get" + - "ClusterStatus\022!.hbase.pb.GetClusterStatu" + - "sRequest\032\".hbase.pb.GetClusterStatusResp" + - "onse\022V\n\017IsMasterRunning\022 .hbase.pb.IsMas" + - "terRunningRequest\032!.hbase.pb.IsMasterRun", - "ningResponse\022D\n\tAddColumn\022\032.hbase.pb.Add" + - "ColumnRequest\032\033.hbase.pb.AddColumnRespon" + - "se\022M\n\014DeleteColumn\022\035.hbase.pb.DeleteColu" + - "mnRequest\032\036.hbase.pb.DeleteColumnRespons" + - "e\022M\n\014ModifyColumn\022\035.hbase.pb.ModifyColum" + - "nRequest\032\036.hbase.pb.ModifyColumnResponse" + - "\022G\n\nMoveRegion\022\033.hbase.pb.MoveRegionRequ" + - "est\032\034.hbase.pb.MoveRegionResponse\022k\n\026Dis" + - "patchMergingRegions\022\'.hbase.pb.DispatchM" + - "ergingRegionsRequest\032(.hbase.pb.Dispatch", - "MergingRegionsResponse\022M\n\014AssignRegion\022\035" + - ".hbase.pb.AssignRegionRequest\032\036.hbase.pb" + - ".AssignRegionResponse\022S\n\016UnassignRegion\022" + - "\037.hbase.pb.UnassignRegionRequest\032 .hbase" + - ".pb.UnassignRegionResponse\022P\n\rOfflineReg" + - "ion\022\036.hbase.pb.OfflineRegionRequest\032\037.hb" + - "ase.pb.OfflineRegionResponse\022J\n\013DeleteTa" + - "ble\022\034.hbase.pb.DeleteTableRequest\032\035.hbas" + - "e.pb.DeleteTableResponse\022P\n\rtruncateTabl" + - "e\022\036.hbase.pb.TruncateTableRequest\032\037.hbas", - "e.pb.TruncateTableResponse\022J\n\013EnableTabl" + - "e\022\034.hbase.pb.EnableTableRequest\032\035.hbase." 
+ - "pb.EnableTableResponse\022M\n\014DisableTable\022\035" + - ".hbase.pb.DisableTableRequest\032\036.hbase.pb" + - ".DisableTableResponse\022J\n\013ModifyTable\022\034.h" + - "base.pb.ModifyTableRequest\032\035.hbase.pb.Mo" + - "difyTableResponse\022J\n\013CreateTable\022\034.hbase" + - ".pb.CreateTableRequest\032\035.hbase.pb.Create" + - "TableResponse\022A\n\010Shutdown\022\031.hbase.pb.Shu" + - "tdownRequest\032\032.hbase.pb.ShutdownResponse", - "\022G\n\nStopMaster\022\033.hbase.pb.StopMasterRequ" + - "est\032\034.hbase.pb.StopMasterResponse\022>\n\007Bal" + - "ance\022\030.hbase.pb.BalanceRequest\032\031.hbase.p" + - "b.BalanceResponse\022_\n\022SetBalancerRunning\022" + - "#.hbase.pb.SetBalancerRunningRequest\032$.h" + - "base.pb.SetBalancerRunningResponse\022\\\n\021Is" + - "BalancerEnabled\022\".hbase.pb.IsBalancerEna" + - "bledRequest\032#.hbase.pb.IsBalancerEnabled" + - "Response\022k\n\026SetSplitOrMergeEnabled\022\'.hba" + - "se.pb.SetSplitOrMergeEnabledRequest\032(.hb", - "ase.pb.SetSplitOrMergeEnabledResponse\022h\n" + - "\025IsSplitOrMergeEnabled\022&.hbase.pb.IsSpli" + - "tOrMergeEnabledRequest\032\'.hbase.pb.IsSpli" + - "tOrMergeEnabledResponse\022D\n\tNormalize\022\032.h" + - "base.pb.NormalizeRequest\032\033.hbase.pb.Norm" + - "alizeResponse\022e\n\024SetNormalizerRunning\022%." + - "hbase.pb.SetNormalizerRunningRequest\032&.h" + - "base.pb.SetNormalizerRunningResponse\022b\n\023" + - "IsNormalizerEnabled\022$.hbase.pb.IsNormali" + - "zerEnabledRequest\032%.hbase.pb.IsNormalize", - "rEnabledResponse\022S\n\016RunCatalogScan\022\037.hba" + - "se.pb.RunCatalogScanRequest\032 .hbase.pb.R" + - "unCatalogScanResponse\022e\n\024EnableCatalogJa" + - "nitor\022%.hbase.pb.EnableCatalogJanitorReq" + - "uest\032&.hbase.pb.EnableCatalogJanitorResp" + - "onse\022n\n\027IsCatalogJanitorEnabled\022(.hbase." + - "pb.IsCatalogJanitorEnabledRequest\032).hbas" + - "e.pb.IsCatalogJanitorEnabledResponse\022^\n\021" + - "ExecMasterService\022#.hbase.pb.Coprocessor" + - "ServiceRequest\032$.hbase.pb.CoprocessorSer", - "viceResponse\022A\n\010Snapshot\022\031.hbase.pb.Snap" + - "shotRequest\032\032.hbase.pb.SnapshotResponse\022" + - "h\n\025GetCompletedSnapshots\022&.hbase.pb.GetC" + - "ompletedSnapshotsRequest\032\'.hbase.pb.GetC" + - "ompletedSnapshotsResponse\022S\n\016DeleteSnaps" + - "hot\022\037.hbase.pb.DeleteSnapshotRequest\032 .h" + - "base.pb.DeleteSnapshotResponse\022S\n\016IsSnap" + - "shotDone\022\037.hbase.pb.IsSnapshotDoneReques" + - "t\032 .hbase.pb.IsSnapshotDoneResponse\022V\n\017R" + - "estoreSnapshot\022 .hbase.pb.RestoreSnapsho", - "tRequest\032!.hbase.pb.RestoreSnapshotRespo" + - "nse\022h\n\025IsRestoreSnapshotDone\022&.hbase.pb." + - "IsRestoreSnapshotDoneRequest\032\'.hbase.pb." + - "IsRestoreSnapshotDoneResponse\022P\n\rExecPro" + - "cedure\022\036.hbase.pb.ExecProcedureRequest\032\037" + - ".hbase.pb.ExecProcedureResponse\022W\n\024ExecP" + - "rocedureWithRet\022\036.hbase.pb.ExecProcedure" + - "Request\032\037.hbase.pb.ExecProcedureResponse" + - "\022V\n\017IsProcedureDone\022 .hbase.pb.IsProcedu" + - "reDoneRequest\032!.hbase.pb.IsProcedureDone", - "Response\022V\n\017ModifyNamespace\022 .hbase.pb.M" + - "odifyNamespaceRequest\032!.hbase.pb.ModifyN" + - "amespaceResponse\022V\n\017CreateNamespace\022 .hb" + - "ase.pb.CreateNamespaceRequest\032!.hbase.pb" + - ".CreateNamespaceResponse\022V\n\017DeleteNamesp" + - "ace\022 .hbase.pb.DeleteNamespaceRequest\032!." 
+ - "hbase.pb.DeleteNamespaceResponse\022k\n\026GetN" + - "amespaceDescriptor\022\'.hbase.pb.GetNamespa" + - "ceDescriptorRequest\032(.hbase.pb.GetNamesp" + - "aceDescriptorResponse\022q\n\030ListNamespaceDe", - "scriptors\022).hbase.pb.ListNamespaceDescri" + - "ptorsRequest\032*.hbase.pb.ListNamespaceDes" + - "criptorsResponse\022\206\001\n\037ListTableDescriptor" + - "sByNamespace\0220.hbase.pb.ListTableDescrip" + - "torsByNamespaceRequest\0321.hbase.pb.ListTa" + - "bleDescriptorsByNamespaceResponse\022t\n\031Lis" + - "tTableNamesByNamespace\022*.hbase.pb.ListTa" + - "bleNamesByNamespaceRequest\032+.hbase.pb.Li" + - "stTableNamesByNamespaceResponse\022P\n\rGetTa" + - "bleState\022\036.hbase.pb.GetTableStateRequest", - "\032\037.hbase.pb.GetTableStateResponse\022A\n\010Set" + - "Quota\022\031.hbase.pb.SetQuotaRequest\032\032.hbase" + - ".pb.SetQuotaResponse\022x\n\037getLastMajorComp" + - "actionTimestamp\022).hbase.pb.MajorCompacti" + - "onTimestampRequest\032*.hbase.pb.MajorCompa" + - "ctionTimestampResponse\022\212\001\n(getLastMajorC" + - "ompactionTimestampForRegion\0222.hbase.pb.M" + - "ajorCompactionTimestampForRegionRequest\032" + - "*.hbase.pb.MajorCompactionTimestampRespo" + - "nse\022_\n\022getProcedureResult\022#.hbase.pb.Get", - "ProcedureResultRequest\032$.hbase.pb.GetPro" + - "cedureResultResponse\022h\n\027getSecurityCapab" + - "ilities\022%.hbase.pb.SecurityCapabilitiesR" + - "equest\032&.hbase.pb.SecurityCapabilitiesRe" + - "sponse\022S\n\016AbortProcedure\022\037.hbase.pb.Abor" + - "tProcedureRequest\032 .hbase.pb.AbortProced" + - "ureResponse\022S\n\016ListProcedures\022\037.hbase.pb" + - ".ListProceduresRequest\032 .hbase.pb.ListPr" + - "oceduresResponse\022M\n\014backupTables\022\035.hbase" + - ".pb.BackupTablesRequest\032\036.hbase.pb.Backu", - "pTablesResponseBB\n*org.apache.hadoop.hba" + - "se.protobuf.generatedB\014MasterProtosH\001\210\001\001" + - "\240\001\001" + "id\030\002 \001(\t\"\305\001\n\024RestoreTablesRequest\022\021\n\tbac" + + "kup_id\030\001 \002(\t\022#\n\006tables\030\002 \003(\0132\023.hbase.pb." + + "TableName\022*\n\rtarget_tables\030\003 \003(\0132\023.hbase", + ".pb.TableName\022\027\n\017backup_root_dir\030\004 \002(\t\022\035" + + "\n\025dependency_check_only\030\005 \001(\010\022\021\n\toverwri" + + "te\030\006 \001(\010\"(\n\025RestoreTablesResponse\022\017\n\007pro" + + "c_id\030\001 \001(\004*(\n\020MasterSwitchType\022\t\n\005SPLIT\020" + + "\000\022\t\n\005MERGE\020\001*8\n\022RestoreTablesState\022\016\n\nVA" + + "LIDATION\020\001\022\022\n\016RESTORE_IMAGES\020\0022\364)\n\rMaste" + + "rService\022e\n\024GetSchemaAlterStatus\022%.hbase" + + ".pb.GetSchemaAlterStatusRequest\032&.hbase." + + "pb.GetSchemaAlterStatusResponse\022b\n\023GetTa" + + "bleDescriptors\022$.hbase.pb.GetTableDescri", + "ptorsRequest\032%.hbase.pb.GetTableDescript" + + "orsResponse\022P\n\rGetTableNames\022\036.hbase.pb." + + "GetTableNamesRequest\032\037.hbase.pb.GetTable" + + "NamesResponse\022Y\n\020GetClusterStatus\022!.hbas" + + "e.pb.GetClusterStatusRequest\032\".hbase.pb." + + "GetClusterStatusResponse\022V\n\017IsMasterRunn" + + "ing\022 .hbase.pb.IsMasterRunningRequest\032!." + + "hbase.pb.IsMasterRunningResponse\022D\n\tAddC" + + "olumn\022\032.hbase.pb.AddColumnRequest\032\033.hbas" + + "e.pb.AddColumnResponse\022M\n\014DeleteColumn\022\035", + ".hbase.pb.DeleteColumnRequest\032\036.hbase.pb" + + ".DeleteColumnResponse\022M\n\014ModifyColumn\022\035." + + "hbase.pb.ModifyColumnRequest\032\036.hbase.pb." 
+ + "ModifyColumnResponse\022G\n\nMoveRegion\022\033.hba" + + "se.pb.MoveRegionRequest\032\034.hbase.pb.MoveR" + + "egionResponse\022k\n\026DispatchMergingRegions\022" + + "\'.hbase.pb.DispatchMergingRegionsRequest" + + "\032(.hbase.pb.DispatchMergingRegionsRespon" + + "se\022M\n\014AssignRegion\022\035.hbase.pb.AssignRegi" + + "onRequest\032\036.hbase.pb.AssignRegionRespons", + "e\022S\n\016UnassignRegion\022\037.hbase.pb.UnassignR" + + "egionRequest\032 .hbase.pb.UnassignRegionRe" + + "sponse\022P\n\rOfflineRegion\022\036.hbase.pb.Offli" + + "neRegionRequest\032\037.hbase.pb.OfflineRegion" + + "Response\022J\n\013DeleteTable\022\034.hbase.pb.Delet" + + "eTableRequest\032\035.hbase.pb.DeleteTableResp" + + "onse\022P\n\rtruncateTable\022\036.hbase.pb.Truncat" + + "eTableRequest\032\037.hbase.pb.TruncateTableRe" + + "sponse\022J\n\013EnableTable\022\034.hbase.pb.EnableT" + + "ableRequest\032\035.hbase.pb.EnableTableRespon", + "se\022M\n\014DisableTable\022\035.hbase.pb.DisableTab" + + "leRequest\032\036.hbase.pb.DisableTableRespons" + + "e\022J\n\013ModifyTable\022\034.hbase.pb.ModifyTableR" + + "equest\032\035.hbase.pb.ModifyTableResponse\022J\n" + + "\013CreateTable\022\034.hbase.pb.CreateTableReque" + + "st\032\035.hbase.pb.CreateTableResponse\022A\n\010Shu" + + "tdown\022\031.hbase.pb.ShutdownRequest\032\032.hbase" + + ".pb.ShutdownResponse\022G\n\nStopMaster\022\033.hba" + + "se.pb.StopMasterRequest\032\034.hbase.pb.StopM" + + "asterResponse\022>\n\007Balance\022\030.hbase.pb.Bala", + "nceRequest\032\031.hbase.pb.BalanceResponse\022_\n" + + "\022SetBalancerRunning\022#.hbase.pb.SetBalanc" + + "erRunningRequest\032$.hbase.pb.SetBalancerR" + + "unningResponse\022\\\n\021IsBalancerEnabled\022\".hb" + + "ase.pb.IsBalancerEnabledRequest\032#.hbase." + + "pb.IsBalancerEnabledResponse\022k\n\026SetSplit" + + "OrMergeEnabled\022\'.hbase.pb.SetSplitOrMerg" + + "eEnabledRequest\032(.hbase.pb.SetSplitOrMer" + + "geEnabledResponse\022h\n\025IsSplitOrMergeEnabl" + + "ed\022&.hbase.pb.IsSplitOrMergeEnabledReque", + "st\032\'.hbase.pb.IsSplitOrMergeEnabledRespo" + + "nse\022D\n\tNormalize\022\032.hbase.pb.NormalizeReq" + + "uest\032\033.hbase.pb.NormalizeResponse\022e\n\024Set" + + "NormalizerRunning\022%.hbase.pb.SetNormaliz" + + "erRunningRequest\032&.hbase.pb.SetNormalize" + + "rRunningResponse\022b\n\023IsNormalizerEnabled\022" + + "$.hbase.pb.IsNormalizerEnabledRequest\032%." + + "hbase.pb.IsNormalizerEnabledResponse\022S\n\016" + + "RunCatalogScan\022\037.hbase.pb.RunCatalogScan" + + "Request\032 .hbase.pb.RunCatalogScanRespons", + "e\022e\n\024EnableCatalogJanitor\022%.hbase.pb.Ena" + + "bleCatalogJanitorRequest\032&.hbase.pb.Enab" + + "leCatalogJanitorResponse\022n\n\027IsCatalogJan" + + "itorEnabled\022(.hbase.pb.IsCatalogJanitorE" + + "nabledRequest\032).hbase.pb.IsCatalogJanito" + + "rEnabledResponse\022^\n\021ExecMasterService\022#." + + "hbase.pb.CoprocessorServiceRequest\032$.hba" + + "se.pb.CoprocessorServiceResponse\022A\n\010Snap" + + "shot\022\031.hbase.pb.SnapshotRequest\032\032.hbase." + + "pb.SnapshotResponse\022h\n\025GetCompletedSnaps", + "hots\022&.hbase.pb.GetCompletedSnapshotsReq" + + "uest\032\'.hbase.pb.GetCompletedSnapshotsRes" + + "ponse\022S\n\016DeleteSnapshot\022\037.hbase.pb.Delet" + + "eSnapshotRequest\032 .hbase.pb.DeleteSnapsh" + + "otResponse\022S\n\016IsSnapshotDone\022\037.hbase.pb." + + "IsSnapshotDoneRequest\032 .hbase.pb.IsSnaps" + + "hotDoneResponse\022V\n\017RestoreSnapshot\022 .hba" + + "se.pb.RestoreSnapshotRequest\032!.hbase.pb." 
+ + "RestoreSnapshotResponse\022h\n\025IsRestoreSnap" + + "shotDone\022&.hbase.pb.IsRestoreSnapshotDon", + "eRequest\032\'.hbase.pb.IsRestoreSnapshotDon" + + "eResponse\022P\n\rExecProcedure\022\036.hbase.pb.Ex" + + "ecProcedureRequest\032\037.hbase.pb.ExecProced" + + "ureResponse\022W\n\024ExecProcedureWithRet\022\036.hb" + + "ase.pb.ExecProcedureRequest\032\037.hbase.pb.E" + + "xecProcedureResponse\022V\n\017IsProcedureDone\022" + + " .hbase.pb.IsProcedureDoneRequest\032!.hbas" + + "e.pb.IsProcedureDoneResponse\022V\n\017ModifyNa" + + "mespace\022 .hbase.pb.ModifyNamespaceReques" + + "t\032!.hbase.pb.ModifyNamespaceResponse\022V\n\017", + "CreateNamespace\022 .hbase.pb.CreateNamespa" + + "ceRequest\032!.hbase.pb.CreateNamespaceResp" + + "onse\022V\n\017DeleteNamespace\022 .hbase.pb.Delet" + + "eNamespaceRequest\032!.hbase.pb.DeleteNames" + + "paceResponse\022k\n\026GetNamespaceDescriptor\022\'" + + ".hbase.pb.GetNamespaceDescriptorRequest\032" + + "(.hbase.pb.GetNamespaceDescriptorRespons" + + "e\022q\n\030ListNamespaceDescriptors\022).hbase.pb" + + ".ListNamespaceDescriptorsRequest\032*.hbase" + + ".pb.ListNamespaceDescriptorsResponse\022\206\001\n", + "\037ListTableDescriptorsByNamespace\0220.hbase" + + ".pb.ListTableDescriptorsByNamespaceReque" + + "st\0321.hbase.pb.ListTableDescriptorsByName" + + "spaceResponse\022t\n\031ListTableNamesByNamespa" + + "ce\022*.hbase.pb.ListTableNamesByNamespaceR" + + "equest\032+.hbase.pb.ListTableNamesByNamesp" + + "aceResponse\022P\n\rGetTableState\022\036.hbase.pb." + + "GetTableStateRequest\032\037.hbase.pb.GetTable" + + "StateResponse\022A\n\010SetQuota\022\031.hbase.pb.Set" + + "QuotaRequest\032\032.hbase.pb.SetQuotaResponse", + "\022x\n\037getLastMajorCompactionTimestamp\022).hb" + + "ase.pb.MajorCompactionTimestampRequest\032*" + + ".hbase.pb.MajorCompactionTimestampRespon" + + "se\022\212\001\n(getLastMajorCompactionTimestampFo" + + "rRegion\0222.hbase.pb.MajorCompactionTimest" + + "ampForRegionRequest\032*.hbase.pb.MajorComp" + + "actionTimestampResponse\022_\n\022getProcedureR" + + "esult\022#.hbase.pb.GetProcedureResultReque" + + "st\032$.hbase.pb.GetProcedureResultResponse" + + "\022h\n\027getSecurityCapabilities\022%.hbase.pb.S", + "ecurityCapabilitiesRequest\032&.hbase.pb.Se" + + "curityCapabilitiesResponse\022S\n\016AbortProce" + + "dure\022\037.hbase.pb.AbortProcedureRequest\032 ." 
+ + "hbase.pb.AbortProcedureResponse\022S\n\016ListP" + + "rocedures\022\037.hbase.pb.ListProceduresReque" + + "st\032 .hbase.pb.ListProceduresResponse\022M\n\014" + + "backupTables\022\035.hbase.pb.BackupTablesRequ" + + "est\032\036.hbase.pb.BackupTablesResponse\022P\n\rr" + + "estoreTables\022\036.hbase.pb.RestoreTablesReq" + + "uest\032\037.hbase.pb.RestoreTablesResponseBB\n", + "*org.apache.hadoop.hbase.protobuf.genera" + + "tedB\014MasterProtosH\001\210\001\001\240\001\001" }; com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { @@ -67368,6 +69612,18 @@ public final class MasterProtos { com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_BackupTablesResponse_descriptor, new java.lang.String[] { "ProcId", "BackupId", }); + internal_static_hbase_pb_RestoreTablesRequest_descriptor = + getDescriptor().getMessageTypes().get(111); + internal_static_hbase_pb_RestoreTablesRequest_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_hbase_pb_RestoreTablesRequest_descriptor, + new java.lang.String[] { "BackupId", "Tables", "TargetTables", "BackupRootDir", "DependencyCheckOnly", "Overwrite", }); + internal_static_hbase_pb_RestoreTablesResponse_descriptor = + getDescriptor().getMessageTypes().get(112); + internal_static_hbase_pb_RestoreTablesResponse_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_hbase_pb_RestoreTablesResponse_descriptor, + new java.lang.String[] { "ProcId", }); return null; } }; diff --git a/hbase-protocol/src/main/protobuf/Master.proto b/hbase-protocol/src/main/protobuf/Master.proto index 6431c73..20b4ee3 100644 --- a/hbase-protocol/src/main/protobuf/Master.proto +++ b/hbase-protocol/src/main/protobuf/Master.proto @@ -554,6 +554,24 @@ message BackupTablesResponse { optional string backup_id = 2; } +enum RestoreTablesState { + VALIDATION = 1; + RESTORE_IMAGES = 2; +} + +message RestoreTablesRequest { + required string backup_id = 1; + repeated TableName tables = 2; + repeated TableName target_tables = 3; + required string backup_root_dir = 4; + optional bool dependency_check_only = 5; + optional bool overwrite = 6; +} + +message RestoreTablesResponse { + optional uint64 proc_id = 1; +} + service MasterService { /** Used by the client to get the number of regions that have received the updated schema */ rpc GetSchemaAlterStatus(GetSchemaAlterStatusRequest) @@ -832,4 +850,8 @@ service MasterService { /** backup table set */ rpc backupTables(BackupTablesRequest) returns(BackupTablesResponse); + + /** restore table set */ + rpc restoreTables(RestoreTablesRequest) + returns(RestoreTablesResponse); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/RestoreDriver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/RestoreDriver.java index c66ac6e..6b4fb4a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/RestoreDriver.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/RestoreDriver.java @@ -30,6 +30,8 @@ import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.backup.impl.BackupRestoreConstants; import org.apache.hadoop.hbase.backup.impl.BackupSystemTable; import org.apache.hadoop.hbase.backup.util.BackupServerUtil; +import org.apache.hadoop.hbase.backup.util.RestoreServerUtil; +import org.apache.hadoop.hbase.client.BackupAdmin; import 
org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.util.AbstractHBaseTool; @@ -153,10 +155,10 @@ public class RestoreDriver extends AbstractHBaseTool { } - RestoreClient client = BackupRestoreClientFactory.getRestoreClient(getConf()); - try{ - client.restore(backupRootDir, backupId, check, sTableArray, - tTableArray, isOverwrite); + try (final Connection conn = ConnectionFactory.createConnection(conf); + BackupAdmin client = conn.getAdmin().getBackupAdmin();) { + client.restore(RestoreServerUtil.createRestoreRequest(backupRootDir, backupId, check, + sTableArray, tTableArray, isOverwrite)); } catch (Exception e){ e.printStackTrace(); return -5; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/RestoreClientImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/RestoreClientImpl.java deleted file mode 100644 index a04fc08..0000000 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/RestoreClientImpl.java +++ /dev/null @@ -1,295 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hbase.backup.impl; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.HashMap; -import java.util.Iterator; -import java.util.List; -import java.util.Map.Entry; -import java.util.TreeSet; - -import org.apache.commons.lang.StringUtils; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.backup.BackupType; -import org.apache.hadoop.hbase.backup.HBackupFileSystem; -import org.apache.hadoop.hbase.backup.RestoreClient; -import org.apache.hadoop.hbase.backup.impl.BackupManifest.BackupImage; -import org.apache.hadoop.hbase.backup.util.BackupClientUtil; -import org.apache.hadoop.hbase.backup.util.RestoreServerUtil; -import org.apache.hadoop.hbase.classification.InterfaceAudience; -import org.apache.hadoop.hbase.classification.InterfaceStability; -import org.apache.hadoop.hbase.client.Admin; -import org.apache.hadoop.hbase.client.Connection; -import org.apache.hadoop.hbase.client.ConnectionFactory; - -/** - * The main class which interprets the given arguments and trigger restore operation. 
- */ -@InterfaceAudience.Public -@InterfaceStability.Evolving -public final class RestoreClientImpl implements RestoreClient { - - private static final Log LOG = LogFactory.getLog(RestoreClientImpl.class); - private Configuration conf; - - public RestoreClientImpl() { - } - - @Override - public void setConf(Configuration conf) { - this.conf = conf; - } - - - - /** - * Restore operation. Stage 1: validate backupManifest, and check target tables - * @param backupRootDir The root dir for backup image - * @param backupId The backup id for image to be restored - * @param check True if only do dependency check - * @param sTableArray The array of tables to be restored - * @param tTableArray The array of mapping tables to restore to - * @param isOverwrite True then do restore overwrite if target table exists, otherwise fail the - * request if target table exists - * @throws IOException if any failure during restore - */ - @Override - public void restore(String backupRootDir, - String backupId, boolean check, TableName[] sTableArray, - TableName[] tTableArray, boolean isOverwrite) throws IOException { - - HashMap backupManifestMap = new HashMap<>(); - // check and load backup image manifest for the tables - Path rootPath = new Path(backupRootDir); - HBackupFileSystem.checkImageManifestExist(backupManifestMap, sTableArray, conf, rootPath, - backupId); - try { - // Check and validate the backup image and its dependencies - if (check) { - if (validate(backupManifestMap)) { - LOG.info("Checking backup images: ok"); - } else { - String errMsg = "Some dependencies are missing for restore"; - LOG.error(errMsg); - throw new IOException(errMsg); - } - } - - if (tTableArray == null) { - tTableArray = sTableArray; - } - // check the target tables - checkTargetTables(tTableArray, isOverwrite); - // start restore process - restoreStage(backupManifestMap, sTableArray, tTableArray, isOverwrite); - LOG.info("Restore for " + Arrays.asList(sTableArray) + " are successful!"); - } catch (IOException e) { - LOG.error("ERROR: restore failed with error: " + e.getMessage()); - throw e; - } - - } - - private boolean validate(HashMap backupManifestMap) - throws IOException { - boolean isValid = true; - - for (Entry manifestEntry : backupManifestMap.entrySet()) { - TableName table = manifestEntry.getKey(); - TreeSet imageSet = new TreeSet(); - - ArrayList depList = manifestEntry.getValue().getDependentListByTable(table); - if (depList != null && !depList.isEmpty()) { - imageSet.addAll(depList); - } - - LOG.info("Dependent image(s) from old to new:"); - for (BackupImage image : imageSet) { - String imageDir = - HBackupFileSystem.getTableBackupDir(image.getRootDir(), image.getBackupId(), table); - if (!BackupClientUtil.checkPathExist(imageDir, conf)) { - LOG.error("ERROR: backup image does not exist: " + imageDir); - isValid = false; - break; - } - // TODO More validation? 
- LOG.info("Backup image: " + image.getBackupId() + " for '" + table + "' is available"); - } - } - - return isValid; - } - - /** - * Validate target Tables - * @param tTableArray: target tables - * @param isOverwrite overwrite existing table - * @throws IOException exception - */ - private void checkTargetTables(TableName[] tTableArray, boolean isOverwrite) - throws IOException { - ArrayList existTableList = new ArrayList<>(); - ArrayList disabledTableList = new ArrayList<>(); - - // check if the tables already exist - try(Connection conn = ConnectionFactory.createConnection(conf); - Admin admin = conn.getAdmin()) { - for (TableName tableName : tTableArray) { - if (admin.tableExists(tableName)) { - existTableList.add(tableName); - if (admin.isTableDisabled(tableName)) { - disabledTableList.add(tableName); - } - } else { - LOG.info("HBase table " + tableName - + " does not exist. It will be created during restore process"); - } - } - } - - if (existTableList.size() > 0) { - if (!isOverwrite) { - LOG.error("Existing table found in the restore target, please add \"-overwrite\" " - + "option in the command if you mean to restore to these existing tables"); - LOG.info("Existing table list in restore target: " + existTableList); - throw new IOException("Existing table found in target while no \"-overwrite\" " - + "option found"); - } else { - if (disabledTableList.size() > 0) { - LOG.error("Found offline table in the restore target, " - + "please enable them before restore with \"-overwrite\" option"); - LOG.info("Offline table list in restore target: " + disabledTableList); - throw new IOException( - "Found offline table in the target when restore with \"-overwrite\" option"); - } - } - } - } - - /** - * Restore operation. Stage 2: resolved Backup Image dependency - * @param backupManifestMap : tableName, Manifest - * @param sTableArray The array of tables to be restored - * @param tTableArray The array of mapping tables to restore to - * @return set of BackupImages restored - * @throws IOException exception - */ - private void restoreStage(HashMap backupManifestMap, - TableName[] sTableArray, TableName[] tTableArray, boolean isOverwrite) throws IOException { - TreeSet restoreImageSet = new TreeSet(); - boolean truncateIfExists = isOverwrite; - try { - for (int i = 0; i < sTableArray.length; i++) { - TableName table = sTableArray[i]; - BackupManifest manifest = backupManifestMap.get(table); - // Get the image list of this backup for restore in time order from old - // to new. - List list = new ArrayList(); - list.add(manifest.getBackupImage()); - List depList = manifest.getDependentListByTable(table); - list.addAll(depList); - TreeSet restoreList = new TreeSet(list); - LOG.debug("need to clear merged Image. 
to be implemented in future jira"); - restoreImages(restoreList.iterator(), table, tTableArray[i], truncateIfExists); - restoreImageSet.addAll(restoreList); - - if (restoreImageSet != null && !restoreImageSet.isEmpty()) { - LOG.info("Restore includes the following image(s):"); - for (BackupImage image : restoreImageSet) { - LOG.info("Backup: " - + image.getBackupId() - + " " - + HBackupFileSystem.getTableBackupDir(image.getRootDir(), image.getBackupId(), - table)); - } - } - } - } catch (Exception e) { - LOG.error("Failed", e); - throw new IOException(e); - } - LOG.debug("restoreStage finished"); - - } - - /** - * Restore operation handle each backupImage in iterator - * @param it: backupImage iterator - ascending - * @param sTable: table to be restored - * @param tTable: table to be restored to - * @throws IOException exception - */ - private void restoreImages(Iterator it, TableName sTable, - TableName tTable, boolean truncateIfExists) - throws IOException { - - // First image MUST be image of a FULL backup - BackupImage image = it.next(); - - String rootDir = image.getRootDir(); - String backupId = image.getBackupId(); - Path backupRoot = new Path(rootDir); - - // We need hFS only for full restore (see the code) - RestoreServerUtil restoreTool = new RestoreServerUtil(conf, backupRoot, backupId); - BackupManifest manifest = HBackupFileSystem.getManifest(sTable, conf, backupRoot, backupId); - - Path tableBackupPath = HBackupFileSystem.getTableBackupPath(sTable, backupRoot, backupId); - - // TODO: convert feature will be provided in a future JIRA - boolean converted = false; - - if (manifest.getType() == BackupType.FULL || converted) { - LOG.info("Restoring '" + sTable + "' to '" + tTable + "' from " - + (converted ? "converted" : "full") + " backup image " + tableBackupPath.toString()); - restoreTool.fullRestoreTable(tableBackupPath, sTable, tTable, - converted, truncateIfExists); - - } else { // incremental Backup - throw new IOException("Unexpected backup type " + image.getType()); - } - - // The rest one are incremental - if (it.hasNext()) { - List logDirList = new ArrayList(); - while (it.hasNext()) { - BackupImage im = it.next(); - String logBackupDir = HBackupFileSystem.getLogBackupDir(im.getRootDir(), im.getBackupId()); - logDirList.add(logBackupDir); - } - String logDirs = StringUtils.join(logDirList, ","); - LOG.info("Restoring '" + sTable + "' to '" + tTable - + "' from log dirs: " + logDirs); - String[] sarr = new String[logDirList.size()]; - logDirList.toArray(sarr); - Path[] paths = org.apache.hadoop.util.StringUtils.stringToPath(sarr); - restoreTool.incrementalRestoreTable(paths, new TableName[] { sTable }, - new TableName[] { tTable }); - } - LOG.info(sTable + " has been successfully restored to " + tTable); - } - -} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/util/RestoreServerUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/util/RestoreServerUtil.java index 1b05aa9..f37b4fe 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/util/RestoreServerUtil.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/util/RestoreServerUtil.java @@ -22,7 +22,9 @@ import java.io.FileNotFoundException; import java.io.IOException; import java.util.ArrayList; import java.util.HashMap; +import java.util.Map.Entry; import java.util.TreeMap; +import java.util.TreeSet; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; @@ -37,6 +39,9 @@ import org.apache.hadoop.hbase.TableName; 
import org.apache.hadoop.hbase.backup.BackupRestoreServerFactory; import org.apache.hadoop.hbase.backup.HBackupFileSystem; import org.apache.hadoop.hbase.backup.IncrementalRestoreService; +import org.apache.hadoop.hbase.backup.RestoreRequest; +import org.apache.hadoop.hbase.backup.impl.BackupManifest; +import org.apache.hadoop.hbase.backup.impl.BackupManifest.BackupImage; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.classification.InterfaceStability; import org.apache.hadoop.hbase.client.Admin; @@ -615,4 +620,47 @@ public class RestoreServerUtil { } } + public static boolean validate(HashMap backupManifestMap, + Configuration conf) throws IOException { + boolean isValid = true; + + for (Entry manifestEntry : backupManifestMap.entrySet()) { + TableName table = manifestEntry.getKey(); + TreeSet imageSet = new TreeSet(); + + ArrayList depList = manifestEntry.getValue().getDependentListByTable(table); + if (depList != null && !depList.isEmpty()) { + imageSet.addAll(depList); + } + + LOG.info("Dependent image(s) from old to new:"); + for (BackupImage image : imageSet) { + String imageDir = + HBackupFileSystem.getTableBackupDir(image.getRootDir(), image.getBackupId(), table); + if (!BackupClientUtil.checkPathExist(imageDir, conf)) { + LOG.error("ERROR: backup image does not exist: " + imageDir); + isValid = false; + break; + } + LOG.info("Backup image: " + image.getBackupId() + " for '" + table + "' is available"); + } + } + + return isValid; + } + + /** + * Create restore request. + * + */ + public static RestoreRequest createRestoreRequest( + String backupRootDir, + String backupId, boolean check, TableName[] fromTables, + TableName[] toTables, boolean isOverwrite) { + RestoreRequest request = new RestoreRequest(); + request.setBackupRootDir(backupRootDir).setBackupId(backupId).setCheck(check) + .setFromTables(fromTables).setToTables(toTables).setOverwrite(isOverwrite); + return request; + } + } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java index bbd7107..b932d03 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java @@ -30,6 +30,7 @@ import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.Comparator; +import java.util.HashMap; import java.util.HashSet; import java.util.Iterator; import java.util.List; @@ -79,10 +80,13 @@ import org.apache.hadoop.hbase.UnknownRegionException; import org.apache.hadoop.hbase.backup.BackupType; import org.apache.hadoop.hbase.backup.HBackupFileSystem; import org.apache.hadoop.hbase.backup.impl.BackupManager; +import org.apache.hadoop.hbase.backup.impl.BackupManifest; import org.apache.hadoop.hbase.backup.impl.BackupRestoreConstants; import org.apache.hadoop.hbase.backup.impl.BackupSystemTable; +import org.apache.hadoop.hbase.backup.impl.RestoreTablesProcedure; import org.apache.hadoop.hbase.backup.master.FullTableBackupProcedure; import org.apache.hadoop.hbase.backup.master.IncrementalTableBackupProcedure; +import org.apache.hadoop.hbase.backup.util.RestoreServerUtil; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.RegionReplicaUtil; @@ -2695,6 +2699,35 @@ public class HMaster extends HRegionServer implements MasterServices { return tableList; } + @Override + 
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index bbd7107..b932d03 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -30,6 +30,7 @@ import java.util.Arrays;
 import java.util.Collection;
 import java.util.Collections;
 import java.util.Comparator;
+import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Iterator;
 import java.util.List;
@@ -79,10 +80,13 @@ import org.apache.hadoop.hbase.UnknownRegionException;
 import org.apache.hadoop.hbase.backup.BackupType;
 import org.apache.hadoop.hbase.backup.HBackupFileSystem;
 import org.apache.hadoop.hbase.backup.impl.BackupManager;
+import org.apache.hadoop.hbase.backup.impl.BackupManifest;
 import org.apache.hadoop.hbase.backup.impl.BackupRestoreConstants;
 import org.apache.hadoop.hbase.backup.impl.BackupSystemTable;
+import org.apache.hadoop.hbase.backup.impl.RestoreTablesProcedure;
 import org.apache.hadoop.hbase.backup.master.FullTableBackupProcedure;
 import org.apache.hadoop.hbase.backup.master.IncrementalTableBackupProcedure;
+import org.apache.hadoop.hbase.backup.util.RestoreServerUtil;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.RegionReplicaUtil;
@@ -2695,6 +2699,33 @@ public class HMaster extends HRegionServer implements MasterServices {
     return tableList;
   }
 
+  @Override
+  public long restoreTables(String backupRootDir,
+      String backupId, boolean check, List<TableName> sTableList,
+      List<TableName> tTableList, boolean isOverwrite) throws IOException {
+    if (check) {
+      HashMap<TableName, BackupManifest> backupManifestMap = new HashMap<>();
+      // check and load backup image manifest for the tables
+      Path rootPath = new Path(backupRootDir);
+      HBackupFileSystem.checkImageManifestExist(backupManifestMap,
+        sTableList.toArray(new TableName[sTableList.size()]),
+        conf, rootPath, backupId);
+
+      // validate the backup image and its dependencies
+      if (RestoreServerUtil.validate(backupManifestMap, conf)) {
+        LOG.info("Checking backup images: ok");
+      } else {
+        String errMsg = "Some dependencies are missing for restore";
+        LOG.error(errMsg);
+        throw new IOException(errMsg);
+      }
+    }
+    long procId = this.procedureExecutor.submitProcedure(
+      new RestoreTablesProcedure(procedureExecutor.getEnvironment(), backupRootDir, backupId,
+          sTableList, tTableList, isOverwrite));
+    return procId;
+  }
+
   /**
    * Returns the list of table descriptors that match the specified request
    * @param namespace the namespace to query, or null if querying for all
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
index 930bfd6..1e51193 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
@@ -1072,6 +1072,27 @@ public class MasterRpcServices extends RSRpcServices
   }
 
   @Override
+  public MasterProtos.RestoreTablesResponse restoreTables(
+      RpcController controller,
+      MasterProtos.RestoreTablesRequest request) throws ServiceException {
+    try {
+      RestoreTablesResponse.Builder response = RestoreTablesResponse.newBuilder();
+      List<TableName> tablesList = new ArrayList<>(request.getTablesList().size());
+      for (HBaseProtos.TableName table : request.getTablesList()) {
+        tablesList.add(ProtobufUtil.toTableName(table));
+      }
+      List<TableName> targetTablesList = new ArrayList<>(request.getTargetTablesList().size());
+      for (HBaseProtos.TableName table : request.getTargetTablesList()) {
+        targetTablesList.add(ProtobufUtil.toTableName(table));
+      }
+      long procId = master.restoreTables(request.getBackupRootDir(), request.getBackupId(),
+        request.getDependencyCheckOnly(), tablesList, targetTablesList, request.getOverwrite());
+      return response.setProcId(procId).build();
+    } catch (IOException e) {
+      throw new ServiceException(e);
+    }
+  }
+
+  @Override
   public ListTableDescriptorsByNamespaceResponse listTableDescriptorsByNamespace(RpcController c,
       ListTableDescriptorsByNamespaceRequest request) throws ServiceException {
     try {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
index 3557bb4..f5667b4 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
@@ -198,6 +198,13 @@
       final int workers, final long bandwidth) throws IOException;
 
+  /**
+   * Restore a set of tables from a backup image.
+   */
+  public long restoreTables(String backupRootDir,
+      String backupId, boolean check, List<TableName> sTableList,
+      List<TableName> tTableList, boolean isOverwrite) throws IOException;
+
   /**
    * Enable an existing table
    * @param tableName The table name
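On the client side these master changes are reached through the new Admin methods; a sketch of the asynchronous path, assuming the Future returned by restoreTablesAsync() can simply be waited on (its type parameter is not visible in this patch) and using hypothetical backup values:

    import java.util.concurrent.Future;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.backup.RestoreRequest;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class AsyncRestoreSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Hypothetical backup root and id; the setters chain, as in
          // RestoreServerUtil.createRestoreRequest() above.
          RestoreRequest request = new RestoreRequest()
              .setBackupRootDir("hdfs://host:8020/backup")
              .setBackupId("backup_12345")
              .setCheck(false)
              .setFromTables(new TableName[] { TableName.valueOf("t1") })
              .setToTables(new TableName[] { TableName.valueOf("t1_restore") })
              .setOverwrite(false);
          // The master turns this into a RestoreTablesRequest, submits a
          // RestoreTablesProcedure, and returns the procedure id.
          Future<?> pending = admin.restoreTablesAsync(request);
          pending.get(); // block until the restore procedure finishes
        }
      }
    }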
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TableProcedureInterface.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TableProcedureInterface.java
index 56983a9..5356d2d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TableProcedureInterface.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TableProcedureInterface.java
@@ -30,7 +30,7 @@ import org.apache.hadoop.hbase.classification.InterfaceStability;
 @InterfaceStability.Evolving
 public interface TableProcedureInterface {
   public enum TableOperationType {
-    CREATE, DELETE, DISABLE, EDIT, ENABLE, READ, BACKUP,
+    CREATE, DELETE, DISABLE, EDIT, ENABLE, READ, BACKUP, RESTORE,
   };
 
   /**
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java
index 3982b1d..5fff569 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java
@@ -252,21 +252,7 @@ public class TestBackupBase {
   protected BackupAdmin getBackupAdmin() throws IOException {
     return TEST_UTIL.getAdmin().getBackupAdmin();
   }
-
-  /**
-   * Get restore request.
-   *
-   */
-  public RestoreRequest createRestoreRequest(
-      String backupRootDir,
-      String backupId, boolean check, TableName[] fromTables,
-      TableName[] toTables, boolean isOverwrite) {
-    RestoreRequest request = new RestoreRequest();
-    request.setBackupRootDir(backupRootDir).setBackupId(backupId).setCheck(check).
-      setFromTables(fromTables).setToTables(toTables).setOverwrite(isOverwrite);
-    return request;
-}
-
+
   /**
    * Helper method
    */
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDeleteRestore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDeleteRestore.java
index 2e04f1a..39c9942 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDeleteRestore.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDeleteRestore.java
@@ -19,6 +19,7 @@ import java.util.List;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.backup.util.RestoreServerUtil;
 import org.apache.hadoop.hbase.client.BackupAdmin;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
@@ -60,9 +61,9 @@ public class TestBackupDeleteRestore extends TestBackupBase {
     TableName[] tableset = new TableName[] { table1 };
     TableName[] tablemap = null; //new TableName[] { table1_restore };
     BackupAdmin client = getBackupAdmin();
-    client.restore(createRestoreRequest(BACKUP_ROOT_DIR, backupId, false, tableset, tablemap, true));
-
-
+    client.restore(RestoreServerUtil.createRestoreRequest(BACKUP_ROOT_DIR, backupId, false,
+      tableset, tablemap, true));
+
     int numRowsAfterRestore = TEST_UTIL.countRows(table1);
     assertEquals(numRows, numRowsAfterRestore);
     hba.close();
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestFullRestore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestFullRestore.java
index 0eddbfe..639aea4 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestFullRestore.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestFullRestore.java
@@ -20,6 +20,7 @@ import org.apache.commons.lang.StringUtils;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.backup.util.RestoreServerUtil;
 import org.apache.hadoop.hbase.client.BackupAdmin;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
@@ -34,6 +35,31 @@ public class TestFullRestore extends TestBackupBase {
 
   private static final Log LOG = LogFactory.getLog(TestFullRestore.class);
 
+  /*
+  void restoreTables(String backupRootDir,
+      String backupId, boolean check, boolean autoRestore, List<TableName> sTable,
+      List<TableName> tTable, boolean isOverwrite) throws IOException {
+    Connection conn = null;
+    HBaseAdmin admin = null;
+    RestoreRequest request = new RestoreRequest();
+    request.setAutoRestore(autoRestore).setDependencyCheckOnly(check).setOverwrite(isOverwrite);
+    request.setBackupId(backupId).setBackupRootDir(backupRootDir);
+    request.setTableList(sTable).setTargetTableList(tTable);
+    try {
+      conn = ConnectionFactory.createConnection(conf1);
+      admin = (HBaseAdmin) conn.getAdmin();
+      admin.restoreTablesSync(request);
+    } finally {
+      if (admin != null) {
+        admin.close();
+      }
+      if (conn != null) {
+        conn.close();
+      }
+    }
+  }
+  */
+
   /**
    * Verify that a single table is restored to a new table
    * @throws Exception
@@ -52,7 +78,8 @@ public class TestFullRestore extends TestBackupBase {
     TableName[] tableset = new TableName[] { table1 };
     TableName[] tablemap = new TableName[] { table1_restore };
     BackupAdmin client = getBackupAdmin();
-    client.restore(createRestoreRequest(BACKUP_ROOT_DIR, backupId, false, tableset, tablemap, false));
+    client.restore(RestoreServerUtil.createRestoreRequest(BACKUP_ROOT_DIR, backupId, false,
+      tableset, tablemap, false));
     HBaseAdmin hba = TEST_UTIL.getHBaseAdmin();
     assertTrue(hba.tableExists(table1_restore));
     TEST_UTIL.deleteTable(table1_restore);
@@ -96,7 +123,7 @@ public class TestFullRestore extends TestBackupBase {
     TableName[] restore_tableset = new TableName[] { table2, table3 };
     TableName[] tablemap = new TableName[] { table2_restore, table3_restore };
     BackupAdmin client = getBackupAdmin();
-    client.restore(createRestoreRequest(BACKUP_ROOT_DIR, backupId, false,
+    client.restore(RestoreServerUtil.createRestoreRequest(BACKUP_ROOT_DIR, backupId, false,
       restore_tableset, tablemap, false));
     HBaseAdmin hba = TEST_UTIL.getHBaseAdmin();
     assertTrue(hba.tableExists(table2_restore));
@@ -154,8 +181,8 @@ public class TestFullRestore extends TestBackupBase {
 
     TableName[] tableset = new TableName[] { table1 };
     BackupAdmin client = getBackupAdmin();
-    client.restore(createRestoreRequest(BACKUP_ROOT_DIR, backupId, false, tableset, null,
-      true));
+    client.restore(RestoreServerUtil.createRestoreRequest(BACKUP_ROOT_DIR, backupId, false,
+      tableset, null, true));
   }
 
   /**
@@ -198,7 +225,7 @@ public class TestFullRestore extends TestBackupBase {
 
     TableName[] restore_tableset = new TableName[] { table2, table3 };
     BackupAdmin client = getBackupAdmin();
-    client.restore(createRestoreRequest(BACKUP_ROOT_DIR, backupId,
+    client.restore(RestoreServerUtil.createRestoreRequest(BACKUP_ROOT_DIR, backupId,
       false, restore_tableset, null, true));
   }
 
@@ -245,8 +272,8 @@ public class TestFullRestore extends TestBackupBase {
     TableName[] tableset = new TableName[] { TableName.valueOf("faketable") };
     TableName[] tablemap = new TableName[] { table1_restore };
     BackupAdmin client = getBackupAdmin();
-    client.restore(createRestoreRequest(BACKUP_ROOT_DIR, backupId, false, tableset, tablemap,
-      false));
+    client.restore(RestoreServerUtil.createRestoreRequest(BACKUP_ROOT_DIR, backupId, false,
+      tableset, tablemap, false));
   }
@@ -291,7 +318,7 @@ public class TestFullRestore extends TestBackupBase {
       = new TableName[] { TableName.valueOf("faketable1"), TableName.valueOf("faketable2") };
     TableName[] tablemap = new TableName[] { table2_restore, table3_restore };
     BackupAdmin client = getBackupAdmin();
-    client.restore(createRestoreRequest(BACKUP_ROOT_DIR, backupId, false,
+    client.restore(RestoreServerUtil.createRestoreRequest(BACKUP_ROOT_DIR, backupId, false,
       restore_tableset, tablemap, false));
   }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackup.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackup.java
index 9a2ad89..8d30c6c 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackup.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackup.java
@@ -27,6 +27,7 @@ import java.util.List;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.backup.util.RestoreServerUtil;
 import org.apache.hadoop.hbase.client.BackupAdmin;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
@@ -118,7 +119,7 @@ public class TestIncrementalBackup extends TestBackupBase {
       new TableName[] { table1_restore, table2_restore };
 
     BackupAdmin client = getBackupAdmin();
-    client.restore(createRestoreRequest(BACKUP_ROOT_DIR, backupIdFull, false,
+    client.restore(RestoreServerUtil.createRestoreRequest(BACKUP_ROOT_DIR, backupIdFull, false,
       tablesRestoreFull, tablesMapFull, false));
 
@@ -143,8 +144,8 @@ public class TestIncrementalBackup extends TestBackupBase {
       new TableName[] { table1, table2 };
     TableName[] tablesMapIncMultiple =
       new TableName[] { table1_restore, table2_restore };
-    client.restore(createRestoreRequest(BACKUP_ROOT_DIR, backupIdIncMultiple, false,
-      tablesRestoreIncMultiple, tablesMapIncMultiple, true));
+    client.restore(RestoreServerUtil.createRestoreRequest(BACKUP_ROOT_DIR, backupIdIncMultiple,
+      false, tablesRestoreIncMultiple, tablesMapIncMultiple, true));
 
     hTable = (HTable) conn.getTable(table1_restore);
     Assert.assertThat(TEST_UTIL.countRows(hTable), CoreMatchers.equalTo(NB_ROWS_IN_BATCH * 2));
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupDeleteTable.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupDeleteTable.java
index ad11548..a7c0713 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupDeleteTable.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupDeleteTable.java
@@ -25,6 +25,7 @@ import java.util.List;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.backup.util.RestoreServerUtil;
 import org.apache.hadoop.hbase.client.BackupAdmin;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
@@ -101,7 +102,7 @@ public class TestIncrementalBackupDeleteTable extends TestBackupBase {
       new TableName[] { table1_restore, table2_restore };
 
     BackupAdmin client = getBackupAdmin();
-    client.restore(createRestoreRequest(BACKUP_ROOT_DIR, backupIdFull, false,
+    client.restore(RestoreServerUtil.createRestoreRequest(BACKUP_ROOT_DIR, backupIdFull, false,
       tablesRestoreFull, tablesMapFull, false));
 
@@ -126,8 +127,8 @@ public class TestIncrementalBackupDeleteTable extends TestBackupBase {
       new TableName[] { table1 };
     TableName[] tablesMapIncMultiple =
       new TableName[] { table1_restore };
-    client.restore(createRestoreRequest(BACKUP_ROOT_DIR, backupIdIncMultiple, false,
-      tablesRestoreIncMultiple, tablesMapIncMultiple, true));
+    client.restore(RestoreServerUtil.createRestoreRequest(BACKUP_ROOT_DIR, backupIdIncMultiple,
+      false, tablesRestoreIncMultiple, tablesMapIncMultiple, true));
 
     hTable = (HTable) conn.getTable(table1_restore);
     Assert.assertThat(TEST_UTIL.countRows(hTable), CoreMatchers.equalTo(NB_ROWS_IN_BATCH * 2));
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestRemoteRestore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestRemoteRestore.java
index 1be2aa2..988d07c 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestRemoteRestore.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestRemoteRestore.java
@@ -16,6 +16,7 @@ import static org.junit.Assert.assertTrue;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.backup.util.RestoreServerUtil;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.junit.Test;
@@ -39,8 +40,8 @@ public class TestRemoteRestore extends TestBackupBase {
     LOG.info("backup complete");
     TableName[] tableset = new TableName[] { table1 };
     TableName[] tablemap = new TableName[] { table1_restore };
-    getBackupAdmin().restore(createRestoreRequest(BACKUP_REMOTE_ROOT_DIR, backupId, false, tableset,
-      tablemap, false));
+    getBackupAdmin().restore(RestoreServerUtil.createRestoreRequest(BACKUP_REMOTE_ROOT_DIR,
+      backupId, false, tableset, tablemap, false));
     HBaseAdmin hba = TEST_UTIL.getHBaseAdmin();
     assertTrue(hba.tableExists(table1_restore));
     TEST_UTIL.deleteTable(table1_restore);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestRestoreBoundaryTests.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestRestoreBoundaryTests.java
index d2308d1..e20f2cc 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestRestoreBoundaryTests.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestRestoreBoundaryTests.java
@@ -25,6 +25,7 @@ import java.util.List;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.backup.util.RestoreServerUtil;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.junit.Test;
@@ -46,8 +47,8 @@ public class TestRestoreBoundaryTests extends TestBackupBase {
     LOG.info("backup complete");
     TableName[] tableset = new TableName[] { table1 };
     TableName[] tablemap = new TableName[] { table1_restore };
-    getBackupAdmin().restore(createRestoreRequest(BACKUP_ROOT_DIR, backupId, false, tableset, tablemap,
-      false));
+    getBackupAdmin().restore(RestoreServerUtil.createRestoreRequest(BACKUP_ROOT_DIR, backupId,
+      false, tableset, tablemap, false));
     HBaseAdmin hba = TEST_UTIL.getHBaseAdmin();
     assertTrue(hba.tableExists(table1_restore));
     TEST_UTIL.deleteTable(table1_restore);
@@ -65,13 +66,13 @@ public class TestRestoreBoundaryTests extends TestBackupBase {
     String backupId = fullTableBackup(tables);
     TableName[] restore_tableset = new TableName[] { table2, table3 };
     TableName[] tablemap = new TableName[] { table2_restore, table3_restore };
-    getBackupAdmin().restore(createRestoreRequest(BACKUP_ROOT_DIR, backupId, false, restore_tableset,
-      tablemap,
-      false));
+    getBackupAdmin().restore(RestoreServerUtil.createRestoreRequest(BACKUP_ROOT_DIR, backupId,
+      false, restore_tableset, tablemap, false));
     HBaseAdmin hba = TEST_UTIL.getHBaseAdmin();
     assertTrue(hba.tableExists(table2_restore));
     assertTrue(hba.tableExists(table3_restore));
     TEST_UTIL.deleteTable(table2_restore);
     TEST_UTIL.deleteTable(table3_restore);
+    hba.close();
   }
 }
\ No newline at end of file
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java
index d3d5357..b28cdf3 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java
@@ -440,6 +440,12 @@ public class TestCatalogJanitor {
     return null;
   }
 
+  @Override
+  public long restoreTables(String backupRootDir,
+      String backupId, boolean check, List<TableName> sTableList,
+      List<TableName> tTableList, boolean isOverwrite) throws IOException {
+    return 1;
+  }
 
   @Override
   public List<HTableDescriptor> listTableDescriptorsByNamespace(String name) throws IOException {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/RestoreTablesProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/RestoreTablesProcedure.java
new file mode 100644
index 0000000..0acac84
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/RestoreTablesProcedure.java
@@ -0,0 +1,393 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.backup.impl;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.TreeSet;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import org.apache.commons.lang.StringUtils;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.MetaTableAccessor;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.backup.BackupType;
+import org.apache.hadoop.hbase.backup.HBackupFileSystem;
+import org.apache.hadoop.hbase.backup.impl.BackupManifest.BackupImage;
+import org.apache.hadoop.hbase.backup.util.RestoreServerUtil;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
+import org.apache.hadoop.hbase.client.HConnection;
+import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
+import org.apache.hadoop.hbase.master.procedure.TableProcedureInterface;
+import org.apache.hadoop.hbase.procedure2.StateMachineProcedure;
+import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesState;
+import org.apache.hadoop.security.UserGroupInformation;
+
+@InterfaceAudience.Private
+public class RestoreTablesProcedure
+    extends StateMachineProcedure<MasterProcedureEnv, RestoreTablesState>
+    implements TableProcedureInterface {
+  private static final Log LOG = LogFactory.getLog(RestoreTablesProcedure.class);
+
+  private final AtomicBoolean aborted = new AtomicBoolean(false);
+  private Configuration conf;
+  private String backupId;
+  private List<TableName> sTableList;
+  private List<TableName> tTableList;
+  private String targetRootDir;
+  private boolean isOverwrite;
+  private UserGroupInformation user;
+
+  public RestoreTablesProcedure() {
+    // Required by the Procedure framework to create the procedure on replay
+  }
+
+  public RestoreTablesProcedure(final MasterProcedureEnv env,
+      final String targetRootDir, String backupId, List<TableName> sTableList,
+      List<TableName> tTableList, boolean isOverwrite) throws IOException {
+    this.targetRootDir = targetRootDir;
+    this.backupId = backupId;
+    this.sTableList = sTableList;
+    this.tTableList = tTableList;
+    if (tTableList == null || tTableList.isEmpty()) {
+      this.tTableList = sTableList;
+    }
+    this.isOverwrite = isOverwrite;
+    this.user = env.getRequestUser().getUGI();
+    this.setOwner(this.user.getShortUserName());
+  }
+
+  @Override
+  public byte[] getResult() {
+    return null;
+  }
+
+  /**
+   * Validate target tables.
+   * @param tTableArray target tables
+   * @param isOverwrite overwrite existing tables
+   * @throws IOException exception
+   */
+  private void checkTargetTables(TableName[] tTableArray, boolean isOverwrite)
+      throws IOException {
+    ArrayList<TableName> existTableList = new ArrayList<>();
+    ArrayList<TableName> disabledTableList = new ArrayList<>();
+
+    // check if the tables already exist
+    try (HConnection conn = (HConnection) ConnectionFactory.createConnection(conf)) {
+      for (TableName tableName : tTableArray) {
+        if (MetaTableAccessor.tableExists(conn, tableName)) {
+          existTableList.add(tableName);
+          if (conn.isTableDisabled(tableName)) {
+            disabledTableList.add(tableName);
+          }
+        } else {
+          LOG.info("HBase table " + tableName
+              + " does not exist. It will be created during the restore process");
+        }
+      }
+    }
+
+    if (existTableList.size() > 0) {
+      if (!isOverwrite) {
+        LOG.error("Existing table (" + existTableList + ") found in the restore target, please "
+            + "add the \"-overwrite\" option in the command if you mean to restore to these "
+            + "existing tables");
+        throw new IOException("Existing table found in target while no \"-overwrite\" "
+            + "option found");
+      } else {
+        if (disabledTableList.size() > 0) {
+          LOG.error("Found disabled table(s) in the restore target, "
+              + "please enable them before restoring with the \"-overwrite\" option");
+          LOG.info("Disabled table list in restore target: " + disabledTableList);
+          throw new IOException(
+              "Found disabled table in the target when restoring with the \"-overwrite\" option");
+        }
+      }
+    }
+  }
+
+  /**
+   * Restore operation: handles each backup image returned by the iterator.
+   * @param it backup image iterator, ordered from old to new
+   * @param sTable table to be restored
+   * @param tTable table to restore to
+   * @param truncateIfExists truncate the target table if it already exists
+   * @throws IOException exception
+   */
+  private void restoreImages(Iterator<BackupImage> it, TableName sTable,
+      TableName tTable, boolean truncateIfExists) throws IOException {
+
+    // The first image MUST be the image of a FULL backup
+    BackupImage image = it.next();
+
+    String rootDir = image.getRootDir();
+    String backupId = image.getBackupId();
+    Path backupRoot = new Path(rootDir);
+
+    // The restore tool is needed only for the full restore (see below)
+    RestoreServerUtil restoreTool = new RestoreServerUtil(conf, backupRoot, backupId);
+    BackupManifest manifest = HBackupFileSystem.getManifest(sTable, conf, backupRoot, backupId);
+
+    Path tableBackupPath = HBackupFileSystem.getTableBackupPath(sTable, backupRoot, backupId);
+
+    // TODO: the convert feature will be provided in a future JIRA
+    boolean converted = false;
+
+    if (manifest.getType() == BackupType.FULL || converted) {
+      LOG.info("Restoring '" + sTable + "' to '" + tTable + "' from "
+          + (converted ? "converted" : "full") + " backup image " + tableBackupPath.toString());
+      restoreTool.fullRestoreTable(tableBackupPath, sTable, tTable,
+        converted, truncateIfExists);
+    } else { // incremental backup
+      throw new IOException("Unexpected backup type " + image.getType());
+    }
+
+    // The remaining images are incremental
+    if (it.hasNext()) {
+      List<String> logDirList = new ArrayList<String>();
+      while (it.hasNext()) {
+        BackupImage im = it.next();
+        String logBackupDir = HBackupFileSystem.getLogBackupDir(im.getRootDir(), im.getBackupId());
+        logDirList.add(logBackupDir);
+      }
+      String logDirs = StringUtils.join(logDirList, ",");
+      LOG.info("Restoring '" + sTable + "' to '" + tTable
+          + "' from log dirs: " + logDirs);
+      String[] sarr = new String[logDirList.size()];
+      logDirList.toArray(sarr);
+      Path[] paths = org.apache.hadoop.util.StringUtils.stringToPath(sarr);
+      restoreTool.incrementalRestoreTable(paths, new TableName[] { sTable },
+        new TableName[] { tTable });
+    }
+    LOG.info(sTable + " has been successfully restored to " + tTable);
+  }
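+
+  /*
+   * Note on image ordering: the Iterator<BackupImage> passed to restoreImages()
+   * comes from a TreeSet, which sorts images from oldest to newest. The first
+   * image is therefore always the FULL backup, and every later image is an
+   * incremental backup replayed from its log backup directory. For a chain
+   * (full@t1, incremental@t2, incremental@t3) the effective calls are:
+   *
+   *   fullRestoreTable(image@t1)
+   *   incrementalRestoreTable([logDir(t2), logDir(t3)], ...)
+   */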
+
+  /**
+   * Restore operation, stage 2: resolves backup image dependencies and restores the images.
+   * @param backupManifestMap map of table name to its backup manifest
+   * @param sTableArray the array of tables to be restored
+   * @param tTableArray the array of tables to restore to
+   * @param isOverwrite whether to overwrite (truncate) existing target tables
+   * @throws IOException exception
+   */
+  private void restoreStage(HashMap<TableName, BackupManifest> backupManifestMap,
+      TableName[] sTableArray, TableName[] tTableArray, boolean isOverwrite) throws IOException {
+    TreeSet<BackupImage> restoreImageSet = new TreeSet<BackupImage>();
+    boolean truncateIfExists = isOverwrite;
+    try {
+      for (int i = 0; i < sTableArray.length; i++) {
+        TableName table = sTableArray[i];
+        BackupManifest manifest = backupManifestMap.get(table);
+        // Get the image list of this backup for restore in time order from old to new.
+        List<BackupImage> list = new ArrayList<BackupImage>();
+        list.add(manifest.getBackupImage());
+        List<BackupImage> depList = manifest.getDependentListByTable(table);
+        list.addAll(depList);
+        TreeSet<BackupImage> restoreList = new TreeSet<BackupImage>(list);
+        LOG.debug("need to clear merged Image. to be implemented in future jira");
+        restoreImages(restoreList.iterator(), table, tTableArray[i], truncateIfExists);
+        restoreImageSet.addAll(restoreList);
+
+        if (!restoreImageSet.isEmpty()) {
+          LOG.info("Restore includes the following image(s):");
+          for (BackupImage image : restoreImageSet) {
+            LOG.info("Backup: "
+                + image.getBackupId()
+                + " "
+                + HBackupFileSystem.getTableBackupDir(image.getRootDir(), image.getBackupId(),
+                  table));
+          }
+        }
+      }
+    } catch (Exception e) {
+      LOG.error("Failed", e);
+      throw new IOException(e);
+    }
+    LOG.debug("restoreStage finished");
+  }
+
+  @Override
+  protected Flow executeFromState(final MasterProcedureEnv env, final RestoreTablesState state)
+      throws InterruptedException {
+    if (conf == null) {
+      conf = env.getMasterConfiguration();
+    }
+    if (LOG.isTraceEnabled()) {
+      LOG.trace(this + " execute state=" + state);
+    }
+    if (tTableList == null || tTableList.isEmpty()) {
+      tTableList = sTableList;
+    }
+    Connection conn = env.getMasterServices().getClusterConnection();
+    TableName[] tTableArray = tTableList.toArray(new TableName[tTableList.size()]);
+    try (Admin admin = conn.getAdmin()) {
+      switch (state) {
+        case VALIDATION:
+          // check the target tables
+          checkTargetTables(tTableArray, isOverwrite);
+          setNextState(RestoreTablesState.RESTORE_IMAGES);
+          break;
+        case RESTORE_IMAGES:
+          TableName[] sTableArray = sTableList.toArray(new TableName[sTableList.size()]);
+          HashMap<TableName, BackupManifest> backupManifestMap = new HashMap<>();
+          // check and load backup image manifest for the tables
+          Path rootPath = new Path(targetRootDir);
+          HBackupFileSystem.checkImageManifestExist(backupManifestMap, sTableArray, conf,
+            rootPath, backupId);
+          restoreStage(backupManifestMap, sTableArray, tTableArray, isOverwrite);
+          return Flow.NO_MORE_STATE;
+        default:
+          throw new UnsupportedOperationException("unhandled state=" + state);
+      }
+    } catch (IOException e) {
+      setFailure("restore-table", e);
+    }
+    return Flow.HAS_MORE_STATE;
+  }
+
+  @Override
+  protected void rollbackState(final MasterProcedureEnv env, final RestoreTablesState state)
+      throws IOException {
+  }
+
+  @Override
+  protected RestoreTablesState getState(final int stateId) {
+    return RestoreTablesState.valueOf(stateId);
+  }
+
+  @Override
+  protected int getStateId(final RestoreTablesState state) {
+    return state.getNumber();
+  }
+
+  @Override
+  protected RestoreTablesState getInitialState() {
+    return RestoreTablesState.VALIDATION;
+  }
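+
+  /*
+   * State flow driven by executeFromState() above:
+   *   VALIDATION     -> checkTargetTables(); advance to RESTORE_IMAGES
+   *   RESTORE_IMAGES -> load manifests, restoreStage(); NO_MORE_STATE
+   * Any IOException fails the procedure via setFailure("restore-table", e).
+   */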
+
+  @Override
+  protected void setNextState(final RestoreTablesState state) {
+    if (aborted.get()) {
+      setAbortFailure("restore-table", "abort requested");
+    } else {
+      super.setNextState(state);
+    }
+  }
+
+  @Override
+  public boolean abort(final MasterProcedureEnv env) {
+    aborted.set(true);
+    return true;
+  }
+
+  @Override
+  public void toStringClassDetails(StringBuilder sb) {
+    sb.append(getClass().getSimpleName());
+    sb.append(" (targetRootDir=");
+    sb.append(targetRootDir);
+    sb.append(" isOverwrite=");
+    sb.append(isOverwrite);
+    sb.append(" backupId=");
+    sb.append(backupId);
+    sb.append(")");
+  }
+
+  MasterProtos.RestoreTablesRequest toRestoreTables() {
+    MasterProtos.RestoreTablesRequest.Builder bldr = MasterProtos.RestoreTablesRequest.newBuilder();
+    bldr.setOverwrite(isOverwrite).setBackupId(backupId);
+    bldr.setBackupRootDir(targetRootDir);
+    for (TableName table : sTableList) {
+      bldr.addTables(ProtobufUtil.toProtoTableName(table));
+    }
+    for (TableName table : tTableList) {
+      bldr.addTargetTables(ProtobufUtil.toProtoTableName(table));
+    }
+    return bldr.build();
+  }
+
+  @Override
+  public void serializeStateData(final OutputStream stream) throws IOException {
+    super.serializeStateData(stream);
+
+    MasterProtos.RestoreTablesRequest restoreTables = toRestoreTables();
+    restoreTables.writeDelimitedTo(stream);
+  }
+
+  @Override
+  public void deserializeStateData(final InputStream stream) throws IOException {
+    super.deserializeStateData(stream);
+
+    MasterProtos.RestoreTablesRequest proto =
+        MasterProtos.RestoreTablesRequest.parseDelimitedFrom(stream);
+    backupId = proto.getBackupId();
+    targetRootDir = proto.getBackupRootDir();
+    isOverwrite = proto.getOverwrite();
+    sTableList = new ArrayList<>(proto.getTablesList().size());
+    for (HBaseProtos.TableName table : proto.getTablesList()) {
+      sTableList.add(ProtobufUtil.toTableName(table));
+    }
+    tTableList = new ArrayList<>(proto.getTargetTablesList().size());
+    for (HBaseProtos.TableName table : proto.getTargetTablesList()) {
+      tTableList.add(ProtobufUtil.toTableName(table));
+    }
+  }
+
+  @Override
+  public TableName getTableName() {
+    return TableName.BACKUP_TABLE_NAME;
+  }
+
+  @Override
+  public TableOperationType getTableOperationType() {
+    return TableOperationType.RESTORE;
+  }
+
+  @Override
+  protected boolean acquireLock(final MasterProcedureEnv env) {
+    if (env.waitInitialized(this)) {
+      return false;
+    }
+    return env.getProcedureQueue().tryAcquireTableExclusiveLock(this, TableName.BACKUP_TABLE_NAME);
+  }
+
+  @Override
+  protected void releaseLock(final MasterProcedureEnv env) {
+    env.getProcedureQueue().releaseTableExclusiveLock(this, TableName.BACKUP_TABLE_NAME);
+  }
+}
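The procedure persists its state as a single length-delimited RestoreTablesRequest message. A sketch of the round trip performed by serializeStateData()/deserializeStateData(), assuming the message's fields are all optional in the proto definition (which is not shown in this patch) and using hypothetical values:

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import org.apache.hadoop.hbase.protobuf.generated.MasterProtos;

    public class StateDataRoundTripSketch {
      public static void main(String[] args) throws Exception {
        MasterProtos.RestoreTablesRequest out = MasterProtos.RestoreTablesRequest.newBuilder()
            .setBackupRootDir("hdfs://host:8020/backup") // hypothetical values
            .setBackupId("backup_12345")
            .setOverwrite(false)
            .build();

        // writeDelimitedTo() prefixes the message with its length, so the
        // procedure's state can share a stream with other serialized data.
        ByteArrayOutputStream buf = new ByteArrayOutputStream();
        out.writeDelimitedTo(buf);

        MasterProtos.RestoreTablesRequest in = MasterProtos.RestoreTablesRequest
            .parseDelimitedFrom(new ByteArrayInputStream(buf.toByteArray()));
        System.out.println(in.getBackupId()); // prints backup_12345
      }
    }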