diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/backup/RestoreRequest.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/backup/RestoreRequest.java new file mode 100644 index 0000000..4fd843a --- /dev/null +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/backup/RestoreRequest.java @@ -0,0 +1,100 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hbase.backup; + +import java.util.List; + +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.backup.BackupType; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; + +/** + * POJO class for restore request + */ +@InterfaceAudience.Public +@InterfaceStability.Evolving +public final class RestoreRequest { + String backupId; + List<TableName> tableList; + List<TableName> targetTableList; + String backupRootDir; + boolean dependencyCheckOnly; + boolean autoRestore; + boolean overwrite; + + public RestoreRequest() { + } + + public RestoreRequest setBackupId(String backupId) { + this.backupId = backupId; + return this; + } + public String getBackupId() { + return this.backupId; + } + + public RestoreRequest setTableList(List<TableName> tableList) { + this.tableList = tableList; + return this; + } + public List<TableName> getTableList() { + return this.tableList; + } + + public RestoreRequest setTargetTableList(List<TableName> targetTableList) { + this.targetTableList = targetTableList; + return this; + } + public List<TableName> getTargetTableList() { + return this.targetTableList; + } + + public RestoreRequest setBackupRootDir(String backupRootDir) { + this.backupRootDir = backupRootDir; + return this; + } + public String getBackupRootDir() { + return this.backupRootDir; + } + + public RestoreRequest setDependencyCheckOnly(boolean dependencyCheckOnly) { + this.dependencyCheckOnly = dependencyCheckOnly; + return this; + } + public boolean getDependencyCheckOnly() { + return this.dependencyCheckOnly; + } + + public RestoreRequest setAutoRestore(boolean autoRestore) { + this.autoRestore = autoRestore; + return this; + } + public boolean getAutoRestore() { + return this.autoRestore; + } + + public RestoreRequest setOverwrite(boolean overwrite) { + this.overwrite = overwrite; + return this; + } + public boolean getOverwrite() { + return this.overwrite; + } +} diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java index 02ffc18..6d902e3 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java @@ -38,7 +38,7 @@ import org.apache.hadoop.hbase.TableExistsException; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableNotFoundException; import org.apache.hadoop.hbase.backup.BackupRequest; -import org.apache.hadoop.hbase.backup.BackupType; +import org.apache.hadoop.hbase.backup.RestoreRequest; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.classification.InterfaceStability; import org.apache.hadoop.hbase.client.security.SecurityCapability; @@ -944,6 +944,20 @@ public interface Admin extends Abortable, Closeable { public String backupTables(final BackupRequest request) throws IOException; /** + * Restore operation. Asynchronous version. + * @param request RestoreRequest instance + * @throws IOException + */ + public Future<Void> restoreTablesAsync(final RestoreRequest request) throws IOException; + + /** + * Restore operation. Synchronous version. + * @param request RestoreRequest instance + * @throws IOException + */ + public void restoreTables(final RestoreRequest userRequest) throws IOException; + + /** + * Modify an existing table, more IRB friendly version. Asynchronous operation. This means that + * it may be a while before your schema change is updated across all of the table. + * You can use Future.get(long, TimeUnit) to wait on the operation to complete. 
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java index c245ef4..1ac43f9 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java @@ -1408,6 +1408,13 @@ class ConnectionImplementation implements ClusterConnection, Closeable { } @Override + public MasterProtos.RestoreTablesResponse restoreTables( + RpcController controller, + MasterProtos.RestoreTablesRequest request) throws ServiceException { + return stub.restoreTables(controller, request); + } + + @Override public MasterProtos.AddColumnResponse addColumn( RpcController controller, MasterProtos.AddColumnRequest request) throws ServiceException { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java index 3cc846c..e4f3d23 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java @@ -63,6 +63,7 @@ import org.apache.hadoop.hbase.UnknownRegionException; import org.apache.hadoop.hbase.ZooKeeperConnectionException; import org.apache.hadoop.hbase.backup.BackupRequest; import org.apache.hadoop.hbase.backup.BackupClientUtil; +import org.apache.hadoop.hbase.backup.RestoreRequest; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.classification.InterfaceStability; import org.apache.hadoop.hbase.client.security.SecurityCapability; @@ -148,6 +149,8 @@ import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyTableRespon import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveRegionRequest; import 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotResponse; +import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesResponse; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetNormalizerRunningRequest; @@ -1632,6 +1635,61 @@ public class HBaseAdmin implements Admin { } } + /** + * Restore operation. + * @param request RestoreRequest instance + * @throws IOException + */ + @Override + public Future restoreTablesAsync(final RestoreRequest userRequest) throws IOException { + RestoreTablesResponse response = executeCallable( + new MasterCallable(getConnection()) { + @Override + public RestoreTablesResponse call(int callTimeout) throws ServiceException { + RestoreTablesRequest request = RequestConverter.buildRestoreTablesRequest( + userRequest.getBackupRootDir(), userRequest.getBackupId(), + userRequest.getDependencyCheckOnly(), userRequest.getAutoRestore(), + userRequest.getTableList(), userRequest.getTargetTableList(), + userRequest.getOverwrite()); + return master.restoreTables(null, request); + } + }); + return new TableRestoreFuture(this, TableName.BACKUP_TABLE_NAME, response); + } + + @Override + public void restoreTables(final RestoreRequest userRequest) throws IOException { + get(restoreTablesAsync(userRequest), + syncWaitTimeout, TimeUnit.MILLISECONDS); + } + + private static class TableRestoreFuture extends TableFuture { + public TableRestoreFuture(final HBaseAdmin admin, final TableName tableName, + final RestoreTablesResponse response) { + super(admin, tableName, + (response != null && response.hasProcId()) ? 
response.getProcId() : null); + } + + @Override + public String getOperationType() { + return "RESTORE"; + } + + @Override + protected Void convertResult(final GetProcedureResultResponse response) throws IOException { + if (response.hasException()) { + throw ForeignExceptionUtil.toIOException(response.getException()); + } + return null; + } + + @Override + protected Void postOperationResult(final Void result, + final long deadlineTs) throws IOException, TimeoutException { + return result; + } + } + @Override public Future modifyTable(final TableName tableName, final HTableDescriptor htd) throws IOException { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/RequestConverter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/RequestConverter.java index 5babfb8..cadf12e 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/RequestConverter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/RequestConverter.java @@ -105,6 +105,7 @@ import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyTableReques import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveRegionRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.NormalizeRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.OfflineRegionRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RunCatalogScanRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetNormalizerRunningRequest; @@ -1286,6 +1287,25 @@ public final class RequestConverter { return builder.build(); } + public static RestoreTablesRequest buildRestoreTablesRequest(String backupRootDir, + String backupId, boolean check, boolean autoRestore, List sTableList, + List tTableList, boolean isOverwrite) { + 
RestoreTablesRequest.Builder builder = RestoreTablesRequest.newBuilder(); + builder.setAutoRestore(autoRestore).setBackupId(backupId).setBackupRootDir(backupRootDir); + builder.setDependencyCheckOnly(check).setOverwrite(isOverwrite); + if (sTableList != null) { + for (TableName table : sTableList) { + builder.addTables(ProtobufUtil.toProtoTableName(table)); + } + } + if (tTableList != null) { + for (TableName table : tTableList) { + builder.addTargetTables(ProtobufUtil.toProtoTableName(table)); + } + } + return builder.build(); + } + /** * Creates a protocol buffer GetSchemaAlterStatusRequest * diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java index b5b6b4c..f3e9c08 100644 --- a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java +++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java @@ -90,6 +90,88 @@ public final class MasterProtos { // @@protoc_insertion_point(enum_scope:hbase.pb.MasterSwitchType) } + /** + * Protobuf enum {@code hbase.pb.RestoreTablesState} + */ + public enum RestoreTablesState + implements com.google.protobuf.ProtocolMessageEnum { + /** + * VALIDATION = 1; + */ + VALIDATION(0, 1), + /** + * RESTORE_IMAGES = 2; + */ + RESTORE_IMAGES(1, 2), + ; + + /** + * VALIDATION = 1; + */ + public static final int VALIDATION_VALUE = 1; + /** + * RESTORE_IMAGES = 2; + */ + public static final int RESTORE_IMAGES_VALUE = 2; + + + public final int getNumber() { return value; } + + public static RestoreTablesState valueOf(int value) { + switch (value) { + case 1: return VALIDATION; + case 2: return RESTORE_IMAGES; + default: return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap + internalGetValueMap() { + return internalValueMap; + } + private static com.google.protobuf.Internal.EnumLiteMap + internalValueMap = + new 
com.google.protobuf.Internal.EnumLiteMap() { + public RestoreTablesState findValueByNumber(int number) { + return RestoreTablesState.valueOf(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor + getValueDescriptor() { + return getDescriptor().getValues().get(index); + } + public final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptorForType() { + return getDescriptor(); + } + public static final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.getDescriptor().getEnumTypes().get(1); + } + + private static final RestoreTablesState[] VALUES = values(); + + public static RestoreTablesState valueOf( + com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "EnumValueDescriptor is not for this type."); + } + return VALUES[desc.getIndex()]; + } + + private final int index; + private final int value; + + private RestoreTablesState(int index, int value) { + this.index = index; + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:hbase.pb.RestoreTablesState) + } + public interface AddColumnRequestOrBuilder extends com.google.protobuf.MessageOrBuilder { @@ -59468,20 +59550,1916 @@ public final class MasterProtos { return tables_.get(index); } - // required string target_root_dir = 3; - public static final int TARGET_ROOT_DIR_FIELD_NUMBER = 3; - private java.lang.Object targetRootDir_; + // required string target_root_dir = 3; + public static final int TARGET_ROOT_DIR_FIELD_NUMBER = 3; + private java.lang.Object targetRootDir_; + /** + * required string target_root_dir = 3; + */ + public boolean hasTargetRootDir() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required string target_root_dir = 3; + */ + public java.lang.String getTargetRootDir() { + java.lang.Object ref = targetRootDir_; + if (ref instanceof 
java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + targetRootDir_ = s; + } + return s; + } + } + /** + * required string target_root_dir = 3; + */ + public com.google.protobuf.ByteString + getTargetRootDirBytes() { + java.lang.Object ref = targetRootDir_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + targetRootDir_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + // optional int64 workers = 4; + public static final int WORKERS_FIELD_NUMBER = 4; + private long workers_; + /** + * optional int64 workers = 4; + */ + public boolean hasWorkers() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * optional int64 workers = 4; + */ + public long getWorkers() { + return workers_; + } + + // optional int64 bandwidth = 5; + public static final int BANDWIDTH_FIELD_NUMBER = 5; + private long bandwidth_; + /** + * optional int64 bandwidth = 5; + */ + public boolean hasBandwidth() { + return ((bitField0_ & 0x00000008) == 0x00000008); + } + /** + * optional int64 bandwidth = 5; + */ + public long getBandwidth() { + return bandwidth_; + } + + private void initFields() { + type_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType.FULL; + tables_ = java.util.Collections.emptyList(); + targetRootDir_ = ""; + workers_ = 0L; + bandwidth_ = 0L; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasType()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasTargetRootDir()) { + memoizedIsInitialized = 0; + return false; + } + for (int i = 0; i < getTablesCount(); i++) { + if 
(!getTables(i).isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeEnum(1, type_.getNumber()); + } + for (int i = 0; i < tables_.size(); i++) { + output.writeMessage(2, tables_.get(i)); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeBytes(3, getTargetRootDirBytes()); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + output.writeInt64(4, workers_); + } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + output.writeInt64(5, bandwidth_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeEnumSize(1, type_.getNumber()); + } + for (int i = 0; i < tables_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(2, tables_.get(i)); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(3, getTargetRootDirBytes()); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + size += com.google.protobuf.CodedOutputStream + .computeInt64Size(4, workers_); + } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + size += com.google.protobuf.CodedOutputStream + .computeInt64Size(5, bandwidth_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final 
java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest) obj; + + boolean result = true; + result = result && (hasType() == other.hasType()); + if (hasType()) { + result = result && + (getType() == other.getType()); + } + result = result && getTablesList() + .equals(other.getTablesList()); + result = result && (hasTargetRootDir() == other.hasTargetRootDir()); + if (hasTargetRootDir()) { + result = result && getTargetRootDir() + .equals(other.getTargetRootDir()); + } + result = result && (hasWorkers() == other.hasWorkers()); + if (hasWorkers()) { + result = result && (getWorkers() + == other.getWorkers()); + } + result = result && (hasBandwidth() == other.hasBandwidth()); + if (hasBandwidth()) { + result = result && (getBandwidth() + == other.getBandwidth()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasType()) { + hash = (37 * hash) + TYPE_FIELD_NUMBER; + hash = (53 * hash) + hashEnum(getType()); + } + if (getTablesCount() > 0) { + hash = (37 * hash) + TABLES_FIELD_NUMBER; + hash = (53 * hash) + getTablesList().hashCode(); + } + if (hasTargetRootDir()) { + hash = (37 * hash) + TARGET_ROOT_DIR_FIELD_NUMBER; + hash = (53 * hash) + getTargetRootDir().hashCode(); + } + if (hasWorkers()) { + hash = (37 * hash) + WORKERS_FIELD_NUMBER; + hash = (53 * hash) + hashLong(getWorkers()); + } + if (hasBandwidth()) { + hash = (37 * hash) + BANDWIDTH_FIELD_NUMBER; + hash = (53 * hash) + 
hashLong(getBandwidth()); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest 
parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code hbase.pb.BackupTablesRequest} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_BackupTablesRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_BackupTablesRequest_fieldAccessorTable + 
.ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getTablesFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + type_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType.FULL; + bitField0_ = (bitField0_ & ~0x00000001); + if (tablesBuilder_ == null) { + tables_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000002); + } else { + tablesBuilder_.clear(); + } + targetRootDir_ = ""; + bitField0_ = (bitField0_ & ~0x00000004); + workers_ = 0L; + bitField0_ = (bitField0_ & ~0x00000008); + bandwidth_ = 0L; + bitField0_ = (bitField0_ & ~0x00000010); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_BackupTablesRequest_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest build() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest result = 
buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.type_ = type_; + if (tablesBuilder_ == null) { + if (((bitField0_ & 0x00000002) == 0x00000002)) { + tables_ = java.util.Collections.unmodifiableList(tables_); + bitField0_ = (bitField0_ & ~0x00000002); + } + result.tables_ = tables_; + } else { + result.tables_ = tablesBuilder_.build(); + } + if (((from_bitField0_ & 0x00000004) == 0x00000004)) { + to_bitField0_ |= 0x00000002; + } + result.targetRootDir_ = targetRootDir_; + if (((from_bitField0_ & 0x00000008) == 0x00000008)) { + to_bitField0_ |= 0x00000004; + } + result.workers_ = workers_; + if (((from_bitField0_ & 0x00000010) == 0x00000010)) { + to_bitField0_ |= 0x00000008; + } + result.bandwidth_ = bandwidth_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest.getDefaultInstance()) return this; + if (other.hasType()) { + setType(other.getType()); + } + if (tablesBuilder_ == null) { + if (!other.tables_.isEmpty()) { 
+ if (tables_.isEmpty()) { + tables_ = other.tables_; + bitField0_ = (bitField0_ & ~0x00000002); + } else { + ensureTablesIsMutable(); + tables_.addAll(other.tables_); + } + onChanged(); + } + } else { + if (!other.tables_.isEmpty()) { + if (tablesBuilder_.isEmpty()) { + tablesBuilder_.dispose(); + tablesBuilder_ = null; + tables_ = other.tables_; + bitField0_ = (bitField0_ & ~0x00000002); + tablesBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? + getTablesFieldBuilder() : null; + } else { + tablesBuilder_.addAllMessages(other.tables_); + } + } + } + if (other.hasTargetRootDir()) { + bitField0_ |= 0x00000004; + targetRootDir_ = other.targetRootDir_; + onChanged(); + } + if (other.hasWorkers()) { + setWorkers(other.getWorkers()); + } + if (other.hasBandwidth()) { + setBandwidth(other.getBandwidth()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasType()) { + + return false; + } + if (!hasTargetRootDir()) { + + return false; + } + for (int i = 0; i < getTablesCount(); i++) { + if (!getTables(i).isInitialized()) { + + return false; + } + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required .hbase.pb.BackupType type = 1; + private org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType type_ = 
org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType.FULL; + /** + * required .hbase.pb.BackupType type = 1; + */ + public boolean hasType() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .hbase.pb.BackupType type = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType getType() { + return type_; + } + /** + * required .hbase.pb.BackupType type = 1; + */ + public Builder setType(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + type_ = value; + onChanged(); + return this; + } + /** + * required .hbase.pb.BackupType type = 1; + */ + public Builder clearType() { + bitField0_ = (bitField0_ & ~0x00000001); + type_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType.FULL; + onChanged(); + return this; + } + + // repeated .hbase.pb.TableName tables = 2; + private java.util.List tables_ = + java.util.Collections.emptyList(); + private void ensureTablesIsMutable() { + if (!((bitField0_ & 0x00000002) == 0x00000002)) { + tables_ = new java.util.ArrayList(tables_); + bitField0_ |= 0x00000002; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> tablesBuilder_; + + /** + * repeated .hbase.pb.TableName tables = 2; + */ + public java.util.List getTablesList() { + if (tablesBuilder_ == null) { + return java.util.Collections.unmodifiableList(tables_); + } else { + return tablesBuilder_.getMessageList(); + } + } + /** + * repeated .hbase.pb.TableName tables = 2; + */ + public int getTablesCount() { + if (tablesBuilder_ == null) { + return tables_.size(); + } else { + return tablesBuilder_.getCount(); + } + } + /** + * repeated .hbase.pb.TableName tables 
= 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTables(int index) { + if (tablesBuilder_ == null) { + return tables_.get(index); + } else { + return tablesBuilder_.getMessage(index); + } + } + /** + * repeated .hbase.pb.TableName tables = 2; + */ + public Builder setTables( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { + if (tablesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureTablesIsMutable(); + tables_.set(index, value); + onChanged(); + } else { + tablesBuilder_.setMessage(index, value); + } + return this; + } + /** + * repeated .hbase.pb.TableName tables = 2; + */ + public Builder setTables( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) { + if (tablesBuilder_ == null) { + ensureTablesIsMutable(); + tables_.set(index, builderForValue.build()); + onChanged(); + } else { + tablesBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.TableName tables = 2; + */ + public Builder addTables(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { + if (tablesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureTablesIsMutable(); + tables_.add(value); + onChanged(); + } else { + tablesBuilder_.addMessage(value); + } + return this; + } + /** + * repeated .hbase.pb.TableName tables = 2; + */ + public Builder addTables( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { + if (tablesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureTablesIsMutable(); + tables_.add(index, value); + onChanged(); + } else { + tablesBuilder_.addMessage(index, value); + } + return this; + } + /** + * repeated .hbase.pb.TableName tables = 2; + */ + public Builder addTables( + 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) { + if (tablesBuilder_ == null) { + ensureTablesIsMutable(); + tables_.add(builderForValue.build()); + onChanged(); + } else { + tablesBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.TableName tables = 2; + */ + public Builder addTables( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) { + if (tablesBuilder_ == null) { + ensureTablesIsMutable(); + tables_.add(index, builderForValue.build()); + onChanged(); + } else { + tablesBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.TableName tables = 2; + */ + public Builder addAllTables( + java.lang.Iterable values) { + if (tablesBuilder_ == null) { + ensureTablesIsMutable(); + super.addAll(values, tables_); + onChanged(); + } else { + tablesBuilder_.addAllMessages(values); + } + return this; + } + /** + * repeated .hbase.pb.TableName tables = 2; + */ + public Builder clearTables() { + if (tablesBuilder_ == null) { + tables_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + } else { + tablesBuilder_.clear(); + } + return this; + } + /** + * repeated .hbase.pb.TableName tables = 2; + */ + public Builder removeTables(int index) { + if (tablesBuilder_ == null) { + ensureTablesIsMutable(); + tables_.remove(index); + onChanged(); + } else { + tablesBuilder_.remove(index); + } + return this; + } + /** + * repeated .hbase.pb.TableName tables = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder getTablesBuilder( + int index) { + return getTablesFieldBuilder().getBuilder(index); + } + /** + * repeated .hbase.pb.TableName tables = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTablesOrBuilder( + int index) { + if (tablesBuilder_ == null) { + return 
tables_.get(index); } else { + return tablesBuilder_.getMessageOrBuilder(index); + } + } + /** + * repeated .hbase.pb.TableName tables = 2; + */ + public java.util.List + getTablesOrBuilderList() { + if (tablesBuilder_ != null) { + return tablesBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(tables_); + } + } + /** + * repeated .hbase.pb.TableName tables = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder addTablesBuilder() { + return getTablesFieldBuilder().addBuilder( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance()); + } + /** + * repeated .hbase.pb.TableName tables = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder addTablesBuilder( + int index) { + return getTablesFieldBuilder().addBuilder( + index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance()); + } + /** + * repeated .hbase.pb.TableName tables = 2; + */ + public java.util.List + getTablesBuilderList() { + return getTablesFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> + getTablesFieldBuilder() { + if (tablesBuilder_ == null) { + tablesBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder>( + tables_, + ((bitField0_ & 0x00000002) == 0x00000002), + getParentForChildren(), + isClean()); + tables_ = null; + } + return tablesBuilder_; + } + + // required string target_root_dir = 3; + private java.lang.Object targetRootDir_ = ""; + /** + * 
required string target_root_dir = 3; + */ + public boolean hasTargetRootDir() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * required string target_root_dir = 3; + */ + public java.lang.String getTargetRootDir() { + java.lang.Object ref = targetRootDir_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + targetRootDir_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * required string target_root_dir = 3; + */ + public com.google.protobuf.ByteString + getTargetRootDirBytes() { + java.lang.Object ref = targetRootDir_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + targetRootDir_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * required string target_root_dir = 3; + */ + public Builder setTargetRootDir( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000004; + targetRootDir_ = value; + onChanged(); + return this; + } + /** + * required string target_root_dir = 3; + */ + public Builder clearTargetRootDir() { + bitField0_ = (bitField0_ & ~0x00000004); + targetRootDir_ = getDefaultInstance().getTargetRootDir(); + onChanged(); + return this; + } + /** + * required string target_root_dir = 3; + */ + public Builder setTargetRootDirBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000004; + targetRootDir_ = value; + onChanged(); + return this; + } + + // optional int64 workers = 4; + private long workers_ ; + /** + * optional int64 workers = 4; + */ + public boolean hasWorkers() { + return ((bitField0_ & 0x00000008) == 0x00000008); + } + /** + * optional int64 workers = 4; + */ + public long getWorkers() { + return workers_; + } + /** + * optional int64 workers = 4; + */ 
+ public Builder setWorkers(long value) { + bitField0_ |= 0x00000008; + workers_ = value; + onChanged(); + return this; + } + /** + * optional int64 workers = 4; + */ + public Builder clearWorkers() { + bitField0_ = (bitField0_ & ~0x00000008); + workers_ = 0L; + onChanged(); + return this; + } + + // optional int64 bandwidth = 5; + private long bandwidth_ ; + /** + * optional int64 bandwidth = 5; + */ + public boolean hasBandwidth() { + return ((bitField0_ & 0x00000010) == 0x00000010); + } + /** + * optional int64 bandwidth = 5; + */ + public long getBandwidth() { + return bandwidth_; + } + /** + * optional int64 bandwidth = 5; + */ + public Builder setBandwidth(long value) { + bitField0_ |= 0x00000010; + bandwidth_ = value; + onChanged(); + return this; + } + /** + * optional int64 bandwidth = 5; + */ + public Builder clearBandwidth() { + bitField0_ = (bitField0_ & ~0x00000010); + bandwidth_ = 0L; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:hbase.pb.BackupTablesRequest) + } + + static { + defaultInstance = new BackupTablesRequest(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:hbase.pb.BackupTablesRequest) + } + + public interface BackupTablesResponseOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // optional uint64 proc_id = 1; + /** + * optional uint64 proc_id = 1; + */ + boolean hasProcId(); + /** + * optional uint64 proc_id = 1; + */ + long getProcId(); + + // optional string backup_id = 2; + /** + * optional string backup_id = 2; + */ + boolean hasBackupId(); + /** + * optional string backup_id = 2; + */ + java.lang.String getBackupId(); + /** + * optional string backup_id = 2; + */ + com.google.protobuf.ByteString + getBackupIdBytes(); + } + /** + * Protobuf type {@code hbase.pb.BackupTablesResponse} + */ + public static final class BackupTablesResponse extends + com.google.protobuf.GeneratedMessage + implements BackupTablesResponseOrBuilder { + // Use 
BackupTablesResponse.newBuilder() to construct. + private BackupTablesResponse(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private BackupTablesResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final BackupTablesResponse defaultInstance; + public static BackupTablesResponse getDefaultInstance() { + return defaultInstance; + } + + public BackupTablesResponse getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private BackupTablesResponse( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 8: { + bitField0_ |= 0x00000001; + procId_ = input.readUInt64(); + break; + } + case 18: { + bitField0_ |= 0x00000002; + backupId_ = input.readBytes(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + 
getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_BackupTablesResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_BackupTablesResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public BackupTablesResponse parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new BackupTablesResponse(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // optional uint64 proc_id = 1; + public static final int PROC_ID_FIELD_NUMBER = 1; + private long procId_; + /** + * optional uint64 proc_id = 1; + */ + public boolean hasProcId() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional uint64 proc_id = 1; + */ + public long getProcId() { + return procId_; + } + + // optional string backup_id = 2; + public static final int BACKUP_ID_FIELD_NUMBER = 2; + private java.lang.Object backupId_; + /** + * optional string backup_id = 2; + */ + public boolean hasBackupId() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * optional string backup_id = 2; + */ + public java.lang.String getBackupId() { + java.lang.Object ref = backupId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) 
ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + backupId_ = s; + } + return s; + } + } + /** + * optional string backup_id = 2; + */ + public com.google.protobuf.ByteString + getBackupIdBytes() { + java.lang.Object ref = backupId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + backupId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private void initFields() { + procId_ = 0L; + backupId_ = ""; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeUInt64(1, procId_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeBytes(2, getBackupIdBytes()); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt64Size(1, procId_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(2, getBackupIdBytes()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == 
this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse) obj; + + boolean result = true; + result = result && (hasProcId() == other.hasProcId()); + if (hasProcId()) { + result = result && (getProcId() + == other.getProcId()); + } + result = result && (hasBackupId() == other.hasBackupId()); + if (hasBackupId()) { + result = result && getBackupId() + .equals(other.getBackupId()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasProcId()) { + hash = (37 * hash) + PROC_ID_FIELD_NUMBER; + hash = (53 * hash) + hashLong(getProcId()); + } + if (hasBackupId()) { + hash = (37 * hash) + BACKUP_ID_FIELD_NUMBER; + hash = (53 * hash) + getBackupId().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse parseFrom(byte[] data) 
+ throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public 
Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code hbase.pb.BackupTablesResponse} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_BackupTablesResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_BackupTablesResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + procId_ = 0L; + bitField0_ = (bitField0_ & 
~0x00000001); + backupId_ = ""; + bitField0_ = (bitField0_ & ~0x00000002); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_BackupTablesResponse_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse build() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.procId_ = procId_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.backupId_ = backupId_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder 
mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse.getDefaultInstance()) return this; + if (other.hasProcId()) { + setProcId(other.getProcId()); + } + if (other.hasBackupId()) { + bitField0_ |= 0x00000002; + backupId_ = other.backupId_; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // optional uint64 proc_id = 1; + private long procId_ ; + /** + * optional uint64 proc_id = 1; + */ + public boolean hasProcId() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional uint64 proc_id = 1; + */ + public long getProcId() { + return procId_; + } + /** + * optional uint64 proc_id = 1; + */ + public Builder setProcId(long value) { + bitField0_ |= 0x00000001; + procId_ = value; + onChanged(); + return this; + } + /** + * optional uint64 proc_id = 1; + */ + public Builder clearProcId() { + bitField0_ = (bitField0_ & ~0x00000001); + procId_ = 0L; + onChanged(); + return this; + } + + // optional string backup_id = 2; + private java.lang.Object backupId_ = ""; + /** + * optional string backup_id = 2; + */ + public boolean hasBackupId() { + return 
((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * optional string backup_id = 2; + */ + public java.lang.String getBackupId() { + java.lang.Object ref = backupId_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + backupId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * optional string backup_id = 2; + */ + public com.google.protobuf.ByteString + getBackupIdBytes() { + java.lang.Object ref = backupId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + backupId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * optional string backup_id = 2; + */ + public Builder setBackupId( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000002; + backupId_ = value; + onChanged(); + return this; + } + /** + * optional string backup_id = 2; + */ + public Builder clearBackupId() { + bitField0_ = (bitField0_ & ~0x00000002); + backupId_ = getDefaultInstance().getBackupId(); + onChanged(); + return this; + } + /** + * optional string backup_id = 2; + */ + public Builder setBackupIdBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000002; + backupId_ = value; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:hbase.pb.BackupTablesResponse) + } + + static { + defaultInstance = new BackupTablesResponse(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:hbase.pb.BackupTablesResponse) + } + + public interface RestoreTablesRequestOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required string backup_id = 1; + /** + * required string backup_id = 1; + */ + boolean hasBackupId(); + /** + * required string backup_id = 1; + 
*/ + java.lang.String getBackupId(); + /** + * required string backup_id = 1; + */ + com.google.protobuf.ByteString + getBackupIdBytes(); + + // repeated .hbase.pb.TableName tables = 2; + /** + * repeated .hbase.pb.TableName tables = 2; + */ + java.util.List + getTablesList(); + /** + * repeated .hbase.pb.TableName tables = 2; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTables(int index); + /** + * repeated .hbase.pb.TableName tables = 2; + */ + int getTablesCount(); + /** + * repeated .hbase.pb.TableName tables = 2; + */ + java.util.List + getTablesOrBuilderList(); + /** + * repeated .hbase.pb.TableName tables = 2; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTablesOrBuilder( + int index); + + // repeated .hbase.pb.TableName target_tables = 3; + /** + * repeated .hbase.pb.TableName target_tables = 3; + */ + java.util.List + getTargetTablesList(); + /** + * repeated .hbase.pb.TableName target_tables = 3; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTargetTables(int index); + /** + * repeated .hbase.pb.TableName target_tables = 3; + */ + int getTargetTablesCount(); + /** + * repeated .hbase.pb.TableName target_tables = 3; + */ + java.util.List + getTargetTablesOrBuilderList(); + /** + * repeated .hbase.pb.TableName target_tables = 3; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTargetTablesOrBuilder( + int index); + + // required string backup_root_dir = 4; + /** + * required string backup_root_dir = 4; + */ + boolean hasBackupRootDir(); + /** + * required string backup_root_dir = 4; + */ + java.lang.String getBackupRootDir(); + /** + * required string backup_root_dir = 4; + */ + com.google.protobuf.ByteString + getBackupRootDirBytes(); + + // optional bool dependency_check_only = 5; + /** + * optional bool dependency_check_only = 5; + */ + boolean hasDependencyCheckOnly(); + /** + * optional bool dependency_check_only = 5; + */ 
+ boolean getDependencyCheckOnly(); + + // optional bool auto_restore = 6; + /** + * optional bool auto_restore = 6; + */ + boolean hasAutoRestore(); + /** + * optional bool auto_restore = 6; + */ + boolean getAutoRestore(); + + // optional bool overwrite = 7; + /** + * optional bool overwrite = 7; + */ + boolean hasOverwrite(); + /** + * optional bool overwrite = 7; + */ + boolean getOverwrite(); + } + /** + * Protobuf type {@code hbase.pb.RestoreTablesRequest} + */ + public static final class RestoreTablesRequest extends + com.google.protobuf.GeneratedMessage + implements RestoreTablesRequestOrBuilder { + // Use RestoreTablesRequest.newBuilder() to construct. + private RestoreTablesRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private RestoreTablesRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final RestoreTablesRequest defaultInstance; + public static RestoreTablesRequest getDefaultInstance() { + return defaultInstance; + } + + public RestoreTablesRequest getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private RestoreTablesRequest( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; 
+ } + break; + } + case 10: { + bitField0_ |= 0x00000001; + backupId_ = input.readBytes(); + break; + } + case 18: { + if (!((mutable_bitField0_ & 0x00000002) == 0x00000002)) { + tables_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000002; + } + tables_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.PARSER, extensionRegistry)); + break; + } + case 26: { + if (!((mutable_bitField0_ & 0x00000004) == 0x00000004)) { + targetTables_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000004; + } + targetTables_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.PARSER, extensionRegistry)); + break; + } + case 34: { + bitField0_ |= 0x00000002; + backupRootDir_ = input.readBytes(); + break; + } + case 40: { + bitField0_ |= 0x00000004; + dependencyCheckOnly_ = input.readBool(); + break; + } + case 48: { + bitField0_ |= 0x00000008; + autoRestore_ = input.readBool(); + break; + } + case 56: { + bitField0_ |= 0x00000010; + overwrite_ = input.readBool(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000002) == 0x00000002)) { + tables_ = java.util.Collections.unmodifiableList(tables_); + } + if (((mutable_bitField0_ & 0x00000004) == 0x00000004)) { + targetTables_ = java.util.Collections.unmodifiableList(targetTables_); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_RestoreTablesRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + 
return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_RestoreTablesRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesRequest.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public RestoreTablesRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new RestoreTablesRequest(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required string backup_id = 1; + public static final int BACKUP_ID_FIELD_NUMBER = 1; + private java.lang.Object backupId_; + /** + * required string backup_id = 1; + */ + public boolean hasBackupId() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string backup_id = 1; + */ + public java.lang.String getBackupId() { + java.lang.Object ref = backupId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + backupId_ = s; + } + return s; + } + } + /** + * required string backup_id = 1; + */ + public com.google.protobuf.ByteString + getBackupIdBytes() { + java.lang.Object ref = backupId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + backupId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + // repeated .hbase.pb.TableName tables = 2; + public static final int 
TABLES_FIELD_NUMBER = 2; + private java.util.List tables_; + /** + * repeated .hbase.pb.TableName tables = 2; + */ + public java.util.List getTablesList() { + return tables_; + } + /** + * repeated .hbase.pb.TableName tables = 2; + */ + public java.util.List + getTablesOrBuilderList() { + return tables_; + } + /** + * repeated .hbase.pb.TableName tables = 2; + */ + public int getTablesCount() { + return tables_.size(); + } + /** + * repeated .hbase.pb.TableName tables = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTables(int index) { + return tables_.get(index); + } + /** + * repeated .hbase.pb.TableName tables = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTablesOrBuilder( + int index) { + return tables_.get(index); + } + + // repeated .hbase.pb.TableName target_tables = 3; + public static final int TARGET_TABLES_FIELD_NUMBER = 3; + private java.util.List targetTables_; /** - * required string target_root_dir = 3; + * repeated .hbase.pb.TableName target_tables = 3; */ - public boolean hasTargetRootDir() { + public java.util.List getTargetTablesList() { + return targetTables_; + } + /** + * repeated .hbase.pb.TableName target_tables = 3; + */ + public java.util.List + getTargetTablesOrBuilderList() { + return targetTables_; + } + /** + * repeated .hbase.pb.TableName target_tables = 3; + */ + public int getTargetTablesCount() { + return targetTables_.size(); + } + /** + * repeated .hbase.pb.TableName target_tables = 3; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTargetTables(int index) { + return targetTables_.get(index); + } + /** + * repeated .hbase.pb.TableName target_tables = 3; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTargetTablesOrBuilder( + int index) { + return targetTables_.get(index); + } + + // required string backup_root_dir = 4; + public static final int 
BACKUP_ROOT_DIR_FIELD_NUMBER = 4; + private java.lang.Object backupRootDir_; + /** + * required string backup_root_dir = 4; + */ + public boolean hasBackupRootDir() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** - * required string target_root_dir = 3; + * required string backup_root_dir = 4; */ - public java.lang.String getTargetRootDir() { - java.lang.Object ref = targetRootDir_; + public java.lang.String getBackupRootDir() { + java.lang.Object ref = backupRootDir_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { @@ -59489,77 +61467,95 @@ public final class MasterProtos { (com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { - targetRootDir_ = s; + backupRootDir_ = s; } return s; } } /** - * required string target_root_dir = 3; + * required string backup_root_dir = 4; */ public com.google.protobuf.ByteString - getTargetRootDirBytes() { - java.lang.Object ref = targetRootDir_; + getBackupRootDirBytes() { + java.lang.Object ref = backupRootDir_; if (ref instanceof java.lang.String) { com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); - targetRootDir_ = b; + backupRootDir_ = b; return b; } else { return (com.google.protobuf.ByteString) ref; } } - // optional int64 workers = 4; - public static final int WORKERS_FIELD_NUMBER = 4; - private long workers_; + // optional bool dependency_check_only = 5; + public static final int DEPENDENCY_CHECK_ONLY_FIELD_NUMBER = 5; + private boolean dependencyCheckOnly_; /** - * optional int64 workers = 4; + * optional bool dependency_check_only = 5; */ - public boolean hasWorkers() { + public boolean hasDependencyCheckOnly() { return ((bitField0_ & 0x00000004) == 0x00000004); } /** - * optional int64 workers = 4; + * optional bool dependency_check_only = 5; */ - public long getWorkers() { - return workers_; + public boolean getDependencyCheckOnly() { + return dependencyCheckOnly_; } - // 
optional int64 bandwidth = 5; - public static final int BANDWIDTH_FIELD_NUMBER = 5; - private long bandwidth_; + // optional bool auto_restore = 6; + public static final int AUTO_RESTORE_FIELD_NUMBER = 6; + private boolean autoRestore_; /** - * optional int64 bandwidth = 5; + * optional bool auto_restore = 6; */ - public boolean hasBandwidth() { + public boolean hasAutoRestore() { return ((bitField0_ & 0x00000008) == 0x00000008); } /** - * optional int64 bandwidth = 5; + * optional bool auto_restore = 6; */ - public long getBandwidth() { - return bandwidth_; + public boolean getAutoRestore() { + return autoRestore_; + } + + // optional bool overwrite = 7; + public static final int OVERWRITE_FIELD_NUMBER = 7; + private boolean overwrite_; + /** + * optional bool overwrite = 7; + */ + public boolean hasOverwrite() { + return ((bitField0_ & 0x00000010) == 0x00000010); + } + /** + * optional bool overwrite = 7; + */ + public boolean getOverwrite() { + return overwrite_; } private void initFields() { - type_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType.FULL; + backupId_ = ""; tables_ = java.util.Collections.emptyList(); - targetRootDir_ = ""; - workers_ = 0L; - bandwidth_ = 0L; + targetTables_ = java.util.Collections.emptyList(); + backupRootDir_ = ""; + dependencyCheckOnly_ = false; + autoRestore_ = false; + overwrite_ = false; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; - if (!hasType()) { + if (!hasBackupId()) { memoizedIsInitialized = 0; return false; } - if (!hasTargetRootDir()) { + if (!hasBackupRootDir()) { memoizedIsInitialized = 0; return false; } @@ -59569,6 +61565,12 @@ public final class MasterProtos { return false; } } + for (int i = 0; i < getTargetTablesCount(); i++) { + if (!getTargetTables(i).isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } memoizedIsInitialized = 1; return 
true; } @@ -59577,19 +61579,25 @@ public final class MasterProtos { throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeEnum(1, type_.getNumber()); + output.writeBytes(1, getBackupIdBytes()); } for (int i = 0; i < tables_.size(); i++) { output.writeMessage(2, tables_.get(i)); } + for (int i = 0; i < targetTables_.size(); i++) { + output.writeMessage(3, targetTables_.get(i)); + } if (((bitField0_ & 0x00000002) == 0x00000002)) { - output.writeBytes(3, getTargetRootDirBytes()); + output.writeBytes(4, getBackupRootDirBytes()); } if (((bitField0_ & 0x00000004) == 0x00000004)) { - output.writeInt64(4, workers_); + output.writeBool(5, dependencyCheckOnly_); } if (((bitField0_ & 0x00000008) == 0x00000008)) { - output.writeInt64(5, bandwidth_); + output.writeBool(6, autoRestore_); + } + if (((bitField0_ & 0x00000010) == 0x00000010)) { + output.writeBool(7, overwrite_); } getUnknownFields().writeTo(output); } @@ -59602,23 +61610,31 @@ public final class MasterProtos { size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream - .computeEnumSize(1, type_.getNumber()); + .computeBytesSize(1, getBackupIdBytes()); } for (int i = 0; i < tables_.size(); i++) { size += com.google.protobuf.CodedOutputStream .computeMessageSize(2, tables_.get(i)); } + for (int i = 0; i < targetTables_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(3, targetTables_.get(i)); + } if (((bitField0_ & 0x00000002) == 0x00000002)) { size += com.google.protobuf.CodedOutputStream - .computeBytesSize(3, getTargetRootDirBytes()); + .computeBytesSize(4, getBackupRootDirBytes()); } if (((bitField0_ & 0x00000004) == 0x00000004)) { size += com.google.protobuf.CodedOutputStream - .computeInt64Size(4, workers_); + .computeBoolSize(5, dependencyCheckOnly_); } if (((bitField0_ & 0x00000008) == 0x00000008)) { size += com.google.protobuf.CodedOutputStream - .computeInt64Size(5, 
bandwidth_); + .computeBoolSize(6, autoRestore_); + } + if (((bitField0_ & 0x00000010) == 0x00000010)) { + size += com.google.protobuf.CodedOutputStream + .computeBoolSize(7, overwrite_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; @@ -59637,33 +61653,40 @@ public final class MasterProtos { if (obj == this) { return true; } - if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest)) { + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesRequest)) { return super.equals(obj); } - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest) obj; + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesRequest other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesRequest) obj; boolean result = true; - result = result && (hasType() == other.hasType()); - if (hasType()) { - result = result && - (getType() == other.getType()); + result = result && (hasBackupId() == other.hasBackupId()); + if (hasBackupId()) { + result = result && getBackupId() + .equals(other.getBackupId()); } result = result && getTablesList() .equals(other.getTablesList()); - result = result && (hasTargetRootDir() == other.hasTargetRootDir()); - if (hasTargetRootDir()) { - result = result && getTargetRootDir() - .equals(other.getTargetRootDir()); - } - result = result && (hasWorkers() == other.hasWorkers()); - if (hasWorkers()) { - result = result && (getWorkers() - == other.getWorkers()); - } - result = result && (hasBandwidth() == other.hasBandwidth()); - if (hasBandwidth()) { - result = result && (getBandwidth() - == other.getBandwidth()); + result = result && getTargetTablesList() + .equals(other.getTargetTablesList()); + result = result && (hasBackupRootDir() == other.hasBackupRootDir()); + if (hasBackupRootDir()) { + result = result && 
getBackupRootDir() + .equals(other.getBackupRootDir()); + } + result = result && (hasDependencyCheckOnly() == other.hasDependencyCheckOnly()); + if (hasDependencyCheckOnly()) { + result = result && (getDependencyCheckOnly() + == other.getDependencyCheckOnly()); + } + result = result && (hasAutoRestore() == other.hasAutoRestore()); + if (hasAutoRestore()) { + result = result && (getAutoRestore() + == other.getAutoRestore()); + } + result = result && (hasOverwrite() == other.hasOverwrite()); + if (hasOverwrite()) { + result = result && (getOverwrite() + == other.getOverwrite()); } result = result && getUnknownFields().equals(other.getUnknownFields()); @@ -59678,78 +61701,86 @@ public final class MasterProtos { } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasType()) { - hash = (37 * hash) + TYPE_FIELD_NUMBER; - hash = (53 * hash) + hashEnum(getType()); + if (hasBackupId()) { + hash = (37 * hash) + BACKUP_ID_FIELD_NUMBER; + hash = (53 * hash) + getBackupId().hashCode(); } if (getTablesCount() > 0) { hash = (37 * hash) + TABLES_FIELD_NUMBER; hash = (53 * hash) + getTablesList().hashCode(); } - if (hasTargetRootDir()) { - hash = (37 * hash) + TARGET_ROOT_DIR_FIELD_NUMBER; - hash = (53 * hash) + getTargetRootDir().hashCode(); + if (getTargetTablesCount() > 0) { + hash = (37 * hash) + TARGET_TABLES_FIELD_NUMBER; + hash = (53 * hash) + getTargetTablesList().hashCode(); } - if (hasWorkers()) { - hash = (37 * hash) + WORKERS_FIELD_NUMBER; - hash = (53 * hash) + hashLong(getWorkers()); + if (hasBackupRootDir()) { + hash = (37 * hash) + BACKUP_ROOT_DIR_FIELD_NUMBER; + hash = (53 * hash) + getBackupRootDir().hashCode(); } - if (hasBandwidth()) { - hash = (37 * hash) + BANDWIDTH_FIELD_NUMBER; - hash = (53 * hash) + hashLong(getBandwidth()); + if (hasDependencyCheckOnly()) { + hash = (37 * hash) + DEPENDENCY_CHECK_ONLY_FIELD_NUMBER; + hash = (53 * hash) + hashBoolean(getDependencyCheckOnly()); + } + if (hasAutoRestore()) { + hash = (37 * hash) + 
AUTO_RESTORE_FIELD_NUMBER; + hash = (53 * hash) + hashBoolean(getAutoRestore()); + } + if (hasOverwrite()) { + hash = (37 * hash) + OVERWRITE_FIELD_NUMBER; + hash = (53 * hash) + hashBoolean(getOverwrite()); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesRequest parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesRequest parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest parseFrom(byte[] data) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesRequest parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesRequest parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest parseFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesRequest 
parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesRequest parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest parseDelimitedFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesRequest parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest parseDelimitedFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesRequest parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesRequest parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesRequest parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { @@ -59758,7 +61789,7 @@ public final class MasterProtos { public static Builder newBuilder() { return 
Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest prototype) { + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesRequest prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @@ -59770,24 +61801,24 @@ public final class MasterProtos { return builder; } /** - * Protobuf type {@code hbase.pb.BackupTablesRequest} + * Protobuf type {@code hbase.pb.RestoreTablesRequest} */ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequestOrBuilder { + implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesRequestOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_BackupTablesRequest_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_RestoreTablesRequest_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_BackupTablesRequest_fieldAccessorTable + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_RestoreTablesRequest_fieldAccessorTable .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest.Builder.class); + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesRequest.Builder.class); } - // 
Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest.newBuilder() + // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesRequest.newBuilder() private Builder() { maybeForceBuilderInitialization(); } @@ -59800,6 +61831,7 @@ public final class MasterProtos { private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { getTablesFieldBuilder(); + getTargetTablesFieldBuilder(); } } private static Builder create() { @@ -59808,7 +61840,7 @@ public final class MasterProtos { public Builder clear() { super.clear(); - type_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType.FULL; + backupId_ = ""; bitField0_ = (bitField0_ & ~0x00000001); if (tablesBuilder_ == null) { tables_ = java.util.Collections.emptyList(); @@ -59816,12 +61848,20 @@ public final class MasterProtos { } else { tablesBuilder_.clear(); } - targetRootDir_ = ""; - bitField0_ = (bitField0_ & ~0x00000004); - workers_ = 0L; + if (targetTablesBuilder_ == null) { + targetTables_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000004); + } else { + targetTablesBuilder_.clear(); + } + backupRootDir_ = ""; bitField0_ = (bitField0_ & ~0x00000008); - bandwidth_ = 0L; + dependencyCheckOnly_ = false; bitField0_ = (bitField0_ & ~0x00000010); + autoRestore_ = false; + bitField0_ = (bitField0_ & ~0x00000020); + overwrite_ = false; + bitField0_ = (bitField0_ & ~0x00000040); return this; } @@ -59831,29 +61871,29 @@ public final class MasterProtos { public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_BackupTablesRequest_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_RestoreTablesRequest_descriptor; } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest 
getDefaultInstanceForType() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest.getDefaultInstance(); + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesRequest getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesRequest.getDefaultInstance(); } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest build() { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest result = buildPartial(); + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesRequest build() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesRequest result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest buildPartial() { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest(this); + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesRequest buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesRequest result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesRequest(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } - result.type_ = type_; + result.backupId_ = backupId_; if (tablesBuilder_ == null) { if (((bitField0_ & 0x00000002) == 0x00000002)) { tables_ = java.util.Collections.unmodifiableList(tables_); @@ -59863,36 +61903,51 @@ public final class MasterProtos { } else { result.tables_ = tablesBuilder_.build(); } - if (((from_bitField0_ & 0x00000004) == 0x00000004)) { - to_bitField0_ |= 0x00000002; + if (targetTablesBuilder_ == null) { + if 
(((bitField0_ & 0x00000004) == 0x00000004)) { + targetTables_ = java.util.Collections.unmodifiableList(targetTables_); + bitField0_ = (bitField0_ & ~0x00000004); + } + result.targetTables_ = targetTables_; + } else { + result.targetTables_ = targetTablesBuilder_.build(); } - result.targetRootDir_ = targetRootDir_; if (((from_bitField0_ & 0x00000008) == 0x00000008)) { - to_bitField0_ |= 0x00000004; + to_bitField0_ |= 0x00000002; } - result.workers_ = workers_; + result.backupRootDir_ = backupRootDir_; if (((from_bitField0_ & 0x00000010) == 0x00000010)) { + to_bitField0_ |= 0x00000004; + } + result.dependencyCheckOnly_ = dependencyCheckOnly_; + if (((from_bitField0_ & 0x00000020) == 0x00000020)) { to_bitField0_ |= 0x00000008; } - result.bandwidth_ = bandwidth_; + result.autoRestore_ = autoRestore_; + if (((from_bitField0_ & 0x00000040) == 0x00000040)) { + to_bitField0_ |= 0x00000010; + } + result.overwrite_ = overwrite_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest) { - return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest)other); + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesRequest) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesRequest)other); } else { super.mergeFrom(other); return this; } } - public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest other) { - if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest.getDefaultInstance()) return this; - if (other.hasType()) { - setType(other.getType()); + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesRequest other) { + if (other == 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesRequest.getDefaultInstance()) return this; + if (other.hasBackupId()) { + bitField0_ |= 0x00000001; + backupId_ = other.backupId_; + onChanged(); } if (tablesBuilder_ == null) { if (!other.tables_.isEmpty()) { @@ -59920,27 +61975,56 @@ public final class MasterProtos { } } } - if (other.hasTargetRootDir()) { - bitField0_ |= 0x00000004; - targetRootDir_ = other.targetRootDir_; + if (targetTablesBuilder_ == null) { + if (!other.targetTables_.isEmpty()) { + if (targetTables_.isEmpty()) { + targetTables_ = other.targetTables_; + bitField0_ = (bitField0_ & ~0x00000004); + } else { + ensureTargetTablesIsMutable(); + targetTables_.addAll(other.targetTables_); + } + onChanged(); + } + } else { + if (!other.targetTables_.isEmpty()) { + if (targetTablesBuilder_.isEmpty()) { + targetTablesBuilder_.dispose(); + targetTablesBuilder_ = null; + targetTables_ = other.targetTables_; + bitField0_ = (bitField0_ & ~0x00000004); + targetTablesBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? 
+ getTargetTablesFieldBuilder() : null; + } else { + targetTablesBuilder_.addAllMessages(other.targetTables_); + } + } + } + if (other.hasBackupRootDir()) { + bitField0_ |= 0x00000008; + backupRootDir_ = other.backupRootDir_; onChanged(); } - if (other.hasWorkers()) { - setWorkers(other.getWorkers()); + if (other.hasDependencyCheckOnly()) { + setDependencyCheckOnly(other.getDependencyCheckOnly()); } - if (other.hasBandwidth()) { - setBandwidth(other.getBandwidth()); + if (other.hasAutoRestore()) { + setAutoRestore(other.getAutoRestore()); + } + if (other.hasOverwrite()) { + setOverwrite(other.getOverwrite()); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { - if (!hasType()) { + if (!hasBackupId()) { return false; } - if (!hasTargetRootDir()) { + if (!hasBackupRootDir()) { return false; } @@ -59950,6 +62034,12 @@ public final class MasterProtos { return false; } } + for (int i = 0; i < getTargetTablesCount(); i++) { + if (!getTargetTables(i).isInitialized()) { + + return false; + } + } return true; } @@ -59957,11 +62047,11 @@ public final class MasterProtos { com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest parsedMessage = null; + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesRequest parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest) e.getUnfinishedMessage(); + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesRequest) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { @@ -59972,38 +62062,76 @@ public final class MasterProtos { } private int bitField0_; - // 
required .hbase.pb.BackupType type = 1; - private org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType type_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType.FULL; + // required string backup_id = 1; + private java.lang.Object backupId_ = ""; /** - * required .hbase.pb.BackupType type = 1; + * required string backup_id = 1; */ - public boolean hasType() { + public boolean hasBackupId() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** - * required .hbase.pb.BackupType type = 1; + * required string backup_id = 1; */ - public org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType getType() { - return type_; + public java.lang.String getBackupId() { + java.lang.Object ref = backupId_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + backupId_ = s; + return s; + } else { + return (java.lang.String) ref; + } } /** - * required .hbase.pb.BackupType type = 1; + * required string backup_id = 1; */ - public Builder setType(org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType value) { - if (value == null) { - throw new NullPointerException(); + public com.google.protobuf.ByteString + getBackupIdBytes() { + java.lang.Object ref = backupId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + backupId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; } - bitField0_ |= 0x00000001; - type_ = value; + } + /** + * required string backup_id = 1; + */ + public Builder setBackupId( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + backupId_ = value; onChanged(); return this; } /** - * required .hbase.pb.BackupType type = 1; + * required string backup_id = 1; */ - public Builder clearType() { + public Builder clearBackupId() { bitField0_ = (bitField0_ & 
~0x00000001); - type_ = org.apache.hadoop.hbase.protobuf.generated.BackupProtos.BackupType.FULL; + backupId_ = getDefaultInstance().getBackupId(); + onChanged(); + return this; + } + /** + * required string backup_id = 1; + */ + public Builder setBackupIdBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + backupId_ = value; onChanged(); return this; } @@ -60248,158 +62376,431 @@ public final class MasterProtos { return tablesBuilder_; } - // required string target_root_dir = 3; - private java.lang.Object targetRootDir_ = ""; + // repeated .hbase.pb.TableName target_tables = 3; + private java.util.List targetTables_ = + java.util.Collections.emptyList(); + private void ensureTargetTablesIsMutable() { + if (!((bitField0_ & 0x00000004) == 0x00000004)) { + targetTables_ = new java.util.ArrayList(targetTables_); + bitField0_ |= 0x00000004; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> targetTablesBuilder_; + /** - * required string target_root_dir = 3; + * repeated .hbase.pb.TableName target_tables = 3; */ - public boolean hasTargetRootDir() { - return ((bitField0_ & 0x00000004) == 0x00000004); + public java.util.List getTargetTablesList() { + if (targetTablesBuilder_ == null) { + return java.util.Collections.unmodifiableList(targetTables_); + } else { + return targetTablesBuilder_.getMessageList(); + } } /** - * required string target_root_dir = 3; + * repeated .hbase.pb.TableName target_tables = 3; */ - public java.lang.String getTargetRootDir() { - java.lang.Object ref = targetRootDir_; + public int getTargetTablesCount() { + if (targetTablesBuilder_ == null) { + return targetTables_.size(); + } else { + return targetTablesBuilder_.getCount(); + } + 
} + /** + * repeated .hbase.pb.TableName target_tables = 3; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTargetTables(int index) { + if (targetTablesBuilder_ == null) { + return targetTables_.get(index); + } else { + return targetTablesBuilder_.getMessage(index); + } + } + /** + * repeated .hbase.pb.TableName target_tables = 3; + */ + public Builder setTargetTables( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { + if (targetTablesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureTargetTablesIsMutable(); + targetTables_.set(index, value); + onChanged(); + } else { + targetTablesBuilder_.setMessage(index, value); + } + return this; + } + /** + * repeated .hbase.pb.TableName target_tables = 3; + */ + public Builder setTargetTables( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) { + if (targetTablesBuilder_ == null) { + ensureTargetTablesIsMutable(); + targetTables_.set(index, builderForValue.build()); + onChanged(); + } else { + targetTablesBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.TableName target_tables = 3; + */ + public Builder addTargetTables(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { + if (targetTablesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureTargetTablesIsMutable(); + targetTables_.add(value); + onChanged(); + } else { + targetTablesBuilder_.addMessage(value); + } + return this; + } + /** + * repeated .hbase.pb.TableName target_tables = 3; + */ + public Builder addTargetTables( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { + if (targetTablesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureTargetTablesIsMutable(); + targetTables_.add(index, value); + onChanged(); + } 
else { + targetTablesBuilder_.addMessage(index, value); + } + return this; + } + /** + * repeated .hbase.pb.TableName target_tables = 3; + */ + public Builder addTargetTables( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) { + if (targetTablesBuilder_ == null) { + ensureTargetTablesIsMutable(); + targetTables_.add(builderForValue.build()); + onChanged(); + } else { + targetTablesBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.TableName target_tables = 3; + */ + public Builder addTargetTables( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) { + if (targetTablesBuilder_ == null) { + ensureTargetTablesIsMutable(); + targetTables_.add(index, builderForValue.build()); + onChanged(); + } else { + targetTablesBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.TableName target_tables = 3; + */ + public Builder addAllTargetTables( + java.lang.Iterable values) { + if (targetTablesBuilder_ == null) { + ensureTargetTablesIsMutable(); + super.addAll(values, targetTables_); + onChanged(); + } else { + targetTablesBuilder_.addAllMessages(values); + } + return this; + } + /** + * repeated .hbase.pb.TableName target_tables = 3; + */ + public Builder clearTargetTables() { + if (targetTablesBuilder_ == null) { + targetTables_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000004); + onChanged(); + } else { + targetTablesBuilder_.clear(); + } + return this; + } + /** + * repeated .hbase.pb.TableName target_tables = 3; + */ + public Builder removeTargetTables(int index) { + if (targetTablesBuilder_ == null) { + ensureTargetTablesIsMutable(); + targetTables_.remove(index); + onChanged(); + } else { + targetTablesBuilder_.remove(index); + } + return this; + } + /** + * repeated .hbase.pb.TableName target_tables = 3; + */ + public 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder getTargetTablesBuilder( + int index) { + return getTargetTablesFieldBuilder().getBuilder(index); + } + /** + * repeated .hbase.pb.TableName target_tables = 3; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTargetTablesOrBuilder( + int index) { + if (targetTablesBuilder_ == null) { + return targetTables_.get(index); } else { + return targetTablesBuilder_.getMessageOrBuilder(index); + } + } + /** + * repeated .hbase.pb.TableName target_tables = 3; + */ + public java.util.List + getTargetTablesOrBuilderList() { + if (targetTablesBuilder_ != null) { + return targetTablesBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(targetTables_); + } + } + /** + * repeated .hbase.pb.TableName target_tables = 3; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder addTargetTablesBuilder() { + return getTargetTablesFieldBuilder().addBuilder( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance()); + } + /** + * repeated .hbase.pb.TableName target_tables = 3; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder addTargetTablesBuilder( + int index) { + return getTargetTablesFieldBuilder().addBuilder( + index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance()); + } + /** + * repeated .hbase.pb.TableName target_tables = 3; + */ + public java.util.List + getTargetTablesBuilderList() { + return getTargetTablesFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> + getTargetTablesFieldBuilder() { + if (targetTablesBuilder_ == null) { + 
targetTablesBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder>( + targetTables_, + ((bitField0_ & 0x00000004) == 0x00000004), + getParentForChildren(), + isClean()); + targetTables_ = null; + } + return targetTablesBuilder_; + } + + // required string backup_root_dir = 4; + private java.lang.Object backupRootDir_ = ""; + /** + * required string backup_root_dir = 4; + */ + public boolean hasBackupRootDir() { + return ((bitField0_ & 0x00000008) == 0x00000008); + } + /** + * required string backup_root_dir = 4; + */ + public java.lang.String getBackupRootDir() { + java.lang.Object ref = backupRootDir_; if (!(ref instanceof java.lang.String)) { java.lang.String s = ((com.google.protobuf.ByteString) ref) .toStringUtf8(); - targetRootDir_ = s; + backupRootDir_ = s; return s; } else { return (java.lang.String) ref; } } /** - * required string target_root_dir = 3; + * required string backup_root_dir = 4; */ public com.google.protobuf.ByteString - getTargetRootDirBytes() { - java.lang.Object ref = targetRootDir_; + getBackupRootDirBytes() { + java.lang.Object ref = backupRootDir_; if (ref instanceof String) { com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); - targetRootDir_ = b; + backupRootDir_ = b; return b; } else { return (com.google.protobuf.ByteString) ref; } } /** - * required string target_root_dir = 3; + * required string backup_root_dir = 4; */ - public Builder setTargetRootDir( + public Builder setBackupRootDir( java.lang.String value) { if (value == null) { throw new NullPointerException(); } - bitField0_ |= 0x00000004; - targetRootDir_ = value; + bitField0_ |= 0x00000008; + backupRootDir_ = value; onChanged(); return this; } /** - * required string target_root_dir = 3; + * required 
string backup_root_dir = 4; */ - public Builder clearTargetRootDir() { - bitField0_ = (bitField0_ & ~0x00000004); - targetRootDir_ = getDefaultInstance().getTargetRootDir(); + public Builder clearBackupRootDir() { + bitField0_ = (bitField0_ & ~0x00000008); + backupRootDir_ = getDefaultInstance().getBackupRootDir(); onChanged(); return this; } /** - * required string target_root_dir = 3; + * required string backup_root_dir = 4; */ - public Builder setTargetRootDirBytes( + public Builder setBackupRootDirBytes( com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } - bitField0_ |= 0x00000004; - targetRootDir_ = value; + bitField0_ |= 0x00000008; + backupRootDir_ = value; onChanged(); return this; } - // optional int64 workers = 4; - private long workers_ ; + // optional bool dependency_check_only = 5; + private boolean dependencyCheckOnly_ ; /** - * optional int64 workers = 4; + * optional bool dependency_check_only = 5; */ - public boolean hasWorkers() { - return ((bitField0_ & 0x00000008) == 0x00000008); + public boolean hasDependencyCheckOnly() { + return ((bitField0_ & 0x00000010) == 0x00000010); } /** - * optional int64 workers = 4; + * optional bool dependency_check_only = 5; */ - public long getWorkers() { - return workers_; + public boolean getDependencyCheckOnly() { + return dependencyCheckOnly_; } /** - * optional int64 workers = 4; + * optional bool dependency_check_only = 5; */ - public Builder setWorkers(long value) { - bitField0_ |= 0x00000008; - workers_ = value; + public Builder setDependencyCheckOnly(boolean value) { + bitField0_ |= 0x00000010; + dependencyCheckOnly_ = value; onChanged(); return this; } /** - * optional int64 workers = 4; + * optional bool dependency_check_only = 5; */ - public Builder clearWorkers() { - bitField0_ = (bitField0_ & ~0x00000008); - workers_ = 0L; + public Builder clearDependencyCheckOnly() { + bitField0_ = (bitField0_ & ~0x00000010); + dependencyCheckOnly_ = false; onChanged(); 
return this; } - // optional int64 bandwidth = 5; - private long bandwidth_ ; + // optional bool auto_restore = 6; + private boolean autoRestore_ ; /** - * optional int64 bandwidth = 5; + * optional bool auto_restore = 6; */ - public boolean hasBandwidth() { - return ((bitField0_ & 0x00000010) == 0x00000010); + public boolean hasAutoRestore() { + return ((bitField0_ & 0x00000020) == 0x00000020); } /** - * optional int64 bandwidth = 5; + * optional bool auto_restore = 6; */ - public long getBandwidth() { - return bandwidth_; + public boolean getAutoRestore() { + return autoRestore_; } /** - * optional int64 bandwidth = 5; + * optional bool auto_restore = 6; */ - public Builder setBandwidth(long value) { - bitField0_ |= 0x00000010; - bandwidth_ = value; + public Builder setAutoRestore(boolean value) { + bitField0_ |= 0x00000020; + autoRestore_ = value; onChanged(); return this; } /** - * optional int64 bandwidth = 5; + * optional bool auto_restore = 6; */ - public Builder clearBandwidth() { - bitField0_ = (bitField0_ & ~0x00000010); - bandwidth_ = 0L; + public Builder clearAutoRestore() { + bitField0_ = (bitField0_ & ~0x00000020); + autoRestore_ = false; onChanged(); return this; } - // @@protoc_insertion_point(builder_scope:hbase.pb.BackupTablesRequest) + // optional bool overwrite = 7; + private boolean overwrite_ ; + /** + * optional bool overwrite = 7; + */ + public boolean hasOverwrite() { + return ((bitField0_ & 0x00000040) == 0x00000040); + } + /** + * optional bool overwrite = 7; + */ + public boolean getOverwrite() { + return overwrite_; + } + /** + * optional bool overwrite = 7; + */ + public Builder setOverwrite(boolean value) { + bitField0_ |= 0x00000040; + overwrite_ = value; + onChanged(); + return this; + } + /** + * optional bool overwrite = 7; + */ + public Builder clearOverwrite() { + bitField0_ = (bitField0_ & ~0x00000040); + overwrite_ = false; + onChanged(); + return this; + } + + // 
@@protoc_insertion_point(builder_scope:hbase.pb.RestoreTablesRequest) } static { - defaultInstance = new BackupTablesRequest(true); + defaultInstance = new RestoreTablesRequest(true); defaultInstance.initFields(); } - // @@protoc_insertion_point(class_scope:hbase.pb.BackupTablesRequest) + // @@protoc_insertion_point(class_scope:hbase.pb.RestoreTablesRequest) } - public interface BackupTablesResponseOrBuilder + public interface RestoreTablesResponseOrBuilder extends com.google.protobuf.MessageOrBuilder { // optional uint64 proc_id = 1; @@ -60411,41 +62812,26 @@ public final class MasterProtos { * optional uint64 proc_id = 1; */ long getProcId(); - - // optional string backup_id = 2; - /** - * optional string backup_id = 2; - */ - boolean hasBackupId(); - /** - * optional string backup_id = 2; - */ - java.lang.String getBackupId(); - /** - * optional string backup_id = 2; - */ - com.google.protobuf.ByteString - getBackupIdBytes(); } /** - * Protobuf type {@code hbase.pb.BackupTablesResponse} + * Protobuf type {@code hbase.pb.RestoreTablesResponse} */ - public static final class BackupTablesResponse extends + public static final class RestoreTablesResponse extends com.google.protobuf.GeneratedMessage - implements BackupTablesResponseOrBuilder { - // Use BackupTablesResponse.newBuilder() to construct. - private BackupTablesResponse(com.google.protobuf.GeneratedMessage.Builder builder) { + implements RestoreTablesResponseOrBuilder { + // Use RestoreTablesResponse.newBuilder() to construct. 
+ private RestoreTablesResponse(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } - private BackupTablesResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + private RestoreTablesResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } - private static final BackupTablesResponse defaultInstance; - public static BackupTablesResponse getDefaultInstance() { + private static final RestoreTablesResponse defaultInstance; + public static RestoreTablesResponse getDefaultInstance() { return defaultInstance; } - public BackupTablesResponse getDefaultInstanceForType() { + public RestoreTablesResponse getDefaultInstanceForType() { return defaultInstance; } @@ -60455,7 +62841,7 @@ public final class MasterProtos { getUnknownFields() { return this.unknownFields; } - private BackupTablesResponse( + private RestoreTablesResponse( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { @@ -60483,11 +62869,6 @@ public final class MasterProtos { procId_ = input.readUInt64(); break; } - case 18: { - bitField0_ |= 0x00000002; - backupId_ = input.readBytes(); - break; - } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { @@ -60502,28 +62883,28 @@ public final class MasterProtos { } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_BackupTablesResponse_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_RestoreTablesResponse_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_BackupTablesResponse_fieldAccessorTable + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_RestoreTablesResponse_fieldAccessorTable .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse.Builder.class); + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesResponse.Builder.class); } - public static com.google.protobuf.Parser PARSER = - new com.google.protobuf.AbstractParser() { - public BackupTablesResponse parsePartialFrom( + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public RestoreTablesResponse parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { - return new BackupTablesResponse(input, extensionRegistry); + return new RestoreTablesResponse(input, extensionRegistry); } }; @java.lang.Override - public com.google.protobuf.Parser getParserForType() { + public com.google.protobuf.Parser getParserForType() { return PARSER; } @@ -60544,52 +62925,8 @@ public final class MasterProtos { return procId_; } - // optional string backup_id = 2; - public static final int BACKUP_ID_FIELD_NUMBER = 2; - private java.lang.Object backupId_; - /** - * optional string backup_id = 2; - */ - public boolean hasBackupId() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - /** - * optional string backup_id = 2; - */ - public java.lang.String getBackupId() { - java.lang.Object ref = backupId_; - if (ref instanceof java.lang.String) { - return (java.lang.String) ref; - } else { - com.google.protobuf.ByteString bs = - 
(com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - if (bs.isValidUtf8()) { - backupId_ = s; - } - return s; - } - } - /** - * optional string backup_id = 2; - */ - public com.google.protobuf.ByteString - getBackupIdBytes() { - java.lang.Object ref = backupId_; - if (ref instanceof java.lang.String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - backupId_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - private void initFields() { procId_ = 0L; - backupId_ = ""; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { @@ -60606,9 +62943,6 @@ public final class MasterProtos { if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeUInt64(1, procId_); } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - output.writeBytes(2, getBackupIdBytes()); - } getUnknownFields().writeTo(output); } @@ -60622,10 +62956,6 @@ public final class MasterProtos { size += com.google.protobuf.CodedOutputStream .computeUInt64Size(1, procId_); } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(2, getBackupIdBytes()); - } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; @@ -60643,10 +62973,10 @@ public final class MasterProtos { if (obj == this) { return true; } - if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse)) { + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesResponse)) { return super.equals(obj); } - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse) obj; + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesResponse other = 
(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesResponse) obj; boolean result = true; result = result && (hasProcId() == other.hasProcId()); @@ -60654,11 +62984,6 @@ public final class MasterProtos { result = result && (getProcId() == other.getProcId()); } - result = result && (hasBackupId() == other.hasBackupId()); - if (hasBackupId()) { - result = result && getBackupId() - .equals(other.getBackupId()); - } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; @@ -60676,62 +63001,58 @@ public final class MasterProtos { hash = (37 * hash) + PROC_ID_FIELD_NUMBER; hash = (53 * hash) + hashLong(getProcId()); } - if (hasBackupId()) { - hash = (37 * hash) + BACKUP_ID_FIELD_NUMBER; - hash = (53 * hash) + getBackupId().hashCode(); - } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesResponse parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesResponse parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse parseFrom(byte[] data) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesResponse parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesResponse parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse parseFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesResponse parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesResponse parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse parseDelimitedFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesResponse parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse parseDelimitedFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesResponse parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse parseFrom( + public static 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesResponse parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesResponse parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { @@ -60740,7 +63061,7 @@ public final class MasterProtos { public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse prototype) { + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesResponse prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @@ -60752,24 +63073,24 @@ public final class MasterProtos { return builder; } /** - * Protobuf type {@code hbase.pb.BackupTablesResponse} + * Protobuf type {@code hbase.pb.RestoreTablesResponse} */ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponseOrBuilder { + implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesResponseOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_BackupTablesResponse_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_RestoreTablesResponse_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_BackupTablesResponse_fieldAccessorTable + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_RestoreTablesResponse_fieldAccessorTable .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse.Builder.class); + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesResponse.Builder.class); } - // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse.newBuilder() + // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesResponse.newBuilder() private Builder() { maybeForceBuilderInitialization(); } @@ -60791,8 +63112,6 @@ public final class MasterProtos { super.clear(); procId_ = 0L; bitField0_ = (bitField0_ & ~0x00000001); - backupId_ = ""; - bitField0_ = (bitField0_ & ~0x00000002); return this; } @@ -60802,57 +63121,48 @@ public final class MasterProtos { public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_BackupTablesResponse_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_RestoreTablesResponse_descriptor; } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse getDefaultInstanceForType() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse.getDefaultInstance(); + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesResponse getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesResponse.getDefaultInstance(); } - public 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse build() { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse result = buildPartial(); + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesResponse build() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesResponse result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse buildPartial() { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse(this); + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesResponse buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesResponse result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesResponse(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } result.procId_ = procId_; - if (((from_bitField0_ & 0x00000002) == 0x00000002)) { - to_bitField0_ |= 0x00000002; - } - result.backupId_ = backupId_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse) { - return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse)other); + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesResponse) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesResponse)other); } else { super.mergeFrom(other); return this; } } - public Builder 
mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse other) { - if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse.getDefaultInstance()) return this; + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesResponse other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesResponse.getDefaultInstance()) return this; if (other.hasProcId()) { setProcId(other.getProcId()); } - if (other.hasBackupId()) { - bitField0_ |= 0x00000002; - backupId_ = other.backupId_; - onChanged(); - } this.mergeUnknownFields(other.getUnknownFields()); return this; } @@ -60865,11 +63175,11 @@ public final class MasterProtos { com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse parsedMessage = null; + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesResponse parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse) e.getUnfinishedMessage(); + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesResponse) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { @@ -60913,89 +63223,15 @@ public final class MasterProtos { return this; } - // optional string backup_id = 2; - private java.lang.Object backupId_ = ""; - /** - * optional string backup_id = 2; - */ - public boolean hasBackupId() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - /** - * optional string backup_id = 2; - */ - public java.lang.String getBackupId() { - java.lang.Object ref = backupId_; - if (!(ref instanceof java.lang.String)) { - 
java.lang.String s = ((com.google.protobuf.ByteString) ref) - .toStringUtf8(); - backupId_ = s; - return s; - } else { - return (java.lang.String) ref; - } - } - /** - * optional string backup_id = 2; - */ - public com.google.protobuf.ByteString - getBackupIdBytes() { - java.lang.Object ref = backupId_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - backupId_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - /** - * optional string backup_id = 2; - */ - public Builder setBackupId( - java.lang.String value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000002; - backupId_ = value; - onChanged(); - return this; - } - /** - * optional string backup_id = 2; - */ - public Builder clearBackupId() { - bitField0_ = (bitField0_ & ~0x00000002); - backupId_ = getDefaultInstance().getBackupId(); - onChanged(); - return this; - } - /** - * optional string backup_id = 2; - */ - public Builder setBackupIdBytes( - com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000002; - backupId_ = value; - onChanged(); - return this; - } - - // @@protoc_insertion_point(builder_scope:hbase.pb.BackupTablesResponse) + // @@protoc_insertion_point(builder_scope:hbase.pb.RestoreTablesResponse) } static { - defaultInstance = new BackupTablesResponse(true); + defaultInstance = new RestoreTablesResponse(true); defaultInstance.initFields(); } - // @@protoc_insertion_point(class_scope:hbase.pb.BackupTablesResponse) + // @@protoc_insertion_point(class_scope:hbase.pb.RestoreTablesResponse) } /** @@ -61731,6 +63967,18 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest request, com.google.protobuf.RpcCallback done); + /** + * rpc restoreTables(.hbase.pb.RestoreTablesRequest) returns 
(.hbase.pb.RestoreTablesResponse); + * + *
+       ** restore table set 
+       * 
+ */ + public abstract void restoreTables( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesRequest request, + com.google.protobuf.RpcCallback done); + } public static com.google.protobuf.Service newReflectiveService( @@ -62200,6 +64448,14 @@ public final class MasterProtos { impl.backupTables(controller, request, done); } + @java.lang.Override + public void restoreTables( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesRequest request, + com.google.protobuf.RpcCallback done) { + impl.restoreTables(controller, request, done); + } + }; } @@ -62338,6 +64594,8 @@ public final class MasterProtos { return impl.listProcedures(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest)request); case 57: return impl.backupTables(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest)request); + case 58: + return impl.restoreTables(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesRequest)request); default: throw new java.lang.AssertionError("Can't get here."); } @@ -62468,6 +64726,8 @@ public final class MasterProtos { return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest.getDefaultInstance(); case 57: return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest.getDefaultInstance(); + case 58: + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesRequest.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); } @@ -62598,6 +64858,8 @@ public final class MasterProtos { return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse.getDefaultInstance(); case 57: return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse.getDefaultInstance(); + case 58: + return 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesResponse.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); } @@ -63331,6 +65593,18 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest request, com.google.protobuf.RpcCallback done); + /** + * rpc restoreTables(.hbase.pb.RestoreTablesRequest) returns (.hbase.pb.RestoreTablesResponse); + * + *
+     ** restore table set 
+     * 
+ */ + public abstract void restoreTables( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesRequest request, + com.google.protobuf.RpcCallback done); + public static final com.google.protobuf.Descriptors.ServiceDescriptor getDescriptor() { @@ -63643,6 +65917,11 @@ public final class MasterProtos { com.google.protobuf.RpcUtil.specializeCallback( done)); return; + case 58: + this.restoreTables(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesRequest)request, + com.google.protobuf.RpcUtil.specializeCallback( + done)); + return; default: throw new java.lang.AssertionError("Can't get here."); } @@ -63773,6 +66052,8 @@ public final class MasterProtos { return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest.getDefaultInstance(); case 57: return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest.getDefaultInstance(); + case 58: + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesRequest.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); } @@ -63903,6 +66184,8 @@ public final class MasterProtos { return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse.getDefaultInstance(); case 57: return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse.getDefaultInstance(); + case 58: + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesResponse.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); } @@ -64793,6 +67076,21 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse.getDefaultInstance())); } + + public void restoreTables( + com.google.protobuf.RpcController controller, + 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesRequest request, + com.google.protobuf.RpcCallback done) { + channel.callMethod( + getDescriptor().getMethods().get(58), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesResponse.getDefaultInstance(), + com.google.protobuf.RpcUtil.generalizeCallback( + done, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesResponse.class, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesResponse.getDefaultInstance())); + } } public static BlockingInterface newBlockingStub( @@ -65090,6 +67388,11 @@ public final class MasterProtos { com.google.protobuf.RpcController controller, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesRequest request) throws com.google.protobuf.ServiceException; + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesResponse restoreTables( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesRequest request) + throws com.google.protobuf.ServiceException; } private static final class BlockingStub implements BlockingInterface { @@ -65794,6 +68097,18 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BackupTablesResponse.getDefaultInstance()); } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesResponse restoreTables( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesRequest request) + throws com.google.protobuf.ServiceException { + return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesResponse) channel.callBlockingMethod( + getDescriptor().getMethods().get(58), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesResponse.getDefaultInstance()); + } + } // 
@@protoc_insertion_point(class_scope:hbase.pb.MasterService) @@ -66354,6 +68669,16 @@ public final class MasterProtos { private static com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hbase_pb_BackupTablesResponse_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_hbase_pb_RestoreTablesRequest_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_hbase_pb_RestoreTablesRequest_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_hbase_pb_RestoreTablesResponse_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_hbase_pb_RestoreTablesResponse_fieldAccessorTable; public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { @@ -66560,142 +68885,152 @@ public final class MasterProtos { "bleName\022\027\n\017target_root_dir\030\003 \002(\t\022\017\n\007work" + "ers\030\004 \001(\003\022\021\n\tbandwidth\030\005 \001(\003\":\n\024BackupTa" + "blesResponse\022\017\n\007proc_id\030\001 \001(\004\022\021\n\tbackup_" + - "id\030\002 \001(\t*(\n\020MasterSwitchType\022\t\n\005SPLIT\020\000\022" + - "\t\n\005MERGE\020\0012\242)\n\rMasterService\022e\n\024GetSchem" + - "aAlterStatus\022%.hbase.pb.GetSchemaAlterSt", - "atusRequest\032&.hbase.pb.GetSchemaAlterSta" + - "tusResponse\022b\n\023GetTableDescriptors\022$.hba" + - "se.pb.GetTableDescriptorsRequest\032%.hbase" + - ".pb.GetTableDescriptorsResponse\022P\n\rGetTa" + - "bleNames\022\036.hbase.pb.GetTableNamesRequest" + - "\032\037.hbase.pb.GetTableNamesResponse\022Y\n\020Get" + - "ClusterStatus\022!.hbase.pb.GetClusterStatu" + - "sRequest\032\".hbase.pb.GetClusterStatusResp" + - "onse\022V\n\017IsMasterRunning\022 .hbase.pb.IsMas" + - "terRunningRequest\032!.hbase.pb.IsMasterRun", - "ningResponse\022D\n\tAddColumn\022\032.hbase.pb.Add" + - "ColumnRequest\032\033.hbase.pb.AddColumnRespon" + - 
"se\022M\n\014DeleteColumn\022\035.hbase.pb.DeleteColu" + - "mnRequest\032\036.hbase.pb.DeleteColumnRespons" + - "e\022M\n\014ModifyColumn\022\035.hbase.pb.ModifyColum" + - "nRequest\032\036.hbase.pb.ModifyColumnResponse" + - "\022G\n\nMoveRegion\022\033.hbase.pb.MoveRegionRequ" + - "est\032\034.hbase.pb.MoveRegionResponse\022k\n\026Dis" + - "patchMergingRegions\022\'.hbase.pb.DispatchM" + - "ergingRegionsRequest\032(.hbase.pb.Dispatch", - "MergingRegionsResponse\022M\n\014AssignRegion\022\035" + - ".hbase.pb.AssignRegionRequest\032\036.hbase.pb" + - ".AssignRegionResponse\022S\n\016UnassignRegion\022" + - "\037.hbase.pb.UnassignRegionRequest\032 .hbase" + - ".pb.UnassignRegionResponse\022P\n\rOfflineReg" + - "ion\022\036.hbase.pb.OfflineRegionRequest\032\037.hb" + - "ase.pb.OfflineRegionResponse\022J\n\013DeleteTa" + - "ble\022\034.hbase.pb.DeleteTableRequest\032\035.hbas" + - "e.pb.DeleteTableResponse\022P\n\rtruncateTabl" + - "e\022\036.hbase.pb.TruncateTableRequest\032\037.hbas", - "e.pb.TruncateTableResponse\022J\n\013EnableTabl" + - "e\022\034.hbase.pb.EnableTableRequest\032\035.hbase." 
+ - "pb.EnableTableResponse\022M\n\014DisableTable\022\035" + - ".hbase.pb.DisableTableRequest\032\036.hbase.pb" + - ".DisableTableResponse\022J\n\013ModifyTable\022\034.h" + - "base.pb.ModifyTableRequest\032\035.hbase.pb.Mo" + - "difyTableResponse\022J\n\013CreateTable\022\034.hbase" + - ".pb.CreateTableRequest\032\035.hbase.pb.Create" + - "TableResponse\022A\n\010Shutdown\022\031.hbase.pb.Shu" + - "tdownRequest\032\032.hbase.pb.ShutdownResponse", - "\022G\n\nStopMaster\022\033.hbase.pb.StopMasterRequ" + - "est\032\034.hbase.pb.StopMasterResponse\022>\n\007Bal" + - "ance\022\030.hbase.pb.BalanceRequest\032\031.hbase.p" + - "b.BalanceResponse\022_\n\022SetBalancerRunning\022" + - "#.hbase.pb.SetBalancerRunningRequest\032$.h" + - "base.pb.SetBalancerRunningResponse\022\\\n\021Is" + - "BalancerEnabled\022\".hbase.pb.IsBalancerEna" + - "bledRequest\032#.hbase.pb.IsBalancerEnabled" + - "Response\022k\n\026SetSplitOrMergeEnabled\022\'.hba" + - "se.pb.SetSplitOrMergeEnabledRequest\032(.hb", - "ase.pb.SetSplitOrMergeEnabledResponse\022h\n" + - "\025IsSplitOrMergeEnabled\022&.hbase.pb.IsSpli" + - "tOrMergeEnabledRequest\032\'.hbase.pb.IsSpli" + - "tOrMergeEnabledResponse\022D\n\tNormalize\022\032.h" + - "base.pb.NormalizeRequest\032\033.hbase.pb.Norm" + - "alizeResponse\022e\n\024SetNormalizerRunning\022%." + - "hbase.pb.SetNormalizerRunningRequest\032&.h" + - "base.pb.SetNormalizerRunningResponse\022b\n\023" + - "IsNormalizerEnabled\022$.hbase.pb.IsNormali" + - "zerEnabledRequest\032%.hbase.pb.IsNormalize", - "rEnabledResponse\022S\n\016RunCatalogScan\022\037.hba" + - "se.pb.RunCatalogScanRequest\032 .hbase.pb.R" + - "unCatalogScanResponse\022e\n\024EnableCatalogJa" + - "nitor\022%.hbase.pb.EnableCatalogJanitorReq" + - "uest\032&.hbase.pb.EnableCatalogJanitorResp" + - "onse\022n\n\027IsCatalogJanitorEnabled\022(.hbase." 
+ - "pb.IsCatalogJanitorEnabledRequest\032).hbas" + - "e.pb.IsCatalogJanitorEnabledResponse\022^\n\021" + - "ExecMasterService\022#.hbase.pb.Coprocessor" + - "ServiceRequest\032$.hbase.pb.CoprocessorSer", - "viceResponse\022A\n\010Snapshot\022\031.hbase.pb.Snap" + - "shotRequest\032\032.hbase.pb.SnapshotResponse\022" + - "h\n\025GetCompletedSnapshots\022&.hbase.pb.GetC" + - "ompletedSnapshotsRequest\032\'.hbase.pb.GetC" + - "ompletedSnapshotsResponse\022S\n\016DeleteSnaps" + - "hot\022\037.hbase.pb.DeleteSnapshotRequest\032 .h" + - "base.pb.DeleteSnapshotResponse\022S\n\016IsSnap" + - "shotDone\022\037.hbase.pb.IsSnapshotDoneReques" + - "t\032 .hbase.pb.IsSnapshotDoneResponse\022V\n\017R" + - "estoreSnapshot\022 .hbase.pb.RestoreSnapsho", - "tRequest\032!.hbase.pb.RestoreSnapshotRespo" + - "nse\022h\n\025IsRestoreSnapshotDone\022&.hbase.pb." + - "IsRestoreSnapshotDoneRequest\032\'.hbase.pb." + - "IsRestoreSnapshotDoneResponse\022P\n\rExecPro" + - "cedure\022\036.hbase.pb.ExecProcedureRequest\032\037" + - ".hbase.pb.ExecProcedureResponse\022W\n\024ExecP" + - "rocedureWithRet\022\036.hbase.pb.ExecProcedure" + - "Request\032\037.hbase.pb.ExecProcedureResponse" + - "\022V\n\017IsProcedureDone\022 .hbase.pb.IsProcedu" + - "reDoneRequest\032!.hbase.pb.IsProcedureDone", - "Response\022V\n\017ModifyNamespace\022 .hbase.pb.M" + - "odifyNamespaceRequest\032!.hbase.pb.ModifyN" + - "amespaceResponse\022V\n\017CreateNamespace\022 .hb" + - "ase.pb.CreateNamespaceRequest\032!.hbase.pb" + - ".CreateNamespaceResponse\022V\n\017DeleteNamesp" + - "ace\022 .hbase.pb.DeleteNamespaceRequest\032!." 
+ - "hbase.pb.DeleteNamespaceResponse\022k\n\026GetN" + - "amespaceDescriptor\022\'.hbase.pb.GetNamespa" + - "ceDescriptorRequest\032(.hbase.pb.GetNamesp" + - "aceDescriptorResponse\022q\n\030ListNamespaceDe", - "scriptors\022).hbase.pb.ListNamespaceDescri" + - "ptorsRequest\032*.hbase.pb.ListNamespaceDes" + - "criptorsResponse\022\206\001\n\037ListTableDescriptor" + - "sByNamespace\0220.hbase.pb.ListTableDescrip" + - "torsByNamespaceRequest\0321.hbase.pb.ListTa" + - "bleDescriptorsByNamespaceResponse\022t\n\031Lis" + - "tTableNamesByNamespace\022*.hbase.pb.ListTa" + - "bleNamesByNamespaceRequest\032+.hbase.pb.Li" + - "stTableNamesByNamespaceResponse\022P\n\rGetTa" + - "bleState\022\036.hbase.pb.GetTableStateRequest", - "\032\037.hbase.pb.GetTableStateResponse\022A\n\010Set" + - "Quota\022\031.hbase.pb.SetQuotaRequest\032\032.hbase" + - ".pb.SetQuotaResponse\022x\n\037getLastMajorComp" + - "actionTimestamp\022).hbase.pb.MajorCompacti" + - "onTimestampRequest\032*.hbase.pb.MajorCompa" + - "ctionTimestampResponse\022\212\001\n(getLastMajorC" + - "ompactionTimestampForRegion\0222.hbase.pb.M" + - "ajorCompactionTimestampForRegionRequest\032" + - "*.hbase.pb.MajorCompactionTimestampRespo" + - "nse\022_\n\022getProcedureResult\022#.hbase.pb.Get", - "ProcedureResultRequest\032$.hbase.pb.GetPro" + - "cedureResultResponse\022h\n\027getSecurityCapab" + - "ilities\022%.hbase.pb.SecurityCapabilitiesR" + - "equest\032&.hbase.pb.SecurityCapabilitiesRe" + - "sponse\022S\n\016AbortProcedure\022\037.hbase.pb.Abor" + - "tProcedureRequest\032 .hbase.pb.AbortProced" + - "ureResponse\022S\n\016ListProcedures\022\037.hbase.pb" + - ".ListProceduresRequest\032 .hbase.pb.ListPr" + - "oceduresResponse\022M\n\014backupTables\022\035.hbase" + - ".pb.BackupTablesRequest\032\036.hbase.pb.Backu", - "pTablesResponseBB\n*org.apache.hadoop.hba" + - "se.protobuf.generatedB\014MasterProtosH\001\210\001\001" + - "\240\001\001" + "id\030\002 \001(\t\"\333\001\n\024RestoreTablesRequest\022\021\n\tbac" + 
+ "kup_id\030\001 \002(\t\022#\n\006tables\030\002 \003(\0132\023.hbase.pb." + + "TableName\022*\n\rtarget_tables\030\003 \003(\0132\023.hbase", + ".pb.TableName\022\027\n\017backup_root_dir\030\004 \002(\t\022\035" + + "\n\025dependency_check_only\030\005 \001(\010\022\024\n\014auto_re" + + "store\030\006 \001(\010\022\021\n\toverwrite\030\007 \001(\010\"(\n\025Restor" + + "eTablesResponse\022\017\n\007proc_id\030\001 \001(\004*(\n\020Mast" + + "erSwitchType\022\t\n\005SPLIT\020\000\022\t\n\005MERGE\020\001*8\n\022Re" + + "storeTablesState\022\016\n\nVALIDATION\020\001\022\022\n\016REST" + + "ORE_IMAGES\020\0022\364)\n\rMasterService\022e\n\024GetSch" + + "emaAlterStatus\022%.hbase.pb.GetSchemaAlter" + + "StatusRequest\032&.hbase.pb.GetSchemaAlterS" + + "tatusResponse\022b\n\023GetTableDescriptors\022$.h", + "base.pb.GetTableDescriptorsRequest\032%.hba" + + "se.pb.GetTableDescriptorsResponse\022P\n\rGet" + + "TableNames\022\036.hbase.pb.GetTableNamesReque" + + "st\032\037.hbase.pb.GetTableNamesResponse\022Y\n\020G" + + "etClusterStatus\022!.hbase.pb.GetClusterSta" + + "tusRequest\032\".hbase.pb.GetClusterStatusRe" + + "sponse\022V\n\017IsMasterRunning\022 .hbase.pb.IsM" + + "asterRunningRequest\032!.hbase.pb.IsMasterR" + + "unningResponse\022D\n\tAddColumn\022\032.hbase.pb.A" + + "ddColumnRequest\032\033.hbase.pb.AddColumnResp", + "onse\022M\n\014DeleteColumn\022\035.hbase.pb.DeleteCo" + + "lumnRequest\032\036.hbase.pb.DeleteColumnRespo" + + "nse\022M\n\014ModifyColumn\022\035.hbase.pb.ModifyCol" + + "umnRequest\032\036.hbase.pb.ModifyColumnRespon" + + "se\022G\n\nMoveRegion\022\033.hbase.pb.MoveRegionRe" + + "quest\032\034.hbase.pb.MoveRegionResponse\022k\n\026D" + + "ispatchMergingRegions\022\'.hbase.pb.Dispatc" + + "hMergingRegionsRequest\032(.hbase.pb.Dispat" + + "chMergingRegionsResponse\022M\n\014AssignRegion" + + "\022\035.hbase.pb.AssignRegionRequest\032\036.hbase.", + "pb.AssignRegionResponse\022S\n\016UnassignRegio" + + 
"n\022\037.hbase.pb.UnassignRegionRequest\032 .hba" + + "se.pb.UnassignRegionResponse\022P\n\rOfflineR" + + "egion\022\036.hbase.pb.OfflineRegionRequest\032\037." + + "hbase.pb.OfflineRegionResponse\022J\n\013Delete" + + "Table\022\034.hbase.pb.DeleteTableRequest\032\035.hb" + + "ase.pb.DeleteTableResponse\022P\n\rtruncateTa" + + "ble\022\036.hbase.pb.TruncateTableRequest\032\037.hb" + + "ase.pb.TruncateTableResponse\022J\n\013EnableTa" + + "ble\022\034.hbase.pb.EnableTableRequest\032\035.hbas", + "e.pb.EnableTableResponse\022M\n\014DisableTable" + + "\022\035.hbase.pb.DisableTableRequest\032\036.hbase." + + "pb.DisableTableResponse\022J\n\013ModifyTable\022\034" + + ".hbase.pb.ModifyTableRequest\032\035.hbase.pb." + + "ModifyTableResponse\022J\n\013CreateTable\022\034.hba" + + "se.pb.CreateTableRequest\032\035.hbase.pb.Crea" + + "teTableResponse\022A\n\010Shutdown\022\031.hbase.pb.S" + + "hutdownRequest\032\032.hbase.pb.ShutdownRespon" + + "se\022G\n\nStopMaster\022\033.hbase.pb.StopMasterRe" + + "quest\032\034.hbase.pb.StopMasterResponse\022>\n\007B", + "alance\022\030.hbase.pb.BalanceRequest\032\031.hbase" + + ".pb.BalanceResponse\022_\n\022SetBalancerRunnin" + + "g\022#.hbase.pb.SetBalancerRunningRequest\032$" + + ".hbase.pb.SetBalancerRunningResponse\022\\\n\021" + + "IsBalancerEnabled\022\".hbase.pb.IsBalancerE" + + "nabledRequest\032#.hbase.pb.IsBalancerEnabl" + + "edResponse\022k\n\026SetSplitOrMergeEnabled\022\'.h" + + "base.pb.SetSplitOrMergeEnabledRequest\032(." 
+ + "hbase.pb.SetSplitOrMergeEnabledResponse\022" + + "h\n\025IsSplitOrMergeEnabled\022&.hbase.pb.IsSp", + "litOrMergeEnabledRequest\032\'.hbase.pb.IsSp" + + "litOrMergeEnabledResponse\022D\n\tNormalize\022\032" + + ".hbase.pb.NormalizeRequest\032\033.hbase.pb.No" + + "rmalizeResponse\022e\n\024SetNormalizerRunning\022" + + "%.hbase.pb.SetNormalizerRunningRequest\032&" + + ".hbase.pb.SetNormalizerRunningResponse\022b" + + "\n\023IsNormalizerEnabled\022$.hbase.pb.IsNorma" + + "lizerEnabledRequest\032%.hbase.pb.IsNormali" + + "zerEnabledResponse\022S\n\016RunCatalogScan\022\037.h" + + "base.pb.RunCatalogScanRequest\032 .hbase.pb", + ".RunCatalogScanResponse\022e\n\024EnableCatalog" + + "Janitor\022%.hbase.pb.EnableCatalogJanitorR" + + "equest\032&.hbase.pb.EnableCatalogJanitorRe" + + "sponse\022n\n\027IsCatalogJanitorEnabled\022(.hbas" + + "e.pb.IsCatalogJanitorEnabledRequest\032).hb" + + "ase.pb.IsCatalogJanitorEnabledResponse\022^" + + "\n\021ExecMasterService\022#.hbase.pb.Coprocess" + + "orServiceRequest\032$.hbase.pb.CoprocessorS" + + "erviceResponse\022A\n\010Snapshot\022\031.hbase.pb.Sn" + + "apshotRequest\032\032.hbase.pb.SnapshotRespons", + "e\022h\n\025GetCompletedSnapshots\022&.hbase.pb.Ge" + + "tCompletedSnapshotsRequest\032\'.hbase.pb.Ge" + + "tCompletedSnapshotsResponse\022S\n\016DeleteSna" + + "pshot\022\037.hbase.pb.DeleteSnapshotRequest\032 " + + ".hbase.pb.DeleteSnapshotResponse\022S\n\016IsSn" + + "apshotDone\022\037.hbase.pb.IsSnapshotDoneRequ" + + "est\032 .hbase.pb.IsSnapshotDoneResponse\022V\n" + + "\017RestoreSnapshot\022 .hbase.pb.RestoreSnaps" + + "hotRequest\032!.hbase.pb.RestoreSnapshotRes" + + "ponse\022h\n\025IsRestoreSnapshotDone\022&.hbase.p", + "b.IsRestoreSnapshotDoneRequest\032\'.hbase.p" + + "b.IsRestoreSnapshotDoneResponse\022P\n\rExecP" + + "rocedure\022\036.hbase.pb.ExecProcedureRequest" + + "\032\037.hbase.pb.ExecProcedureResponse\022W\n\024Exe" + + "cProcedureWithRet\022\036.hbase.pb.ExecProcedu" + + 
"reRequest\032\037.hbase.pb.ExecProcedureRespon" + + "se\022V\n\017IsProcedureDone\022 .hbase.pb.IsProce" + + "dureDoneRequest\032!.hbase.pb.IsProcedureDo" + + "neResponse\022V\n\017ModifyNamespace\022 .hbase.pb" + + ".ModifyNamespaceRequest\032!.hbase.pb.Modif", + "yNamespaceResponse\022V\n\017CreateNamespace\022 ." + + "hbase.pb.CreateNamespaceRequest\032!.hbase." + + "pb.CreateNamespaceResponse\022V\n\017DeleteName" + + "space\022 .hbase.pb.DeleteNamespaceRequest\032" + + "!.hbase.pb.DeleteNamespaceResponse\022k\n\026Ge" + + "tNamespaceDescriptor\022\'.hbase.pb.GetNames" + + "paceDescriptorRequest\032(.hbase.pb.GetName" + + "spaceDescriptorResponse\022q\n\030ListNamespace" + + "Descriptors\022).hbase.pb.ListNamespaceDesc" + + "riptorsRequest\032*.hbase.pb.ListNamespaceD", + "escriptorsResponse\022\206\001\n\037ListTableDescript" + + "orsByNamespace\0220.hbase.pb.ListTableDescr" + + "iptorsByNamespaceRequest\0321.hbase.pb.List" + + "TableDescriptorsByNamespaceResponse\022t\n\031L" + + "istTableNamesByNamespace\022*.hbase.pb.List" + + "TableNamesByNamespaceRequest\032+.hbase.pb." 
+ + "ListTableNamesByNamespaceResponse\022P\n\rGet" + + "TableState\022\036.hbase.pb.GetTableStateReque" + + "st\032\037.hbase.pb.GetTableStateResponse\022A\n\010S" + + "etQuota\022\031.hbase.pb.SetQuotaRequest\032\032.hba", + "se.pb.SetQuotaResponse\022x\n\037getLastMajorCo" + + "mpactionTimestamp\022).hbase.pb.MajorCompac" + + "tionTimestampRequest\032*.hbase.pb.MajorCom" + + "pactionTimestampResponse\022\212\001\n(getLastMajo" + + "rCompactionTimestampForRegion\0222.hbase.pb" + + ".MajorCompactionTimestampForRegionReques" + + "t\032*.hbase.pb.MajorCompactionTimestampRes" + + "ponse\022_\n\022getProcedureResult\022#.hbase.pb.G" + + "etProcedureResultRequest\032$.hbase.pb.GetP" + + "rocedureResultResponse\022h\n\027getSecurityCap", + "abilities\022%.hbase.pb.SecurityCapabilitie" + + "sRequest\032&.hbase.pb.SecurityCapabilities" + + "Response\022S\n\016AbortProcedure\022\037.hbase.pb.Ab" + + "ortProcedureRequest\032 .hbase.pb.AbortProc" + + "edureResponse\022S\n\016ListProcedures\022\037.hbase." 
+ + "pb.ListProceduresRequest\032 .hbase.pb.List" + + "ProceduresResponse\022M\n\014backupTables\022\035.hba" + + "se.pb.BackupTablesRequest\032\036.hbase.pb.Bac" + + "kupTablesResponse\022P\n\rrestoreTables\022\036.hba" + + "se.pb.RestoreTablesRequest\032\037.hbase.pb.Re", + "storeTablesResponseBB\n*org.apache.hadoop" + + ".hbase.protobuf.generatedB\014MasterProtosH" + + "\001\210\001\001\240\001\001" }; com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { @@ -67368,6 +69703,18 @@ public final class MasterProtos { com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_BackupTablesResponse_descriptor, new java.lang.String[] { "ProcId", "BackupId", }); + internal_static_hbase_pb_RestoreTablesRequest_descriptor = + getDescriptor().getMessageTypes().get(111); + internal_static_hbase_pb_RestoreTablesRequest_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_hbase_pb_RestoreTablesRequest_descriptor, + new java.lang.String[] { "BackupId", "Tables", "TargetTables", "BackupRootDir", "DependencyCheckOnly", "AutoRestore", "Overwrite", }); + internal_static_hbase_pb_RestoreTablesResponse_descriptor = + getDescriptor().getMessageTypes().get(112); + internal_static_hbase_pb_RestoreTablesResponse_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_hbase_pb_RestoreTablesResponse_descriptor, + new java.lang.String[] { "ProcId", }); return null; } }; diff --git a/hbase-protocol/src/main/protobuf/Master.proto b/hbase-protocol/src/main/protobuf/Master.proto index 6431c73..f7e777b 100644 --- a/hbase-protocol/src/main/protobuf/Master.proto +++ b/hbase-protocol/src/main/protobuf/Master.proto @@ -554,6 +554,25 @@ message BackupTablesResponse { optional string backup_id = 2; } +enum RestoreTablesState { + VALIDATION = 1; + RESTORE_IMAGES = 2; +} + 
+message RestoreTablesRequest { + required string backup_id = 1; + repeated TableName tables = 2; + repeated TableName target_tables = 3; + required string backup_root_dir = 4; + optional bool dependency_check_only = 5; + optional bool auto_restore = 6; + optional bool overwrite = 7; +} + +message RestoreTablesResponse { + optional uint64 proc_id = 1; +} + service MasterService { /** Used by the client to get the number of regions that have received the updated schema */ rpc GetSchemaAlterStatus(GetSchemaAlterStatusRequest) @@ -832,4 +851,8 @@ service MasterService { /** backup table set */ rpc backupTables(BackupTablesRequest) returns(BackupTablesResponse); + + /** restore table set */ + rpc restoreTables(RestoreTablesRequest) + returns(RestoreTablesResponse); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/RestoreClientImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/RestoreClientImpl.java index f16d213..0854d1a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/RestoreClientImpl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/RestoreClientImpl.java @@ -89,7 +89,7 @@ public final class RestoreClientImpl implements RestoreClient { try { // Check and validate the backup image and its dependencies if (check || autoRestore) { - if (validate(backupManifestMap)) { + if (validate(backupManifestMap, conf)) { LOG.info("Checking backup images: ok"); } else { String errMsg = "Some dependencies are missing for restore"; @@ -126,8 +126,8 @@ public final class RestoreClientImpl implements RestoreClient { } - private boolean validate(HashMap backupManifestMap) - throws IOException { + public static boolean validate(HashMap backupManifestMap, + Configuration conf) throws IOException { boolean isValid = true; for (Entry manifestEntry : backupManifestMap.entrySet()) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/RestoreTablesProcedure.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/RestoreTablesProcedure.java new file mode 100644 index 0000000..0dc400b --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/RestoreTablesProcedure.java @@ -0,0 +1,248 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hbase.backup.impl; + +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map.Entry; +import java.util.concurrent.atomic.AtomicBoolean; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.backup.BackupClientUtil; +import org.apache.hadoop.hbase.backup.BackupRestoreFactory; +import org.apache.hadoop.hbase.backup.BackupType; +import org.apache.hadoop.hbase.backup.HBackupFileSystem; +import org.apache.hadoop.hbase.backup.impl.BackupManifest.BackupImage; +import org.apache.hadoop.hbase.backup.master.LogRollMasterProcedureManager; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.client.Admin; +import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv; +import org.apache.hadoop.hbase.master.procedure.TableProcedureInterface; +import org.apache.hadoop.hbase.procedure2.StateMachineProcedure; +import org.apache.hadoop.hbase.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.protobuf.generated.BackupProtos; +import org.apache.hadoop.hbase.protobuf.generated.MasterProtos; +import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreTablesState; +import org.apache.hadoop.hbase.protobuf.generated.BackupProtos.ServerTimestamp; +import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos; +import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; +import org.apache.hadoop.hbase.util.FSUtils; + + 
+@InterfaceAudience.Private +public class RestoreTablesProcedure + extends StateMachineProcedure + implements TableProcedureInterface { + private static final Log LOG = LogFactory.getLog(RestoreTablesProcedure.class); + + private final AtomicBoolean aborted = new AtomicBoolean(false); + private Configuration conf; + private String backupId; + private List tableList; + private List tTableList; + private String targetRootDir; + private boolean autoRestore; + private boolean isOverwrite; + + public RestoreTablesProcedure() { + // Required by the Procedure framework to create the procedure on replay + } + + public RestoreTablesProcedure(final MasterProcedureEnv env, + final String targetRootDir, String backupId, boolean autoRestore, List sTableList, + List tTableList, boolean isOverwrite) throws IOException { + this.targetRootDir = targetRootDir; + this.backupId = backupId; + this.tableList = sTableList; + this.tTableList = tTableList; + if (tTableList == null || tTableList.isEmpty()) { + this.tTableList = tableList; + } + this.autoRestore = autoRestore; + this.isOverwrite = isOverwrite; + } + + @Override + public byte[] getResult() { + return null; + } + + @Override + protected Flow executeFromState(final MasterProcedureEnv env, final RestoreTablesState state) + throws InterruptedException { + if (conf == null) { + conf = env.getMasterConfiguration(); + } + if (LOG.isTraceEnabled()) { + LOG.trace(this + " execute state=" + state); + } + if (tTableList == null || tTableList.isEmpty()) { + tTableList = tableList; + } + Connection conn = env.getMasterServices().getClusterConnection(); + TableName[] tTableArray = tTableList.toArray(new TableName[tTableList.size()]); + try (Admin admin = conn.getAdmin()) { + switch (state) { + case VALIDATION: + + // check the target tables + RestoreClientImpl.checkTargetTables(tTableArray, isOverwrite, conf); + + setNextState(RestoreTablesState.RESTORE_IMAGES); + break; + case RESTORE_IMAGES: + TableName[] sTableArray = 
tableList.toArray(new TableName[tableList.size()]); + HashMap backupManifestMap = new HashMap<>(); + // check and load backup image manifest for the tables + Path rootPath = new Path(targetRootDir); + HBackupFileSystem.checkImageManifestExist(backupManifestMap, sTableArray, conf, rootPath, + backupId); + RestoreClientImpl.restoreStage(backupManifestMap, sTableArray, tTableArray, autoRestore, + conf); + + return Flow.NO_MORE_STATE; + + default: + throw new UnsupportedOperationException("unhandled state=" + state); + } + } catch (IOException e) { + setFailure("restore-table", e); + } + return Flow.HAS_MORE_STATE; + } + + @Override + protected void rollbackState(final MasterProcedureEnv env, final RestoreTablesState state) + throws IOException { + } + + @Override + protected RestoreTablesState getState(final int stateId) { + return RestoreTablesState.valueOf(stateId); + } + + @Override + protected int getStateId(final RestoreTablesState state) { + return state.getNumber(); + } + + @Override + protected RestoreTablesState getInitialState() { + return RestoreTablesState.VALIDATION; + } + + @Override + protected void setNextState(final RestoreTablesState state) { + if (aborted.get()) { + setAbortFailure("restore-table", "abort requested"); + } else { + super.setNextState(state); + } + } + + @Override + public boolean abort(final MasterProcedureEnv env) { + aborted.set(true); + return true; + } + + @Override + public void toStringClassDetails(StringBuilder sb) { + sb.append(getClass().getSimpleName()); + sb.append(" (targetRootDir="); + sb.append(targetRootDir); + sb.append(")"); + } + + MasterProtos.RestoreTablesRequest toRestoreTables() { + MasterProtos.RestoreTablesRequest.Builder bldr = MasterProtos.RestoreTablesRequest.newBuilder(); + bldr.setAutoRestore(autoRestore).setOverwrite(isOverwrite).setBackupId(backupId); + bldr.setBackupRootDir(targetRootDir); + for (TableName table : tableList) { + bldr.addTables(ProtobufUtil.toProtoTableName(table)); + } + for 
(TableName table : tTableList) { + bldr.addTargetTables(ProtobufUtil.toProtoTableName(table)); + } + return bldr.build(); + } + + @Override + public void serializeStateData(final OutputStream stream) throws IOException { + super.serializeStateData(stream); + + MasterProtos.RestoreTablesRequest restoreTables = toRestoreTables(); + restoreTables.writeDelimitedTo(stream); + } + + @Override + public void deserializeStateData(final InputStream stream) throws IOException { + super.deserializeStateData(stream); + + MasterProtos.RestoreTablesRequest proto = + MasterProtos.RestoreTablesRequest.parseDelimitedFrom(stream); + backupId = proto.getBackupId(); + targetRootDir = proto.getBackupRootDir(); + autoRestore = proto.getAutoRestore(); + isOverwrite = proto.getOverwrite(); + tableList = new ArrayList<>(proto.getTablesList().size()); + for (HBaseProtos.TableName table : proto.getTablesList()) { + tableList.add(ProtobufUtil.toTableName(table)); + } + tTableList = new ArrayList<>(proto.getTargetTablesList().size()); + for (HBaseProtos.TableName table : proto.getTargetTablesList()) { + tTableList.add(ProtobufUtil.toTableName(table)); + } + } + + @Override + public TableName getTableName() { + return TableName.BACKUP_TABLE_NAME; + } + + @Override + public TableOperationType getTableOperationType() { + return TableOperationType.RESTORE; + } + + @Override + protected boolean acquireLock(final MasterProcedureEnv env) { + if (env.waitInitialized(this)) { + return false; + } + return env.getProcedureQueue().tryAcquireTableExclusiveLock(this, TableName.BACKUP_TABLE_NAME); + } + + @Override + protected void releaseLock(final MasterProcedureEnv env) { + env.getProcedureQueue().releaseTableExclusiveLock(this, TableName.BACKUP_TABLE_NAME); + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java index 30da779..d5f54d7 100644 --- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java @@ -30,6 +30,7 @@ import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.Comparator; +import java.util.HashMap; import java.util.HashSet; import java.util.Iterator; import java.util.List; @@ -79,8 +80,11 @@ import org.apache.hadoop.hbase.UnknownRegionException; import org.apache.hadoop.hbase.backup.BackupType; import org.apache.hadoop.hbase.backup.HBackupFileSystem; import org.apache.hadoop.hbase.backup.impl.BackupManager; +import org.apache.hadoop.hbase.backup.impl.BackupManifest; import org.apache.hadoop.hbase.backup.impl.BackupRestoreConstants; import org.apache.hadoop.hbase.backup.impl.BackupSystemTable; +import org.apache.hadoop.hbase.backup.impl.RestoreClientImpl; +import org.apache.hadoop.hbase.backup.impl.RestoreTablesProcedure; import org.apache.hadoop.hbase.backup.master.FullTableBackupProcedure; import org.apache.hadoop.hbase.backup.master.IncrementalTableBackupProcedure; import org.apache.hadoop.hbase.classification.InterfaceAudience; @@ -2678,6 +2682,35 @@ public class HMaster extends HRegionServer implements MasterServices { return new Pair<>(procId, backupId); } + @Override + public long restoreTables(String backupRootDir, + String backupId, boolean check, boolean autoRestore, List sTableList, + List tTableList, boolean isOverwrite) throws IOException { + if (check) { + HashMap backupManifestMap = new HashMap<>(); + // check and load backup image manifest for the tables + Path rootPath = new Path(backupRootDir); + HBackupFileSystem.checkImageManifestExist(backupManifestMap, + sTableList.toArray(new TableName[sTableList.size()]), + conf, rootPath, backupId); + + // Check and validate the backup image and its dependencies + if (check || autoRestore) { + if (RestoreClientImpl.validate(backupManifestMap, conf)) { + LOG.info("Checking backup images: ok"); + } 
else { + String errMsg = "Some dependencies are missing for restore"; + LOG.error(errMsg); + throw new IOException(errMsg); + } + } + } + long procId = this.procedureExecutor.submitProcedure( + new RestoreTablesProcedure(procedureExecutor.getEnvironment(), backupRootDir, backupId, + autoRestore, sTableList, tTableList, isOverwrite)); + return procId; + } + /** * Returns the list of table descriptors that match the specified request * @param namespace the namespace to query, or null if querying for all diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java index 930bfd6..3c380b6 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java @@ -1072,6 +1072,28 @@ public class MasterRpcServices extends RSRpcServices } @Override + public MasterProtos.RestoreTablesResponse restoreTables( + RpcController controller, + MasterProtos.RestoreTablesRequest request) throws ServiceException { + try { + RestoreTablesResponse.Builder response = RestoreTablesResponse.newBuilder(); + List tablesList = new ArrayList<>(request.getTablesList().size()); + for (HBaseProtos.TableName table : request.getTablesList()) { + tablesList.add(ProtobufUtil.toTableName(table)); + } + List targetTablesList = new ArrayList<>(request.getTargetTablesList().size()); + for (HBaseProtos.TableName table : request.getTargetTablesList()) { + targetTablesList.add(ProtobufUtil.toTableName(table)); + } + long procId = master.restoreTables(request.getBackupRootDir(), request.getBackupId(), + request.getDependencyCheckOnly(), request.getAutoRestore(), tablesList, targetTablesList, + request.getOverwrite()); + return response.setProcId(procId).build(); + } catch (IOException e) { + throw new ServiceException(e); + } + } + @Override public 
ListTableDescriptorsByNamespaceResponse listTableDescriptorsByNamespace(RpcController c, ListTableDescriptorsByNamespaceRequest request) throws ServiceException { try { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java index 3557bb4..8d503dd 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java @@ -198,6 +198,13 @@ public interface MasterServices extends Server { final int workers, final long bandwidth) throws IOException; + /* + * Restore table set + */ + public long restoreTables(String backupRootDir, + String backupId, boolean check, boolean autoRestore, List sTableList, + List tTableList, boolean isOverwrite) throws IOException; + /** * Enable an existing table * @param tableName The table name diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TableProcedureInterface.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TableProcedureInterface.java index 56983a9..5356d2d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TableProcedureInterface.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TableProcedureInterface.java @@ -30,7 +30,7 @@ import org.apache.hadoop.hbase.classification.InterfaceStability; @InterfaceStability.Evolving public interface TableProcedureInterface { public enum TableOperationType { - CREATE, DELETE, DISABLE, EDIT, ENABLE, READ, BACKUP, + CREATE, DELETE, DISABLE, EDIT, ENABLE, READ, BACKUP, RESTORE, }; /** diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestFullRestore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestFullRestore.java index 983b850..f1a0e04 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestFullRestore.java 
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestFullRestore.java @@ -14,12 +14,15 @@ package org.apache.hadoop.hbase.backup; import static org.junit.Assert.assertTrue; import java.io.IOException; +import java.util.ArrayList; import java.util.List; import org.apache.commons.lang.StringUtils; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.HBaseAdmin; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.util.ToolRunner; @@ -33,6 +36,28 @@ public class TestFullRestore extends TestBackupBase { private static final Log LOG = LogFactory.getLog(TestFullRestore.class); + void restoreTables(String backupRootDir, + String backupId, boolean check, boolean autoRestore, List sTable, + List tTable, boolean isOverwrite) throws IOException { + Connection conn = null; + HBaseAdmin admin = null; + RestoreRequest request = new RestoreRequest(); + request.setAutoRestore(autoRestore).setDependencyCheckOnly(check).setOverwrite(isOverwrite); + request.setBackupId(backupId).setBackupRootDir(backupRootDir); + request.setTableList(sTable).setTargetTableList(tTable); + try { + conn = ConnectionFactory.createConnection(conf1); + admin = (HBaseAdmin) conn.getAdmin(); + admin.restoreTablesSync(request); + } finally { + if (admin != null) { + admin.close(); + } + if (conn != null) { + conn.close(); + } + } + } /** * Verify that a single table is restored to a new table * @throws Exception @@ -48,10 +73,11 @@ public class TestFullRestore extends TestBackupBase { LOG.info("backup complete"); - TableName[] tableset = new TableName[] { table1 }; - TableName[] tablemap = new TableName[] { table1_restore }; - RestoreClient client = getRestoreClient(); - client.restore(BACKUP_ROOT_DIR, backupId, false, false, tableset, 
tablemap, false); + List tableset = new ArrayList<>(); + tableset.add(table1); + List tablemap = new ArrayList<>(); + tablemap.add(table1_restore); + restoreTables(BACKUP_ROOT_DIR, backupId, false, false, tableset, tablemap, false); HBaseAdmin hba = TEST_UTIL.getHBaseAdmin(); assertTrue(hba.tableExists(table1_restore)); TEST_UTIL.deleteTable(table1_restore); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java index d3d5357..ac1181b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java @@ -440,6 +440,12 @@ public class TestCatalogJanitor { return null; } + @Override + public long restoreTables(String backupRootDir, + String backupId, boolean check, boolean autoRestore, List sTableList, + List tTableList, boolean isOverwrite) throws IOException { + return 1; + } @Override public List listTableDescriptorsByNamespace(String name) throws IOException {