diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
index c3b524b..54d2cb9 100644
--- hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
+++ hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
@@ -1330,6 +1330,22 @@ public interface Admin extends Abortable, Closeable {
   void restoreSnapshot(final String snapshotName) throws IOException, RestoreSnapshotException;

   /**
+   * Restore the specified snapshot on the original table. (The table must be disabled) If the
+   * "hbase.snapshot.restore.take.failsafe.snapshot" configuration property is set to true, a
+   * snapshot of the current table is taken before executing the restore operation. In case of
+   * restore failure, the failsafe snapshot will be restored. If the restore completes without
+   * problem, the failsafe snapshot is deleted.
+   *
+   * @param snapshotName name of the snapshot to restore
+   * @throws IOException if a remote or network exception occurs
+   * @throws RestoreSnapshotException if snapshot failed to be restored
+   * @return the result of the async restore snapshot. You can use Future.get(long, TimeUnit)
+   *    to wait on the operation to complete.
+   */
+  Future<Void> restoreSnapshotAsync(final String snapshotName)
+      throws IOException, RestoreSnapshotException;
+
+  /**
    * Restore the specified snapshot on the original table. (The table must be disabled) If
    * 'takeFailSafeSnapshot' is set to true, a snapshot of the current table is taken before
    * executing the restore operation. In case of restore failure, the failsafe snapshot will be
@@ -1360,7 +1376,7 @@
    * @throws RestoreSnapshotException if snapshot failed to be restored
    * @throws IllegalArgumentException if the restore request is formatted incorrectly
    */
-  void restoreSnapshot(final String snapshotName, boolean takeFailSafeSnapshot)
+  void restoreSnapshot(final String snapshotName, final boolean takeFailSafeSnapshot)
       throws IOException, RestoreSnapshotException;

   /**
@@ -1390,6 +1406,24 @@
       throws IOException, TableExistsException, RestoreSnapshotException;

   /**
+   * Create a new table by cloning the snapshot content. This method does not block
+   * and wait for the table to be completely cloned.
+   * You can use Future.get(long, TimeUnit) to wait on the operation to complete.
+   * It may throw ExecutionException if there was an error while executing the operation,
+   * or TimeoutException if the wait timeout was not long enough to allow the
+   * operation to complete.
+   *
+   * @param snapshotName name of the snapshot to be cloned
+   * @param tableName name of the table where the snapshot will be restored
+   * @throws IOException if a remote or network exception occurs
+   * @throws TableExistsException if the table to be cloned already exists
+   * @return the result of the async clone snapshot. You can use Future.get(long, TimeUnit)
+   *    to wait on the operation to complete.
+   */
+  Future<Void> cloneSnapshotAsync(final String snapshotName, final TableName tableName)
+      throws IOException, TableExistsException;
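Taken together, these two additions give Admin a non-blocking snapshot API. The following is a minimal usage sketch, not part of the patch; the configuration, snapshot name, table name, and timeouts are illustrative:

import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class AsyncSnapshotSketch {
  // Restore a snapshot, then clone it to a new table, without blocking inside
  // the Admin calls themselves; only the Future.get() calls wait.
  static void restoreThenClone(Configuration conf) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // The target table of a restore must already be disabled.
      Future<Void> restore = admin.restoreSnapshotAsync("my_snapshot");
      // May throw ExecutionException (restore failed) or TimeoutException.
      restore.get(5, TimeUnit.MINUTES);

      // Cloning creates a brand-new table from the same snapshot content.
      Future<Void> clone =
          admin.cloneSnapshotAsync("my_snapshot", TableName.valueOf("my_clone"));
      clone.get(5, TimeUnit.MINUTES);
    }
  }
}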
+
+  /**
    * Execute a distributed procedure on a cluster.
    *
    * @param signature A distributed procedure is uniquely identified by its signature (default the
diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
index ecaf18b..c71d81a 100644
--- hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
+++ hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
@@ -1588,13 +1588,6 @@ class ConnectionImplementation implements ClusterConnection, Closeable {
     }

     @Override
-    public MasterProtos.IsRestoreSnapshotDoneResponse isRestoreSnapshotDone(
-        RpcController controller, MasterProtos.IsRestoreSnapshotDoneRequest request)
-        throws ServiceException {
-      return stub.isRestoreSnapshotDone(controller, request);
-    }
-
-    @Override
     public MasterProtos.ExecProcedureResponse execProcedure(
         RpcController controller, MasterProtos.ExecProcedureRequest request)
         throws ServiceException {
diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
index c1d07ae..aea86b9 100644
--- hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
+++ hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
@@ -127,8 +127,6 @@ import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableDescripto
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableNamesRequest;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsRestoreSnapshotDoneRequest;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsRestoreSnapshotDoneResponse;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneRequest;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneResponse;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespaceDescriptorsRequest;
@@ -693,47 +691,6 @@ public class HBaseAdmin implements Admin {
     get(enableTableAsync(tableName), syncWaitTimeout, TimeUnit.MILLISECONDS);
   }

-  /**
-   * Wait for the table to be enabled and available
-   * If enabling the table exceeds the retry period, an exception is thrown.
-   * @param tableName name of the table
-   * @throws IOException if a remote or network exception occurs or
-   *    table is not enabled after the retries period.
-   */
-  private void waitUntilTableIsEnabled(final TableName tableName) throws IOException {
-    boolean enabled = false;
-    long start = EnvironmentEdgeManager.currentTime();
-    for (int tries = 0; tries < (this.numRetries * this.retryLongerMultiplier); tries++) {
-      try {
-        enabled = isTableEnabled(tableName);
-      } catch (TableNotFoundException tnfe) {
-        // wait for table to be created
-        enabled = false;
-      }
-      enabled = enabled && isTableAvailable(tableName);
-      if (enabled) {
-        break;
-      }
-      long sleep = getPauseTime(tries);
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Sleeping= " + sleep + "ms, waiting for all regions to be " +
-          "enabled in " + tableName);
-      }
-      try {
-        Thread.sleep(sleep);
-      } catch (InterruptedException e) {
-        // Do this conversion rather than let it out because do not want to
-        // change the method signature.
-        throw (InterruptedIOException)new InterruptedIOException("Interrupted").initCause(e);
-      }
-    }
-    if (!enabled) {
-      long msec = EnvironmentEdgeManager.currentTime() - start;
-      throw new IOException("Table '" + tableName +
-        "' not yet enabled, after " + msec + "ms.");
-    }
-  }
-
   @Override
   public Future<Void> enableTableAsync(final TableName tableName) throws IOException {
     TableName.isLegalFullyQualifiedTableName(tableName.getName());
@@ -2430,8 +2387,14 @@ public class HBaseAdmin implements Admin {
     restoreSnapshot(Bytes.toString(snapshotName), takeFailSafeSnapshot);
   }

-  @Override
-  public void restoreSnapshot(final String snapshotName, boolean takeFailSafeSnapshot)
+  /*
+   * Check whether the snapshot exists and contains a disabled table.
+   *
+   * @param snapshotName name of the snapshot to restore
+   * @throws IOException if a remote or network exception occurs
+   * @throws RestoreSnapshotException if no valid snapshot is found
+   */
+  private TableName getTableNameBeforeRestoreSnapshot(final String snapshotName)
       throws IOException, RestoreSnapshotException {
     TableName tableName = null;
     for (SnapshotDescription snapshotInfo: listSnapshots()) {
@@ -2445,6 +2408,13 @@ public class HBaseAdmin implements Admin {
       throw new RestoreSnapshotException(
        "Unable to find the table name for snapshot=" + snapshotName);
     }
+    return tableName;
+  }
+
+  @Override
+  public void restoreSnapshot(final String snapshotName, final boolean takeFailSafeSnapshot)
+      throws IOException, RestoreSnapshotException {
+    TableName tableName = getTableNameBeforeRestoreSnapshot(snapshotName);

     // The table does not exists, switch to clone.
     if (!tableExists(tableName)) {
@@ -2472,13 +2442,19 @@

     try {
       // Restore snapshot
-      internalRestoreSnapshot(snapshotName, tableName);
+      get(
+        internalRestoreSnapshotAsync(snapshotName, tableName),
+        syncWaitTimeout,
+        TimeUnit.MILLISECONDS);
     } catch (IOException e) {
       // Somthing went wrong during the restore...
       // if the pre-restore snapshot is available try to rollback
       if (takeFailSafeSnapshot) {
         try {
-          internalRestoreSnapshot(failSafeSnapshotSnapshotName, tableName);
+          get(
+            internalRestoreSnapshotAsync(failSafeSnapshotSnapshotName, tableName),
+            syncWaitTimeout,
+            TimeUnit.MILLISECONDS);
           String msg = "Restore snapshot=" + snapshotName +
             " failed. Rollback to snapshot=" + failSafeSnapshotSnapshotName + " succeeded.";
           LOG.error(msg, e);
@@ -2505,6 +2481,24 @@
   }

   @Override
+  public Future<Void> restoreSnapshotAsync(final String snapshotName)
+      throws IOException, RestoreSnapshotException {
+    TableName tableName = getTableNameBeforeRestoreSnapshot(snapshotName);
+
+    // The table does not exist, switch to clone.
+    if (!tableExists(tableName)) {
+      return cloneSnapshotAsync(snapshotName, tableName);
+    }
+
+    // Check if the table is disabled
+    if (!isTableDisabled(tableName)) {
+      throw new TableNotDisabledException(tableName);
+    }
+
+    return internalRestoreSnapshotAsync(snapshotName, tableName);
+  }
+
+  @Override
   public void cloneSnapshot(final byte[] snapshotName, final TableName tableName)
       throws IOException, TableExistsException, RestoreSnapshotException {
     cloneSnapshot(Bytes.toString(snapshotName), tableName);
@@ -2516,8 +2510,19 @@
     if (tableExists(tableName)) {
       throw new TableExistsException(tableName);
     }
-    internalRestoreSnapshot(snapshotName, tableName);
-    waitUntilTableIsEnabled(tableName);
+    get(
+      internalRestoreSnapshotAsync(snapshotName, tableName),
+      Integer.MAX_VALUE,
+      TimeUnit.MILLISECONDS);
+  }
+
+  @Override
+  public Future<Void> cloneSnapshotAsync(final String snapshotName, final TableName tableName)
+      throws IOException, TableExistsException {
+    if (tableExists(tableName)) {
+      throw new TableExistsException(tableName);
+    }
+    return internalRestoreSnapshotAsync(snapshotName, tableName);
   }

   @Override
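The synchronous paths above now simply wait on the Future returned by internalRestoreSnapshotAsync via get(...). For callers that use the async variants directly, the waiting and exception translation looks roughly like the sketch below; the helper class, its name, and the timeout policy are hypothetical, not part of the patch:

import java.io.IOException;
import java.io.InterruptedIOException;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;

final class SnapshotFutureWait {
  // Wait on an async snapshot operation, translating the Future's checked
  // exceptions into the IOException that synchronous Admin callers expect.
  static void await(Future<Void> op, long maxWaitMs) throws IOException {
    try {
      op.get(maxWaitMs, TimeUnit.MILLISECONDS);
    } catch (InterruptedException e) {
      Thread.currentThread().interrupt();
      throw (InterruptedIOException) new InterruptedIOException("Interrupted").initCause(e);
    } catch (ExecutionException e) {
      throw new IOException("Snapshot operation failed", e.getCause());
    } catch (TimeoutException e) {
      throw new IOException("Snapshot operation still running after " + maxWaitMs + "ms", e);
    }
  }
}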

@@ -2632,73 +2637,59 @@ public class HBaseAdmin implements Admin {
    * @throws RestoreSnapshotException if snapshot failed to be restored
    * @throws IllegalArgumentException if the restore request is formatted incorrectly
    */
-  private void internalRestoreSnapshot(final String snapshotName, final TableName tableName)
-      throws IOException, RestoreSnapshotException {
-    SnapshotDescription snapshot = SnapshotDescription.newBuilder()
+  private Future<Void> internalRestoreSnapshotAsync(
+      final String snapshotName,
+      final TableName tableName) throws IOException, RestoreSnapshotException {
+    final SnapshotDescription snapshot = SnapshotDescription.newBuilder()
       .setName(snapshotName).setTable(tableName.getNameAsString()).build();

     // actually restore the snapshot
-    internalRestoreSnapshotAsync(snapshot);
-
-    final IsRestoreSnapshotDoneRequest request = IsRestoreSnapshotDoneRequest.newBuilder()
-      .setSnapshot(snapshot).build();
-    IsRestoreSnapshotDoneResponse done = IsRestoreSnapshotDoneResponse.newBuilder()
-      .setDone(false).buildPartial();
-    final long maxPauseTime = 5000;
-    int tries = 0;
-    while (!done.getDone()) {
-      try {
-        // sleep a backoff <= pauseTime amount
-        long sleep = getPauseTime(tries++);
-        sleep = sleep > maxPauseTime ? maxPauseTime : sleep;
-        LOG.debug(tries + ") Sleeping: " + sleep
-          + " ms while we wait for snapshot restore to complete.");
-        Thread.sleep(sleep);
-      } catch (InterruptedException e) {
-        throw (InterruptedIOException)new InterruptedIOException("Interrupted").initCause(e);
-      }
-      LOG.debug("Getting current status of snapshot restore from master...");
-      done = executeCallable(new MasterCallable<IsRestoreSnapshotDoneResponse>(
-          getConnection()) {
-        @Override
-        public IsRestoreSnapshotDoneResponse call(int callTimeout) throws ServiceException {
-          PayloadCarryingRpcController controller = rpcControllerFactory.newController();
-          controller.setCallTimeout(callTimeout);
-          return master.isRestoreSnapshotDone(controller, request);
-        }
-      });
-    }
-    if (!done.getDone()) {
-      throw new RestoreSnapshotException("Snapshot '" + snapshot.getName() + "' wasn't restored.");
-    }
-  }
-
-  /**
-   * Execute Restore/Clone snapshot and wait for the server to complete (asynchronous)
-   * <p>
-   * Only a single snapshot should be restored at a time, or results may be undefined.
-   * @param snapshot snapshot to restore
-   * @return response from the server indicating the max time to wait for the snapshot
-   * @throws IOException if a remote or network exception occurs
-   * @throws RestoreSnapshotException if snapshot failed to be restored
-   * @throws IllegalArgumentException if the restore request is formatted incorrectly
-   */
-  private RestoreSnapshotResponse internalRestoreSnapshotAsync(final SnapshotDescription snapshot)
-      throws IOException, RestoreSnapshotException {
     ClientSnapshotDescriptionUtils.assertSnapshotRequestIsValid(snapshot);
-    final RestoreSnapshotRequest request = RestoreSnapshotRequest.newBuilder().setSnapshot(snapshot)
-        .build();
-
-    // run the snapshot restore on the master
-    return executeCallable(new MasterCallable<RestoreSnapshotResponse>(getConnection()) {
+    RestoreSnapshotResponse response = executeCallable(
+      new MasterCallable<RestoreSnapshotResponse>(getConnection()) {
       @Override
       public RestoreSnapshotResponse call(int callTimeout) throws ServiceException {
+        final RestoreSnapshotRequest request = RestoreSnapshotRequest.newBuilder()
+          .setSnapshot(snapshot)
+          .setNonceGroup(ng.getNonceGroup())
+          .setNonce(ng.newNonce())
+          .build();
         PayloadCarryingRpcController controller = rpcControllerFactory.newController();
         controller.setCallTimeout(callTimeout);
         return master.restoreSnapshot(controller, request);
       }
     });
+
+    return new RestoreSnapshotFuture(
+      this, snapshot, TableName.valueOf(snapshot.getTable()), response);
+  }
+
+  private static class RestoreSnapshotFuture extends TableFuture<Void> {
+    public RestoreSnapshotFuture(
+        final HBaseAdmin admin,
+        final SnapshotDescription snapshot,
+        final TableName tableName,
+        final RestoreSnapshotResponse response) {
+      super(admin, tableName,
+        (response != null && response.hasProcId()) ? response.getProcId() : null);
+
+      if (response != null && !response.hasProcId()) {
+        throw new UnsupportedOperationException("Client could not call old version of Server");
+      }
+    }
+
+    public RestoreSnapshotFuture(
+        final HBaseAdmin admin,
+        final TableName tableName,
+        final Long procId) {
+      super(admin, tableName, procId);
+    }
+
+    @Override
+    public String getOperationType() {
+      return "MODIFY";
+    }
+  }

   @Override
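The one-line ProcedureExecutor change that follows is easy to misread, so here is a toy model of the two predicates; the map names mirror the executor's fields, but the value types are elided and this is an illustration, not the real class:

import java.util.concurrent.ConcurrentHashMap;

final class IsFinishedModel {
  // A submitted procedure is tracked in `procedures` until it is retired;
  // the result of a finished procedure is parked in `completed`.
  final ConcurrentHashMap<Long, Object> procedures = new ConcurrentHashMap<>();
  final ConcurrentHashMap<Long, Object> completed = new ConcurrentHashMap<>();

  // Old check: "has a completed result been recorded?" This can flip back to
  // false once the result entry is evicted from `completed`.
  boolean isFinishedOld(long procId) { return completed.containsKey(procId); }

  // New check: "has the procedure left the set of live procedures?" This is
  // what the Future-based clients introduced by this patch poll against.
  boolean isFinishedNew(long procId) { return !procedures.containsKey(procId); }
}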
diff --git hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
index 38dd062..3f0ba37 100644
--- hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
+++ hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
@@ -694,7 +694,7 @@ public class ProcedureExecutor<TEnvironment> {
    * @return true if the procedure execution is finished, otherwise false.
    */
   public boolean isFinished(final long procId) {
-    return completed.containsKey(procId);
+    return !procedures.containsKey(procId);
   }

   /**
diff --git hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProcedureProtos.java hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProcedureProtos.java
index d40c1f7..9ed9d7a 100644
--- hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProcedureProtos.java
+++ hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProcedureProtos.java
@@ -1380,6 +1380,224 @@ public final class MasterProcedureProtos {
   }

   /**
+   * Protobuf enum {@code hbase.pb.CloneSnapshotState}
+   */
+  public enum CloneSnapshotState
+      implements com.google.protobuf.ProtocolMessageEnum {
+    /**
+     * <code>CLONE_SNAPSHOT_PRE_OPERATION = 1;</code>
+     */
+    CLONE_SNAPSHOT_PRE_OPERATION(0, 1),
+    /**
+     * <code>CLONE_SNAPSHOT_WRITE_FS_LAYOUT = 2;</code>
+     */
+    CLONE_SNAPSHOT_WRITE_FS_LAYOUT(1, 2),
+    /**
+     * <code>CLONE_SNAPSHOT_ADD_TO_META = 3;</code>
+     */
+    CLONE_SNAPSHOT_ADD_TO_META(2, 3),
+    /**
+     * <code>CLONE_SNAPSHOT_ASSIGN_REGIONS = 4;</code>
+     */
+    CLONE_SNAPSHOT_ASSIGN_REGIONS(3, 4),
+    /**
+     * <code>CLONE_SNAPSHOT_UPDATE_DESC_CACHE = 5;</code>
+     */
+    CLONE_SNAPSHOT_UPDATE_DESC_CACHE(4, 5),
+    /**
+     * <code>CLONE_SNAPSHOT_POST_OPERATION = 6;</code>
+     */
+    CLONE_SNAPSHOT_POST_OPERATION(5, 6),
+    ;
+
+    /**
+     * <code>CLONE_SNAPSHOT_PRE_OPERATION = 1;</code>
+     */
+    public static final int CLONE_SNAPSHOT_PRE_OPERATION_VALUE = 1;
+    /**
+     * <code>CLONE_SNAPSHOT_WRITE_FS_LAYOUT = 2;</code>
+     */
+    public static final int CLONE_SNAPSHOT_WRITE_FS_LAYOUT_VALUE = 2;
+    /**
+     * <code>CLONE_SNAPSHOT_ADD_TO_META = 3;</code>
+     */
+    public static final int CLONE_SNAPSHOT_ADD_TO_META_VALUE = 3;
+    /**
+     * <code>CLONE_SNAPSHOT_ASSIGN_REGIONS = 4;</code>
+     */
+    public static final int CLONE_SNAPSHOT_ASSIGN_REGIONS_VALUE = 4;
+    /**
+     * <code>CLONE_SNAPSHOT_UPDATE_DESC_CACHE = 5;</code>
+     */
+    public static final int CLONE_SNAPSHOT_UPDATE_DESC_CACHE_VALUE = 5;
+    /**
+     * <code>CLONE_SNAPSHOT_POST_OPERATION = 6;</code>
+     */
+    public static final int CLONE_SNAPSHOT_POST_OPERATION_VALUE = 6;
+
+
+    public final int getNumber() { return value; }
+
+    public static CloneSnapshotState valueOf(int value) {
+      switch (value) {
+        case 1: return CLONE_SNAPSHOT_PRE_OPERATION;
+        case 2: return CLONE_SNAPSHOT_WRITE_FS_LAYOUT;
+        case 3: return CLONE_SNAPSHOT_ADD_TO_META;
+        case 4: return CLONE_SNAPSHOT_ASSIGN_REGIONS;
+        case 5: return CLONE_SNAPSHOT_UPDATE_DESC_CACHE;
+        case 6: return CLONE_SNAPSHOT_POST_OPERATION;
+        default: return null;
+      }
+    }
+
+    public static com.google.protobuf.Internal.EnumLiteMap<CloneSnapshotState>
+        internalGetValueMap() {
+      return internalValueMap;
+    }
+    private static com.google.protobuf.Internal.EnumLiteMap<CloneSnapshotState>
+        internalValueMap =
+          new com.google.protobuf.Internal.EnumLiteMap<CloneSnapshotState>() {
+            public CloneSnapshotState findValueByNumber(int number) {
+              return CloneSnapshotState.valueOf(number);
+            }
+          };
+
+    public final com.google.protobuf.Descriptors.EnumValueDescriptor
+        getValueDescriptor() {
+      return getDescriptor().getValues().get(index);
+    }
+    public final com.google.protobuf.Descriptors.EnumDescriptor
+        getDescriptorForType() {
+      return getDescriptor();
+    }
+    public static final com.google.protobuf.Descriptors.EnumDescriptor
+        getDescriptor() {
+      return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.getDescriptor().getEnumTypes().get(12);
+    }
+
+    private static final CloneSnapshotState[] VALUES = values();
+
+    public static CloneSnapshotState valueOf(
+        com.google.protobuf.Descriptors.EnumValueDescriptor desc) {
+      if (desc.getType() != getDescriptor()) {
+        throw
new java.lang.IllegalArgumentException( + "EnumValueDescriptor is not for this type."); + } + return VALUES[desc.getIndex()]; + } + + private final int index; + private final int value; + + private CloneSnapshotState(int index, int value) { + this.index = index; + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:hbase.pb.CloneSnapshotState) + } + + /** + * Protobuf enum {@code hbase.pb.RestoreSnapshotState} + */ + public enum RestoreSnapshotState + implements com.google.protobuf.ProtocolMessageEnum { + /** + * RESTORE_SNAPSHOT_PRE_OPERATION = 1; + */ + RESTORE_SNAPSHOT_PRE_OPERATION(0, 1), + /** + * RESTORE_SNAPSHOT_UPDATE_TABLE_DESCRIPTOR = 2; + */ + RESTORE_SNAPSHOT_UPDATE_TABLE_DESCRIPTOR(1, 2), + /** + * RESTORE_SNAPSHOT_WRITE_FS_LAYOUT = 3; + */ + RESTORE_SNAPSHOT_WRITE_FS_LAYOUT(2, 3), + /** + * RESTORE_SNAPSHOT_UPDATE_META = 4; + */ + RESTORE_SNAPSHOT_UPDATE_META(3, 4), + ; + + /** + * RESTORE_SNAPSHOT_PRE_OPERATION = 1; + */ + public static final int RESTORE_SNAPSHOT_PRE_OPERATION_VALUE = 1; + /** + * RESTORE_SNAPSHOT_UPDATE_TABLE_DESCRIPTOR = 2; + */ + public static final int RESTORE_SNAPSHOT_UPDATE_TABLE_DESCRIPTOR_VALUE = 2; + /** + * RESTORE_SNAPSHOT_WRITE_FS_LAYOUT = 3; + */ + public static final int RESTORE_SNAPSHOT_WRITE_FS_LAYOUT_VALUE = 3; + /** + * RESTORE_SNAPSHOT_UPDATE_META = 4; + */ + public static final int RESTORE_SNAPSHOT_UPDATE_META_VALUE = 4; + + + public final int getNumber() { return value; } + + public static RestoreSnapshotState valueOf(int value) { + switch (value) { + case 1: return RESTORE_SNAPSHOT_PRE_OPERATION; + case 2: return RESTORE_SNAPSHOT_UPDATE_TABLE_DESCRIPTOR; + case 3: return RESTORE_SNAPSHOT_WRITE_FS_LAYOUT; + case 4: return RESTORE_SNAPSHOT_UPDATE_META; + default: return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap + internalGetValueMap() { + return internalValueMap; + } + private static com.google.protobuf.Internal.EnumLiteMap + internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public RestoreSnapshotState findValueByNumber(int number) { + return RestoreSnapshotState.valueOf(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor + getValueDescriptor() { + return getDescriptor().getValues().get(index); + } + public final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptorForType() { + return getDescriptor(); + } + public static final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.getDescriptor().getEnumTypes().get(13); + } + + private static final RestoreSnapshotState[] VALUES = values(); + + public static RestoreSnapshotState valueOf( + com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "EnumValueDescriptor is not for this type."); + } + return VALUES[desc.getIndex()]; + } + + private final int index; + private final int value; + + private RestoreSnapshotState(int index, int value) { + this.index = index; + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:hbase.pb.RestoreSnapshotState) + } + + /** * Protobuf enum {@code hbase.pb.ServerCrashState} */ public enum ServerCrashState @@ -1507,7 +1725,7 @@ public final class MasterProcedureProtos { } public static final com.google.protobuf.Descriptors.EnumDescriptor getDescriptor() { - return 
org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.getDescriptor().getEnumTypes().get(12); + return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.getDescriptor().getEnumTypes().get(14); } private static final ServerCrashState[] VALUES = values(); @@ -13727,122 +13945,73 @@ public final class MasterProcedureProtos { // @@protoc_insertion_point(class_scope:hbase.pb.DisableTableStateData) } - public interface ServerCrashStateDataOrBuilder + public interface RestoreParentToChildRegionsPairOrBuilder extends com.google.protobuf.MessageOrBuilder { - // required .hbase.pb.ServerName server_name = 1; - /** - * required .hbase.pb.ServerName server_name = 1; - */ - boolean hasServerName(); - /** - * required .hbase.pb.ServerName server_name = 1; - */ - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getServerName(); - /** - * required .hbase.pb.ServerName server_name = 1; - */ - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getServerNameOrBuilder(); - - // optional bool distributed_log_replay = 2; - /** - * optional bool distributed_log_replay = 2; - */ - boolean hasDistributedLogReplay(); - /** - * optional bool distributed_log_replay = 2; - */ - boolean getDistributedLogReplay(); - - // repeated .hbase.pb.RegionInfo regions_on_crashed_server = 3; - /** - * repeated .hbase.pb.RegionInfo regions_on_crashed_server = 3; - */ - java.util.List - getRegionsOnCrashedServerList(); - /** - * repeated .hbase.pb.RegionInfo regions_on_crashed_server = 3; - */ - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo getRegionsOnCrashedServer(int index); + // required string parent_region_name = 1; /** - * repeated .hbase.pb.RegionInfo regions_on_crashed_server = 3; + * required string parent_region_name = 1; */ - int getRegionsOnCrashedServerCount(); + boolean hasParentRegionName(); /** - * repeated .hbase.pb.RegionInfo regions_on_crashed_server = 3; + * required string parent_region_name = 1; */ - java.util.List - getRegionsOnCrashedServerOrBuilderList(); + java.lang.String getParentRegionName(); /** - * repeated .hbase.pb.RegionInfo regions_on_crashed_server = 3; + * required string parent_region_name = 1; */ - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getRegionsOnCrashedServerOrBuilder( - int index); + com.google.protobuf.ByteString + getParentRegionNameBytes(); - // repeated .hbase.pb.RegionInfo regions_assigned = 4; - /** - * repeated .hbase.pb.RegionInfo regions_assigned = 4; - */ - java.util.List - getRegionsAssignedList(); - /** - * repeated .hbase.pb.RegionInfo regions_assigned = 4; - */ - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo getRegionsAssigned(int index); + // required string child1_region_name = 2; /** - * repeated .hbase.pb.RegionInfo regions_assigned = 4; + * required string child1_region_name = 2; */ - int getRegionsAssignedCount(); + boolean hasChild1RegionName(); /** - * repeated .hbase.pb.RegionInfo regions_assigned = 4; + * required string child1_region_name = 2; */ - java.util.List - getRegionsAssignedOrBuilderList(); + java.lang.String getChild1RegionName(); /** - * repeated .hbase.pb.RegionInfo regions_assigned = 4; + * required string child1_region_name = 2; */ - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getRegionsAssignedOrBuilder( - int index); + com.google.protobuf.ByteString + getChild1RegionNameBytes(); - // optional bool carrying_meta = 5; + // required string child2_region_name = 3; /** - * optional 
bool carrying_meta = 5; - */ - boolean hasCarryingMeta(); - /** - * optional bool carrying_meta = 5; + * required string child2_region_name = 3; */ - boolean getCarryingMeta(); - - // optional bool should_split_wal = 6 [default = true]; + boolean hasChild2RegionName(); /** - * optional bool should_split_wal = 6 [default = true]; + * required string child2_region_name = 3; */ - boolean hasShouldSplitWal(); + java.lang.String getChild2RegionName(); /** - * optional bool should_split_wal = 6 [default = true]; + * required string child2_region_name = 3; */ - boolean getShouldSplitWal(); + com.google.protobuf.ByteString + getChild2RegionNameBytes(); } /** - * Protobuf type {@code hbase.pb.ServerCrashStateData} + * Protobuf type {@code hbase.pb.RestoreParentToChildRegionsPair} */ - public static final class ServerCrashStateData extends + public static final class RestoreParentToChildRegionsPair extends com.google.protobuf.GeneratedMessage - implements ServerCrashStateDataOrBuilder { - // Use ServerCrashStateData.newBuilder() to construct. - private ServerCrashStateData(com.google.protobuf.GeneratedMessage.Builder builder) { + implements RestoreParentToChildRegionsPairOrBuilder { + // Use RestoreParentToChildRegionsPair.newBuilder() to construct. + private RestoreParentToChildRegionsPair(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } - private ServerCrashStateData(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + private RestoreParentToChildRegionsPair(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } - private static final ServerCrashStateData defaultInstance; - public static ServerCrashStateData getDefaultInstance() { + private static final RestoreParentToChildRegionsPair defaultInstance; + public static RestoreParentToChildRegionsPair getDefaultInstance() { return defaultInstance; } - public ServerCrashStateData getDefaultInstanceForType() { + public RestoreParentToChildRegionsPair getDefaultInstanceForType() { return defaultInstance; } @@ -13852,7 +14021,7 @@ public final class MasterProcedureProtos { getUnknownFields() { return this.unknownFields; } - private ServerCrashStateData( + private RestoreParentToChildRegionsPair( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { @@ -13876,38 +14045,5220 @@ public final class MasterProcedureProtos { break; } case 10: { - org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder subBuilder = null; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - subBuilder = serverName_.toBuilder(); - } - serverName_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.PARSER, extensionRegistry); - if (subBuilder != null) { - subBuilder.mergeFrom(serverName_); - serverName_ = subBuilder.buildPartial(); - } bitField0_ |= 0x00000001; + parentRegionName_ = input.readBytes(); break; } - case 16: { + case 18: { bitField0_ |= 0x00000002; - distributedLogReplay_ = input.readBool(); + child1RegionName_ = input.readBytes(); break; } case 26: { - if (!((mutable_bitField0_ & 0x00000004) == 0x00000004)) { - regionsOnCrashedServer_ = new java.util.ArrayList(); - mutable_bitField0_ |= 0x00000004; - } - regionsOnCrashedServer_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.PARSER, extensionRegistry)); + 
bitField0_ |= 0x00000004; + child2RegionName_ = input.readBytes(); break; } - case 34: { - if (!((mutable_bitField0_ & 0x00000008) == 0x00000008)) { - regionsAssigned_ = new java.util.ArrayList(); - mutable_bitField0_ |= 0x00000008; - } - regionsAssigned_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.PARSER, extensionRegistry)); - break; + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_hbase_pb_RestoreParentToChildRegionsPair_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_hbase_pb_RestoreParentToChildRegionsPair_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.RestoreParentToChildRegionsPair.class, org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.RestoreParentToChildRegionsPair.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public RestoreParentToChildRegionsPair parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new RestoreParentToChildRegionsPair(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required string parent_region_name = 1; + public static final int PARENT_REGION_NAME_FIELD_NUMBER = 1; + private java.lang.Object parentRegionName_; + /** + * required string parent_region_name = 1; + */ + public boolean hasParentRegionName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string parent_region_name = 1; + */ + public java.lang.String getParentRegionName() { + java.lang.Object ref = parentRegionName_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + parentRegionName_ = s; + } + return s; + } + } + /** + * required string parent_region_name = 1; + */ + public com.google.protobuf.ByteString + getParentRegionNameBytes() { + java.lang.Object ref = parentRegionName_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + parentRegionName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + // required string child1_region_name = 2; + public static final int CHILD1_REGION_NAME_FIELD_NUMBER = 2; + private java.lang.Object child1RegionName_; + /** + * required string child1_region_name = 2; + */ + public boolean hasChild1RegionName() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required string child1_region_name = 2; + */ + 
public java.lang.String getChild1RegionName() { + java.lang.Object ref = child1RegionName_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + child1RegionName_ = s; + } + return s; + } + } + /** + * required string child1_region_name = 2; + */ + public com.google.protobuf.ByteString + getChild1RegionNameBytes() { + java.lang.Object ref = child1RegionName_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + child1RegionName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + // required string child2_region_name = 3; + public static final int CHILD2_REGION_NAME_FIELD_NUMBER = 3; + private java.lang.Object child2RegionName_; + /** + * required string child2_region_name = 3; + */ + public boolean hasChild2RegionName() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * required string child2_region_name = 3; + */ + public java.lang.String getChild2RegionName() { + java.lang.Object ref = child2RegionName_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + child2RegionName_ = s; + } + return s; + } + } + /** + * required string child2_region_name = 3; + */ + public com.google.protobuf.ByteString + getChild2RegionNameBytes() { + java.lang.Object ref = child2RegionName_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + child2RegionName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private void initFields() { + parentRegionName_ = ""; + child1RegionName_ = ""; + child2RegionName_ = ""; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasParentRegionName()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasChild1RegionName()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasChild2RegionName()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(1, getParentRegionNameBytes()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeBytes(2, getChild1RegionNameBytes()); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + output.writeBytes(3, getChild2RegionNameBytes()); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(1, getParentRegionNameBytes()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(2, getChild1RegionNameBytes()); + } + if (((bitField0_ & 0x00000004) == 
0x00000004)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(3, getChild2RegionNameBytes()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.RestoreParentToChildRegionsPair)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.RestoreParentToChildRegionsPair other = (org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.RestoreParentToChildRegionsPair) obj; + + boolean result = true; + result = result && (hasParentRegionName() == other.hasParentRegionName()); + if (hasParentRegionName()) { + result = result && getParentRegionName() + .equals(other.getParentRegionName()); + } + result = result && (hasChild1RegionName() == other.hasChild1RegionName()); + if (hasChild1RegionName()) { + result = result && getChild1RegionName() + .equals(other.getChild1RegionName()); + } + result = result && (hasChild2RegionName() == other.hasChild2RegionName()); + if (hasChild2RegionName()) { + result = result && getChild2RegionName() + .equals(other.getChild2RegionName()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasParentRegionName()) { + hash = (37 * hash) + PARENT_REGION_NAME_FIELD_NUMBER; + hash = (53 * hash) + getParentRegionName().hashCode(); + } + if (hasChild1RegionName()) { + hash = (37 * hash) + CHILD1_REGION_NAME_FIELD_NUMBER; + hash = (53 * hash) + getChild1RegionName().hashCode(); + } + if (hasChild2RegionName()) { + hash = (37 * hash) + CHILD2_REGION_NAME_FIELD_NUMBER; + hash = (53 * hash) + getChild2RegionName().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.RestoreParentToChildRegionsPair parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.RestoreParentToChildRegionsPair parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.RestoreParentToChildRegionsPair parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.RestoreParentToChildRegionsPair parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public 
static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.RestoreParentToChildRegionsPair parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.RestoreParentToChildRegionsPair parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.RestoreParentToChildRegionsPair parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.RestoreParentToChildRegionsPair parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.RestoreParentToChildRegionsPair parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.RestoreParentToChildRegionsPair parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.RestoreParentToChildRegionsPair prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code hbase.pb.RestoreParentToChildRegionsPair} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.RestoreParentToChildRegionsPairOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_hbase_pb_RestoreParentToChildRegionsPair_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_hbase_pb_RestoreParentToChildRegionsPair_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.RestoreParentToChildRegionsPair.class, org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.RestoreParentToChildRegionsPair.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.RestoreParentToChildRegionsPair.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + 
maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + parentRegionName_ = ""; + bitField0_ = (bitField0_ & ~0x00000001); + child1RegionName_ = ""; + bitField0_ = (bitField0_ & ~0x00000002); + child2RegionName_ = ""; + bitField0_ = (bitField0_ & ~0x00000004); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_hbase_pb_RestoreParentToChildRegionsPair_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.RestoreParentToChildRegionsPair getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.RestoreParentToChildRegionsPair.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.RestoreParentToChildRegionsPair build() { + org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.RestoreParentToChildRegionsPair result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.RestoreParentToChildRegionsPair buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.RestoreParentToChildRegionsPair result = new org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.RestoreParentToChildRegionsPair(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.parentRegionName_ = parentRegionName_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.child1RegionName_ = child1RegionName_; + if (((from_bitField0_ & 0x00000004) == 0x00000004)) { + to_bitField0_ |= 0x00000004; + } + result.child2RegionName_ = child2RegionName_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.RestoreParentToChildRegionsPair) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.RestoreParentToChildRegionsPair)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.RestoreParentToChildRegionsPair other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.RestoreParentToChildRegionsPair.getDefaultInstance()) return this; + if (other.hasParentRegionName()) { + bitField0_ |= 0x00000001; + parentRegionName_ = other.parentRegionName_; + onChanged(); + } + if (other.hasChild1RegionName()) { + bitField0_ |= 0x00000002; + child1RegionName_ = other.child1RegionName_; + onChanged(); + } + if (other.hasChild2RegionName()) { + bitField0_ |= 0x00000004; + child2RegionName_ = other.child2RegionName_; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasParentRegionName()) { + + return false; + } + if 
(!hasChild1RegionName()) { + + return false; + } + if (!hasChild2RegionName()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.RestoreParentToChildRegionsPair parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.RestoreParentToChildRegionsPair) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required string parent_region_name = 1; + private java.lang.Object parentRegionName_ = ""; + /** + * required string parent_region_name = 1; + */ + public boolean hasParentRegionName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string parent_region_name = 1; + */ + public java.lang.String getParentRegionName() { + java.lang.Object ref = parentRegionName_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + parentRegionName_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * required string parent_region_name = 1; + */ + public com.google.protobuf.ByteString + getParentRegionNameBytes() { + java.lang.Object ref = parentRegionName_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + parentRegionName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * required string parent_region_name = 1; + */ + public Builder setParentRegionName( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + parentRegionName_ = value; + onChanged(); + return this; + } + /** + * required string parent_region_name = 1; + */ + public Builder clearParentRegionName() { + bitField0_ = (bitField0_ & ~0x00000001); + parentRegionName_ = getDefaultInstance().getParentRegionName(); + onChanged(); + return this; + } + /** + * required string parent_region_name = 1; + */ + public Builder setParentRegionNameBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + parentRegionName_ = value; + onChanged(); + return this; + } + + // required string child1_region_name = 2; + private java.lang.Object child1RegionName_ = ""; + /** + * required string child1_region_name = 2; + */ + public boolean hasChild1RegionName() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required string child1_region_name = 2; + */ + public java.lang.String getChild1RegionName() { + java.lang.Object ref = child1RegionName_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + child1RegionName_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * required string child1_region_name = 2; + */ + public com.google.protobuf.ByteString + getChild1RegionNameBytes() { + java.lang.Object ref = child1RegionName_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + 
com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + child1RegionName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * required string child1_region_name = 2; + */ + public Builder setChild1RegionName( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000002; + child1RegionName_ = value; + onChanged(); + return this; + } + /** + * required string child1_region_name = 2; + */ + public Builder clearChild1RegionName() { + bitField0_ = (bitField0_ & ~0x00000002); + child1RegionName_ = getDefaultInstance().getChild1RegionName(); + onChanged(); + return this; + } + /** + * required string child1_region_name = 2; + */ + public Builder setChild1RegionNameBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000002; + child1RegionName_ = value; + onChanged(); + return this; + } + + // required string child2_region_name = 3; + private java.lang.Object child2RegionName_ = ""; + /** + * required string child2_region_name = 3; + */ + public boolean hasChild2RegionName() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * required string child2_region_name = 3; + */ + public java.lang.String getChild2RegionName() { + java.lang.Object ref = child2RegionName_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + child2RegionName_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * required string child2_region_name = 3; + */ + public com.google.protobuf.ByteString + getChild2RegionNameBytes() { + java.lang.Object ref = child2RegionName_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + child2RegionName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * required string child2_region_name = 3; + */ + public Builder setChild2RegionName( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000004; + child2RegionName_ = value; + onChanged(); + return this; + } + /** + * required string child2_region_name = 3; + */ + public Builder clearChild2RegionName() { + bitField0_ = (bitField0_ & ~0x00000004); + child2RegionName_ = getDefaultInstance().getChild2RegionName(); + onChanged(); + return this; + } + /** + * required string child2_region_name = 3; + */ + public Builder setChild2RegionNameBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000004; + child2RegionName_ = value; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:hbase.pb.RestoreParentToChildRegionsPair) + } + + static { + defaultInstance = new RestoreParentToChildRegionsPair(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:hbase.pb.RestoreParentToChildRegionsPair) + } + + public interface CloneSnapshotStateDataOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required .hbase.pb.UserInformation user_info = 1; + /** + * required .hbase.pb.UserInformation user_info = 1; + */ + boolean hasUserInfo(); + /** + * required .hbase.pb.UserInformation user_info = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation getUserInfo(); + /** + * required 
.hbase.pb.UserInformation user_info = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformationOrBuilder getUserInfoOrBuilder(); + + // required .hbase.pb.SnapshotDescription snapshot = 2; + /** + * required .hbase.pb.SnapshotDescription snapshot = 2; + */ + boolean hasSnapshot(); + /** + * required .hbase.pb.SnapshotDescription snapshot = 2; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription getSnapshot(); + /** + * required .hbase.pb.SnapshotDescription snapshot = 2; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescriptionOrBuilder getSnapshotOrBuilder(); + + // required .hbase.pb.TableSchema table_schema = 3; + /** + * required .hbase.pb.TableSchema table_schema = 3; + */ + boolean hasTableSchema(); + /** + * required .hbase.pb.TableSchema table_schema = 3; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema getTableSchema(); + /** + * required .hbase.pb.TableSchema table_schema = 3; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder getTableSchemaOrBuilder(); + + // repeated .hbase.pb.RegionInfo region_info = 4; + /** + * repeated .hbase.pb.RegionInfo region_info = 4; + */ + java.util.List + getRegionInfoList(); + /** + * repeated .hbase.pb.RegionInfo region_info = 4; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo getRegionInfo(int index); + /** + * repeated .hbase.pb.RegionInfo region_info = 4; + */ + int getRegionInfoCount(); + /** + * repeated .hbase.pb.RegionInfo region_info = 4; + */ + java.util.List + getRegionInfoOrBuilderList(); + /** + * repeated .hbase.pb.RegionInfo region_info = 4; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getRegionInfoOrBuilder( + int index); + + // repeated .hbase.pb.RestoreParentToChildRegionsPair parent_to_child_regions_pair_list = 5; + /** + * repeated .hbase.pb.RestoreParentToChildRegionsPair parent_to_child_regions_pair_list = 5; + */ + java.util.List + getParentToChildRegionsPairListList(); + /** + * repeated .hbase.pb.RestoreParentToChildRegionsPair parent_to_child_regions_pair_list = 5; + */ + org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.RestoreParentToChildRegionsPair getParentToChildRegionsPairList(int index); + /** + * repeated .hbase.pb.RestoreParentToChildRegionsPair parent_to_child_regions_pair_list = 5; + */ + int getParentToChildRegionsPairListCount(); + /** + * repeated .hbase.pb.RestoreParentToChildRegionsPair parent_to_child_regions_pair_list = 5; + */ + java.util.List + getParentToChildRegionsPairListOrBuilderList(); + /** + * repeated .hbase.pb.RestoreParentToChildRegionsPair parent_to_child_regions_pair_list = 5; + */ + org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.RestoreParentToChildRegionsPairOrBuilder getParentToChildRegionsPairListOrBuilder( + int index); + } + /** + * Protobuf type {@code hbase.pb.CloneSnapshotStateData} + */ + public static final class CloneSnapshotStateData extends + com.google.protobuf.GeneratedMessage + implements CloneSnapshotStateDataOrBuilder { + // Use CloneSnapshotStateData.newBuilder() to construct. 
+ private CloneSnapshotStateData(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private CloneSnapshotStateData(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final CloneSnapshotStateData defaultInstance; + public static CloneSnapshotStateData getDefaultInstance() { + return defaultInstance; + } + + public CloneSnapshotStateData getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private CloneSnapshotStateData( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.Builder subBuilder = null; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + subBuilder = userInfo_.toBuilder(); + } + userInfo_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(userInfo_); + userInfo_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000001; + break; + } + case 18: { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.Builder subBuilder = null; + if (((bitField0_ & 0x00000002) == 0x00000002)) { + subBuilder = snapshot_.toBuilder(); + } + snapshot_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(snapshot_); + snapshot_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000002; + break; + } + case 26: { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder subBuilder = null; + if (((bitField0_ & 0x00000004) == 0x00000004)) { + subBuilder = tableSchema_.toBuilder(); + } + tableSchema_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(tableSchema_); + tableSchema_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000004; + break; + } + case 34: { + if (!((mutable_bitField0_ & 0x00000008) == 0x00000008)) { + regionInfo_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000008; + } + regionInfo_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.PARSER, extensionRegistry)); + break; + } + case 42: { + if (!((mutable_bitField0_ & 0x00000010) == 0x00000010)) { + parentToChildRegionsPairList_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000010; + } + parentToChildRegionsPairList_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.RestoreParentToChildRegionsPair.PARSER, extensionRegistry)); + break; + } + } + } + } catch 
(com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000008) == 0x00000008)) { + regionInfo_ = java.util.Collections.unmodifiableList(regionInfo_); + } + if (((mutable_bitField0_ & 0x00000010) == 0x00000010)) { + parentToChildRegionsPairList_ = java.util.Collections.unmodifiableList(parentToChildRegionsPairList_); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_hbase_pb_CloneSnapshotStateData_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_hbase_pb_CloneSnapshotStateData_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CloneSnapshotStateData.class, org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CloneSnapshotStateData.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public CloneSnapshotStateData parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new CloneSnapshotStateData(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required .hbase.pb.UserInformation user_info = 1; + public static final int USER_INFO_FIELD_NUMBER = 1; + private org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation userInfo_; + /** + * required .hbase.pb.UserInformation user_info = 1; + */ + public boolean hasUserInfo() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .hbase.pb.UserInformation user_info = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation getUserInfo() { + return userInfo_; + } + /** + * required .hbase.pb.UserInformation user_info = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformationOrBuilder getUserInfoOrBuilder() { + return userInfo_; + } + + // required .hbase.pb.SnapshotDescription snapshot = 2; + public static final int SNAPSHOT_FIELD_NUMBER = 2; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription snapshot_; + /** + * required .hbase.pb.SnapshotDescription snapshot = 2; + */ + public boolean hasSnapshot() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required .hbase.pb.SnapshotDescription snapshot = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription getSnapshot() { + return snapshot_; + } + /** + * required .hbase.pb.SnapshotDescription snapshot = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescriptionOrBuilder getSnapshotOrBuilder() { + return snapshot_; + } + + // required .hbase.pb.TableSchema table_schema = 3; + public static final int TABLE_SCHEMA_FIELD_NUMBER = 3; + private 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema tableSchema_; + /** + * required .hbase.pb.TableSchema table_schema = 3; + */ + public boolean hasTableSchema() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * required .hbase.pb.TableSchema table_schema = 3; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema getTableSchema() { + return tableSchema_; + } + /** + * required .hbase.pb.TableSchema table_schema = 3; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder getTableSchemaOrBuilder() { + return tableSchema_; + } + + // repeated .hbase.pb.RegionInfo region_info = 4; + public static final int REGION_INFO_FIELD_NUMBER = 4; + private java.util.List regionInfo_; + /** + * repeated .hbase.pb.RegionInfo region_info = 4; + */ + public java.util.List getRegionInfoList() { + return regionInfo_; + } + /** + * repeated .hbase.pb.RegionInfo region_info = 4; + */ + public java.util.List + getRegionInfoOrBuilderList() { + return regionInfo_; + } + /** + * repeated .hbase.pb.RegionInfo region_info = 4; + */ + public int getRegionInfoCount() { + return regionInfo_.size(); + } + /** + * repeated .hbase.pb.RegionInfo region_info = 4; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo getRegionInfo(int index) { + return regionInfo_.get(index); + } + /** + * repeated .hbase.pb.RegionInfo region_info = 4; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getRegionInfoOrBuilder( + int index) { + return regionInfo_.get(index); + } + + // repeated .hbase.pb.RestoreParentToChildRegionsPair parent_to_child_regions_pair_list = 5; + public static final int PARENT_TO_CHILD_REGIONS_PAIR_LIST_FIELD_NUMBER = 5; + private java.util.List parentToChildRegionsPairList_; + /** + * repeated .hbase.pb.RestoreParentToChildRegionsPair parent_to_child_regions_pair_list = 5; + */ + public java.util.List getParentToChildRegionsPairListList() { + return parentToChildRegionsPairList_; + } + /** + * repeated .hbase.pb.RestoreParentToChildRegionsPair parent_to_child_regions_pair_list = 5; + */ + public java.util.List + getParentToChildRegionsPairListOrBuilderList() { + return parentToChildRegionsPairList_; + } + /** + * repeated .hbase.pb.RestoreParentToChildRegionsPair parent_to_child_regions_pair_list = 5; + */ + public int getParentToChildRegionsPairListCount() { + return parentToChildRegionsPairList_.size(); + } + /** + * repeated .hbase.pb.RestoreParentToChildRegionsPair parent_to_child_regions_pair_list = 5; + */ + public org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.RestoreParentToChildRegionsPair getParentToChildRegionsPairList(int index) { + return parentToChildRegionsPairList_.get(index); + } + /** + * repeated .hbase.pb.RestoreParentToChildRegionsPair parent_to_child_regions_pair_list = 5; + */ + public org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.RestoreParentToChildRegionsPairOrBuilder getParentToChildRegionsPairListOrBuilder( + int index) { + return parentToChildRegionsPairList_.get(index); + } + + private void initFields() { + userInfo_ = org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.getDefaultInstance(); + snapshot_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.getDefaultInstance(); + tableSchema_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance(); + regionInfo_ = java.util.Collections.emptyList(); + 
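+ // like regionInfo_ above, the parent-to-child pair list defaults to an immutable empty list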
parentToChildRegionsPairList_ = java.util.Collections.emptyList(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasUserInfo()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasSnapshot()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasTableSchema()) { + memoizedIsInitialized = 0; + return false; + } + if (!getUserInfo().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + if (!getSnapshot().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + if (!getTableSchema().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + for (int i = 0; i < getRegionInfoCount(); i++) { + if (!getRegionInfo(i).isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + for (int i = 0; i < getParentToChildRegionsPairListCount(); i++) { + if (!getParentToChildRegionsPairList(i).isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeMessage(1, userInfo_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeMessage(2, snapshot_); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + output.writeMessage(3, tableSchema_); + } + for (int i = 0; i < regionInfo_.size(); i++) { + output.writeMessage(4, regionInfo_.get(i)); + } + for (int i = 0; i < parentToChildRegionsPairList_.size(); i++) { + output.writeMessage(5, parentToChildRegionsPairList_.get(i)); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(1, userInfo_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(2, snapshot_); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(3, tableSchema_); + } + for (int i = 0; i < regionInfo_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(4, regionInfo_.get(i)); + } + for (int i = 0; i < parentToChildRegionsPairList_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(5, parentToChildRegionsPairList_.get(i)); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CloneSnapshotStateData)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CloneSnapshotStateData other = (org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CloneSnapshotStateData) obj; + + boolean result = true; + result = result && (hasUserInfo() == 
other.hasUserInfo()); + if (hasUserInfo()) { + result = result && getUserInfo() + .equals(other.getUserInfo()); + } + result = result && (hasSnapshot() == other.hasSnapshot()); + if (hasSnapshot()) { + result = result && getSnapshot() + .equals(other.getSnapshot()); + } + result = result && (hasTableSchema() == other.hasTableSchema()); + if (hasTableSchema()) { + result = result && getTableSchema() + .equals(other.getTableSchema()); + } + result = result && getRegionInfoList() + .equals(other.getRegionInfoList()); + result = result && getParentToChildRegionsPairListList() + .equals(other.getParentToChildRegionsPairListList()); + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasUserInfo()) { + hash = (37 * hash) + USER_INFO_FIELD_NUMBER; + hash = (53 * hash) + getUserInfo().hashCode(); + } + if (hasSnapshot()) { + hash = (37 * hash) + SNAPSHOT_FIELD_NUMBER; + hash = (53 * hash) + getSnapshot().hashCode(); + } + if (hasTableSchema()) { + hash = (37 * hash) + TABLE_SCHEMA_FIELD_NUMBER; + hash = (53 * hash) + getTableSchema().hashCode(); + } + if (getRegionInfoCount() > 0) { + hash = (37 * hash) + REGION_INFO_FIELD_NUMBER; + hash = (53 * hash) + getRegionInfoList().hashCode(); + } + if (getParentToChildRegionsPairListCount() > 0) { + hash = (37 * hash) + PARENT_TO_CHILD_REGIONS_PAIR_LIST_FIELD_NUMBER; + hash = (53 * hash) + getParentToChildRegionsPairListList().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CloneSnapshotStateData parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CloneSnapshotStateData parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CloneSnapshotStateData parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CloneSnapshotStateData parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CloneSnapshotStateData parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CloneSnapshotStateData parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CloneSnapshotStateData parseDelimitedFrom(java.io.InputStream 
input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CloneSnapshotStateData parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CloneSnapshotStateData parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CloneSnapshotStateData parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CloneSnapshotStateData prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code hbase.pb.CloneSnapshotStateData} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CloneSnapshotStateDataOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_hbase_pb_CloneSnapshotStateData_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_hbase_pb_CloneSnapshotStateData_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CloneSnapshotStateData.class, org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CloneSnapshotStateData.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CloneSnapshotStateData.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getUserInfoFieldBuilder(); + getSnapshotFieldBuilder(); + getTableSchemaFieldBuilder(); + getRegionInfoFieldBuilder(); + getParentToChildRegionsPairListFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + if (userInfoBuilder_ == null) { + userInfo_ = org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.getDefaultInstance(); + } else { + userInfoBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + if (snapshotBuilder_ == null) { + snapshot_ = 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.getDefaultInstance(); + } else { + snapshotBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000002); + if (tableSchemaBuilder_ == null) { + tableSchema_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance(); + } else { + tableSchemaBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000004); + if (regionInfoBuilder_ == null) { + regionInfo_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000008); + } else { + regionInfoBuilder_.clear(); + } + if (parentToChildRegionsPairListBuilder_ == null) { + parentToChildRegionsPairList_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000010); + } else { + parentToChildRegionsPairListBuilder_.clear(); + } + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_hbase_pb_CloneSnapshotStateData_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CloneSnapshotStateData getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CloneSnapshotStateData.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CloneSnapshotStateData build() { + org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CloneSnapshotStateData result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CloneSnapshotStateData buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CloneSnapshotStateData result = new org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CloneSnapshotStateData(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + if (userInfoBuilder_ == null) { + result.userInfo_ = userInfo_; + } else { + result.userInfo_ = userInfoBuilder_.build(); + } + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + if (snapshotBuilder_ == null) { + result.snapshot_ = snapshot_; + } else { + result.snapshot_ = snapshotBuilder_.build(); + } + if (((from_bitField0_ & 0x00000004) == 0x00000004)) { + to_bitField0_ |= 0x00000004; + } + if (tableSchemaBuilder_ == null) { + result.tableSchema_ = tableSchema_; + } else { + result.tableSchema_ = tableSchemaBuilder_.build(); + } + if (regionInfoBuilder_ == null) { + if (((bitField0_ & 0x00000008) == 0x00000008)) { + regionInfo_ = java.util.Collections.unmodifiableList(regionInfo_); + bitField0_ = (bitField0_ & ~0x00000008); + } + result.regionInfo_ = regionInfo_; + } else { + result.regionInfo_ = regionInfoBuilder_.build(); + } + if (parentToChildRegionsPairListBuilder_ == null) { + if (((bitField0_ & 0x00000010) == 0x00000010)) { + parentToChildRegionsPairList_ = java.util.Collections.unmodifiableList(parentToChildRegionsPairList_); + bitField0_ = (bitField0_ & ~0x00000010); + } + result.parentToChildRegionsPairList_ = parentToChildRegionsPairList_; + } else { + result.parentToChildRegionsPairList_ = parentToChildRegionsPairListBuilder_.build(); + } + result.bitField0_ = to_bitField0_; + onBuilt(); + 
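+ // buildPartial() hands the message back without required-field checks; build() above is the variant that verifies isInitialized()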
return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CloneSnapshotStateData) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CloneSnapshotStateData)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CloneSnapshotStateData other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CloneSnapshotStateData.getDefaultInstance()) return this; + if (other.hasUserInfo()) { + mergeUserInfo(other.getUserInfo()); + } + if (other.hasSnapshot()) { + mergeSnapshot(other.getSnapshot()); + } + if (other.hasTableSchema()) { + mergeTableSchema(other.getTableSchema()); + } + if (regionInfoBuilder_ == null) { + if (!other.regionInfo_.isEmpty()) { + if (regionInfo_.isEmpty()) { + regionInfo_ = other.regionInfo_; + bitField0_ = (bitField0_ & ~0x00000008); + } else { + ensureRegionInfoIsMutable(); + regionInfo_.addAll(other.regionInfo_); + } + onChanged(); + } + } else { + if (!other.regionInfo_.isEmpty()) { + if (regionInfoBuilder_.isEmpty()) { + regionInfoBuilder_.dispose(); + regionInfoBuilder_ = null; + regionInfo_ = other.regionInfo_; + bitField0_ = (bitField0_ & ~0x00000008); + regionInfoBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? + getRegionInfoFieldBuilder() : null; + } else { + regionInfoBuilder_.addAllMessages(other.regionInfo_); + } + } + } + if (parentToChildRegionsPairListBuilder_ == null) { + if (!other.parentToChildRegionsPairList_.isEmpty()) { + if (parentToChildRegionsPairList_.isEmpty()) { + parentToChildRegionsPairList_ = other.parentToChildRegionsPairList_; + bitField0_ = (bitField0_ & ~0x00000010); + } else { + ensureParentToChildRegionsPairListIsMutable(); + parentToChildRegionsPairList_.addAll(other.parentToChildRegionsPairList_); + } + onChanged(); + } + } else { + if (!other.parentToChildRegionsPairList_.isEmpty()) { + if (parentToChildRegionsPairListBuilder_.isEmpty()) { + parentToChildRegionsPairListBuilder_.dispose(); + parentToChildRegionsPairListBuilder_ = null; + parentToChildRegionsPairList_ = other.parentToChildRegionsPairList_; + bitField0_ = (bitField0_ & ~0x00000010); + parentToChildRegionsPairListBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? 
+ getParentToChildRegionsPairListFieldBuilder() : null; + } else { + parentToChildRegionsPairListBuilder_.addAllMessages(other.parentToChildRegionsPairList_); + } + } + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasUserInfo()) { + + return false; + } + if (!hasSnapshot()) { + + return false; + } + if (!hasTableSchema()) { + + return false; + } + if (!getUserInfo().isInitialized()) { + + return false; + } + if (!getSnapshot().isInitialized()) { + + return false; + } + if (!getTableSchema().isInitialized()) { + + return false; + } + for (int i = 0; i < getRegionInfoCount(); i++) { + if (!getRegionInfo(i).isInitialized()) { + + return false; + } + } + for (int i = 0; i < getParentToChildRegionsPairListCount(); i++) { + if (!getParentToChildRegionsPairList(i).isInitialized()) { + + return false; + } + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CloneSnapshotStateData parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CloneSnapshotStateData) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required .hbase.pb.UserInformation user_info = 1; + private org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation userInfo_ = org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation, org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.Builder, org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformationOrBuilder> userInfoBuilder_; + /** + * required .hbase.pb.UserInformation user_info = 1; + */ + public boolean hasUserInfo() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .hbase.pb.UserInformation user_info = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation getUserInfo() { + if (userInfoBuilder_ == null) { + return userInfo_; + } else { + return userInfoBuilder_.getMessage(); + } + } + /** + * required .hbase.pb.UserInformation user_info = 1; + */ + public Builder setUserInfo(org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation value) { + if (userInfoBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + userInfo_ = value; + onChanged(); + } else { + userInfoBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .hbase.pb.UserInformation user_info = 1; + */ + public Builder setUserInfo( + org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.Builder builderForValue) { + if (userInfoBuilder_ == null) { + userInfo_ = builderForValue.build(); + onChanged(); + } else { + userInfoBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .hbase.pb.UserInformation user_info = 1; + */ + public Builder mergeUserInfo(org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation value) { + if 
(userInfoBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001) && + userInfo_ != org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.getDefaultInstance()) { + userInfo_ = + org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.newBuilder(userInfo_).mergeFrom(value).buildPartial(); + } else { + userInfo_ = value; + } + onChanged(); + } else { + userInfoBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .hbase.pb.UserInformation user_info = 1; + */ + public Builder clearUserInfo() { + if (userInfoBuilder_ == null) { + userInfo_ = org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.getDefaultInstance(); + onChanged(); + } else { + userInfoBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + /** + * required .hbase.pb.UserInformation user_info = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.Builder getUserInfoBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return getUserInfoFieldBuilder().getBuilder(); + } + /** + * required .hbase.pb.UserInformation user_info = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformationOrBuilder getUserInfoOrBuilder() { + if (userInfoBuilder_ != null) { + return userInfoBuilder_.getMessageOrBuilder(); + } else { + return userInfo_; + } + } + /** + * required .hbase.pb.UserInformation user_info = 1; + */ + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation, org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.Builder, org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformationOrBuilder> + getUserInfoFieldBuilder() { + if (userInfoBuilder_ == null) { + userInfoBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation, org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.Builder, org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformationOrBuilder>( + userInfo_, + getParentForChildren(), + isClean()); + userInfo_ = null; + } + return userInfoBuilder_; + } + + // required .hbase.pb.SnapshotDescription snapshot = 2; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription snapshot_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescriptionOrBuilder> snapshotBuilder_; + /** + * required .hbase.pb.SnapshotDescription snapshot = 2; + */ + public boolean hasSnapshot() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required .hbase.pb.SnapshotDescription snapshot = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription getSnapshot() { + if (snapshotBuilder_ == null) { + return snapshot_; + } else { + return snapshotBuilder_.getMessage(); + } + } + /** + * required .hbase.pb.SnapshotDescription snapshot = 2; + */ + public Builder setSnapshot(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription value) { + if (snapshotBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + snapshot_ = value; + onChanged(); + } else { + 
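+ // a nested field builder exists and owns the value, so delegate to it instead of setting snapshot_ directly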
snapshotBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + return this; + } + /** + * required .hbase.pb.SnapshotDescription snapshot = 2; + */ + public Builder setSnapshot( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.Builder builderForValue) { + if (snapshotBuilder_ == null) { + snapshot_ = builderForValue.build(); + onChanged(); + } else { + snapshotBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + return this; + } + /** + * required .hbase.pb.SnapshotDescription snapshot = 2; + */ + public Builder mergeSnapshot(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription value) { + if (snapshotBuilder_ == null) { + if (((bitField0_ & 0x00000002) == 0x00000002) && + snapshot_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.getDefaultInstance()) { + snapshot_ = + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.newBuilder(snapshot_).mergeFrom(value).buildPartial(); + } else { + snapshot_ = value; + } + onChanged(); + } else { + snapshotBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000002; + return this; + } + /** + * required .hbase.pb.SnapshotDescription snapshot = 2; + */ + public Builder clearSnapshot() { + if (snapshotBuilder_ == null) { + snapshot_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.getDefaultInstance(); + onChanged(); + } else { + snapshotBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000002); + return this; + } + /** + * required .hbase.pb.SnapshotDescription snapshot = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.Builder getSnapshotBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return getSnapshotFieldBuilder().getBuilder(); + } + /** + * required .hbase.pb.SnapshotDescription snapshot = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescriptionOrBuilder getSnapshotOrBuilder() { + if (snapshotBuilder_ != null) { + return snapshotBuilder_.getMessageOrBuilder(); + } else { + return snapshot_; + } + } + /** + * required .hbase.pb.SnapshotDescription snapshot = 2; + */ + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescriptionOrBuilder> + getSnapshotFieldBuilder() { + if (snapshotBuilder_ == null) { + snapshotBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescriptionOrBuilder>( + snapshot_, + getParentForChildren(), + isClean()); + snapshot_ = null; + } + return snapshotBuilder_; + } + + // required .hbase.pb.TableSchema table_schema = 3; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema tableSchema_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder> tableSchemaBuilder_; + /** + * required .hbase.pb.TableSchema 
table_schema = 3; + */ + public boolean hasTableSchema() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * required .hbase.pb.TableSchema table_schema = 3; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema getTableSchema() { + if (tableSchemaBuilder_ == null) { + return tableSchema_; + } else { + return tableSchemaBuilder_.getMessage(); + } + } + /** + * required .hbase.pb.TableSchema table_schema = 3; + */ + public Builder setTableSchema(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema value) { + if (tableSchemaBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + tableSchema_ = value; + onChanged(); + } else { + tableSchemaBuilder_.setMessage(value); + } + bitField0_ |= 0x00000004; + return this; + } + /** + * required .hbase.pb.TableSchema table_schema = 3; + */ + public Builder setTableSchema( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder builderForValue) { + if (tableSchemaBuilder_ == null) { + tableSchema_ = builderForValue.build(); + onChanged(); + } else { + tableSchemaBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000004; + return this; + } + /** + * required .hbase.pb.TableSchema table_schema = 3; + */ + public Builder mergeTableSchema(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema value) { + if (tableSchemaBuilder_ == null) { + if (((bitField0_ & 0x00000004) == 0x00000004) && + tableSchema_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance()) { + tableSchema_ = + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.newBuilder(tableSchema_).mergeFrom(value).buildPartial(); + } else { + tableSchema_ = value; + } + onChanged(); + } else { + tableSchemaBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000004; + return this; + } + /** + * required .hbase.pb.TableSchema table_schema = 3; + */ + public Builder clearTableSchema() { + if (tableSchemaBuilder_ == null) { + tableSchema_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance(); + onChanged(); + } else { + tableSchemaBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000004); + return this; + } + /** + * required .hbase.pb.TableSchema table_schema = 3; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder getTableSchemaBuilder() { + bitField0_ |= 0x00000004; + onChanged(); + return getTableSchemaFieldBuilder().getBuilder(); + } + /** + * required .hbase.pb.TableSchema table_schema = 3; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder getTableSchemaOrBuilder() { + if (tableSchemaBuilder_ != null) { + return tableSchemaBuilder_.getMessageOrBuilder(); + } else { + return tableSchema_; + } + } + /** + * required .hbase.pb.TableSchema table_schema = 3; + */ + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder> + getTableSchemaFieldBuilder() { + if (tableSchemaBuilder_ == null) { + tableSchemaBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder>( + tableSchema_, + 
getParentForChildren(), + isClean()); + tableSchema_ = null; + } + return tableSchemaBuilder_; + } + + // repeated .hbase.pb.RegionInfo region_info = 4; + private java.util.List regionInfo_ = + java.util.Collections.emptyList(); + private void ensureRegionInfoIsMutable() { + if (!((bitField0_ & 0x00000008) == 0x00000008)) { + regionInfo_ = new java.util.ArrayList(regionInfo_); + bitField0_ |= 0x00000008; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder> regionInfoBuilder_; + + /** + * repeated .hbase.pb.RegionInfo region_info = 4; + */ + public java.util.List getRegionInfoList() { + if (regionInfoBuilder_ == null) { + return java.util.Collections.unmodifiableList(regionInfo_); + } else { + return regionInfoBuilder_.getMessageList(); + } + } + /** + * repeated .hbase.pb.RegionInfo region_info = 4; + */ + public int getRegionInfoCount() { + if (regionInfoBuilder_ == null) { + return regionInfo_.size(); + } else { + return regionInfoBuilder_.getCount(); + } + } + /** + * repeated .hbase.pb.RegionInfo region_info = 4; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo getRegionInfo(int index) { + if (regionInfoBuilder_ == null) { + return regionInfo_.get(index); + } else { + return regionInfoBuilder_.getMessage(index); + } + } + /** + * repeated .hbase.pb.RegionInfo region_info = 4; + */ + public Builder setRegionInfo( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo value) { + if (regionInfoBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureRegionInfoIsMutable(); + regionInfo_.set(index, value); + onChanged(); + } else { + regionInfoBuilder_.setMessage(index, value); + } + return this; + } + /** + * repeated .hbase.pb.RegionInfo region_info = 4; + */ + public Builder setRegionInfo( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder builderForValue) { + if (regionInfoBuilder_ == null) { + ensureRegionInfoIsMutable(); + regionInfo_.set(index, builderForValue.build()); + onChanged(); + } else { + regionInfoBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.RegionInfo region_info = 4; + */ + public Builder addRegionInfo(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo value) { + if (regionInfoBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureRegionInfoIsMutable(); + regionInfo_.add(value); + onChanged(); + } else { + regionInfoBuilder_.addMessage(value); + } + return this; + } + /** + * repeated .hbase.pb.RegionInfo region_info = 4; + */ + public Builder addRegionInfo( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo value) { + if (regionInfoBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureRegionInfoIsMutable(); + regionInfo_.add(index, value); + onChanged(); + } else { + regionInfoBuilder_.addMessage(index, value); + } + return this; + } + /** + * repeated .hbase.pb.RegionInfo region_info = 4; + */ + public Builder addRegionInfo( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder builderForValue) { + if (regionInfoBuilder_ == null) { + ensureRegionInfoIsMutable(); + regionInfo_.add(builderForValue.build()); + onChanged(); + } else 
{ + regionInfoBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.RegionInfo region_info = 4; + */ + public Builder addRegionInfo( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder builderForValue) { + if (regionInfoBuilder_ == null) { + ensureRegionInfoIsMutable(); + regionInfo_.add(index, builderForValue.build()); + onChanged(); + } else { + regionInfoBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.RegionInfo region_info = 4; + */ + public Builder addAllRegionInfo( + java.lang.Iterable values) { + if (regionInfoBuilder_ == null) { + ensureRegionInfoIsMutable(); + super.addAll(values, regionInfo_); + onChanged(); + } else { + regionInfoBuilder_.addAllMessages(values); + } + return this; + } + /** + * repeated .hbase.pb.RegionInfo region_info = 4; + */ + public Builder clearRegionInfo() { + if (regionInfoBuilder_ == null) { + regionInfo_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000008); + onChanged(); + } else { + regionInfoBuilder_.clear(); + } + return this; + } + /** + * repeated .hbase.pb.RegionInfo region_info = 4; + */ + public Builder removeRegionInfo(int index) { + if (regionInfoBuilder_ == null) { + ensureRegionInfoIsMutable(); + regionInfo_.remove(index); + onChanged(); + } else { + regionInfoBuilder_.remove(index); + } + return this; + } + /** + * repeated .hbase.pb.RegionInfo region_info = 4; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder getRegionInfoBuilder( + int index) { + return getRegionInfoFieldBuilder().getBuilder(index); + } + /** + * repeated .hbase.pb.RegionInfo region_info = 4; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getRegionInfoOrBuilder( + int index) { + if (regionInfoBuilder_ == null) { + return regionInfo_.get(index); } else { + return regionInfoBuilder_.getMessageOrBuilder(index); + } + } + /** + * repeated .hbase.pb.RegionInfo region_info = 4; + */ + public java.util.List + getRegionInfoOrBuilderList() { + if (regionInfoBuilder_ != null) { + return regionInfoBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(regionInfo_); + } + } + /** + * repeated .hbase.pb.RegionInfo region_info = 4; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder addRegionInfoBuilder() { + return getRegionInfoFieldBuilder().addBuilder( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance()); + } + /** + * repeated .hbase.pb.RegionInfo region_info = 4; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder addRegionInfoBuilder( + int index) { + return getRegionInfoFieldBuilder().addBuilder( + index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance()); + } + /** + * repeated .hbase.pb.RegionInfo region_info = 4; + */ + public java.util.List + getRegionInfoBuilderList() { + return getRegionInfoFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder> + getRegionInfoFieldBuilder() { + if (regionInfoBuilder_ == null) { + regionInfoBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< + 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder>( + regionInfo_, + ((bitField0_ & 0x00000008) == 0x00000008), + getParentForChildren(), + isClean()); + regionInfo_ = null; + } + return regionInfoBuilder_; + } + + // repeated .hbase.pb.RestoreParentToChildRegionsPair parent_to_child_regions_pair_list = 5; + private java.util.List parentToChildRegionsPairList_ = + java.util.Collections.emptyList(); + private void ensureParentToChildRegionsPairListIsMutable() { + if (!((bitField0_ & 0x00000010) == 0x00000010)) { + parentToChildRegionsPairList_ = new java.util.ArrayList(parentToChildRegionsPairList_); + bitField0_ |= 0x00000010; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.RestoreParentToChildRegionsPair, org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.RestoreParentToChildRegionsPair.Builder, org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.RestoreParentToChildRegionsPairOrBuilder> parentToChildRegionsPairListBuilder_; + + /** + * repeated .hbase.pb.RestoreParentToChildRegionsPair parent_to_child_regions_pair_list = 5; + */ + public java.util.List getParentToChildRegionsPairListList() { + if (parentToChildRegionsPairListBuilder_ == null) { + return java.util.Collections.unmodifiableList(parentToChildRegionsPairList_); + } else { + return parentToChildRegionsPairListBuilder_.getMessageList(); + } + } + /** + * repeated .hbase.pb.RestoreParentToChildRegionsPair parent_to_child_regions_pair_list = 5; + */ + public int getParentToChildRegionsPairListCount() { + if (parentToChildRegionsPairListBuilder_ == null) { + return parentToChildRegionsPairList_.size(); + } else { + return parentToChildRegionsPairListBuilder_.getCount(); + } + } + /** + * repeated .hbase.pb.RestoreParentToChildRegionsPair parent_to_child_regions_pair_list = 5; + */ + public org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.RestoreParentToChildRegionsPair getParentToChildRegionsPairList(int index) { + if (parentToChildRegionsPairListBuilder_ == null) { + return parentToChildRegionsPairList_.get(index); + } else { + return parentToChildRegionsPairListBuilder_.getMessage(index); + } + } + /** + * repeated .hbase.pb.RestoreParentToChildRegionsPair parent_to_child_regions_pair_list = 5; + */ + public Builder setParentToChildRegionsPairList( + int index, org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.RestoreParentToChildRegionsPair value) { + if (parentToChildRegionsPairListBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureParentToChildRegionsPairListIsMutable(); + parentToChildRegionsPairList_.set(index, value); + onChanged(); + } else { + parentToChildRegionsPairListBuilder_.setMessage(index, value); + } + return this; + } + /** + * repeated .hbase.pb.RestoreParentToChildRegionsPair parent_to_child_regions_pair_list = 5; + */ + public Builder setParentToChildRegionsPairList( + int index, org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.RestoreParentToChildRegionsPair.Builder builderForValue) { + if (parentToChildRegionsPairListBuilder_ == null) { + ensureParentToChildRegionsPairListIsMutable(); + parentToChildRegionsPairList_.set(index, builderForValue.build()); + onChanged(); + } else { + parentToChildRegionsPairListBuilder_.setMessage(index, 
builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.RestoreParentToChildRegionsPair parent_to_child_regions_pair_list = 5; + */ + public Builder addParentToChildRegionsPairList(org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.RestoreParentToChildRegionsPair value) { + if (parentToChildRegionsPairListBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureParentToChildRegionsPairListIsMutable(); + parentToChildRegionsPairList_.add(value); + onChanged(); + } else { + parentToChildRegionsPairListBuilder_.addMessage(value); + } + return this; + } + /** + * repeated .hbase.pb.RestoreParentToChildRegionsPair parent_to_child_regions_pair_list = 5; + */ + public Builder addParentToChildRegionsPairList( + int index, org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.RestoreParentToChildRegionsPair value) { + if (parentToChildRegionsPairListBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureParentToChildRegionsPairListIsMutable(); + parentToChildRegionsPairList_.add(index, value); + onChanged(); + } else { + parentToChildRegionsPairListBuilder_.addMessage(index, value); + } + return this; + } + /** + * repeated .hbase.pb.RestoreParentToChildRegionsPair parent_to_child_regions_pair_list = 5; + */ + public Builder addParentToChildRegionsPairList( + org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.RestoreParentToChildRegionsPair.Builder builderForValue) { + if (parentToChildRegionsPairListBuilder_ == null) { + ensureParentToChildRegionsPairListIsMutable(); + parentToChildRegionsPairList_.add(builderForValue.build()); + onChanged(); + } else { + parentToChildRegionsPairListBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.RestoreParentToChildRegionsPair parent_to_child_regions_pair_list = 5; + */ + public Builder addParentToChildRegionsPairList( + int index, org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.RestoreParentToChildRegionsPair.Builder builderForValue) { + if (parentToChildRegionsPairListBuilder_ == null) { + ensureParentToChildRegionsPairListIsMutable(); + parentToChildRegionsPairList_.add(index, builderForValue.build()); + onChanged(); + } else { + parentToChildRegionsPairListBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.RestoreParentToChildRegionsPair parent_to_child_regions_pair_list = 5; + */ + public Builder addAllParentToChildRegionsPairList( + java.lang.Iterable values) { + if (parentToChildRegionsPairListBuilder_ == null) { + ensureParentToChildRegionsPairListIsMutable(); + super.addAll(values, parentToChildRegionsPairList_); + onChanged(); + } else { + parentToChildRegionsPairListBuilder_.addAllMessages(values); + } + return this; + } + /** + * repeated .hbase.pb.RestoreParentToChildRegionsPair parent_to_child_regions_pair_list = 5; + */ + public Builder clearParentToChildRegionsPairList() { + if (parentToChildRegionsPairListBuilder_ == null) { + parentToChildRegionsPairList_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000010); + onChanged(); + } else { + parentToChildRegionsPairListBuilder_.clear(); + } + return this; + } + /** + * repeated .hbase.pb.RestoreParentToChildRegionsPair parent_to_child_regions_pair_list = 5; + */ + public Builder removeParentToChildRegionsPairList(int index) { + if (parentToChildRegionsPairListBuilder_ == null) { + ensureParentToChildRegionsPairListIsMutable(); + 
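+ // the ensure...IsMutable() call above swaps the shared immutable default list for a mutable copy before removal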
parentToChildRegionsPairList_.remove(index); + onChanged(); + } else { + parentToChildRegionsPairListBuilder_.remove(index); + } + return this; + } + /** + * repeated .hbase.pb.RestoreParentToChildRegionsPair parent_to_child_regions_pair_list = 5; + */ + public org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.RestoreParentToChildRegionsPair.Builder getParentToChildRegionsPairListBuilder( + int index) { + return getParentToChildRegionsPairListFieldBuilder().getBuilder(index); + } + /** + * repeated .hbase.pb.RestoreParentToChildRegionsPair parent_to_child_regions_pair_list = 5; + */ + public org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.RestoreParentToChildRegionsPairOrBuilder getParentToChildRegionsPairListOrBuilder( + int index) { + if (parentToChildRegionsPairListBuilder_ == null) { + return parentToChildRegionsPairList_.get(index); } else { + return parentToChildRegionsPairListBuilder_.getMessageOrBuilder(index); + } + } + /** + * repeated .hbase.pb.RestoreParentToChildRegionsPair parent_to_child_regions_pair_list = 5; + */ + public java.util.List + getParentToChildRegionsPairListOrBuilderList() { + if (parentToChildRegionsPairListBuilder_ != null) { + return parentToChildRegionsPairListBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(parentToChildRegionsPairList_); + } + } + /** + * repeated .hbase.pb.RestoreParentToChildRegionsPair parent_to_child_regions_pair_list = 5; + */ + public org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.RestoreParentToChildRegionsPair.Builder addParentToChildRegionsPairListBuilder() { + return getParentToChildRegionsPairListFieldBuilder().addBuilder( + org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.RestoreParentToChildRegionsPair.getDefaultInstance()); + } + /** + * repeated .hbase.pb.RestoreParentToChildRegionsPair parent_to_child_regions_pair_list = 5; + */ + public org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.RestoreParentToChildRegionsPair.Builder addParentToChildRegionsPairListBuilder( + int index) { + return getParentToChildRegionsPairListFieldBuilder().addBuilder( + index, org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.RestoreParentToChildRegionsPair.getDefaultInstance()); + } + /** + * repeated .hbase.pb.RestoreParentToChildRegionsPair parent_to_child_regions_pair_list = 5; + */ + public java.util.List + getParentToChildRegionsPairListBuilderList() { + return getParentToChildRegionsPairListFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.RestoreParentToChildRegionsPair, org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.RestoreParentToChildRegionsPair.Builder, org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.RestoreParentToChildRegionsPairOrBuilder> + getParentToChildRegionsPairListFieldBuilder() { + if (parentToChildRegionsPairListBuilder_ == null) { + parentToChildRegionsPairListBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.RestoreParentToChildRegionsPair, org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.RestoreParentToChildRegionsPair.Builder, org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.RestoreParentToChildRegionsPairOrBuilder>( + parentToChildRegionsPairList_, + ((bitField0_ & 0x00000010) == 0x00000010), + getParentForChildren(), + isClean()); + 
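+ // the newly created RepeatedFieldBuilder now owns the element list, so the plain reference is dropped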
parentToChildRegionsPairList_ = null;
+        }
+        return parentToChildRegionsPairListBuilder_;
+      }
+
+      // @@protoc_insertion_point(builder_scope:hbase.pb.CloneSnapshotStateData)
+    }
+
+    static {
+      defaultInstance = new CloneSnapshotStateData(true);
+      defaultInstance.initFields();
+    }
+
+    // @@protoc_insertion_point(class_scope:hbase.pb.CloneSnapshotStateData)
+  }
+
+  public interface RestoreSnapshotStateDataOrBuilder
+      extends com.google.protobuf.MessageOrBuilder {
+
+    // required .hbase.pb.UserInformation user_info = 1;
+    /**
+     * required .hbase.pb.UserInformation user_info = 1;
+     */
+    boolean hasUserInfo();
+    /**
+     * required .hbase.pb.UserInformation user_info = 1;
+     */
+    org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation getUserInfo();
+    /**
+     * required .hbase.pb.UserInformation user_info = 1;
+     */
+    org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformationOrBuilder getUserInfoOrBuilder();
+
+    // required .hbase.pb.SnapshotDescription snapshot = 2;
+    /**
+     * required .hbase.pb.SnapshotDescription snapshot = 2;
+     */
+    boolean hasSnapshot();
+    /**
+     * required .hbase.pb.SnapshotDescription snapshot = 2;
+     */
+    org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription getSnapshot();
+    /**
+     * required .hbase.pb.SnapshotDescription snapshot = 2;
+     */
+    org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescriptionOrBuilder getSnapshotOrBuilder();
+
+    // required .hbase.pb.TableSchema modified_table_schema = 3;
+    /**
+     * required .hbase.pb.TableSchema modified_table_schema = 3;
+     */
+    boolean hasModifiedTableSchema();
+    /**
+     * required .hbase.pb.TableSchema modified_table_schema = 3;
+     */
+    org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema getModifiedTableSchema();
+    /**
+     * required .hbase.pb.TableSchema modified_table_schema = 3;
+     */
+    org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder getModifiedTableSchemaOrBuilder();
+
+    // repeated .hbase.pb.RegionInfo region_info_for_restore = 4;
+    /**
+     * repeated .hbase.pb.RegionInfo region_info_for_restore = 4;
+     */
+    java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo>
+        getRegionInfoForRestoreList();
+    /**
+     * repeated .hbase.pb.RegionInfo region_info_for_restore = 4;
+     */
+    org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo getRegionInfoForRestore(int index);
+    /**
+     * repeated .hbase.pb.RegionInfo region_info_for_restore = 4;
+     */
+    int getRegionInfoForRestoreCount();
+    /**
+     * repeated .hbase.pb.RegionInfo region_info_for_restore = 4;
+     */
+    java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder>
+        getRegionInfoForRestoreOrBuilderList();
+    /**
+     * repeated .hbase.pb.RegionInfo region_info_for_restore = 4;
+     */
+    org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getRegionInfoForRestoreOrBuilder(
+        int index);
+
+    // repeated .hbase.pb.RegionInfo region_info_for_remove = 5;
+    /**
+     * repeated .hbase.pb.RegionInfo region_info_for_remove = 5;
+     */
+    java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo>
+        getRegionInfoForRemoveList();
+    /**
+     * repeated .hbase.pb.RegionInfo region_info_for_remove = 5;
+     */
+    org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo getRegionInfoForRemove(int index);
+    /**
+     * repeated .hbase.pb.RegionInfo region_info_for_remove = 5;
+     */
+    int getRegionInfoForRemoveCount();
+    /**
+     * repeated .hbase.pb.RegionInfo region_info_for_remove = 5;
+     */
+    java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder>
+        getRegionInfoForRemoveOrBuilderList();
+    /**
+     * repeated .hbase.pb.RegionInfo region_info_for_remove = 5;
+     */
+    org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getRegionInfoForRemoveOrBuilder(
+        int index);
+
+    // repeated .hbase.pb.RegionInfo region_info_for_add = 6;
+    /**
+     * repeated .hbase.pb.RegionInfo region_info_for_add = 6;
+     */
+    java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo>
+        getRegionInfoForAddList();
+    /**
+     * repeated .hbase.pb.RegionInfo region_info_for_add = 6;
+     */
+    org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo getRegionInfoForAdd(int index);
+    /**
+     * repeated .hbase.pb.RegionInfo region_info_for_add = 6;
+     */
+    int getRegionInfoForAddCount();
+    /**
+     * repeated .hbase.pb.RegionInfo region_info_for_add = 6;
+     */
+    java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder>
+        getRegionInfoForAddOrBuilderList();
+    /**
+     * repeated .hbase.pb.RegionInfo region_info_for_add = 6;
+     */
+    org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getRegionInfoForAddOrBuilder(
+        int index);
+
+    // repeated .hbase.pb.RestoreParentToChildRegionsPair parent_to_child_regions_pair_list = 7;
+    /**
+     * repeated .hbase.pb.RestoreParentToChildRegionsPair parent_to_child_regions_pair_list = 7;
+     */
+    java.util.List<org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.RestoreParentToChildRegionsPair>
+        getParentToChildRegionsPairListList();
+    /**
+     * repeated .hbase.pb.RestoreParentToChildRegionsPair parent_to_child_regions_pair_list = 7;
+     */
+    org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.RestoreParentToChildRegionsPair getParentToChildRegionsPairList(int index);
+    /**
+     * repeated .hbase.pb.RestoreParentToChildRegionsPair parent_to_child_regions_pair_list = 7;
+     */
+    int getParentToChildRegionsPairListCount();
+    /**
+     * repeated .hbase.pb.RestoreParentToChildRegionsPair parent_to_child_regions_pair_list = 7;
+     */
+    java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.RestoreParentToChildRegionsPairOrBuilder>
+        getParentToChildRegionsPairListOrBuilderList();
+    /**
+     * repeated .hbase.pb.RestoreParentToChildRegionsPair parent_to_child_regions_pair_list = 7;
+     */
+    org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.RestoreParentToChildRegionsPairOrBuilder getParentToChildRegionsPairListOrBuilder(
+        int index);
+  }
+  /**
+   * Protobuf type {@code hbase.pb.RestoreSnapshotStateData}
+   */
+  public static final class RestoreSnapshotStateData extends
+      com.google.protobuf.GeneratedMessage
+      implements RestoreSnapshotStateDataOrBuilder {
+    // Use RestoreSnapshotStateData.newBuilder() to construct.
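// ---------------------------------------------------------------------------
// Editor's sketch (not part of the patch): as the comment above notes,
// instances of this generated message are produced through its Builder.
// A minimal, hedged example of serializing the restore-procedure state;
// the helper name toStateBytes and its three message arguments are
// illustrative and assumed to be fully populated by the caller, since
// build() throws if any of the required fields 1-3 is unset.
// ---------------------------------------------------------------------------
//   static byte[] toStateBytes(
//       org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation userInfo,
//       org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription snapshotDesc,
//       org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema schema) {
//     return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.RestoreSnapshotStateData
//         .newBuilder()
//         .setUserInfo(userInfo)             // required field 1
//         .setSnapshot(snapshotDesc)         // required field 2
//         .setModifiedTableSchema(schema)    // required field 3
//         .build()                           // repeated fields 4-7 may stay empty
//         .toByteArray();
//   }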
+ private RestoreSnapshotStateData(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private RestoreSnapshotStateData(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final RestoreSnapshotStateData defaultInstance; + public static RestoreSnapshotStateData getDefaultInstance() { + return defaultInstance; + } + + public RestoreSnapshotStateData getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private RestoreSnapshotStateData( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.Builder subBuilder = null; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + subBuilder = userInfo_.toBuilder(); + } + userInfo_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(userInfo_); + userInfo_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000001; + break; + } + case 18: { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.Builder subBuilder = null; + if (((bitField0_ & 0x00000002) == 0x00000002)) { + subBuilder = snapshot_.toBuilder(); + } + snapshot_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(snapshot_); + snapshot_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000002; + break; + } + case 26: { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder subBuilder = null; + if (((bitField0_ & 0x00000004) == 0x00000004)) { + subBuilder = modifiedTableSchema_.toBuilder(); + } + modifiedTableSchema_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(modifiedTableSchema_); + modifiedTableSchema_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000004; + break; + } + case 34: { + if (!((mutable_bitField0_ & 0x00000008) == 0x00000008)) { + regionInfoForRestore_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000008; + } + regionInfoForRestore_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.PARSER, extensionRegistry)); + break; + } + case 42: { + if (!((mutable_bitField0_ & 0x00000010) == 0x00000010)) { + regionInfoForRemove_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000010; + } + regionInfoForRemove_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.PARSER, extensionRegistry)); + break; + } + case 50: { + if (!((mutable_bitField0_ & 
0x00000020) == 0x00000020)) { + regionInfoForAdd_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000020; + } + regionInfoForAdd_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.PARSER, extensionRegistry)); + break; + } + case 58: { + if (!((mutable_bitField0_ & 0x00000040) == 0x00000040)) { + parentToChildRegionsPairList_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000040; + } + parentToChildRegionsPairList_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.RestoreParentToChildRegionsPair.PARSER, extensionRegistry)); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000008) == 0x00000008)) { + regionInfoForRestore_ = java.util.Collections.unmodifiableList(regionInfoForRestore_); + } + if (((mutable_bitField0_ & 0x00000010) == 0x00000010)) { + regionInfoForRemove_ = java.util.Collections.unmodifiableList(regionInfoForRemove_); + } + if (((mutable_bitField0_ & 0x00000020) == 0x00000020)) { + regionInfoForAdd_ = java.util.Collections.unmodifiableList(regionInfoForAdd_); + } + if (((mutable_bitField0_ & 0x00000040) == 0x00000040)) { + parentToChildRegionsPairList_ = java.util.Collections.unmodifiableList(parentToChildRegionsPairList_); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_hbase_pb_RestoreSnapshotStateData_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_hbase_pb_RestoreSnapshotStateData_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.RestoreSnapshotStateData.class, org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.RestoreSnapshotStateData.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public RestoreSnapshotStateData parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new RestoreSnapshotStateData(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required .hbase.pb.UserInformation user_info = 1; + public static final int USER_INFO_FIELD_NUMBER = 1; + private org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation userInfo_; + /** + * required .hbase.pb.UserInformation user_info = 1; + */ + public boolean hasUserInfo() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .hbase.pb.UserInformation user_info = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation getUserInfo() { + return userInfo_; + } + /** + * required .hbase.pb.UserInformation user_info = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformationOrBuilder 
getUserInfoOrBuilder() { + return userInfo_; + } + + // required .hbase.pb.SnapshotDescription snapshot = 2; + public static final int SNAPSHOT_FIELD_NUMBER = 2; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription snapshot_; + /** + * required .hbase.pb.SnapshotDescription snapshot = 2; + */ + public boolean hasSnapshot() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required .hbase.pb.SnapshotDescription snapshot = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription getSnapshot() { + return snapshot_; + } + /** + * required .hbase.pb.SnapshotDescription snapshot = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescriptionOrBuilder getSnapshotOrBuilder() { + return snapshot_; + } + + // required .hbase.pb.TableSchema modified_table_schema = 3; + public static final int MODIFIED_TABLE_SCHEMA_FIELD_NUMBER = 3; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema modifiedTableSchema_; + /** + * required .hbase.pb.TableSchema modified_table_schema = 3; + */ + public boolean hasModifiedTableSchema() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * required .hbase.pb.TableSchema modified_table_schema = 3; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema getModifiedTableSchema() { + return modifiedTableSchema_; + } + /** + * required .hbase.pb.TableSchema modified_table_schema = 3; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder getModifiedTableSchemaOrBuilder() { + return modifiedTableSchema_; + } + + // repeated .hbase.pb.RegionInfo region_info_for_restore = 4; + public static final int REGION_INFO_FOR_RESTORE_FIELD_NUMBER = 4; + private java.util.List regionInfoForRestore_; + /** + * repeated .hbase.pb.RegionInfo region_info_for_restore = 4; + */ + public java.util.List getRegionInfoForRestoreList() { + return regionInfoForRestore_; + } + /** + * repeated .hbase.pb.RegionInfo region_info_for_restore = 4; + */ + public java.util.List + getRegionInfoForRestoreOrBuilderList() { + return regionInfoForRestore_; + } + /** + * repeated .hbase.pb.RegionInfo region_info_for_restore = 4; + */ + public int getRegionInfoForRestoreCount() { + return regionInfoForRestore_.size(); + } + /** + * repeated .hbase.pb.RegionInfo region_info_for_restore = 4; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo getRegionInfoForRestore(int index) { + return regionInfoForRestore_.get(index); + } + /** + * repeated .hbase.pb.RegionInfo region_info_for_restore = 4; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getRegionInfoForRestoreOrBuilder( + int index) { + return regionInfoForRestore_.get(index); + } + + // repeated .hbase.pb.RegionInfo region_info_for_remove = 5; + public static final int REGION_INFO_FOR_REMOVE_FIELD_NUMBER = 5; + private java.util.List regionInfoForRemove_; + /** + * repeated .hbase.pb.RegionInfo region_info_for_remove = 5; + */ + public java.util.List getRegionInfoForRemoveList() { + return regionInfoForRemove_; + } + /** + * repeated .hbase.pb.RegionInfo region_info_for_remove = 5; + */ + public java.util.List + getRegionInfoForRemoveOrBuilderList() { + return regionInfoForRemove_; + } + /** + * repeated .hbase.pb.RegionInfo region_info_for_remove = 5; + */ + public int getRegionInfoForRemoveCount() { + return regionInfoForRemove_.size(); + } + /** + * repeated .hbase.pb.RegionInfo 
region_info_for_remove = 5; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo getRegionInfoForRemove(int index) { + return regionInfoForRemove_.get(index); + } + /** + * repeated .hbase.pb.RegionInfo region_info_for_remove = 5; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getRegionInfoForRemoveOrBuilder( + int index) { + return regionInfoForRemove_.get(index); + } + + // repeated .hbase.pb.RegionInfo region_info_for_add = 6; + public static final int REGION_INFO_FOR_ADD_FIELD_NUMBER = 6; + private java.util.List regionInfoForAdd_; + /** + * repeated .hbase.pb.RegionInfo region_info_for_add = 6; + */ + public java.util.List getRegionInfoForAddList() { + return regionInfoForAdd_; + } + /** + * repeated .hbase.pb.RegionInfo region_info_for_add = 6; + */ + public java.util.List + getRegionInfoForAddOrBuilderList() { + return regionInfoForAdd_; + } + /** + * repeated .hbase.pb.RegionInfo region_info_for_add = 6; + */ + public int getRegionInfoForAddCount() { + return regionInfoForAdd_.size(); + } + /** + * repeated .hbase.pb.RegionInfo region_info_for_add = 6; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo getRegionInfoForAdd(int index) { + return regionInfoForAdd_.get(index); + } + /** + * repeated .hbase.pb.RegionInfo region_info_for_add = 6; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getRegionInfoForAddOrBuilder( + int index) { + return regionInfoForAdd_.get(index); + } + + // repeated .hbase.pb.RestoreParentToChildRegionsPair parent_to_child_regions_pair_list = 7; + public static final int PARENT_TO_CHILD_REGIONS_PAIR_LIST_FIELD_NUMBER = 7; + private java.util.List parentToChildRegionsPairList_; + /** + * repeated .hbase.pb.RestoreParentToChildRegionsPair parent_to_child_regions_pair_list = 7; + */ + public java.util.List getParentToChildRegionsPairListList() { + return parentToChildRegionsPairList_; + } + /** + * repeated .hbase.pb.RestoreParentToChildRegionsPair parent_to_child_regions_pair_list = 7; + */ + public java.util.List + getParentToChildRegionsPairListOrBuilderList() { + return parentToChildRegionsPairList_; + } + /** + * repeated .hbase.pb.RestoreParentToChildRegionsPair parent_to_child_regions_pair_list = 7; + */ + public int getParentToChildRegionsPairListCount() { + return parentToChildRegionsPairList_.size(); + } + /** + * repeated .hbase.pb.RestoreParentToChildRegionsPair parent_to_child_regions_pair_list = 7; + */ + public org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.RestoreParentToChildRegionsPair getParentToChildRegionsPairList(int index) { + return parentToChildRegionsPairList_.get(index); + } + /** + * repeated .hbase.pb.RestoreParentToChildRegionsPair parent_to_child_regions_pair_list = 7; + */ + public org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.RestoreParentToChildRegionsPairOrBuilder getParentToChildRegionsPairListOrBuilder( + int index) { + return parentToChildRegionsPairList_.get(index); + } + + private void initFields() { + userInfo_ = org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.getDefaultInstance(); + snapshot_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.getDefaultInstance(); + modifiedTableSchema_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance(); + regionInfoForRestore_ = java.util.Collections.emptyList(); + regionInfoForRemove_ = java.util.Collections.emptyList(); + 
regionInfoForAdd_ = java.util.Collections.emptyList(); + parentToChildRegionsPairList_ = java.util.Collections.emptyList(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasUserInfo()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasSnapshot()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasModifiedTableSchema()) { + memoizedIsInitialized = 0; + return false; + } + if (!getUserInfo().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + if (!getSnapshot().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + if (!getModifiedTableSchema().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + for (int i = 0; i < getRegionInfoForRestoreCount(); i++) { + if (!getRegionInfoForRestore(i).isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + for (int i = 0; i < getRegionInfoForRemoveCount(); i++) { + if (!getRegionInfoForRemove(i).isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + for (int i = 0; i < getRegionInfoForAddCount(); i++) { + if (!getRegionInfoForAdd(i).isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + for (int i = 0; i < getParentToChildRegionsPairListCount(); i++) { + if (!getParentToChildRegionsPairList(i).isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeMessage(1, userInfo_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeMessage(2, snapshot_); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + output.writeMessage(3, modifiedTableSchema_); + } + for (int i = 0; i < regionInfoForRestore_.size(); i++) { + output.writeMessage(4, regionInfoForRestore_.get(i)); + } + for (int i = 0; i < regionInfoForRemove_.size(); i++) { + output.writeMessage(5, regionInfoForRemove_.get(i)); + } + for (int i = 0; i < regionInfoForAdd_.size(); i++) { + output.writeMessage(6, regionInfoForAdd_.get(i)); + } + for (int i = 0; i < parentToChildRegionsPairList_.size(); i++) { + output.writeMessage(7, parentToChildRegionsPairList_.get(i)); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(1, userInfo_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(2, snapshot_); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(3, modifiedTableSchema_); + } + for (int i = 0; i < regionInfoForRestore_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(4, regionInfoForRestore_.get(i)); + } + for (int i = 0; i < regionInfoForRemove_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(5, regionInfoForRemove_.get(i)); + } + for (int i = 0; i < regionInfoForAdd_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(6, 
regionInfoForAdd_.get(i)); + } + for (int i = 0; i < parentToChildRegionsPairList_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(7, parentToChildRegionsPairList_.get(i)); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.RestoreSnapshotStateData)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.RestoreSnapshotStateData other = (org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.RestoreSnapshotStateData) obj; + + boolean result = true; + result = result && (hasUserInfo() == other.hasUserInfo()); + if (hasUserInfo()) { + result = result && getUserInfo() + .equals(other.getUserInfo()); + } + result = result && (hasSnapshot() == other.hasSnapshot()); + if (hasSnapshot()) { + result = result && getSnapshot() + .equals(other.getSnapshot()); + } + result = result && (hasModifiedTableSchema() == other.hasModifiedTableSchema()); + if (hasModifiedTableSchema()) { + result = result && getModifiedTableSchema() + .equals(other.getModifiedTableSchema()); + } + result = result && getRegionInfoForRestoreList() + .equals(other.getRegionInfoForRestoreList()); + result = result && getRegionInfoForRemoveList() + .equals(other.getRegionInfoForRemoveList()); + result = result && getRegionInfoForAddList() + .equals(other.getRegionInfoForAddList()); + result = result && getParentToChildRegionsPairListList() + .equals(other.getParentToChildRegionsPairListList()); + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasUserInfo()) { + hash = (37 * hash) + USER_INFO_FIELD_NUMBER; + hash = (53 * hash) + getUserInfo().hashCode(); + } + if (hasSnapshot()) { + hash = (37 * hash) + SNAPSHOT_FIELD_NUMBER; + hash = (53 * hash) + getSnapshot().hashCode(); + } + if (hasModifiedTableSchema()) { + hash = (37 * hash) + MODIFIED_TABLE_SCHEMA_FIELD_NUMBER; + hash = (53 * hash) + getModifiedTableSchema().hashCode(); + } + if (getRegionInfoForRestoreCount() > 0) { + hash = (37 * hash) + REGION_INFO_FOR_RESTORE_FIELD_NUMBER; + hash = (53 * hash) + getRegionInfoForRestoreList().hashCode(); + } + if (getRegionInfoForRemoveCount() > 0) { + hash = (37 * hash) + REGION_INFO_FOR_REMOVE_FIELD_NUMBER; + hash = (53 * hash) + getRegionInfoForRemoveList().hashCode(); + } + if (getRegionInfoForAddCount() > 0) { + hash = (37 * hash) + REGION_INFO_FOR_ADD_FIELD_NUMBER; + hash = (53 * hash) + getRegionInfoForAddList().hashCode(); + } + if (getParentToChildRegionsPairListCount() > 0) { + hash = (37 * hash) + PARENT_TO_CHILD_REGIONS_PAIR_LIST_FIELD_NUMBER; + hash = (53 * hash) + getParentToChildRegionsPairListList().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static 
org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.RestoreSnapshotStateData parseFrom(
+        com.google.protobuf.ByteString data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.RestoreSnapshotStateData parseFrom(
+        com.google.protobuf.ByteString data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.RestoreSnapshotStateData parseFrom(byte[] data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.RestoreSnapshotStateData parseFrom(
+        byte[] data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.RestoreSnapshotStateData parseFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.RestoreSnapshotStateData parseFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.RestoreSnapshotStateData parseDelimitedFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return PARSER.parseDelimitedFrom(input);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.RestoreSnapshotStateData parseDelimitedFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseDelimitedFrom(input, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.RestoreSnapshotStateData parseFrom(
+        com.google.protobuf.CodedInputStream input)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.RestoreSnapshotStateData parseFrom(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input, extensionRegistry);
+    }
+
+    public static Builder newBuilder() { return Builder.create(); }
+    public Builder newBuilderForType() { return newBuilder(); }
+    public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.RestoreSnapshotStateData prototype) {
+      return newBuilder().mergeFrom(prototype);
+    }
+    public Builder toBuilder() { return newBuilder(this); }
+
+    @java.lang.Override
+    protected Builder newBuilderForType(
+        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+      Builder builder = new Builder(parent);
+      return builder;
+    }
+    /**
+     * Protobuf type {@code hbase.pb.RestoreSnapshotStateData}
+     */
+    public static final class Builder extends
+        com.google.protobuf.GeneratedMessage.Builder<Builder>
+       implements org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.RestoreSnapshotStateDataOrBuilder
{ + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_hbase_pb_RestoreSnapshotStateData_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_hbase_pb_RestoreSnapshotStateData_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.RestoreSnapshotStateData.class, org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.RestoreSnapshotStateData.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.RestoreSnapshotStateData.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getUserInfoFieldBuilder(); + getSnapshotFieldBuilder(); + getModifiedTableSchemaFieldBuilder(); + getRegionInfoForRestoreFieldBuilder(); + getRegionInfoForRemoveFieldBuilder(); + getRegionInfoForAddFieldBuilder(); + getParentToChildRegionsPairListFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + if (userInfoBuilder_ == null) { + userInfo_ = org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.getDefaultInstance(); + } else { + userInfoBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + if (snapshotBuilder_ == null) { + snapshot_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.getDefaultInstance(); + } else { + snapshotBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000002); + if (modifiedTableSchemaBuilder_ == null) { + modifiedTableSchema_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance(); + } else { + modifiedTableSchemaBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000004); + if (regionInfoForRestoreBuilder_ == null) { + regionInfoForRestore_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000008); + } else { + regionInfoForRestoreBuilder_.clear(); + } + if (regionInfoForRemoveBuilder_ == null) { + regionInfoForRemove_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000010); + } else { + regionInfoForRemoveBuilder_.clear(); + } + if (regionInfoForAddBuilder_ == null) { + regionInfoForAdd_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000020); + } else { + regionInfoForAddBuilder_.clear(); + } + if (parentToChildRegionsPairListBuilder_ == null) { + parentToChildRegionsPairList_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000040); + } else { + parentToChildRegionsPairListBuilder_.clear(); + } + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_hbase_pb_RestoreSnapshotStateData_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.RestoreSnapshotStateData getDefaultInstanceForType() { + return 
org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.RestoreSnapshotStateData.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.RestoreSnapshotStateData build() { + org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.RestoreSnapshotStateData result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.RestoreSnapshotStateData buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.RestoreSnapshotStateData result = new org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.RestoreSnapshotStateData(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + if (userInfoBuilder_ == null) { + result.userInfo_ = userInfo_; + } else { + result.userInfo_ = userInfoBuilder_.build(); + } + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + if (snapshotBuilder_ == null) { + result.snapshot_ = snapshot_; + } else { + result.snapshot_ = snapshotBuilder_.build(); + } + if (((from_bitField0_ & 0x00000004) == 0x00000004)) { + to_bitField0_ |= 0x00000004; + } + if (modifiedTableSchemaBuilder_ == null) { + result.modifiedTableSchema_ = modifiedTableSchema_; + } else { + result.modifiedTableSchema_ = modifiedTableSchemaBuilder_.build(); + } + if (regionInfoForRestoreBuilder_ == null) { + if (((bitField0_ & 0x00000008) == 0x00000008)) { + regionInfoForRestore_ = java.util.Collections.unmodifiableList(regionInfoForRestore_); + bitField0_ = (bitField0_ & ~0x00000008); + } + result.regionInfoForRestore_ = regionInfoForRestore_; + } else { + result.regionInfoForRestore_ = regionInfoForRestoreBuilder_.build(); + } + if (regionInfoForRemoveBuilder_ == null) { + if (((bitField0_ & 0x00000010) == 0x00000010)) { + regionInfoForRemove_ = java.util.Collections.unmodifiableList(regionInfoForRemove_); + bitField0_ = (bitField0_ & ~0x00000010); + } + result.regionInfoForRemove_ = regionInfoForRemove_; + } else { + result.regionInfoForRemove_ = regionInfoForRemoveBuilder_.build(); + } + if (regionInfoForAddBuilder_ == null) { + if (((bitField0_ & 0x00000020) == 0x00000020)) { + regionInfoForAdd_ = java.util.Collections.unmodifiableList(regionInfoForAdd_); + bitField0_ = (bitField0_ & ~0x00000020); + } + result.regionInfoForAdd_ = regionInfoForAdd_; + } else { + result.regionInfoForAdd_ = regionInfoForAddBuilder_.build(); + } + if (parentToChildRegionsPairListBuilder_ == null) { + if (((bitField0_ & 0x00000040) == 0x00000040)) { + parentToChildRegionsPairList_ = java.util.Collections.unmodifiableList(parentToChildRegionsPairList_); + bitField0_ = (bitField0_ & ~0x00000040); + } + result.parentToChildRegionsPairList_ = parentToChildRegionsPairList_; + } else { + result.parentToChildRegionsPairList_ = parentToChildRegionsPairListBuilder_.build(); + } + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.RestoreSnapshotStateData) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.RestoreSnapshotStateData)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder 
mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.RestoreSnapshotStateData other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.RestoreSnapshotStateData.getDefaultInstance()) return this; + if (other.hasUserInfo()) { + mergeUserInfo(other.getUserInfo()); + } + if (other.hasSnapshot()) { + mergeSnapshot(other.getSnapshot()); + } + if (other.hasModifiedTableSchema()) { + mergeModifiedTableSchema(other.getModifiedTableSchema()); + } + if (regionInfoForRestoreBuilder_ == null) { + if (!other.regionInfoForRestore_.isEmpty()) { + if (regionInfoForRestore_.isEmpty()) { + regionInfoForRestore_ = other.regionInfoForRestore_; + bitField0_ = (bitField0_ & ~0x00000008); + } else { + ensureRegionInfoForRestoreIsMutable(); + regionInfoForRestore_.addAll(other.regionInfoForRestore_); + } + onChanged(); + } + } else { + if (!other.regionInfoForRestore_.isEmpty()) { + if (regionInfoForRestoreBuilder_.isEmpty()) { + regionInfoForRestoreBuilder_.dispose(); + regionInfoForRestoreBuilder_ = null; + regionInfoForRestore_ = other.regionInfoForRestore_; + bitField0_ = (bitField0_ & ~0x00000008); + regionInfoForRestoreBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? + getRegionInfoForRestoreFieldBuilder() : null; + } else { + regionInfoForRestoreBuilder_.addAllMessages(other.regionInfoForRestore_); + } + } + } + if (regionInfoForRemoveBuilder_ == null) { + if (!other.regionInfoForRemove_.isEmpty()) { + if (regionInfoForRemove_.isEmpty()) { + regionInfoForRemove_ = other.regionInfoForRemove_; + bitField0_ = (bitField0_ & ~0x00000010); + } else { + ensureRegionInfoForRemoveIsMutable(); + regionInfoForRemove_.addAll(other.regionInfoForRemove_); + } + onChanged(); + } + } else { + if (!other.regionInfoForRemove_.isEmpty()) { + if (regionInfoForRemoveBuilder_.isEmpty()) { + regionInfoForRemoveBuilder_.dispose(); + regionInfoForRemoveBuilder_ = null; + regionInfoForRemove_ = other.regionInfoForRemove_; + bitField0_ = (bitField0_ & ~0x00000010); + regionInfoForRemoveBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? + getRegionInfoForRemoveFieldBuilder() : null; + } else { + regionInfoForRemoveBuilder_.addAllMessages(other.regionInfoForRemove_); + } + } + } + if (regionInfoForAddBuilder_ == null) { + if (!other.regionInfoForAdd_.isEmpty()) { + if (regionInfoForAdd_.isEmpty()) { + regionInfoForAdd_ = other.regionInfoForAdd_; + bitField0_ = (bitField0_ & ~0x00000020); + } else { + ensureRegionInfoForAddIsMutable(); + regionInfoForAdd_.addAll(other.regionInfoForAdd_); + } + onChanged(); + } + } else { + if (!other.regionInfoForAdd_.isEmpty()) { + if (regionInfoForAddBuilder_.isEmpty()) { + regionInfoForAddBuilder_.dispose(); + regionInfoForAddBuilder_ = null; + regionInfoForAdd_ = other.regionInfoForAdd_; + bitField0_ = (bitField0_ & ~0x00000020); + regionInfoForAddBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? 
+ getRegionInfoForAddFieldBuilder() : null; + } else { + regionInfoForAddBuilder_.addAllMessages(other.regionInfoForAdd_); + } + } + } + if (parentToChildRegionsPairListBuilder_ == null) { + if (!other.parentToChildRegionsPairList_.isEmpty()) { + if (parentToChildRegionsPairList_.isEmpty()) { + parentToChildRegionsPairList_ = other.parentToChildRegionsPairList_; + bitField0_ = (bitField0_ & ~0x00000040); + } else { + ensureParentToChildRegionsPairListIsMutable(); + parentToChildRegionsPairList_.addAll(other.parentToChildRegionsPairList_); + } + onChanged(); + } + } else { + if (!other.parentToChildRegionsPairList_.isEmpty()) { + if (parentToChildRegionsPairListBuilder_.isEmpty()) { + parentToChildRegionsPairListBuilder_.dispose(); + parentToChildRegionsPairListBuilder_ = null; + parentToChildRegionsPairList_ = other.parentToChildRegionsPairList_; + bitField0_ = (bitField0_ & ~0x00000040); + parentToChildRegionsPairListBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? + getParentToChildRegionsPairListFieldBuilder() : null; + } else { + parentToChildRegionsPairListBuilder_.addAllMessages(other.parentToChildRegionsPairList_); + } + } + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasUserInfo()) { + + return false; + } + if (!hasSnapshot()) { + + return false; + } + if (!hasModifiedTableSchema()) { + + return false; + } + if (!getUserInfo().isInitialized()) { + + return false; + } + if (!getSnapshot().isInitialized()) { + + return false; + } + if (!getModifiedTableSchema().isInitialized()) { + + return false; + } + for (int i = 0; i < getRegionInfoForRestoreCount(); i++) { + if (!getRegionInfoForRestore(i).isInitialized()) { + + return false; + } + } + for (int i = 0; i < getRegionInfoForRemoveCount(); i++) { + if (!getRegionInfoForRemove(i).isInitialized()) { + + return false; + } + } + for (int i = 0; i < getRegionInfoForAddCount(); i++) { + if (!getRegionInfoForAdd(i).isInitialized()) { + + return false; + } + } + for (int i = 0; i < getParentToChildRegionsPairListCount(); i++) { + if (!getParentToChildRegionsPairList(i).isInitialized()) { + + return false; + } + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.RestoreSnapshotStateData parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.RestoreSnapshotStateData) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required .hbase.pb.UserInformation user_info = 1; + private org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation userInfo_ = org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation, org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.Builder, org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformationOrBuilder> userInfoBuilder_; + /** + * required .hbase.pb.UserInformation user_info = 1; + */ + public boolean 
hasUserInfo() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .hbase.pb.UserInformation user_info = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation getUserInfo() { + if (userInfoBuilder_ == null) { + return userInfo_; + } else { + return userInfoBuilder_.getMessage(); + } + } + /** + * required .hbase.pb.UserInformation user_info = 1; + */ + public Builder setUserInfo(org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation value) { + if (userInfoBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + userInfo_ = value; + onChanged(); + } else { + userInfoBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .hbase.pb.UserInformation user_info = 1; + */ + public Builder setUserInfo( + org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.Builder builderForValue) { + if (userInfoBuilder_ == null) { + userInfo_ = builderForValue.build(); + onChanged(); + } else { + userInfoBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .hbase.pb.UserInformation user_info = 1; + */ + public Builder mergeUserInfo(org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation value) { + if (userInfoBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001) && + userInfo_ != org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.getDefaultInstance()) { + userInfo_ = + org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.newBuilder(userInfo_).mergeFrom(value).buildPartial(); + } else { + userInfo_ = value; + } + onChanged(); + } else { + userInfoBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .hbase.pb.UserInformation user_info = 1; + */ + public Builder clearUserInfo() { + if (userInfoBuilder_ == null) { + userInfo_ = org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.getDefaultInstance(); + onChanged(); + } else { + userInfoBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + /** + * required .hbase.pb.UserInformation user_info = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.Builder getUserInfoBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return getUserInfoFieldBuilder().getBuilder(); + } + /** + * required .hbase.pb.UserInformation user_info = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformationOrBuilder getUserInfoOrBuilder() { + if (userInfoBuilder_ != null) { + return userInfoBuilder_.getMessageOrBuilder(); + } else { + return userInfo_; + } + } + /** + * required .hbase.pb.UserInformation user_info = 1; + */ + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation, org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.Builder, org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformationOrBuilder> + getUserInfoFieldBuilder() { + if (userInfoBuilder_ == null) { + userInfoBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation, org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.Builder, org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformationOrBuilder>( + userInfo_, + getParentForChildren(), + isClean()); + userInfo_ = null; + } + return userInfoBuilder_; + } + + // 
required .hbase.pb.SnapshotDescription snapshot = 2; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription snapshot_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescriptionOrBuilder> snapshotBuilder_; + /** + * required .hbase.pb.SnapshotDescription snapshot = 2; + */ + public boolean hasSnapshot() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required .hbase.pb.SnapshotDescription snapshot = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription getSnapshot() { + if (snapshotBuilder_ == null) { + return snapshot_; + } else { + return snapshotBuilder_.getMessage(); + } + } + /** + * required .hbase.pb.SnapshotDescription snapshot = 2; + */ + public Builder setSnapshot(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription value) { + if (snapshotBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + snapshot_ = value; + onChanged(); + } else { + snapshotBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + return this; + } + /** + * required .hbase.pb.SnapshotDescription snapshot = 2; + */ + public Builder setSnapshot( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.Builder builderForValue) { + if (snapshotBuilder_ == null) { + snapshot_ = builderForValue.build(); + onChanged(); + } else { + snapshotBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + return this; + } + /** + * required .hbase.pb.SnapshotDescription snapshot = 2; + */ + public Builder mergeSnapshot(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription value) { + if (snapshotBuilder_ == null) { + if (((bitField0_ & 0x00000002) == 0x00000002) && + snapshot_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.getDefaultInstance()) { + snapshot_ = + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.newBuilder(snapshot_).mergeFrom(value).buildPartial(); + } else { + snapshot_ = value; + } + onChanged(); + } else { + snapshotBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000002; + return this; + } + /** + * required .hbase.pb.SnapshotDescription snapshot = 2; + */ + public Builder clearSnapshot() { + if (snapshotBuilder_ == null) { + snapshot_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.getDefaultInstance(); + onChanged(); + } else { + snapshotBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000002); + return this; + } + /** + * required .hbase.pb.SnapshotDescription snapshot = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.Builder getSnapshotBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return getSnapshotFieldBuilder().getBuilder(); + } + /** + * required .hbase.pb.SnapshotDescription snapshot = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescriptionOrBuilder getSnapshotOrBuilder() { + if (snapshotBuilder_ != null) { + return snapshotBuilder_.getMessageOrBuilder(); + } else { + return snapshot_; + } + } + /** + * required .hbase.pb.SnapshotDescription snapshot = 2; + */ + private 
com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescriptionOrBuilder> + getSnapshotFieldBuilder() { + if (snapshotBuilder_ == null) { + snapshotBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescriptionOrBuilder>( + snapshot_, + getParentForChildren(), + isClean()); + snapshot_ = null; + } + return snapshotBuilder_; + } + + // required .hbase.pb.TableSchema modified_table_schema = 3; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema modifiedTableSchema_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder> modifiedTableSchemaBuilder_; + /** + * required .hbase.pb.TableSchema modified_table_schema = 3; + */ + public boolean hasModifiedTableSchema() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * required .hbase.pb.TableSchema modified_table_schema = 3; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema getModifiedTableSchema() { + if (modifiedTableSchemaBuilder_ == null) { + return modifiedTableSchema_; + } else { + return modifiedTableSchemaBuilder_.getMessage(); + } + } + /** + * required .hbase.pb.TableSchema modified_table_schema = 3; + */ + public Builder setModifiedTableSchema(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema value) { + if (modifiedTableSchemaBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + modifiedTableSchema_ = value; + onChanged(); + } else { + modifiedTableSchemaBuilder_.setMessage(value); + } + bitField0_ |= 0x00000004; + return this; + } + /** + * required .hbase.pb.TableSchema modified_table_schema = 3; + */ + public Builder setModifiedTableSchema( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder builderForValue) { + if (modifiedTableSchemaBuilder_ == null) { + modifiedTableSchema_ = builderForValue.build(); + onChanged(); + } else { + modifiedTableSchemaBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000004; + return this; + } + /** + * required .hbase.pb.TableSchema modified_table_schema = 3; + */ + public Builder mergeModifiedTableSchema(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema value) { + if (modifiedTableSchemaBuilder_ == null) { + if (((bitField0_ & 0x00000004) == 0x00000004) && + modifiedTableSchema_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance()) { + modifiedTableSchema_ = + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.newBuilder(modifiedTableSchema_).mergeFrom(value).buildPartial(); + } else { + modifiedTableSchema_ = value; + } + onChanged(); + } else { + modifiedTableSchemaBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000004; + return this; + } + /** + * required .hbase.pb.TableSchema modified_table_schema = 3; + */ + public Builder 
clearModifiedTableSchema() { + if (modifiedTableSchemaBuilder_ == null) { + modifiedTableSchema_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance(); + onChanged(); + } else { + modifiedTableSchemaBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000004); + return this; + } + /** + * required .hbase.pb.TableSchema modified_table_schema = 3; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder getModifiedTableSchemaBuilder() { + bitField0_ |= 0x00000004; + onChanged(); + return getModifiedTableSchemaFieldBuilder().getBuilder(); + } + /** + * required .hbase.pb.TableSchema modified_table_schema = 3; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder getModifiedTableSchemaOrBuilder() { + if (modifiedTableSchemaBuilder_ != null) { + return modifiedTableSchemaBuilder_.getMessageOrBuilder(); + } else { + return modifiedTableSchema_; + } + } + /** + * required .hbase.pb.TableSchema modified_table_schema = 3; + */ + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder> + getModifiedTableSchemaFieldBuilder() { + if (modifiedTableSchemaBuilder_ == null) { + modifiedTableSchemaBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder>( + modifiedTableSchema_, + getParentForChildren(), + isClean()); + modifiedTableSchema_ = null; + } + return modifiedTableSchemaBuilder_; + } + + // repeated .hbase.pb.RegionInfo region_info_for_restore = 4; + private java.util.List regionInfoForRestore_ = + java.util.Collections.emptyList(); + private void ensureRegionInfoForRestoreIsMutable() { + if (!((bitField0_ & 0x00000008) == 0x00000008)) { + regionInfoForRestore_ = new java.util.ArrayList(regionInfoForRestore_); + bitField0_ |= 0x00000008; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder> regionInfoForRestoreBuilder_; + + /** + * repeated .hbase.pb.RegionInfo region_info_for_restore = 4; + */ + public java.util.List getRegionInfoForRestoreList() { + if (regionInfoForRestoreBuilder_ == null) { + return java.util.Collections.unmodifiableList(regionInfoForRestore_); + } else { + return regionInfoForRestoreBuilder_.getMessageList(); + } + } + /** + * repeated .hbase.pb.RegionInfo region_info_for_restore = 4; + */ + public int getRegionInfoForRestoreCount() { + if (regionInfoForRestoreBuilder_ == null) { + return regionInfoForRestore_.size(); + } else { + return regionInfoForRestoreBuilder_.getCount(); + } + } + /** + * repeated .hbase.pb.RegionInfo region_info_for_restore = 4; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo getRegionInfoForRestore(int index) { + if (regionInfoForRestoreBuilder_ == null) { + return regionInfoForRestore_.get(index); + } else { + return regionInfoForRestoreBuilder_.getMessage(index); + } + } + /** + * repeated .hbase.pb.RegionInfo region_info_for_restore = 4; + */ + public Builder 
setRegionInfoForRestore( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo value) { + if (regionInfoForRestoreBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureRegionInfoForRestoreIsMutable(); + regionInfoForRestore_.set(index, value); + onChanged(); + } else { + regionInfoForRestoreBuilder_.setMessage(index, value); + } + return this; + } + /** + * repeated .hbase.pb.RegionInfo region_info_for_restore = 4; + */ + public Builder setRegionInfoForRestore( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder builderForValue) { + if (regionInfoForRestoreBuilder_ == null) { + ensureRegionInfoForRestoreIsMutable(); + regionInfoForRestore_.set(index, builderForValue.build()); + onChanged(); + } else { + regionInfoForRestoreBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.RegionInfo region_info_for_restore = 4; + */ + public Builder addRegionInfoForRestore(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo value) { + if (regionInfoForRestoreBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureRegionInfoForRestoreIsMutable(); + regionInfoForRestore_.add(value); + onChanged(); + } else { + regionInfoForRestoreBuilder_.addMessage(value); + } + return this; + } + /** + * repeated .hbase.pb.RegionInfo region_info_for_restore = 4; + */ + public Builder addRegionInfoForRestore( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo value) { + if (regionInfoForRestoreBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureRegionInfoForRestoreIsMutable(); + regionInfoForRestore_.add(index, value); + onChanged(); + } else { + regionInfoForRestoreBuilder_.addMessage(index, value); + } + return this; + } + /** + * repeated .hbase.pb.RegionInfo region_info_for_restore = 4; + */ + public Builder addRegionInfoForRestore( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder builderForValue) { + if (regionInfoForRestoreBuilder_ == null) { + ensureRegionInfoForRestoreIsMutable(); + regionInfoForRestore_.add(builderForValue.build()); + onChanged(); + } else { + regionInfoForRestoreBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.RegionInfo region_info_for_restore = 4; + */ + public Builder addRegionInfoForRestore( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder builderForValue) { + if (regionInfoForRestoreBuilder_ == null) { + ensureRegionInfoForRestoreIsMutable(); + regionInfoForRestore_.add(index, builderForValue.build()); + onChanged(); + } else { + regionInfoForRestoreBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.RegionInfo region_info_for_restore = 4; + */ + public Builder addAllRegionInfoForRestore( + java.lang.Iterable values) { + if (regionInfoForRestoreBuilder_ == null) { + ensureRegionInfoForRestoreIsMutable(); + super.addAll(values, regionInfoForRestore_); + onChanged(); + } else { + regionInfoForRestoreBuilder_.addAllMessages(values); + } + return this; + } + /** + * repeated .hbase.pb.RegionInfo region_info_for_restore = 4; + */ + public Builder clearRegionInfoForRestore() { + if (regionInfoForRestoreBuilder_ == null) { + regionInfoForRestore_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000008); + onChanged(); + } else { + 
regionInfoForRestoreBuilder_.clear(); + } + return this; + } + /** + * repeated .hbase.pb.RegionInfo region_info_for_restore = 4; + */ + public Builder removeRegionInfoForRestore(int index) { + if (regionInfoForRestoreBuilder_ == null) { + ensureRegionInfoForRestoreIsMutable(); + regionInfoForRestore_.remove(index); + onChanged(); + } else { + regionInfoForRestoreBuilder_.remove(index); + } + return this; + } + /** + * repeated .hbase.pb.RegionInfo region_info_for_restore = 4; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder getRegionInfoForRestoreBuilder( + int index) { + return getRegionInfoForRestoreFieldBuilder().getBuilder(index); + } + /** + * repeated .hbase.pb.RegionInfo region_info_for_restore = 4; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getRegionInfoForRestoreOrBuilder( + int index) { + if (regionInfoForRestoreBuilder_ == null) { + return regionInfoForRestore_.get(index); } else { + return regionInfoForRestoreBuilder_.getMessageOrBuilder(index); + } + } + /** + * repeated .hbase.pb.RegionInfo region_info_for_restore = 4; + */ + public java.util.List + getRegionInfoForRestoreOrBuilderList() { + if (regionInfoForRestoreBuilder_ != null) { + return regionInfoForRestoreBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(regionInfoForRestore_); + } + } + /** + * repeated .hbase.pb.RegionInfo region_info_for_restore = 4; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder addRegionInfoForRestoreBuilder() { + return getRegionInfoForRestoreFieldBuilder().addBuilder( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance()); + } + /** + * repeated .hbase.pb.RegionInfo region_info_for_restore = 4; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder addRegionInfoForRestoreBuilder( + int index) { + return getRegionInfoForRestoreFieldBuilder().addBuilder( + index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance()); + } + /** + * repeated .hbase.pb.RegionInfo region_info_for_restore = 4; + */ + public java.util.List + getRegionInfoForRestoreBuilderList() { + return getRegionInfoForRestoreFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder> + getRegionInfoForRestoreFieldBuilder() { + if (regionInfoForRestoreBuilder_ == null) { + regionInfoForRestoreBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder>( + regionInfoForRestore_, + ((bitField0_ & 0x00000008) == 0x00000008), + getParentForChildren(), + isClean()); + regionInfoForRestore_ = null; + } + return regionInfoForRestoreBuilder_; + } + + // repeated .hbase.pb.RegionInfo region_info_for_remove = 5; + private java.util.List regionInfoForRemove_ = + java.util.Collections.emptyList(); + private void ensureRegionInfoForRemoveIsMutable() { + if (!((bitField0_ & 0x00000010) == 0x00000010)) { + regionInfoForRemove_ = new java.util.ArrayList(regionInfoForRemove_); + bitField0_ |= 0x00000010; + } + } + + private 
com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder> regionInfoForRemoveBuilder_; + + /** + * repeated .hbase.pb.RegionInfo region_info_for_remove = 5; + */ + public java.util.List getRegionInfoForRemoveList() { + if (regionInfoForRemoveBuilder_ == null) { + return java.util.Collections.unmodifiableList(regionInfoForRemove_); + } else { + return regionInfoForRemoveBuilder_.getMessageList(); + } + } + /** + * repeated .hbase.pb.RegionInfo region_info_for_remove = 5; + */ + public int getRegionInfoForRemoveCount() { + if (regionInfoForRemoveBuilder_ == null) { + return regionInfoForRemove_.size(); + } else { + return regionInfoForRemoveBuilder_.getCount(); + } + } + /** + * repeated .hbase.pb.RegionInfo region_info_for_remove = 5; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo getRegionInfoForRemove(int index) { + if (regionInfoForRemoveBuilder_ == null) { + return regionInfoForRemove_.get(index); + } else { + return regionInfoForRemoveBuilder_.getMessage(index); + } + } + /** + * repeated .hbase.pb.RegionInfo region_info_for_remove = 5; + */ + public Builder setRegionInfoForRemove( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo value) { + if (regionInfoForRemoveBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureRegionInfoForRemoveIsMutable(); + regionInfoForRemove_.set(index, value); + onChanged(); + } else { + regionInfoForRemoveBuilder_.setMessage(index, value); + } + return this; + } + /** + * repeated .hbase.pb.RegionInfo region_info_for_remove = 5; + */ + public Builder setRegionInfoForRemove( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder builderForValue) { + if (regionInfoForRemoveBuilder_ == null) { + ensureRegionInfoForRemoveIsMutable(); + regionInfoForRemove_.set(index, builderForValue.build()); + onChanged(); + } else { + regionInfoForRemoveBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.RegionInfo region_info_for_remove = 5; + */ + public Builder addRegionInfoForRemove(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo value) { + if (regionInfoForRemoveBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureRegionInfoForRemoveIsMutable(); + regionInfoForRemove_.add(value); + onChanged(); + } else { + regionInfoForRemoveBuilder_.addMessage(value); + } + return this; + } + /** + * repeated .hbase.pb.RegionInfo region_info_for_remove = 5; + */ + public Builder addRegionInfoForRemove( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo value) { + if (regionInfoForRemoveBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureRegionInfoForRemoveIsMutable(); + regionInfoForRemove_.add(index, value); + onChanged(); + } else { + regionInfoForRemoveBuilder_.addMessage(index, value); + } + return this; + } + /** + * repeated .hbase.pb.RegionInfo region_info_for_remove = 5; + */ + public Builder addRegionInfoForRemove( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder builderForValue) { + if (regionInfoForRemoveBuilder_ == null) { + ensureRegionInfoForRemoveIsMutable(); + regionInfoForRemove_.add(builderForValue.build()); + onChanged(); + } else { + 
regionInfoForRemoveBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.RegionInfo region_info_for_remove = 5; + */ + public Builder addRegionInfoForRemove( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder builderForValue) { + if (regionInfoForRemoveBuilder_ == null) { + ensureRegionInfoForRemoveIsMutable(); + regionInfoForRemove_.add(index, builderForValue.build()); + onChanged(); + } else { + regionInfoForRemoveBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.RegionInfo region_info_for_remove = 5; + */ + public Builder addAllRegionInfoForRemove( + java.lang.Iterable values) { + if (regionInfoForRemoveBuilder_ == null) { + ensureRegionInfoForRemoveIsMutable(); + super.addAll(values, regionInfoForRemove_); + onChanged(); + } else { + regionInfoForRemoveBuilder_.addAllMessages(values); + } + return this; + } + /** + * repeated .hbase.pb.RegionInfo region_info_for_remove = 5; + */ + public Builder clearRegionInfoForRemove() { + if (regionInfoForRemoveBuilder_ == null) { + regionInfoForRemove_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000010); + onChanged(); + } else { + regionInfoForRemoveBuilder_.clear(); + } + return this; + } + /** + * repeated .hbase.pb.RegionInfo region_info_for_remove = 5; + */ + public Builder removeRegionInfoForRemove(int index) { + if (regionInfoForRemoveBuilder_ == null) { + ensureRegionInfoForRemoveIsMutable(); + regionInfoForRemove_.remove(index); + onChanged(); + } else { + regionInfoForRemoveBuilder_.remove(index); + } + return this; + } + /** + * repeated .hbase.pb.RegionInfo region_info_for_remove = 5; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder getRegionInfoForRemoveBuilder( + int index) { + return getRegionInfoForRemoveFieldBuilder().getBuilder(index); + } + /** + * repeated .hbase.pb.RegionInfo region_info_for_remove = 5; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getRegionInfoForRemoveOrBuilder( + int index) { + if (regionInfoForRemoveBuilder_ == null) { + return regionInfoForRemove_.get(index); } else { + return regionInfoForRemoveBuilder_.getMessageOrBuilder(index); + } + } + /** + * repeated .hbase.pb.RegionInfo region_info_for_remove = 5; + */ + public java.util.List + getRegionInfoForRemoveOrBuilderList() { + if (regionInfoForRemoveBuilder_ != null) { + return regionInfoForRemoveBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(regionInfoForRemove_); + } + } + /** + * repeated .hbase.pb.RegionInfo region_info_for_remove = 5; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder addRegionInfoForRemoveBuilder() { + return getRegionInfoForRemoveFieldBuilder().addBuilder( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance()); + } + /** + * repeated .hbase.pb.RegionInfo region_info_for_remove = 5; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder addRegionInfoForRemoveBuilder( + int index) { + return getRegionInfoForRemoveFieldBuilder().addBuilder( + index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance()); + } + /** + * repeated .hbase.pb.RegionInfo region_info_for_remove = 5; + */ + public java.util.List + getRegionInfoForRemoveBuilderList() { + return getRegionInfoForRemoveFieldBuilder().getBuilderList(); + } + 
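+      // Reviewer note: region_info_for_remove follows the same generated pattern as the
+      // other repeated RegionInfo fields in this message. Until a RepeatedFieldBuilder is
+      // materialized, mutators copy-on-write through ensureRegionInfoForRemoveIsMutable()
+      // and record mutability via bitField0_; once getRegionInfoForRemoveFieldBuilder()
+      // runs, the backing list is handed to the builder (and nulled here) and every
+      // accessor above delegates to it.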
private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder> + getRegionInfoForRemoveFieldBuilder() { + if (regionInfoForRemoveBuilder_ == null) { + regionInfoForRemoveBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder>( + regionInfoForRemove_, + ((bitField0_ & 0x00000010) == 0x00000010), + getParentForChildren(), + isClean()); + regionInfoForRemove_ = null; + } + return regionInfoForRemoveBuilder_; + } + + // repeated .hbase.pb.RegionInfo region_info_for_add = 6; + private java.util.List regionInfoForAdd_ = + java.util.Collections.emptyList(); + private void ensureRegionInfoForAddIsMutable() { + if (!((bitField0_ & 0x00000020) == 0x00000020)) { + regionInfoForAdd_ = new java.util.ArrayList(regionInfoForAdd_); + bitField0_ |= 0x00000020; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder> regionInfoForAddBuilder_; + + /** + * repeated .hbase.pb.RegionInfo region_info_for_add = 6; + */ + public java.util.List getRegionInfoForAddList() { + if (regionInfoForAddBuilder_ == null) { + return java.util.Collections.unmodifiableList(regionInfoForAdd_); + } else { + return regionInfoForAddBuilder_.getMessageList(); + } + } + /** + * repeated .hbase.pb.RegionInfo region_info_for_add = 6; + */ + public int getRegionInfoForAddCount() { + if (regionInfoForAddBuilder_ == null) { + return regionInfoForAdd_.size(); + } else { + return regionInfoForAddBuilder_.getCount(); + } + } + /** + * repeated .hbase.pb.RegionInfo region_info_for_add = 6; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo getRegionInfoForAdd(int index) { + if (regionInfoForAddBuilder_ == null) { + return regionInfoForAdd_.get(index); + } else { + return regionInfoForAddBuilder_.getMessage(index); + } + } + /** + * repeated .hbase.pb.RegionInfo region_info_for_add = 6; + */ + public Builder setRegionInfoForAdd( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo value) { + if (regionInfoForAddBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureRegionInfoForAddIsMutable(); + regionInfoForAdd_.set(index, value); + onChanged(); + } else { + regionInfoForAddBuilder_.setMessage(index, value); + } + return this; + } + /** + * repeated .hbase.pb.RegionInfo region_info_for_add = 6; + */ + public Builder setRegionInfoForAdd( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder builderForValue) { + if (regionInfoForAddBuilder_ == null) { + ensureRegionInfoForAddIsMutable(); + regionInfoForAdd_.set(index, builderForValue.build()); + onChanged(); + } else { + regionInfoForAddBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.RegionInfo region_info_for_add = 6; + */ + public Builder addRegionInfoForAdd(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo value) { + if (regionInfoForAddBuilder_ == null) { + 
if (value == null) { + throw new NullPointerException(); + } + ensureRegionInfoForAddIsMutable(); + regionInfoForAdd_.add(value); + onChanged(); + } else { + regionInfoForAddBuilder_.addMessage(value); + } + return this; + } + /** + * repeated .hbase.pb.RegionInfo region_info_for_add = 6; + */ + public Builder addRegionInfoForAdd( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo value) { + if (regionInfoForAddBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureRegionInfoForAddIsMutable(); + regionInfoForAdd_.add(index, value); + onChanged(); + } else { + regionInfoForAddBuilder_.addMessage(index, value); + } + return this; + } + /** + * repeated .hbase.pb.RegionInfo region_info_for_add = 6; + */ + public Builder addRegionInfoForAdd( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder builderForValue) { + if (regionInfoForAddBuilder_ == null) { + ensureRegionInfoForAddIsMutable(); + regionInfoForAdd_.add(builderForValue.build()); + onChanged(); + } else { + regionInfoForAddBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.RegionInfo region_info_for_add = 6; + */ + public Builder addRegionInfoForAdd( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder builderForValue) { + if (regionInfoForAddBuilder_ == null) { + ensureRegionInfoForAddIsMutable(); + regionInfoForAdd_.add(index, builderForValue.build()); + onChanged(); + } else { + regionInfoForAddBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.RegionInfo region_info_for_add = 6; + */ + public Builder addAllRegionInfoForAdd( + java.lang.Iterable values) { + if (regionInfoForAddBuilder_ == null) { + ensureRegionInfoForAddIsMutable(); + super.addAll(values, regionInfoForAdd_); + onChanged(); + } else { + regionInfoForAddBuilder_.addAllMessages(values); + } + return this; + } + /** + * repeated .hbase.pb.RegionInfo region_info_for_add = 6; + */ + public Builder clearRegionInfoForAdd() { + if (regionInfoForAddBuilder_ == null) { + regionInfoForAdd_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000020); + onChanged(); + } else { + regionInfoForAddBuilder_.clear(); + } + return this; + } + /** + * repeated .hbase.pb.RegionInfo region_info_for_add = 6; + */ + public Builder removeRegionInfoForAdd(int index) { + if (regionInfoForAddBuilder_ == null) { + ensureRegionInfoForAddIsMutable(); + regionInfoForAdd_.remove(index); + onChanged(); + } else { + regionInfoForAddBuilder_.remove(index); + } + return this; + } + /** + * repeated .hbase.pb.RegionInfo region_info_for_add = 6; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder getRegionInfoForAddBuilder( + int index) { + return getRegionInfoForAddFieldBuilder().getBuilder(index); + } + /** + * repeated .hbase.pb.RegionInfo region_info_for_add = 6; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getRegionInfoForAddOrBuilder( + int index) { + if (regionInfoForAddBuilder_ == null) { + return regionInfoForAdd_.get(index); } else { + return regionInfoForAddBuilder_.getMessageOrBuilder(index); + } + } + /** + * repeated .hbase.pb.RegionInfo region_info_for_add = 6; + */ + public java.util.List + getRegionInfoForAddOrBuilderList() { + if (regionInfoForAddBuilder_ != null) { + return regionInfoForAddBuilder_.getMessageOrBuilderList(); + } else { + return 
java.util.Collections.unmodifiableList(regionInfoForAdd_); + } + } + /** + * repeated .hbase.pb.RegionInfo region_info_for_add = 6; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder addRegionInfoForAddBuilder() { + return getRegionInfoForAddFieldBuilder().addBuilder( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance()); + } + /** + * repeated .hbase.pb.RegionInfo region_info_for_add = 6; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder addRegionInfoForAddBuilder( + int index) { + return getRegionInfoForAddFieldBuilder().addBuilder( + index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance()); + } + /** + * repeated .hbase.pb.RegionInfo region_info_for_add = 6; + */ + public java.util.List + getRegionInfoForAddBuilderList() { + return getRegionInfoForAddFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder> + getRegionInfoForAddFieldBuilder() { + if (regionInfoForAddBuilder_ == null) { + regionInfoForAddBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder>( + regionInfoForAdd_, + ((bitField0_ & 0x00000020) == 0x00000020), + getParentForChildren(), + isClean()); + regionInfoForAdd_ = null; + } + return regionInfoForAddBuilder_; + } + + // repeated .hbase.pb.RestoreParentToChildRegionsPair parent_to_child_regions_pair_list = 7; + private java.util.List parentToChildRegionsPairList_ = + java.util.Collections.emptyList(); + private void ensureParentToChildRegionsPairListIsMutable() { + if (!((bitField0_ & 0x00000040) == 0x00000040)) { + parentToChildRegionsPairList_ = new java.util.ArrayList(parentToChildRegionsPairList_); + bitField0_ |= 0x00000040; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.RestoreParentToChildRegionsPair, org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.RestoreParentToChildRegionsPair.Builder, org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.RestoreParentToChildRegionsPairOrBuilder> parentToChildRegionsPairListBuilder_; + + /** + * repeated .hbase.pb.RestoreParentToChildRegionsPair parent_to_child_regions_pair_list = 7; + */ + public java.util.List getParentToChildRegionsPairListList() { + if (parentToChildRegionsPairListBuilder_ == null) { + return java.util.Collections.unmodifiableList(parentToChildRegionsPairList_); + } else { + return parentToChildRegionsPairListBuilder_.getMessageList(); + } + } + /** + * repeated .hbase.pb.RestoreParentToChildRegionsPair parent_to_child_regions_pair_list = 7; + */ + public int getParentToChildRegionsPairListCount() { + if (parentToChildRegionsPairListBuilder_ == null) { + return parentToChildRegionsPairList_.size(); + } else { + return parentToChildRegionsPairListBuilder_.getCount(); + } + } + /** + * repeated .hbase.pb.RestoreParentToChildRegionsPair parent_to_child_regions_pair_list = 7; + */ + public org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.RestoreParentToChildRegionsPair 
getParentToChildRegionsPairList(int index) { + if (parentToChildRegionsPairListBuilder_ == null) { + return parentToChildRegionsPairList_.get(index); + } else { + return parentToChildRegionsPairListBuilder_.getMessage(index); + } + } + /** + * repeated .hbase.pb.RestoreParentToChildRegionsPair parent_to_child_regions_pair_list = 7; + */ + public Builder setParentToChildRegionsPairList( + int index, org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.RestoreParentToChildRegionsPair value) { + if (parentToChildRegionsPairListBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureParentToChildRegionsPairListIsMutable(); + parentToChildRegionsPairList_.set(index, value); + onChanged(); + } else { + parentToChildRegionsPairListBuilder_.setMessage(index, value); + } + return this; + } + /** + * repeated .hbase.pb.RestoreParentToChildRegionsPair parent_to_child_regions_pair_list = 7; + */ + public Builder setParentToChildRegionsPairList( + int index, org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.RestoreParentToChildRegionsPair.Builder builderForValue) { + if (parentToChildRegionsPairListBuilder_ == null) { + ensureParentToChildRegionsPairListIsMutable(); + parentToChildRegionsPairList_.set(index, builderForValue.build()); + onChanged(); + } else { + parentToChildRegionsPairListBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.RestoreParentToChildRegionsPair parent_to_child_regions_pair_list = 7; + */ + public Builder addParentToChildRegionsPairList(org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.RestoreParentToChildRegionsPair value) { + if (parentToChildRegionsPairListBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureParentToChildRegionsPairListIsMutable(); + parentToChildRegionsPairList_.add(value); + onChanged(); + } else { + parentToChildRegionsPairListBuilder_.addMessage(value); + } + return this; + } + /** + * repeated .hbase.pb.RestoreParentToChildRegionsPair parent_to_child_regions_pair_list = 7; + */ + public Builder addParentToChildRegionsPairList( + int index, org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.RestoreParentToChildRegionsPair value) { + if (parentToChildRegionsPairListBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureParentToChildRegionsPairListIsMutable(); + parentToChildRegionsPairList_.add(index, value); + onChanged(); + } else { + parentToChildRegionsPairListBuilder_.addMessage(index, value); + } + return this; + } + /** + * repeated .hbase.pb.RestoreParentToChildRegionsPair parent_to_child_regions_pair_list = 7; + */ + public Builder addParentToChildRegionsPairList( + org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.RestoreParentToChildRegionsPair.Builder builderForValue) { + if (parentToChildRegionsPairListBuilder_ == null) { + ensureParentToChildRegionsPairListIsMutable(); + parentToChildRegionsPairList_.add(builderForValue.build()); + onChanged(); + } else { + parentToChildRegionsPairListBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.RestoreParentToChildRegionsPair parent_to_child_regions_pair_list = 7; + */ + public Builder addParentToChildRegionsPairList( + int index, org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.RestoreParentToChildRegionsPair.Builder builderForValue) { + if (parentToChildRegionsPairListBuilder_ == null) { + 
ensureParentToChildRegionsPairListIsMutable(); + parentToChildRegionsPairList_.add(index, builderForValue.build()); + onChanged(); + } else { + parentToChildRegionsPairListBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.RestoreParentToChildRegionsPair parent_to_child_regions_pair_list = 7; + */ + public Builder addAllParentToChildRegionsPairList( + java.lang.Iterable values) { + if (parentToChildRegionsPairListBuilder_ == null) { + ensureParentToChildRegionsPairListIsMutable(); + super.addAll(values, parentToChildRegionsPairList_); + onChanged(); + } else { + parentToChildRegionsPairListBuilder_.addAllMessages(values); + } + return this; + } + /** + * repeated .hbase.pb.RestoreParentToChildRegionsPair parent_to_child_regions_pair_list = 7; + */ + public Builder clearParentToChildRegionsPairList() { + if (parentToChildRegionsPairListBuilder_ == null) { + parentToChildRegionsPairList_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000040); + onChanged(); + } else { + parentToChildRegionsPairListBuilder_.clear(); + } + return this; + } + /** + * repeated .hbase.pb.RestoreParentToChildRegionsPair parent_to_child_regions_pair_list = 7; + */ + public Builder removeParentToChildRegionsPairList(int index) { + if (parentToChildRegionsPairListBuilder_ == null) { + ensureParentToChildRegionsPairListIsMutable(); + parentToChildRegionsPairList_.remove(index); + onChanged(); + } else { + parentToChildRegionsPairListBuilder_.remove(index); + } + return this; + } + /** + * repeated .hbase.pb.RestoreParentToChildRegionsPair parent_to_child_regions_pair_list = 7; + */ + public org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.RestoreParentToChildRegionsPair.Builder getParentToChildRegionsPairListBuilder( + int index) { + return getParentToChildRegionsPairListFieldBuilder().getBuilder(index); + } + /** + * repeated .hbase.pb.RestoreParentToChildRegionsPair parent_to_child_regions_pair_list = 7; + */ + public org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.RestoreParentToChildRegionsPairOrBuilder getParentToChildRegionsPairListOrBuilder( + int index) { + if (parentToChildRegionsPairListBuilder_ == null) { + return parentToChildRegionsPairList_.get(index); } else { + return parentToChildRegionsPairListBuilder_.getMessageOrBuilder(index); + } + } + /** + * repeated .hbase.pb.RestoreParentToChildRegionsPair parent_to_child_regions_pair_list = 7; + */ + public java.util.List + getParentToChildRegionsPairListOrBuilderList() { + if (parentToChildRegionsPairListBuilder_ != null) { + return parentToChildRegionsPairListBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(parentToChildRegionsPairList_); + } + } + /** + * repeated .hbase.pb.RestoreParentToChildRegionsPair parent_to_child_regions_pair_list = 7; + */ + public org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.RestoreParentToChildRegionsPair.Builder addParentToChildRegionsPairListBuilder() { + return getParentToChildRegionsPairListFieldBuilder().addBuilder( + org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.RestoreParentToChildRegionsPair.getDefaultInstance()); + } + /** + * repeated .hbase.pb.RestoreParentToChildRegionsPair parent_to_child_regions_pair_list = 7; + */ + public org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.RestoreParentToChildRegionsPair.Builder addParentToChildRegionsPairListBuilder( + int index) { + return 
getParentToChildRegionsPairListFieldBuilder().addBuilder( + index, org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.RestoreParentToChildRegionsPair.getDefaultInstance()); + } + /** + * repeated .hbase.pb.RestoreParentToChildRegionsPair parent_to_child_regions_pair_list = 7; + */ + public java.util.List + getParentToChildRegionsPairListBuilderList() { + return getParentToChildRegionsPairListFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.RestoreParentToChildRegionsPair, org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.RestoreParentToChildRegionsPair.Builder, org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.RestoreParentToChildRegionsPairOrBuilder> + getParentToChildRegionsPairListFieldBuilder() { + if (parentToChildRegionsPairListBuilder_ == null) { + parentToChildRegionsPairListBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.RestoreParentToChildRegionsPair, org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.RestoreParentToChildRegionsPair.Builder, org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.RestoreParentToChildRegionsPairOrBuilder>( + parentToChildRegionsPairList_, + ((bitField0_ & 0x00000040) == 0x00000040), + getParentForChildren(), + isClean()); + parentToChildRegionsPairList_ = null; + } + return parentToChildRegionsPairListBuilder_; + } + + // @@protoc_insertion_point(builder_scope:hbase.pb.RestoreSnapshotStateData) + } + + static { + defaultInstance = new RestoreSnapshotStateData(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:hbase.pb.RestoreSnapshotStateData) + } + + public interface ServerCrashStateDataOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required .hbase.pb.ServerName server_name = 1; + /** + * required .hbase.pb.ServerName server_name = 1; + */ + boolean hasServerName(); + /** + * required .hbase.pb.ServerName server_name = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getServerName(); + /** + * required .hbase.pb.ServerName server_name = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getServerNameOrBuilder(); + + // optional bool distributed_log_replay = 2; + /** + * optional bool distributed_log_replay = 2; + */ + boolean hasDistributedLogReplay(); + /** + * optional bool distributed_log_replay = 2; + */ + boolean getDistributedLogReplay(); + + // repeated .hbase.pb.RegionInfo regions_on_crashed_server = 3; + /** + * repeated .hbase.pb.RegionInfo regions_on_crashed_server = 3; + */ + java.util.List + getRegionsOnCrashedServerList(); + /** + * repeated .hbase.pb.RegionInfo regions_on_crashed_server = 3; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo getRegionsOnCrashedServer(int index); + /** + * repeated .hbase.pb.RegionInfo regions_on_crashed_server = 3; + */ + int getRegionsOnCrashedServerCount(); + /** + * repeated .hbase.pb.RegionInfo regions_on_crashed_server = 3; + */ + java.util.List + getRegionsOnCrashedServerOrBuilderList(); + /** + * repeated .hbase.pb.RegionInfo regions_on_crashed_server = 3; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getRegionsOnCrashedServerOrBuilder( + int index); + + // repeated .hbase.pb.RegionInfo regions_assigned = 4; + /** + * repeated .hbase.pb.RegionInfo regions_assigned = 4; + */ 
+ java.util.List + getRegionsAssignedList(); + /** + * repeated .hbase.pb.RegionInfo regions_assigned = 4; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo getRegionsAssigned(int index); + /** + * repeated .hbase.pb.RegionInfo regions_assigned = 4; + */ + int getRegionsAssignedCount(); + /** + * repeated .hbase.pb.RegionInfo regions_assigned = 4; + */ + java.util.List + getRegionsAssignedOrBuilderList(); + /** + * repeated .hbase.pb.RegionInfo regions_assigned = 4; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getRegionsAssignedOrBuilder( + int index); + + // optional bool carrying_meta = 5; + /** + * optional bool carrying_meta = 5; + */ + boolean hasCarryingMeta(); + /** + * optional bool carrying_meta = 5; + */ + boolean getCarryingMeta(); + + // optional bool should_split_wal = 6 [default = true]; + /** + * optional bool should_split_wal = 6 [default = true]; + */ + boolean hasShouldSplitWal(); + /** + * optional bool should_split_wal = 6 [default = true]; + */ + boolean getShouldSplitWal(); + } + /** + * Protobuf type {@code hbase.pb.ServerCrashStateData} + */ + public static final class ServerCrashStateData extends + com.google.protobuf.GeneratedMessage + implements ServerCrashStateDataOrBuilder { + // Use ServerCrashStateData.newBuilder() to construct. + private ServerCrashStateData(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private ServerCrashStateData(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final ServerCrashStateData defaultInstance; + public static ServerCrashStateData getDefaultInstance() { + return defaultInstance; + } + + public ServerCrashStateData getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private ServerCrashStateData( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder subBuilder = null; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + subBuilder = serverName_.toBuilder(); + } + serverName_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(serverName_); + serverName_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000001; + break; + } + case 16: { + bitField0_ |= 0x00000002; + distributedLogReplay_ = input.readBool(); + break; + } + case 26: { + if (!((mutable_bitField0_ & 0x00000004) == 0x00000004)) { + regionsOnCrashedServer_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000004; + } + 
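+              // Generated parsing pattern: on the first element of this repeated field,
+              // swap in a mutable ArrayList and record that in mutable_bitField0_ so the
+              // list can be sealed (made unmodifiable) once when parsing completes.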
regionsOnCrashedServer_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.PARSER, extensionRegistry)); + break; + } + case 34: { + if (!((mutable_bitField0_ & 0x00000008) == 0x00000008)) { + regionsAssigned_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000008; + } + regionsAssigned_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.PARSER, extensionRegistry)); + break; } case 40: { bitField0_ |= 0x00000004; @@ -15379,6 +20730,21 @@ public final class MasterProcedureProtos { com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hbase_pb_DisableTableStateData_fieldAccessorTable; private static com.google.protobuf.Descriptors.Descriptor + internal_static_hbase_pb_RestoreParentToChildRegionsPair_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_hbase_pb_RestoreParentToChildRegionsPair_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_hbase_pb_CloneSnapshotStateData_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_hbase_pb_CloneSnapshotStateData_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_hbase_pb_RestoreSnapshotStateData_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_hbase_pb_RestoreSnapshotStateData_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor internal_static_hbase_pb_ServerCrashStateData_descriptor; private static com.google.protobuf.GeneratedMessage.FieldAccessorTable @@ -15445,91 +20811,123 @@ public final class MasterProcedureProtos { "isableTableStateData\022,\n\tuser_info\030\001 \002(\0132" + "\031.hbase.pb.UserInformation\022\'\n\ntable_name" + "\030\002 \002(\0132\023.hbase.pb.TableName\022\036\n\026skip_tabl" + - "e_state_check\030\003 \002(\010\"\201\002\n\024ServerCrashState" + - "Data\022)\n\013server_name\030\001 \002(\0132\024.hbase.pb.Ser" + - "verName\022\036\n\026distributed_log_replay\030\002 \001(\010\022" + - "7\n\031regions_on_crashed_server\030\003 \003(\0132\024.hba" + - "se.pb.RegionInfo\022.\n\020regions_assigned\030\004 \003" + - "(\0132\024.hbase.pb.RegionInfo\022\025\n\rcarrying_met" + - "a\030\005 \001(\010\022\036\n\020should_split_wal\030\006 \001(\010:\004true*", - "\330\001\n\020CreateTableState\022\036\n\032CREATE_TABLE_PRE" + - "_OPERATION\020\001\022 \n\034CREATE_TABLE_WRITE_FS_LA" + - "YOUT\020\002\022\034\n\030CREATE_TABLE_ADD_TO_META\020\003\022\037\n\033" + - "CREATE_TABLE_ASSIGN_REGIONS\020\004\022\"\n\036CREATE_" + - "TABLE_UPDATE_DESC_CACHE\020\005\022\037\n\033CREATE_TABL" + - "E_POST_OPERATION\020\006*\207\002\n\020ModifyTableState\022" + - "\030\n\024MODIFY_TABLE_PREPARE\020\001\022\036\n\032MODIFY_TABL" + - "E_PRE_OPERATION\020\002\022(\n$MODIFY_TABLE_UPDATE" + - "_TABLE_DESCRIPTOR\020\003\022&\n\"MODIFY_TABLE_REMO" + - "VE_REPLICA_COLUMN\020\004\022!\n\035MODIFY_TABLE_DELE", - "TE_FS_LAYOUT\020\005\022\037\n\033MODIFY_TABLE_POST_OPER" + - "ATION\020\006\022#\n\037MODIFY_TABLE_REOPEN_ALL_REGIO" + - "NS\020\007*\212\002\n\022TruncateTableState\022 \n\034TRUNCATE_" + - "TABLE_PRE_OPERATION\020\001\022#\n\037TRUNCATE_TABLE_" + - "REMOVE_FROM_META\020\002\022\"\n\036TRUNCATE_TABLE_CLE" + - "AR_FS_LAYOUT\020\003\022#\n\037TRUNCATE_TABLE_CREATE_" + - "FS_LAYOUT\020\004\022\036\n\032TRUNCATE_TABLE_ADD_TO_MET" + - 
"A\020\005\022!\n\035TRUNCATE_TABLE_ASSIGN_REGIONS\020\006\022!" + - "\n\035TRUNCATE_TABLE_POST_OPERATION\020\007*\337\001\n\020De" + - "leteTableState\022\036\n\032DELETE_TABLE_PRE_OPERA", - "TION\020\001\022!\n\035DELETE_TABLE_REMOVE_FROM_META\020" + - "\002\022 \n\034DELETE_TABLE_CLEAR_FS_LAYOUT\020\003\022\"\n\036D" + - "ELETE_TABLE_UPDATE_DESC_CACHE\020\004\022!\n\035DELET" + - "E_TABLE_UNASSIGN_REGIONS\020\005\022\037\n\033DELETE_TAB" + - "LE_POST_OPERATION\020\006*\320\001\n\024CreateNamespaceS" + - "tate\022\034\n\030CREATE_NAMESPACE_PREPARE\020\001\022%\n!CR" + - "EATE_NAMESPACE_CREATE_DIRECTORY\020\002\022)\n%CRE" + - "ATE_NAMESPACE_INSERT_INTO_NS_TABLE\020\003\022\036\n\032" + - "CREATE_NAMESPACE_UPDATE_ZK\020\004\022(\n$CREATE_N" + - "AMESPACE_SET_NAMESPACE_QUOTA\020\005*z\n\024Modify", - "NamespaceState\022\034\n\030MODIFY_NAMESPACE_PREPA" + - "RE\020\001\022$\n MODIFY_NAMESPACE_UPDATE_NS_TABLE" + - "\020\002\022\036\n\032MODIFY_NAMESPACE_UPDATE_ZK\020\003*\332\001\n\024D" + - "eleteNamespaceState\022\034\n\030DELETE_NAMESPACE_" + - "PREPARE\020\001\022)\n%DELETE_NAMESPACE_DELETE_FRO" + - "M_NS_TABLE\020\002\022#\n\037DELETE_NAMESPACE_REMOVE_" + - "FROM_ZK\020\003\022\'\n#DELETE_NAMESPACE_DELETE_DIR" + - "ECTORIES\020\004\022+\n\'DELETE_NAMESPACE_REMOVE_NA" + - "MESPACE_QUOTA\020\005*\331\001\n\024AddColumnFamilyState" + - "\022\035\n\031ADD_COLUMN_FAMILY_PREPARE\020\001\022#\n\037ADD_C", - "OLUMN_FAMILY_PRE_OPERATION\020\002\022-\n)ADD_COLU" + - "MN_FAMILY_UPDATE_TABLE_DESCRIPTOR\020\003\022$\n A" + - "DD_COLUMN_FAMILY_POST_OPERATION\020\004\022(\n$ADD" + - "_COLUMN_FAMILY_REOPEN_ALL_REGIONS\020\005*\353\001\n\027" + - "ModifyColumnFamilyState\022 \n\034MODIFY_COLUMN" + - "_FAMILY_PREPARE\020\001\022&\n\"MODIFY_COLUMN_FAMIL" + - "Y_PRE_OPERATION\020\002\0220\n,MODIFY_COLUMN_FAMIL" + - "Y_UPDATE_TABLE_DESCRIPTOR\020\003\022\'\n#MODIFY_CO" + - "LUMN_FAMILY_POST_OPERATION\020\004\022+\n\'MODIFY_C" + - "OLUMN_FAMILY_REOPEN_ALL_REGIONS\020\005*\226\002\n\027De", - "leteColumnFamilyState\022 \n\034DELETE_COLUMN_F" + - "AMILY_PREPARE\020\001\022&\n\"DELETE_COLUMN_FAMILY_" + - "PRE_OPERATION\020\002\0220\n,DELETE_COLUMN_FAMILY_" + - "UPDATE_TABLE_DESCRIPTOR\020\003\022)\n%DELETE_COLU" + - "MN_FAMILY_DELETE_FS_LAYOUT\020\004\022\'\n#DELETE_C" + - "OLUMN_FAMILY_POST_OPERATION\020\005\022+\n\'DELETE_" + - "COLUMN_FAMILY_REOPEN_ALL_REGIONS\020\006*\350\001\n\020E" + - "nableTableState\022\030\n\024ENABLE_TABLE_PREPARE\020" + - "\001\022\036\n\032ENABLE_TABLE_PRE_OPERATION\020\002\022)\n%ENA" + - "BLE_TABLE_SET_ENABLING_TABLE_STATE\020\003\022$\n ", - "ENABLE_TABLE_MARK_REGIONS_ONLINE\020\004\022(\n$EN" + - "ABLE_TABLE_SET_ENABLED_TABLE_STATE\020\005\022\037\n\033" + - "ENABLE_TABLE_POST_OPERATION\020\006*\362\001\n\021Disabl" + - "eTableState\022\031\n\025DISABLE_TABLE_PREPARE\020\001\022\037" + - "\n\033DISABLE_TABLE_PRE_OPERATION\020\002\022+\n\'DISAB" + - "LE_TABLE_SET_DISABLING_TABLE_STATE\020\003\022&\n\"" + - "DISABLE_TABLE_MARK_REGIONS_OFFLINE\020\004\022*\n&" + - "DISABLE_TABLE_SET_DISABLED_TABLE_STATE\020\005" + - "\022 \n\034DISABLE_TABLE_POST_OPERATION\020\006*\234\002\n\020S" + - "erverCrashState\022\026\n\022SERVER_CRASH_START\020\001\022", - "\035\n\031SERVER_CRASH_PROCESS_META\020\002\022\034\n\030SERVER" + - "_CRASH_GET_REGIONS\020\003\022\036\n\032SERVER_CRASH_NO_" + - "SPLIT_LOGS\020\004\022\033\n\027SERVER_CRASH_SPLIT_LOGS\020" + - "\005\022#\n\037SERVER_CRASH_PREPARE_LOG_REPLAY\020\006\022\027" + - "\n\023SERVER_CRASH_ASSIGN\020\010\022\037\n\033SERVER_CRASH_" + - 
"WAIT_ON_ASSIGN\020\t\022\027\n\023SERVER_CRASH_FINISH\020" + - "dBK\n*org.apache.hadoop.hbase.protobuf.ge" + - "neratedB\025MasterProcedureProtosH\001\210\001\001\240\001\001" + "e_state_check\030\003 \002(\010\"u\n\037RestoreParentToCh" + + "ildRegionsPair\022\032\n\022parent_region_name\030\001 \002" + + "(\t\022\032\n\022child1_region_name\030\002 \002(\t\022\032\n\022child2" + + "_region_name\030\003 \002(\t\"\245\002\n\026CloneSnapshotStat" + + "eData\022,\n\tuser_info\030\001 \002(\0132\031.hbase.pb.User" + + "Information\022/\n\010snapshot\030\002 \002(\0132\035.hbase.pb" + + ".SnapshotDescription\022+\n\014table_schema\030\003 \002", + "(\0132\025.hbase.pb.TableSchema\022)\n\013region_info" + + "\030\004 \003(\0132\024.hbase.pb.RegionInfo\022T\n!parent_t" + + "o_child_regions_pair_list\030\005 \003(\0132).hbase." + + "pb.RestoreParentToChildRegionsPair\"\245\003\n\030R" + + "estoreSnapshotStateData\022,\n\tuser_info\030\001 \002" + + "(\0132\031.hbase.pb.UserInformation\022/\n\010snapsho" + + "t\030\002 \002(\0132\035.hbase.pb.SnapshotDescription\0224" + + "\n\025modified_table_schema\030\003 \002(\0132\025.hbase.pb" + + ".TableSchema\0225\n\027region_info_for_restore\030" + + "\004 \003(\0132\024.hbase.pb.RegionInfo\0224\n\026region_in", + "fo_for_remove\030\005 \003(\0132\024.hbase.pb.RegionInf" + + "o\0221\n\023region_info_for_add\030\006 \003(\0132\024.hbase.p" + + "b.RegionInfo\022T\n!parent_to_child_regions_" + + "pair_list\030\007 \003(\0132).hbase.pb.RestoreParent" + + "ToChildRegionsPair\"\201\002\n\024ServerCrashStateD" + + "ata\022)\n\013server_name\030\001 \002(\0132\024.hbase.pb.Serv" + + "erName\022\036\n\026distributed_log_replay\030\002 \001(\010\0227" + + "\n\031regions_on_crashed_server\030\003 \003(\0132\024.hbas" + + "e.pb.RegionInfo\022.\n\020regions_assigned\030\004 \003(" + + "\0132\024.hbase.pb.RegionInfo\022\025\n\rcarrying_meta", + "\030\005 \001(\010\022\036\n\020should_split_wal\030\006 \001(\010:\004true*\330" + + "\001\n\020CreateTableState\022\036\n\032CREATE_TABLE_PRE_" + + "OPERATION\020\001\022 \n\034CREATE_TABLE_WRITE_FS_LAY" + + "OUT\020\002\022\034\n\030CREATE_TABLE_ADD_TO_META\020\003\022\037\n\033C" + + "REATE_TABLE_ASSIGN_REGIONS\020\004\022\"\n\036CREATE_T" + + "ABLE_UPDATE_DESC_CACHE\020\005\022\037\n\033CREATE_TABLE" + + "_POST_OPERATION\020\006*\207\002\n\020ModifyTableState\022\030" + + "\n\024MODIFY_TABLE_PREPARE\020\001\022\036\n\032MODIFY_TABLE" + + "_PRE_OPERATION\020\002\022(\n$MODIFY_TABLE_UPDATE_" + + "TABLE_DESCRIPTOR\020\003\022&\n\"MODIFY_TABLE_REMOV", + "E_REPLICA_COLUMN\020\004\022!\n\035MODIFY_TABLE_DELET" + + "E_FS_LAYOUT\020\005\022\037\n\033MODIFY_TABLE_POST_OPERA" + + "TION\020\006\022#\n\037MODIFY_TABLE_REOPEN_ALL_REGION" + + "S\020\007*\212\002\n\022TruncateTableState\022 \n\034TRUNCATE_T" + + "ABLE_PRE_OPERATION\020\001\022#\n\037TRUNCATE_TABLE_R" + + "EMOVE_FROM_META\020\002\022\"\n\036TRUNCATE_TABLE_CLEA" + + "R_FS_LAYOUT\020\003\022#\n\037TRUNCATE_TABLE_CREATE_F" + + "S_LAYOUT\020\004\022\036\n\032TRUNCATE_TABLE_ADD_TO_META" + + "\020\005\022!\n\035TRUNCATE_TABLE_ASSIGN_REGIONS\020\006\022!\n" + + "\035TRUNCATE_TABLE_POST_OPERATION\020\007*\337\001\n\020Del", + "eteTableState\022\036\n\032DELETE_TABLE_PRE_OPERAT" + + "ION\020\001\022!\n\035DELETE_TABLE_REMOVE_FROM_META\020\002" + + "\022 \n\034DELETE_TABLE_CLEAR_FS_LAYOUT\020\003\022\"\n\036DE" + + "LETE_TABLE_UPDATE_DESC_CACHE\020\004\022!\n\035DELETE" + + "_TABLE_UNASSIGN_REGIONS\020\005\022\037\n\033DELETE_TABL" + + 
"E_POST_OPERATION\020\006*\320\001\n\024CreateNamespaceSt" + + "ate\022\034\n\030CREATE_NAMESPACE_PREPARE\020\001\022%\n!CRE" + + "ATE_NAMESPACE_CREATE_DIRECTORY\020\002\022)\n%CREA" + + "TE_NAMESPACE_INSERT_INTO_NS_TABLE\020\003\022\036\n\032C" + + "REATE_NAMESPACE_UPDATE_ZK\020\004\022(\n$CREATE_NA", + "MESPACE_SET_NAMESPACE_QUOTA\020\005*z\n\024ModifyN" + + "amespaceState\022\034\n\030MODIFY_NAMESPACE_PREPAR" + + "E\020\001\022$\n MODIFY_NAMESPACE_UPDATE_NS_TABLE\020" + + "\002\022\036\n\032MODIFY_NAMESPACE_UPDATE_ZK\020\003*\332\001\n\024De" + + "leteNamespaceState\022\034\n\030DELETE_NAMESPACE_P" + + "REPARE\020\001\022)\n%DELETE_NAMESPACE_DELETE_FROM" + + "_NS_TABLE\020\002\022#\n\037DELETE_NAMESPACE_REMOVE_F" + + "ROM_ZK\020\003\022\'\n#DELETE_NAMESPACE_DELETE_DIRE" + + "CTORIES\020\004\022+\n\'DELETE_NAMESPACE_REMOVE_NAM" + + "ESPACE_QUOTA\020\005*\331\001\n\024AddColumnFamilyState\022", + "\035\n\031ADD_COLUMN_FAMILY_PREPARE\020\001\022#\n\037ADD_CO" + + "LUMN_FAMILY_PRE_OPERATION\020\002\022-\n)ADD_COLUM" + + "N_FAMILY_UPDATE_TABLE_DESCRIPTOR\020\003\022$\n AD" + + "D_COLUMN_FAMILY_POST_OPERATION\020\004\022(\n$ADD_" + + "COLUMN_FAMILY_REOPEN_ALL_REGIONS\020\005*\353\001\n\027M" + + "odifyColumnFamilyState\022 \n\034MODIFY_COLUMN_" + + "FAMILY_PREPARE\020\001\022&\n\"MODIFY_COLUMN_FAMILY" + + "_PRE_OPERATION\020\002\0220\n,MODIFY_COLUMN_FAMILY" + + "_UPDATE_TABLE_DESCRIPTOR\020\003\022\'\n#MODIFY_COL" + + "UMN_FAMILY_POST_OPERATION\020\004\022+\n\'MODIFY_CO", + "LUMN_FAMILY_REOPEN_ALL_REGIONS\020\005*\226\002\n\027Del" + + "eteColumnFamilyState\022 \n\034DELETE_COLUMN_FA" + + "MILY_PREPARE\020\001\022&\n\"DELETE_COLUMN_FAMILY_P" + + "RE_OPERATION\020\002\0220\n,DELETE_COLUMN_FAMILY_U" + + "PDATE_TABLE_DESCRIPTOR\020\003\022)\n%DELETE_COLUM" + + "N_FAMILY_DELETE_FS_LAYOUT\020\004\022\'\n#DELETE_CO" + + "LUMN_FAMILY_POST_OPERATION\020\005\022+\n\'DELETE_C" + + "OLUMN_FAMILY_REOPEN_ALL_REGIONS\020\006*\350\001\n\020En" + + "ableTableState\022\030\n\024ENABLE_TABLE_PREPARE\020\001" + + "\022\036\n\032ENABLE_TABLE_PRE_OPERATION\020\002\022)\n%ENAB", + "LE_TABLE_SET_ENABLING_TABLE_STATE\020\003\022$\n E" + + "NABLE_TABLE_MARK_REGIONS_ONLINE\020\004\022(\n$ENA" + + "BLE_TABLE_SET_ENABLED_TABLE_STATE\020\005\022\037\n\033E" + + "NABLE_TABLE_POST_OPERATION\020\006*\362\001\n\021Disable" + + "TableState\022\031\n\025DISABLE_TABLE_PREPARE\020\001\022\037\n" + + "\033DISABLE_TABLE_PRE_OPERATION\020\002\022+\n\'DISABL" + + "E_TABLE_SET_DISABLING_TABLE_STATE\020\003\022&\n\"D" + + "ISABLE_TABLE_MARK_REGIONS_OFFLINE\020\004\022*\n&D" + + "ISABLE_TABLE_SET_DISABLED_TABLE_STATE\020\005\022" + + " \n\034DISABLE_TABLE_POST_OPERATION\020\006*\346\001\n\022Cl", + "oneSnapshotState\022 \n\034CLONE_SNAPSHOT_PRE_O" + + "PERATION\020\001\022\"\n\036CLONE_SNAPSHOT_WRITE_FS_LA" + + "YOUT\020\002\022\036\n\032CLONE_SNAPSHOT_ADD_TO_META\020\003\022!" 
+ + "\n\035CLONE_SNAPSHOT_ASSIGN_REGIONS\020\004\022$\n CLO" + + "NE_SNAPSHOT_UPDATE_DESC_CACHE\020\005\022!\n\035CLONE" + + "_SNAPSHOT_POST_OPERATION\020\006*\260\001\n\024RestoreSn" + + "apshotState\022\"\n\036RESTORE_SNAPSHOT_PRE_OPER" + + "ATION\020\001\022,\n(RESTORE_SNAPSHOT_UPDATE_TABLE" + + "_DESCRIPTOR\020\002\022$\n RESTORE_SNAPSHOT_WRITE_" + + "FS_LAYOUT\020\003\022 \n\034RESTORE_SNAPSHOT_UPDATE_M", + "ETA\020\004*\234\002\n\020ServerCrashState\022\026\n\022SERVER_CRA" + + "SH_START\020\001\022\035\n\031SERVER_CRASH_PROCESS_META\020" + + "\002\022\034\n\030SERVER_CRASH_GET_REGIONS\020\003\022\036\n\032SERVE" + + "R_CRASH_NO_SPLIT_LOGS\020\004\022\033\n\027SERVER_CRASH_" + + "SPLIT_LOGS\020\005\022#\n\037SERVER_CRASH_PREPARE_LOG" + + "_REPLAY\020\006\022\027\n\023SERVER_CRASH_ASSIGN\020\010\022\037\n\033SE" + + "RVER_CRASH_WAIT_ON_ASSIGN\020\t\022\027\n\023SERVER_CR" + + "ASH_FINISH\020dBK\n*org.apache.hadoop.hbase." + + "protobuf.generatedB\025MasterProcedureProto" + + "sH\001\210\001\001\240\001\001" }; com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { @@ -15608,8 +21006,26 @@ public final class MasterProcedureProtos { com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_DisableTableStateData_descriptor, new java.lang.String[] { "UserInfo", "TableName", "SkipTableStateCheck", }); - internal_static_hbase_pb_ServerCrashStateData_descriptor = + internal_static_hbase_pb_RestoreParentToChildRegionsPair_descriptor = getDescriptor().getMessageTypes().get(12); + internal_static_hbase_pb_RestoreParentToChildRegionsPair_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_hbase_pb_RestoreParentToChildRegionsPair_descriptor, + new java.lang.String[] { "ParentRegionName", "Child1RegionName", "Child2RegionName", }); + internal_static_hbase_pb_CloneSnapshotStateData_descriptor = + getDescriptor().getMessageTypes().get(13); + internal_static_hbase_pb_CloneSnapshotStateData_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_hbase_pb_CloneSnapshotStateData_descriptor, + new java.lang.String[] { "UserInfo", "Snapshot", "TableSchema", "RegionInfo", "ParentToChildRegionsPairList", }); + internal_static_hbase_pb_RestoreSnapshotStateData_descriptor = + getDescriptor().getMessageTypes().get(14); + internal_static_hbase_pb_RestoreSnapshotStateData_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_hbase_pb_RestoreSnapshotStateData_descriptor, + new java.lang.String[] { "UserInfo", "Snapshot", "ModifiedTableSchema", "RegionInfoForRestore", "RegionInfoForRemove", "RegionInfoForAdd", "ParentToChildRegionsPairList", }); + internal_static_hbase_pb_ServerCrashStateData_descriptor = + getDescriptor().getMessageTypes().get(15); internal_static_hbase_pb_ServerCrashStateData_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_ServerCrashStateData_descriptor, diff --git hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java index 073eba9..b91a36b 100644 --- hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java +++ hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java @@ 
-38793,6 +38793,26 @@ public final class MasterProtos { * required .hbase.pb.SnapshotDescription snapshot = 1; */ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescriptionOrBuilder getSnapshotOrBuilder(); + + // optional uint64 nonce_group = 2 [default = 0]; + /** + * optional uint64 nonce_group = 2 [default = 0]; + */ + boolean hasNonceGroup(); + /** + * optional uint64 nonce_group = 2 [default = 0]; + */ + long getNonceGroup(); + + // optional uint64 nonce = 3 [default = 0]; + /** + * optional uint64 nonce = 3 [default = 0]; + */ + boolean hasNonce(); + /** + * optional uint64 nonce = 3 [default = 0]; + */ + long getNonce(); } /** * Protobuf type {@code hbase.pb.RestoreSnapshotRequest} @@ -38858,6 +38878,16 @@ public final class MasterProtos { bitField0_ |= 0x00000001; break; } + case 16: { + bitField0_ |= 0x00000002; + nonceGroup_ = input.readUInt64(); + break; + } + case 24: { + bitField0_ |= 0x00000004; + nonce_ = input.readUInt64(); + break; + } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { @@ -38920,8 +38950,42 @@ public final class MasterProtos { return snapshot_; } + // optional uint64 nonce_group = 2 [default = 0]; + public static final int NONCE_GROUP_FIELD_NUMBER = 2; + private long nonceGroup_; + /** + * optional uint64 nonce_group = 2 [default = 0]; + */ + public boolean hasNonceGroup() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * optional uint64 nonce_group = 2 [default = 0]; + */ + public long getNonceGroup() { + return nonceGroup_; + } + + // optional uint64 nonce = 3 [default = 0]; + public static final int NONCE_FIELD_NUMBER = 3; + private long nonce_; + /** + * optional uint64 nonce = 3 [default = 0]; + */ + public boolean hasNonce() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * optional uint64 nonce = 3 [default = 0]; + */ + public long getNonce() { + return nonce_; + } + private void initFields() { snapshot_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.getDefaultInstance(); + nonceGroup_ = 0L; + nonce_ = 0L; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { @@ -38946,6 +39010,12 @@ public final class MasterProtos { if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeMessage(1, snapshot_); } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeUInt64(2, nonceGroup_); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + output.writeUInt64(3, nonce_); + } getUnknownFields().writeTo(output); } @@ -38959,6 +39029,14 @@ public final class MasterProtos { size += com.google.protobuf.CodedOutputStream .computeMessageSize(1, snapshot_); } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt64Size(2, nonceGroup_); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt64Size(3, nonce_); + } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; @@ -38987,6 +39065,16 @@ public final class MasterProtos { result = result && getSnapshot() .equals(other.getSnapshot()); } + result = result && (hasNonceGroup() == other.hasNonceGroup()); + if (hasNonceGroup()) { + result = result && (getNonceGroup() + == other.getNonceGroup()); + } + result = result && (hasNonce() == other.hasNonce()); + if (hasNonce()) { + result = result && (getNonce() + == other.getNonce()); + } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; @@ 
-39004,6 +39092,14 @@ public final class MasterProtos { hash = (37 * hash) + SNAPSHOT_FIELD_NUMBER; hash = (53 * hash) + getSnapshot().hashCode(); } + if (hasNonceGroup()) { + hash = (37 * hash) + NONCE_GROUP_FIELD_NUMBER; + hash = (53 * hash) + hashLong(getNonceGroup()); + } + if (hasNonce()) { + hash = (37 * hash) + NONCE_FIELD_NUMBER; + hash = (53 * hash) + hashLong(getNonce()); + } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; @@ -39120,6 +39216,10 @@ public final class MasterProtos { snapshotBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000001); + nonceGroup_ = 0L; + bitField0_ = (bitField0_ & ~0x00000002); + nonce_ = 0L; + bitField0_ = (bitField0_ & ~0x00000004); return this; } @@ -39156,6 +39256,14 @@ public final class MasterProtos { } else { result.snapshot_ = snapshotBuilder_.build(); } + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.nonceGroup_ = nonceGroup_; + if (((from_bitField0_ & 0x00000004) == 0x00000004)) { + to_bitField0_ |= 0x00000004; + } + result.nonce_ = nonce_; result.bitField0_ = to_bitField0_; onBuilt(); return result; @@ -39175,6 +39283,12 @@ public final class MasterProtos { if (other.hasSnapshot()) { mergeSnapshot(other.getSnapshot()); } + if (other.hasNonceGroup()) { + setNonceGroup(other.getNonceGroup()); + } + if (other.hasNonce()) { + setNonce(other.getNonce()); + } this.mergeUnknownFields(other.getUnknownFields()); return this; } @@ -39327,6 +39441,72 @@ public final class MasterProtos { return snapshotBuilder_; } + // optional uint64 nonce_group = 2 [default = 0]; + private long nonceGroup_ ; + /** + * optional uint64 nonce_group = 2 [default = 0]; + */ + public boolean hasNonceGroup() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * optional uint64 nonce_group = 2 [default = 0]; + */ + public long getNonceGroup() { + return nonceGroup_; + } + /** + * optional uint64 nonce_group = 2 [default = 0]; + */ + public Builder setNonceGroup(long value) { + bitField0_ |= 0x00000002; + nonceGroup_ = value; + onChanged(); + return this; + } + /** + * optional uint64 nonce_group = 2 [default = 0]; + */ + public Builder clearNonceGroup() { + bitField0_ = (bitField0_ & ~0x00000002); + nonceGroup_ = 0L; + onChanged(); + return this; + } + + // optional uint64 nonce = 3 [default = 0]; + private long nonce_ ; + /** + * optional uint64 nonce = 3 [default = 0]; + */ + public boolean hasNonce() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * optional uint64 nonce = 3 [default = 0]; + */ + public long getNonce() { + return nonce_; + } + /** + * optional uint64 nonce = 3 [default = 0]; + */ + public Builder setNonce(long value) { + bitField0_ |= 0x00000004; + nonce_ = value; + onChanged(); + return this; + } + /** + * optional uint64 nonce = 3 [default = 0]; + */ + public Builder clearNonce() { + bitField0_ = (bitField0_ & ~0x00000004); + nonce_ = 0L; + onChanged(); + return this; + } + // @@protoc_insertion_point(builder_scope:hbase.pb.RestoreSnapshotRequest) } @@ -39340,6 +39520,16 @@ public final class MasterProtos { public interface RestoreSnapshotResponseOrBuilder extends com.google.protobuf.MessageOrBuilder { + + // required uint64 proc_id = 1; + /** + * required uint64 proc_id = 1; + */ + boolean hasProcId(); + /** + * required uint64 proc_id = 1; + */ + long getProcId(); } /** * Protobuf type {@code hbase.pb.RestoreSnapshotResponse} @@ -39374,6 +39564,7 @@ public final class MasterProtos { com.google.protobuf.ExtensionRegistryLite 
extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { initFields(); + int mutable_bitField0_ = 0; com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); try { @@ -39391,6 +39582,11 @@ public final class MasterProtos { } break; } + case 8: { + bitField0_ |= 0x00000001; + procId_ = input.readUInt64(); + break; + } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { @@ -39430,13 +39626,35 @@ public final class MasterProtos { return PARSER; } + private int bitField0_; + // required uint64 proc_id = 1; + public static final int PROC_ID_FIELD_NUMBER = 1; + private long procId_; + /** + * required uint64 proc_id = 1; + */ + public boolean hasProcId() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required uint64 proc_id = 1; + */ + public long getProcId() { + return procId_; + } + private void initFields() { + procId_ = 0L; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; + if (!hasProcId()) { + memoizedIsInitialized = 0; + return false; + } memoizedIsInitialized = 1; return true; } @@ -39444,6 +39662,9 @@ public final class MasterProtos { public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeUInt64(1, procId_); + } getUnknownFields().writeTo(output); } @@ -39453,6 +39674,10 @@ public final class MasterProtos { if (size != -1) return size; size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt64Size(1, procId_); + } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; @@ -39476,6 +39701,11 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotResponse other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotResponse) obj; boolean result = true; + result = result && (hasProcId() == other.hasProcId()); + if (hasProcId()) { + result = result && (getProcId() + == other.getProcId()); + } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; @@ -39489,6 +39719,10 @@ public final class MasterProtos { } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasProcId()) { + hash = (37 * hash) + PROC_ID_FIELD_NUMBER; + hash = (53 * hash) + hashLong(getProcId()); + } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; @@ -39598,6 +39832,8 @@ public final class MasterProtos { public Builder clear() { super.clear(); + procId_ = 0L; + bitField0_ = (bitField0_ & ~0x00000001); return this; } @@ -39624,6 +39860,13 @@ public final class MasterProtos { public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotResponse buildPartial() { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotResponse result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotResponse(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.procId_ = procId_; + result.bitField0_ = to_bitField0_; onBuilt(); return result; } @@ -39639,11 +39882,18 @@ public final class MasterProtos { public Builder 
mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotResponse other) { if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotResponse.getDefaultInstance()) return this; + if (other.hasProcId()) { + setProcId(other.getProcId()); + } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { + if (!hasProcId()) { + + return false; + } return true; } @@ -39664,6 +39914,40 @@ public final class MasterProtos { } return this; } + private int bitField0_; + + // required uint64 proc_id = 1; + private long procId_ ; + /** + * required uint64 proc_id = 1; + */ + public boolean hasProcId() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required uint64 proc_id = 1; + */ + public long getProcId() { + return procId_; + } + /** + * required uint64 proc_id = 1; + */ + public Builder setProcId(long value) { + bitField0_ |= 0x00000001; + procId_ = value; + onChanged(); + return this; + } + /** + * required uint64 proc_id = 1; + */ + public Builder clearProcId() { + bitField0_ = (bitField0_ & ~0x00000001); + procId_ = 0L; + onChanged(); + return this; + } // @@protoc_insertion_point(builder_scope:hbase.pb.RestoreSnapshotResponse) } @@ -59709,19 +59993,6 @@ public final class MasterProtos { com.google.protobuf.RpcCallback done); /** - * rpc IsRestoreSnapshotDone(.hbase.pb.IsRestoreSnapshotDoneRequest) returns (.hbase.pb.IsRestoreSnapshotDoneResponse); - * - *

-       **
-       * Determine if the snapshot restore is done yet.
-       * 
- */ - public abstract void isRestoreSnapshotDone( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsRestoreSnapshotDoneRequest request, - com.google.protobuf.RpcCallback done); - - /** * rpc ExecProcedure(.hbase.pb.ExecProcedureRequest) returns (.hbase.pb.ExecProcedureResponse); * *
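The two message changes above are the heart of the patch: RestoreSnapshotRequest grows optional nonce_group/nonce fields (following the nonce convention HBase RPC uses elsewhere to let the master deduplicate retried non-idempotent operations), and RestoreSnapshotResponse now returns the id of the procedure the master queued. The new parser cases follow directly from the wire format: a uint64 field n encodes with varint tag (n << 3) | 0, so field 2 arrives as tag 16 and field 3 as tag 24. A minimal client-side sketch of the new request shape (snapshot name and nonce values are illustrative only; simple names assume the generated MasterProtos/HBaseProtos classes are imported):

    long nonceGroup = 1L, nonce = 42L;  // illustrative values, normally from a nonce generator
    MasterProtos.RestoreSnapshotRequest request =
        MasterProtos.RestoreSnapshotRequest.newBuilder()
            .setSnapshot(HBaseProtos.SnapshotDescription.newBuilder().setName("snap1"))
            .setNonceGroup(nonceGroup)  // optional uint64, field 2 -> wire tag 16
            .setNonce(nonce)            // optional uint64, field 3 -> wire tag 24
            .build();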
@@ -60246,14 +60517,6 @@ public final class MasterProtos {
         }
 
         @java.lang.Override
-        public  void isRestoreSnapshotDone(
-            com.google.protobuf.RpcController controller,
-            org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsRestoreSnapshotDoneRequest request,
-            com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsRestoreSnapshotDoneResponse> done) {
-          impl.isRestoreSnapshotDone(controller, request, done);
-        }
-
-        @java.lang.Override
         public  void execProcedure(
             com.google.protobuf.RpcController controller,
             org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest request,
@@ -60496,42 +60759,40 @@ public final class MasterProtos {
             case 37:
               return impl.restoreSnapshot(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotRequest)request);
             case 38:
-              return impl.isRestoreSnapshotDone(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsRestoreSnapshotDoneRequest)request);
-            case 39:
               return impl.execProcedure(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest)request);
-            case 40:
+            case 39:
               return impl.execProcedureWithRet(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest)request);
-            case 41:
+            case 40:
               return impl.isProcedureDone(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest)request);
-            case 42:
+            case 41:
               return impl.modifyNamespace(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyNamespaceRequest)request);
-            case 43:
+            case 42:
               return impl.createNamespace(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateNamespaceRequest)request);
-            case 44:
+            case 43:
               return impl.deleteNamespace(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteNamespaceRequest)request);
-            case 45:
+            case 44:
               return impl.getNamespaceDescriptor(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNamespaceDescriptorRequest)request);
-            case 46:
+            case 45:
               return impl.listNamespaceDescriptors(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespaceDescriptorsRequest)request);
-            case 47:
+            case 46:
               return impl.listTableDescriptorsByNamespace(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceRequest)request);
-            case 48:
+            case 47:
               return impl.listTableNamesByNamespace(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceRequest)request);
-            case 49:
+            case 48:
               return impl.getTableState(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest)request);
-            case 50:
+            case 49:
               return impl.setQuota(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest)request);
-            case 51:
+            case 50:
               return impl.getLastMajorCompactionTimestamp(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest)request);
-            case 52:
+            case 51:
               return impl.getLastMajorCompactionTimestampForRegion(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest)request);
-            case 53:
+            case 52:
               return impl.getProcedureResult(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest)request);
-            case 54:
+            case 53:
               return impl.getSecurityCapabilities(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest)request);
-            case 55:
+            case 54:
               return impl.abortProcedure(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest)request);
-            case 56:
+            case 55:
               return impl.listProcedures(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest)request);
             default:
               throw new java.lang.AssertionError("Can't get here.");
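All of the churn in this switch is mechanical: protobuf services dispatch on a method's positional index in the service definition, so deleting method 38 (isRestoreSnapshotDone) shifts every later case label down by one while the handler bodies stay identical. A quick way to sanity-check the new numbering (assuming the service descriptor is reachable as MasterService, as elsewhere in MasterProtos):

    com.google.protobuf.Descriptors.MethodDescriptor m =
        MasterProtos.MasterService.getDescriptor().findMethodByName("ExecProcedure");
    // With this patch applied m.getIndex() == 38; previously ExecProcedure sat
    // at index 39, behind IsRestoreSnapshotDone.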
@@ -60624,42 +60885,40 @@ public final class MasterProtos {
             case 37:
               return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotRequest.getDefaultInstance();
             case 38:
-              return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsRestoreSnapshotDoneRequest.getDefaultInstance();
+              return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest.getDefaultInstance();
             case 39:
               return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest.getDefaultInstance();
             case 40:
-              return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest.getDefaultInstance();
-            case 41:
               return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest.getDefaultInstance();
-            case 42:
+            case 41:
               return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyNamespaceRequest.getDefaultInstance();
-            case 43:
+            case 42:
               return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateNamespaceRequest.getDefaultInstance();
-            case 44:
+            case 43:
               return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteNamespaceRequest.getDefaultInstance();
-            case 45:
+            case 44:
               return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNamespaceDescriptorRequest.getDefaultInstance();
-            case 46:
+            case 45:
               return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespaceDescriptorsRequest.getDefaultInstance();
-            case 47:
+            case 46:
               return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceRequest.getDefaultInstance();
-            case 48:
+            case 47:
               return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceRequest.getDefaultInstance();
-            case 49:
+            case 48:
               return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest.getDefaultInstance();
-            case 50:
+            case 49:
               return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest.getDefaultInstance();
-            case 51:
+            case 50:
               return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest.getDefaultInstance();
-            case 52:
+            case 51:
               return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest.getDefaultInstance();
-            case 53:
+            case 52:
               return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest.getDefaultInstance();
-            case 54:
+            case 53:
               return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest.getDefaultInstance();
-            case 55:
+            case 54:
               return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest.getDefaultInstance();
-            case 56:
+            case 55:
               return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest.getDefaultInstance();
             default:
               throw new java.lang.AssertionError("Can't get here.");
@@ -60752,42 +61011,40 @@ public final class MasterProtos {
             case 37:
               return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotResponse.getDefaultInstance();
             case 38:
-              return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsRestoreSnapshotDoneResponse.getDefaultInstance();
+              return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse.getDefaultInstance();
             case 39:
               return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse.getDefaultInstance();
             case 40:
-              return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse.getDefaultInstance();
-            case 41:
               return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse.getDefaultInstance();
-            case 42:
+            case 41:
               return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyNamespaceResponse.getDefaultInstance();
-            case 43:
+            case 42:
               return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateNamespaceResponse.getDefaultInstance();
-            case 44:
+            case 43:
               return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteNamespaceResponse.getDefaultInstance();
-            case 45:
+            case 44:
               return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNamespaceDescriptorResponse.getDefaultInstance();
-            case 46:
+            case 45:
               return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespaceDescriptorsResponse.getDefaultInstance();
-            case 47:
+            case 46:
               return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceResponse.getDefaultInstance();
-            case 48:
+            case 47:
               return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceResponse.getDefaultInstance();
-            case 49:
+            case 48:
               return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse.getDefaultInstance();
-            case 50:
+            case 49:
               return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse.getDefaultInstance();
+            case 50:
+              return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.getDefaultInstance();
             case 51:
               return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.getDefaultInstance();
             case 52:
-              return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.getDefaultInstance();
-            case 53:
               return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.getDefaultInstance();
-            case 54:
+            case 53:
               return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse.getDefaultInstance();
-            case 55:
+            case 54:
               return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse.getDefaultInstance();
-            case 56:
+            case 55:
               return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse.getDefaultInstance();
             default:
               throw new java.lang.AssertionError("Can't get here.");
@@ -61283,19 +61540,6 @@ public final class MasterProtos {
        com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotResponse> done);
 
     /**
-     * <code>rpc IsRestoreSnapshotDone(.hbase.pb.IsRestoreSnapshotDoneRequest) returns (.hbase.pb.IsRestoreSnapshotDoneResponse);</code>
-     *
-     * <pre>
-     **
-     * Determine if the snapshot restore is done yet.
-     * </pre>
-     */
-    public abstract void isRestoreSnapshotDone(
-        com.google.protobuf.RpcController controller,
-        org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsRestoreSnapshotDoneRequest request,
-        com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsRestoreSnapshotDoneResponse> done);
-
-    /**
      * <code>rpc ExecProcedure(.hbase.pb.ExecProcedureRequest) returns (.hbase.pb.ExecProcedureResponse);</code>
      *
      * <pre>
@@ -61723,96 +61967,91 @@ public final class MasterProtos {
               done));
           return;
         case 38:
-          this.isRestoreSnapshotDone(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsRestoreSnapshotDoneRequest)request,
-            com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsRestoreSnapshotDoneResponse>specializeCallback(
-              done));
-          return;
-        case 39:
           this.execProcedure(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest)request,
            com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse>specializeCallback(
               done));
           return;
-        case 40:
+        case 39:
           this.execProcedureWithRet(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest)request,
            com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse>specializeCallback(
               done));
           return;
-        case 41:
+        case 40:
           this.isProcedureDone(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest)request,
            com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse>specializeCallback(
               done));
           return;
-        case 42:
+        case 41:
           this.modifyNamespace(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyNamespaceRequest)request,
            com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyNamespaceResponse>specializeCallback(
               done));
           return;
-        case 43:
+        case 42:
           this.createNamespace(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateNamespaceRequest)request,
            com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateNamespaceResponse>specializeCallback(
               done));
           return;
-        case 44:
+        case 43:
           this.deleteNamespace(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteNamespaceRequest)request,
            com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteNamespaceResponse>specializeCallback(
               done));
           return;
-        case 45:
+        case 44:
           this.getNamespaceDescriptor(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNamespaceDescriptorRequest)request,
            com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNamespaceDescriptorResponse>specializeCallback(
               done));
           return;
-        case 46:
+        case 45:
           this.listNamespaceDescriptors(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespaceDescriptorsRequest)request,
            com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespaceDescriptorsResponse>specializeCallback(
               done));
           return;
-        case 47:
+        case 46:
           this.listTableDescriptorsByNamespace(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceRequest)request,
            com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceResponse>specializeCallback(
               done));
           return;
-        case 48:
+        case 47:
           this.listTableNamesByNamespace(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceRequest)request,
            com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceResponse>specializeCallback(
               done));
           return;
-        case 49:
+        case 48:
           this.getTableState(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest)request,
            com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse>specializeCallback(
               done));
           return;
-        case 50:
+        case 49:
           this.setQuota(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest)request,
            com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse>specializeCallback(
               done));
           return;
-        case 51:
+        case 50:
           this.getLastMajorCompactionTimestamp(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest)request,
            com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse>specializeCallback(
               done));
           return;
-        case 52:
+        case 51:
           this.getLastMajorCompactionTimestampForRegion(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest)request,
            com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse>specializeCallback(
               done));
           return;
-        case 53:
+        case 52:
           this.getProcedureResult(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest)request,
            com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse>specializeCallback(
               done));
           return;
-        case 54:
+        case 53:
           this.getSecurityCapabilities(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest)request,
            com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse>specializeCallback(
               done));
           return;
-        case 55:
+        case 54:
           this.abortProcedure(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest)request,
            com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse>specializeCallback(
               done));
           return;
-        case 56:
+        case 55:
           this.listProcedures(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest)request,
            com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse>specializeCallback(
               done));
@@ -61908,42 +62147,40 @@ public final class MasterProtos {
         case 37:
           return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotRequest.getDefaultInstance();
         case 38:
-          return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsRestoreSnapshotDoneRequest.getDefaultInstance();
+          return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest.getDefaultInstance();
         case 39:
           return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest.getDefaultInstance();
         case 40:
-          return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest.getDefaultInstance();
-        case 41:
           return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest.getDefaultInstance();
-        case 42:
+        case 41:
           return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyNamespaceRequest.getDefaultInstance();
-        case 43:
+        case 42:
           return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateNamespaceRequest.getDefaultInstance();
-        case 44:
+        case 43:
           return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteNamespaceRequest.getDefaultInstance();
-        case 45:
+        case 44:
           return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNamespaceDescriptorRequest.getDefaultInstance();
-        case 46:
+        case 45:
           return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespaceDescriptorsRequest.getDefaultInstance();
-        case 47:
+        case 46:
           return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceRequest.getDefaultInstance();
-        case 48:
+        case 47:
           return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceRequest.getDefaultInstance();
-        case 49:
+        case 48:
           return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest.getDefaultInstance();
-        case 50:
+        case 49:
           return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest.getDefaultInstance();
-        case 51:
+        case 50:
           return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest.getDefaultInstance();
-        case 52:
+        case 51:
           return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest.getDefaultInstance();
-        case 53:
+        case 52:
           return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest.getDefaultInstance();
-        case 54:
+        case 53:
           return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest.getDefaultInstance();
-        case 55:
+        case 54:
           return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest.getDefaultInstance();
-        case 56:
+        case 55:
           return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest.getDefaultInstance();
         default:
           throw new java.lang.AssertionError("Can't get here.");
@@ -62036,42 +62273,40 @@ public final class MasterProtos {
         case 37:
           return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotResponse.getDefaultInstance();
         case 38:
-          return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsRestoreSnapshotDoneResponse.getDefaultInstance();
+          return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse.getDefaultInstance();
         case 39:
           return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse.getDefaultInstance();
         case 40:
-          return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse.getDefaultInstance();
-        case 41:
           return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse.getDefaultInstance();
-        case 42:
+        case 41:
           return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyNamespaceResponse.getDefaultInstance();
-        case 43:
+        case 42:
           return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateNamespaceResponse.getDefaultInstance();
-        case 44:
+        case 43:
           return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteNamespaceResponse.getDefaultInstance();
-        case 45:
+        case 44:
           return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNamespaceDescriptorResponse.getDefaultInstance();
-        case 46:
+        case 45:
           return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespaceDescriptorsResponse.getDefaultInstance();
-        case 47:
+        case 46:
           return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceResponse.getDefaultInstance();
-        case 48:
+        case 47:
           return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceResponse.getDefaultInstance();
-        case 49:
+        case 48:
           return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse.getDefaultInstance();
-        case 50:
+        case 49:
           return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse.getDefaultInstance();
+        case 50:
+          return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.getDefaultInstance();
         case 51:
           return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.getDefaultInstance();
         case 52:
-          return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.getDefaultInstance();
-        case 53:
           return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.getDefaultInstance();
-        case 54:
+        case 53:
           return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse.getDefaultInstance();
-        case 55:
+        case 54:
           return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse.getDefaultInstance();
-        case 56:
+        case 55:
           return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse.getDefaultInstance();
         default:
           throw new java.lang.AssertionError("Can't get here.");
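Note that proc_id is declared required, so the regenerated RestoreSnapshotResponse refuses to build without it; the master can no longer answer this RPC with an empty message. A sketch of the builder behavior (procId here stands for whatever id the master assigned):

    MasterProtos.RestoreSnapshotResponse.Builder builder =
        MasterProtos.RestoreSnapshotResponse.newBuilder();
    // builder.build() at this point would throw UninitializedMessageException,
    // because the required proc_id field is still unset.
    long procId = 7L;  // illustrative value
    MasterProtos.RestoreSnapshotResponse response = builder.setProcId(procId).build();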
@@ -62664,27 +62899,12 @@ public final class MasterProtos {
             org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotResponse.getDefaultInstance()));
       }
 
-      public  void isRestoreSnapshotDone(
-          com.google.protobuf.RpcController controller,
-          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsRestoreSnapshotDoneRequest request,
-          com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsRestoreSnapshotDoneResponse> done) {
-        channel.callMethod(
-          getDescriptor().getMethods().get(38),
-          controller,
-          request,
-          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsRestoreSnapshotDoneResponse.getDefaultInstance(),
-          com.google.protobuf.RpcUtil.generalizeCallback(
-            done,
-            org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsRestoreSnapshotDoneResponse.class,
-            org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsRestoreSnapshotDoneResponse.getDefaultInstance()));
-      }
-
       public  void execProcedure(
           com.google.protobuf.RpcController controller,
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest request,
          com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse> done) {
         channel.callMethod(
-          getDescriptor().getMethods().get(39),
+          getDescriptor().getMethods().get(38),
           controller,
           request,
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse.getDefaultInstance(),
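For callers of the non-blocking stub, the replacement for the removed method is to invoke restoreSnapshot itself and pick the procedure id out of the callback; completion is then tracked through getProcedureResult rather than a dedicated is-done RPC. A sketch, assuming stub is the generated non-blocking stub bound to an RpcChannel as in the surrounding code:

    stub.restoreSnapshot(controller, request,
        new com.google.protobuf.RpcCallback<MasterProtos.RestoreSnapshotResponse>() {
          @Override
          public void run(MasterProtos.RestoreSnapshotResponse response) {
            long procId = response.getProcId();
            // hand procId to whatever polls getProcedureResult for completion
          }
        });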
@@ -62699,7 +62919,7 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest request,
          com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse> done) {
         channel.callMethod(
-          getDescriptor().getMethods().get(40),
+          getDescriptor().getMethods().get(39),
           controller,
           request,
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse.getDefaultInstance(),
@@ -62714,7 +62934,7 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest request,
          com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse> done) {
         channel.callMethod(
-          getDescriptor().getMethods().get(41),
+          getDescriptor().getMethods().get(40),
           controller,
           request,
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse.getDefaultInstance(),
@@ -62729,7 +62949,7 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyNamespaceRequest request,
          com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyNamespaceResponse> done) {
         channel.callMethod(
-          getDescriptor().getMethods().get(42),
+          getDescriptor().getMethods().get(41),
           controller,
           request,
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyNamespaceResponse.getDefaultInstance(),
@@ -62744,7 +62964,7 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateNamespaceRequest request,
          com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateNamespaceResponse> done) {
         channel.callMethod(
-          getDescriptor().getMethods().get(43),
+          getDescriptor().getMethods().get(42),
           controller,
           request,
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateNamespaceResponse.getDefaultInstance(),
@@ -62759,7 +62979,7 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteNamespaceRequest request,
          com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteNamespaceResponse> done) {
         channel.callMethod(
-          getDescriptor().getMethods().get(44),
+          getDescriptor().getMethods().get(43),
           controller,
           request,
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteNamespaceResponse.getDefaultInstance(),
@@ -62774,7 +62994,7 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNamespaceDescriptorRequest request,
          com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNamespaceDescriptorResponse> done) {
         channel.callMethod(
-          getDescriptor().getMethods().get(45),
+          getDescriptor().getMethods().get(44),
           controller,
           request,
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNamespaceDescriptorResponse.getDefaultInstance(),
@@ -62789,7 +63009,7 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespaceDescriptorsRequest request,
          com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespaceDescriptorsResponse> done) {
         channel.callMethod(
-          getDescriptor().getMethods().get(46),
+          getDescriptor().getMethods().get(45),
           controller,
           request,
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespaceDescriptorsResponse.getDefaultInstance(),
@@ -62804,7 +63024,7 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceRequest request,
          com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceResponse> done) {
         channel.callMethod(
-          getDescriptor().getMethods().get(47),
+          getDescriptor().getMethods().get(46),
           controller,
           request,
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceResponse.getDefaultInstance(),
@@ -62819,7 +63039,7 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceRequest request,
          com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceResponse> done) {
         channel.callMethod(
-          getDescriptor().getMethods().get(48),
+          getDescriptor().getMethods().get(47),
           controller,
           request,
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceResponse.getDefaultInstance(),
@@ -62834,7 +63054,7 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest request,
          com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse> done) {
         channel.callMethod(
-          getDescriptor().getMethods().get(49),
+          getDescriptor().getMethods().get(48),
           controller,
           request,
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse.getDefaultInstance(),
@@ -62849,7 +63069,7 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest request,
          com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse> done) {
         channel.callMethod(
-          getDescriptor().getMethods().get(50),
+          getDescriptor().getMethods().get(49),
           controller,
           request,
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse.getDefaultInstance(),
@@ -62864,7 +63084,7 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest request,
          com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse> done) {
         channel.callMethod(
-          getDescriptor().getMethods().get(51),
+          getDescriptor().getMethods().get(50),
           controller,
           request,
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.getDefaultInstance(),
@@ -62879,7 +63099,7 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest request,
          com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse> done) {
         channel.callMethod(
-          getDescriptor().getMethods().get(52),
+          getDescriptor().getMethods().get(51),
           controller,
           request,
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.getDefaultInstance(),
@@ -62894,7 +63114,7 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest request,
          com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse> done) {
         channel.callMethod(
-          getDescriptor().getMethods().get(53),
+          getDescriptor().getMethods().get(52),
           controller,
           request,
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.getDefaultInstance(),
@@ -62909,7 +63129,7 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest request,
          com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse> done) {
         channel.callMethod(
-          getDescriptor().getMethods().get(54),
+          getDescriptor().getMethods().get(53),
           controller,
           request,
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse.getDefaultInstance(),
@@ -62924,7 +63144,7 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest request,
          com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse> done) {
         channel.callMethod(
-          getDescriptor().getMethods().get(55),
+          getDescriptor().getMethods().get(54),
           controller,
           request,
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse.getDefaultInstance(),
@@ -62939,7 +63159,7 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest request,
          com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse> done) {
         channel.callMethod(
-          getDescriptor().getMethods().get(56),
+          getDescriptor().getMethods().get(55),
           controller,
           request,
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse.getDefaultInstance(),
@@ -63146,11 +63366,6 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotRequest request)
           throws com.google.protobuf.ServiceException;
 
-      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsRestoreSnapshotDoneResponse isRestoreSnapshotDone(
-          com.google.protobuf.RpcController controller,
-          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsRestoreSnapshotDoneRequest request)
-          throws com.google.protobuf.ServiceException;
-
       public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse execProcedure(
           com.google.protobuf.RpcController controller,
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest request)
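On the blocking interface the old isRestoreSnapshotDone poll loop becomes a getProcedureResult poll loop keyed by the returned proc_id. A sketch (assuming master is the BlockingInterface, with retry, backoff, and InterruptedException/ServiceException handling omitted):

    long procId = master.restoreSnapshot(controller, request).getProcId();
    MasterProtos.GetProcedureResultResponse result;
    do {
      Thread.sleep(1000);  // illustrative fixed pause; real callers back off
      result = master.getProcedureResult(controller,
          MasterProtos.GetProcedureResultRequest.newBuilder().setProcId(procId).build());
    } while (result.getState() == MasterProtos.GetProcedureResultResponse.State.RUNNING);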
@@ -63705,24 +63920,12 @@ public final class MasterProtos {
       }
 
 
-      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsRestoreSnapshotDoneResponse isRestoreSnapshotDone(
-          com.google.protobuf.RpcController controller,
-          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsRestoreSnapshotDoneRequest request)
-          throws com.google.protobuf.ServiceException {
-        return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsRestoreSnapshotDoneResponse) channel.callBlockingMethod(
-          getDescriptor().getMethods().get(38),
-          controller,
-          request,
-          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsRestoreSnapshotDoneResponse.getDefaultInstance());
-      }
-
-
       public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse execProcedure(
           com.google.protobuf.RpcController controller,
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest request)
           throws com.google.protobuf.ServiceException {
         return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse) channel.callBlockingMethod(
-          getDescriptor().getMethods().get(39),
+          getDescriptor().getMethods().get(38),
           controller,
           request,
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse.getDefaultInstance());
@@ -63734,7 +63937,7 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest request)
           throws com.google.protobuf.ServiceException {
         return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse) channel.callBlockingMethod(
-          getDescriptor().getMethods().get(40),
+          getDescriptor().getMethods().get(39),
           controller,
           request,
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse.getDefaultInstance());
@@ -63746,7 +63949,7 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest request)
           throws com.google.protobuf.ServiceException {
         return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse) channel.callBlockingMethod(
-          getDescriptor().getMethods().get(41),
+          getDescriptor().getMethods().get(40),
           controller,
           request,
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse.getDefaultInstance());
@@ -63758,7 +63961,7 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyNamespaceRequest request)
           throws com.google.protobuf.ServiceException {
         return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyNamespaceResponse) channel.callBlockingMethod(
-          getDescriptor().getMethods().get(42),
+          getDescriptor().getMethods().get(41),
           controller,
           request,
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyNamespaceResponse.getDefaultInstance());
@@ -63770,7 +63973,7 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateNamespaceRequest request)
           throws com.google.protobuf.ServiceException {
         return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateNamespaceResponse) channel.callBlockingMethod(
-          getDescriptor().getMethods().get(43),
+          getDescriptor().getMethods().get(42),
           controller,
           request,
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateNamespaceResponse.getDefaultInstance());
@@ -63782,7 +63985,7 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteNamespaceRequest request)
           throws com.google.protobuf.ServiceException {
         return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteNamespaceResponse) channel.callBlockingMethod(
-          getDescriptor().getMethods().get(44),
+          getDescriptor().getMethods().get(43),
           controller,
           request,
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteNamespaceResponse.getDefaultInstance());
@@ -63794,7 +63997,7 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNamespaceDescriptorRequest request)
           throws com.google.protobuf.ServiceException {
         return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNamespaceDescriptorResponse) channel.callBlockingMethod(
-          getDescriptor().getMethods().get(45),
+          getDescriptor().getMethods().get(44),
           controller,
           request,
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNamespaceDescriptorResponse.getDefaultInstance());
@@ -63806,7 +64009,7 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespaceDescriptorsRequest request)
           throws com.google.protobuf.ServiceException {
         return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespaceDescriptorsResponse) channel.callBlockingMethod(
-          getDescriptor().getMethods().get(46),
+          getDescriptor().getMethods().get(45),
           controller,
           request,
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespaceDescriptorsResponse.getDefaultInstance());
@@ -63818,7 +64021,7 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceRequest request)
           throws com.google.protobuf.ServiceException {
         return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceResponse) channel.callBlockingMethod(
-          getDescriptor().getMethods().get(47),
+          getDescriptor().getMethods().get(46),
           controller,
           request,
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceResponse.getDefaultInstance());
@@ -63830,7 +64033,7 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceRequest request)
           throws com.google.protobuf.ServiceException {
         return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceResponse) channel.callBlockingMethod(
-          getDescriptor().getMethods().get(48),
+          getDescriptor().getMethods().get(47),
           controller,
           request,
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceResponse.getDefaultInstance());
@@ -63842,7 +64045,7 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest request)
           throws com.google.protobuf.ServiceException {
         return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse) channel.callBlockingMethod(
-          getDescriptor().getMethods().get(49),
+          getDescriptor().getMethods().get(48),
           controller,
           request,
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse.getDefaultInstance());
@@ -63854,7 +64057,7 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest request)
           throws com.google.protobuf.ServiceException {
         return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse) channel.callBlockingMethod(
-          getDescriptor().getMethods().get(50),
+          getDescriptor().getMethods().get(49),
           controller,
           request,
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse.getDefaultInstance());
@@ -63866,7 +64069,7 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest request)
           throws com.google.protobuf.ServiceException {
         return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse) channel.callBlockingMethod(
-          getDescriptor().getMethods().get(51),
+          getDescriptor().getMethods().get(50),
           controller,
           request,
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.getDefaultInstance());
@@ -63878,7 +64081,7 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest request)
           throws com.google.protobuf.ServiceException {
         return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse) channel.callBlockingMethod(
-          getDescriptor().getMethods().get(52),
+          getDescriptor().getMethods().get(51),
           controller,
           request,
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.getDefaultInstance());
@@ -63890,7 +64093,7 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest request)
           throws com.google.protobuf.ServiceException {
         return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse) channel.callBlockingMethod(
-          getDescriptor().getMethods().get(53),
+          getDescriptor().getMethods().get(52),
           controller,
           request,
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.getDefaultInstance());
@@ -63902,7 +64105,7 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest request)
           throws com.google.protobuf.ServiceException {
         return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse) channel.callBlockingMethod(
-          getDescriptor().getMethods().get(54),
+          getDescriptor().getMethods().get(53),
           controller,
           request,
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse.getDefaultInstance());
@@ -63914,7 +64117,7 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest request)
           throws com.google.protobuf.ServiceException {
         return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse) channel.callBlockingMethod(
-          getDescriptor().getMethods().get(55),
+          getDescriptor().getMethods().get(54),
           controller,
           request,
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse.getDefaultInstance());
@@ -63926,7 +64129,7 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest request)
           throws com.google.protobuf.ServiceException {
         return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse) channel.callBlockingMethod(
-          getDescriptor().getMethods().get(56),
+          getDescriptor().getMethods().get(55),
           controller,
           request,
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse.getDefaultInstance());
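The final hunk below rewrites the embedded serialized FileDescriptorProto, the octal-escaped string pool: RestoreSnapshotRequest's descriptor length prefix grows from 'I' (73 bytes) to 's' (115 bytes) to cover the two nonce fields, RestoreSnapshotResponse gains proc_id, and the IsRestoreSnapshotDone request/response descriptors disappear entirely. Rather than decoding the escapes by hand, the same information can be read back at runtime through the descriptor API:

    // Sketch: list the fields now declared on RestoreSnapshotRequest.
    for (com.google.protobuf.Descriptors.FieldDescriptor field :
        MasterProtos.RestoreSnapshotRequest.getDescriptor().getFields()) {
      System.out.println(field.getNumber() + " = " + field.getName());
      // prints: 1 = snapshot, 2 = nonce_group, 3 = nonce
    }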
@@ -64611,211 +64814,210 @@ public final class MasterProtos {
       "ots\030\001 \003(\0132\035.hbase.pb.SnapshotDescription" +
       "\"H\n\025DeleteSnapshotRequest\022/\n\010snapshot\030\001 " +
       "\002(\0132\035.hbase.pb.SnapshotDescription\"\030\n\026De",
-      "leteSnapshotResponse\"I\n\026RestoreSnapshotR" +
+      "leteSnapshotResponse\"s\n\026RestoreSnapshotR" +
       "equest\022/\n\010snapshot\030\001 \002(\0132\035.hbase.pb.Snap" +
-      "shotDescription\"\031\n\027RestoreSnapshotRespon" +
-      "se\"H\n\025IsSnapshotDoneRequest\022/\n\010snapshot\030" +
-      "\001 \001(\0132\035.hbase.pb.SnapshotDescription\"^\n\026" +
-      "IsSnapshotDoneResponse\022\023\n\004done\030\001 \001(\010:\005fa" +
-      "lse\022/\n\010snapshot\030\002 \001(\0132\035.hbase.pb.Snapsho" +
-      "tDescription\"O\n\034IsRestoreSnapshotDoneReq" +
-      "uest\022/\n\010snapshot\030\001 \001(\0132\035.hbase.pb.Snapsh" +
-      "otDescription\"4\n\035IsRestoreSnapshotDoneRe",
-      "sponse\022\023\n\004done\030\001 \001(\010:\005false\"F\n\033GetSchema" +
-      "AlterStatusRequest\022\'\n\ntable_name\030\001 \002(\0132\023" +
-      ".hbase.pb.TableName\"T\n\034GetSchemaAlterSta" +
-      "tusResponse\022\035\n\025yet_to_update_regions\030\001 \001" +
-      "(\r\022\025\n\rtotal_regions\030\002 \001(\r\"\213\001\n\032GetTableDe" +
-      "scriptorsRequest\022(\n\013table_names\030\001 \003(\0132\023." +
-      "hbase.pb.TableName\022\r\n\005regex\030\002 \001(\t\022!\n\022inc" +
-      "lude_sys_tables\030\003 \001(\010:\005false\022\021\n\tnamespac" +
-      "e\030\004 \001(\t\"J\n\033GetTableDescriptorsResponse\022+" +
-      "\n\014table_schema\030\001 \003(\0132\025.hbase.pb.TableSch",
-      "ema\"[\n\024GetTableNamesRequest\022\r\n\005regex\030\001 \001" +
-      "(\t\022!\n\022include_sys_tables\030\002 \001(\010:\005false\022\021\n" +
-      "\tnamespace\030\003 \001(\t\"A\n\025GetTableNamesRespons" +
-      "e\022(\n\013table_names\030\001 \003(\0132\023.hbase.pb.TableN" +
-      "ame\"?\n\024GetTableStateRequest\022\'\n\ntable_nam" +
-      "e\030\001 \002(\0132\023.hbase.pb.TableName\"B\n\025GetTable" +
-      "StateResponse\022)\n\013table_state\030\001 \002(\0132\024.hba" +
-      "se.pb.TableState\"\031\n\027GetClusterStatusRequ" +
-      "est\"K\n\030GetClusterStatusResponse\022/\n\016clust" +
-      "er_status\030\001 \002(\0132\027.hbase.pb.ClusterStatus",
-      "\"\030\n\026IsMasterRunningRequest\"4\n\027IsMasterRu" +
-      "nningResponse\022\031\n\021is_master_running\030\001 \002(\010" +
-      "\"I\n\024ExecProcedureRequest\0221\n\tprocedure\030\001 " +
-      "\002(\0132\036.hbase.pb.ProcedureDescription\"F\n\025E" +
-      "xecProcedureResponse\022\030\n\020expected_timeout" +
-      "\030\001 \001(\003\022\023\n\013return_data\030\002 \001(\014\"K\n\026IsProcedu" +
-      "reDoneRequest\0221\n\tprocedure\030\001 \001(\0132\036.hbase" +
-      ".pb.ProcedureDescription\"`\n\027IsProcedureD" +
-      "oneResponse\022\023\n\004done\030\001 \001(\010:\005false\0220\n\010snap" +
-      "shot\030\002 \001(\0132\036.hbase.pb.ProcedureDescripti",
-      "on\",\n\031GetProcedureResultRequest\022\017\n\007proc_" +
-      "id\030\001 \002(\004\"\371\001\n\032GetProcedureResultResponse\022" +
-      "9\n\005state\030\001 \002(\0162*.hbase.pb.GetProcedureRe" +
-      "sultResponse.State\022\022\n\nstart_time\030\002 \001(\004\022\023" +
-      "\n\013last_update\030\003 \001(\004\022\016\n\006result\030\004 \001(\014\0224\n\te" +
-      "xception\030\005 \001(\0132!.hbase.pb.ForeignExcepti" +
-      "onMessage\"1\n\005State\022\r\n\tNOT_FOUND\020\000\022\013\n\007RUN" +
-      "NING\020\001\022\014\n\010FINISHED\020\002\"M\n\025AbortProcedureRe" +
-      "quest\022\017\n\007proc_id\030\001 \002(\004\022#\n\025mayInterruptIf" +
-      "Running\030\002 \001(\010:\004true\"6\n\026AbortProcedureRes",
-      "ponse\022\034\n\024is_procedure_aborted\030\001 \002(\010\"\027\n\025L" +
-      "istProceduresRequest\"@\n\026ListProceduresRe" +
-      "sponse\022&\n\tprocedure\030\001 \003(\0132\023.hbase.pb.Pro" +
-      "cedure\"\315\001\n\017SetQuotaRequest\022\021\n\tuser_name\030" +
-      "\001 \001(\t\022\022\n\nuser_group\030\002 \001(\t\022\021\n\tnamespace\030\003" +
-      " \001(\t\022\'\n\ntable_name\030\004 \001(\0132\023.hbase.pb.Tabl" +
-      "eName\022\022\n\nremove_all\030\005 \001(\010\022\026\n\016bypass_glob" +
-      "als\030\006 \001(\010\022+\n\010throttle\030\007 \001(\0132\031.hbase.pb.T" +
-      "hrottleRequest\"\022\n\020SetQuotaResponse\"J\n\037Ma" +
-      "jorCompactionTimestampRequest\022\'\n\ntable_n",
-      "ame\030\001 \002(\0132\023.hbase.pb.TableName\"U\n(MajorC" +
-      "ompactionTimestampForRegionRequest\022)\n\006re" +
-      "gion\030\001 \002(\0132\031.hbase.pb.RegionSpecifier\"@\n" +
-      " MajorCompactionTimestampResponse\022\034\n\024com" +
-      "paction_timestamp\030\001 \002(\003\"\035\n\033SecurityCapab" +
-      "ilitiesRequest\"\354\001\n\034SecurityCapabilitiesR" +
-      "esponse\022G\n\014capabilities\030\001 \003(\01621.hbase.pb" +
-      ".SecurityCapabilitiesResponse.Capability" +
-      "\"\202\001\n\nCapability\022\031\n\025SIMPLE_AUTHENTICATION" +
-      "\020\000\022\031\n\025SECURE_AUTHENTICATION\020\001\022\021\n\rAUTHORI",
-      "ZATION\020\002\022\026\n\022CELL_AUTHORIZATION\020\003\022\023\n\017CELL" +
-      "_VISIBILITY\020\004*(\n\020MasterSwitchType\022\t\n\005SPL" +
-      "IT\020\000\022\t\n\005MERGE\020\0012\323(\n\rMasterService\022e\n\024Get" +
-      "SchemaAlterStatus\022%.hbase.pb.GetSchemaAl" +
-      "terStatusRequest\032&.hbase.pb.GetSchemaAlt" +
-      "erStatusResponse\022b\n\023GetTableDescriptors\022" +
-      "$.hbase.pb.GetTableDescriptorsRequest\032%." +
-      "hbase.pb.GetTableDescriptorsResponse\022P\n\r" +
-      "GetTableNames\022\036.hbase.pb.GetTableNamesRe" +
-      "quest\032\037.hbase.pb.GetTableNamesResponse\022Y",
-      "\n\020GetClusterStatus\022!.hbase.pb.GetCluster" +
-      "StatusRequest\032\".hbase.pb.GetClusterStatu" +
-      "sResponse\022V\n\017IsMasterRunning\022 .hbase.pb." +
-      "IsMasterRunningRequest\032!.hbase.pb.IsMast" +
-      "erRunningResponse\022D\n\tAddColumn\022\032.hbase.p" +
-      "b.AddColumnRequest\032\033.hbase.pb.AddColumnR" +
-      "esponse\022M\n\014DeleteColumn\022\035.hbase.pb.Delet" +
-      "eColumnRequest\032\036.hbase.pb.DeleteColumnRe" +
-      "sponse\022M\n\014ModifyColumn\022\035.hbase.pb.Modify" +
-      "ColumnRequest\032\036.hbase.pb.ModifyColumnRes",
-      "ponse\022G\n\nMoveRegion\022\033.hbase.pb.MoveRegio" +
-      "nRequest\032\034.hbase.pb.MoveRegionResponse\022k" +
-      "\n\026DispatchMergingRegions\022\'.hbase.pb.Disp" +
-      "atchMergingRegionsRequest\032(.hbase.pb.Dis" +
-      "patchMergingRegionsResponse\022M\n\014AssignReg" +
-      "ion\022\035.hbase.pb.AssignRegionRequest\032\036.hba" +
-      "se.pb.AssignRegionResponse\022S\n\016UnassignRe" +
-      "gion\022\037.hbase.pb.UnassignRegionRequest\032 ." +
-      "hbase.pb.UnassignRegionResponse\022P\n\rOffli" +
-      "neRegion\022\036.hbase.pb.OfflineRegionRequest",
-      "\032\037.hbase.pb.OfflineRegionResponse\022J\n\013Del" +
-      "eteTable\022\034.hbase.pb.DeleteTableRequest\032\035" +
-      ".hbase.pb.DeleteTableResponse\022P\n\rtruncat" +
-      "eTable\022\036.hbase.pb.TruncateTableRequest\032\037" +
-      ".hbase.pb.TruncateTableResponse\022J\n\013Enabl" +
-      "eTable\022\034.hbase.pb.EnableTableRequest\032\035.h" +
-      "base.pb.EnableTableResponse\022M\n\014DisableTa" +
-      "ble\022\035.hbase.pb.DisableTableRequest\032\036.hba" +
-      "se.pb.DisableTableResponse\022J\n\013ModifyTabl" +
-      "e\022\034.hbase.pb.ModifyTableRequest\032\035.hbase.",
-      "pb.ModifyTableResponse\022J\n\013CreateTable\022\034." +
-      "hbase.pb.CreateTableRequest\032\035.hbase.pb.C" +
-      "reateTableResponse\022A\n\010Shutdown\022\031.hbase.p" +
-      "b.ShutdownRequest\032\032.hbase.pb.ShutdownRes" +
-      "ponse\022G\n\nStopMaster\022\033.hbase.pb.StopMaste" +
-      "rRequest\032\034.hbase.pb.StopMasterResponse\022>" +
-      "\n\007Balance\022\030.hbase.pb.BalanceRequest\032\031.hb" +
-      "ase.pb.BalanceResponse\022_\n\022SetBalancerRun" +
-      "ning\022#.hbase.pb.SetBalancerRunningReques" +
-      "t\032$.hbase.pb.SetBalancerRunningResponse\022",
-      "\\\n\021IsBalancerEnabled\022\".hbase.pb.IsBalanc" +
-      "erEnabledRequest\032#.hbase.pb.IsBalancerEn" +
-      "abledResponse\022k\n\026SetSplitOrMergeEnabled\022" +
-      "\'.hbase.pb.SetSplitOrMergeEnabledRequest" +
-      "\032(.hbase.pb.SetSplitOrMergeEnabledRespon" +
-      "se\022h\n\025IsSplitOrMergeEnabled\022&.hbase.pb.I" +
-      "sSplitOrMergeEnabledRequest\032\'.hbase.pb.I" +
-      "sSplitOrMergeEnabledResponse\022D\n\tNormaliz" +
-      "e\022\032.hbase.pb.NormalizeRequest\032\033.hbase.pb" +
-      ".NormalizeResponse\022e\n\024SetNormalizerRunni",
-      "ng\022%.hbase.pb.SetNormalizerRunningReques" +
-      "t\032&.hbase.pb.SetNormalizerRunningRespons" +
-      "e\022b\n\023IsNormalizerEnabled\022$.hbase.pb.IsNo" +
-      "rmalizerEnabledRequest\032%.hbase.pb.IsNorm" +
-      "alizerEnabledResponse\022S\n\016RunCatalogScan\022" +
-      "\037.hbase.pb.RunCatalogScanRequest\032 .hbase" +
-      ".pb.RunCatalogScanResponse\022e\n\024EnableCata" +
-      "logJanitor\022%.hbase.pb.EnableCatalogJanit" +
-      "orRequest\032&.hbase.pb.EnableCatalogJanito" +
-      "rResponse\022n\n\027IsCatalogJanitorEnabled\022(.h",
-      "base.pb.IsCatalogJanitorEnabledRequest\032)" +
-      ".hbase.pb.IsCatalogJanitorEnabledRespons" +
-      "e\022^\n\021ExecMasterService\022#.hbase.pb.Coproc" +
-      "essorServiceRequest\032$.hbase.pb.Coprocess" +
-      "orServiceResponse\022A\n\010Snapshot\022\031.hbase.pb" +
-      ".SnapshotRequest\032\032.hbase.pb.SnapshotResp" +
-      "onse\022h\n\025GetCompletedSnapshots\022&.hbase.pb" +
-      ".GetCompletedSnapshotsRequest\032\'.hbase.pb" +
-      ".GetCompletedSnapshotsResponse\022S\n\016Delete" +
-      "Snapshot\022\037.hbase.pb.DeleteSnapshotReques",
-      "t\032 .hbase.pb.DeleteSnapshotResponse\022S\n\016I" +
-      "sSnapshotDone\022\037.hbase.pb.IsSnapshotDoneR" +
-      "equest\032 .hbase.pb.IsSnapshotDoneResponse" +
-      "\022V\n\017RestoreSnapshot\022 .hbase.pb.RestoreSn" +
-      "apshotRequest\032!.hbase.pb.RestoreSnapshot" +
-      "Response\022h\n\025IsRestoreSnapshotDone\022&.hbas" +
-      "e.pb.IsRestoreSnapshotDoneRequest\032\'.hbas" +
-      "e.pb.IsRestoreSnapshotDoneResponse\022P\n\rEx" +
-      "ecProcedure\022\036.hbase.pb.ExecProcedureRequ" +
-      "est\032\037.hbase.pb.ExecProcedureResponse\022W\n\024",
-      "ExecProcedureWithRet\022\036.hbase.pb.ExecProc" +
-      "edureRequest\032\037.hbase.pb.ExecProcedureRes" +
-      "ponse\022V\n\017IsProcedureDone\022 .hbase.pb.IsPr" +
-      "ocedureDoneRequest\032!.hbase.pb.IsProcedur" +
-      "eDoneResponse\022V\n\017ModifyNamespace\022 .hbase" +
-      ".pb.ModifyNamespaceRequest\032!.hbase.pb.Mo" +
-      "difyNamespaceResponse\022V\n\017CreateNamespace" +
-      "\022 .hbase.pb.CreateNamespaceRequest\032!.hba" +
-      "se.pb.CreateNamespaceResponse\022V\n\017DeleteN" +
-      "amespace\022 .hbase.pb.DeleteNamespaceReque",
-      "st\032!.hbase.pb.DeleteNamespaceResponse\022k\n" +
-      "\026GetNamespaceDescriptor\022\'.hbase.pb.GetNa" +
-      "mespaceDescriptorRequest\032(.hbase.pb.GetN" +
-      "amespaceDescriptorResponse\022q\n\030ListNamesp" +
-      "aceDescriptors\022).hbase.pb.ListNamespaceD" +
-      "escriptorsRequest\032*.hbase.pb.ListNamespa" +
-      "ceDescriptorsResponse\022\206\001\n\037ListTableDescr" +
-      "iptorsByNamespace\0220.hbase.pb.ListTableDe" +
-      "scriptorsByNamespaceRequest\0321.hbase.pb.L" +
-      "istTableDescriptorsByNamespaceResponse\022t",
-      "\n\031ListTableNamesByNamespace\022*.hbase.pb.L" +
-      "istTableNamesByNamespaceRequest\032+.hbase." +
-      "pb.ListTableNamesByNamespaceResponse\022P\n\r" +
-      "GetTableState\022\036.hbase.pb.GetTableStateRe" +
-      "quest\032\037.hbase.pb.GetTableStateResponse\022A" +
-      "\n\010SetQuota\022\031.hbase.pb.SetQuotaRequest\032\032." +
-      "hbase.pb.SetQuotaResponse\022x\n\037getLastMajo" +
-      "rCompactionTimestamp\022).hbase.pb.MajorCom" +
-      "pactionTimestampRequest\032*.hbase.pb.Major" +
-      "CompactionTimestampResponse\022\212\001\n(getLastM",
-      "ajorCompactionTimestampForRegion\0222.hbase" +
-      ".pb.MajorCompactionTimestampForRegionReq" +
-      "uest\032*.hbase.pb.MajorCompactionTimestamp" +
-      "Response\022_\n\022getProcedureResult\022#.hbase.p" +
-      "b.GetProcedureResultRequest\032$.hbase.pb.G" +
-      "etProcedureResultResponse\022h\n\027getSecurity" +
-      "Capabilities\022%.hbase.pb.SecurityCapabili" +
-      "tiesRequest\032&.hbase.pb.SecurityCapabilit" +
-      "iesResponse\022S\n\016AbortProcedure\022\037.hbase.pb" +
-      ".AbortProcedureRequest\032 .hbase.pb.AbortP",
-      "rocedureResponse\022S\n\016ListProcedures\022\037.hba" +
-      "se.pb.ListProceduresRequest\032 .hbase.pb.L" +
-      "istProceduresResponseBB\n*org.apache.hado" +
-      "op.hbase.protobuf.generatedB\014MasterProto" +
-      "sH\001\210\001\001\240\001\001"
+      "shotDescription\022\026\n\013nonce_group\030\002 \001(\004:\0010\022" +
+      "\020\n\005nonce\030\003 \001(\004:\0010\"*\n\027RestoreSnapshotResp" +
+      "onse\022\017\n\007proc_id\030\001 \002(\004\"H\n\025IsSnapshotDoneR" +
+      "equest\022/\n\010snapshot\030\001 \001(\0132\035.hbase.pb.Snap" +
+      "shotDescription\"^\n\026IsSnapshotDoneRespons" +
+      "e\022\023\n\004done\030\001 \001(\010:\005false\022/\n\010snapshot\030\002 \001(\013" +
+      "2\035.hbase.pb.SnapshotDescription\"O\n\034IsRes" +
+      "toreSnapshotDoneRequest\022/\n\010snapshot\030\001 \001(",
+      "\0132\035.hbase.pb.SnapshotDescription\"4\n\035IsRe" +
+      "storeSnapshotDoneResponse\022\023\n\004done\030\001 \001(\010:" +
+      "\005false\"F\n\033GetSchemaAlterStatusRequest\022\'\n" +
+      "\ntable_name\030\001 \002(\0132\023.hbase.pb.TableName\"T" +
+      "\n\034GetSchemaAlterStatusResponse\022\035\n\025yet_to" +
+      "_update_regions\030\001 \001(\r\022\025\n\rtotal_regions\030\002" +
+      " \001(\r\"\213\001\n\032GetTableDescriptorsRequest\022(\n\013t" +
+      "able_names\030\001 \003(\0132\023.hbase.pb.TableName\022\r\n" +
+      "\005regex\030\002 \001(\t\022!\n\022include_sys_tables\030\003 \001(\010" +
+      ":\005false\022\021\n\tnamespace\030\004 \001(\t\"J\n\033GetTableDe",
+      "scriptorsResponse\022+\n\014table_schema\030\001 \003(\0132" +
+      "\025.hbase.pb.TableSchema\"[\n\024GetTableNamesR" +
+      "equest\022\r\n\005regex\030\001 \001(\t\022!\n\022include_sys_tab" +
+      "les\030\002 \001(\010:\005false\022\021\n\tnamespace\030\003 \001(\t\"A\n\025G" +
+      "etTableNamesResponse\022(\n\013table_names\030\001 \003(" +
+      "\0132\023.hbase.pb.TableName\"?\n\024GetTableStateR" +
+      "equest\022\'\n\ntable_name\030\001 \002(\0132\023.hbase.pb.Ta" +
+      "bleName\"B\n\025GetTableStateResponse\022)\n\013tabl" +
+      "e_state\030\001 \002(\0132\024.hbase.pb.TableState\"\031\n\027G" +
+      "etClusterStatusRequest\"K\n\030GetClusterStat",
+      "usResponse\022/\n\016cluster_status\030\001 \002(\0132\027.hba" +
+      "se.pb.ClusterStatus\"\030\n\026IsMasterRunningRe" +
+      "quest\"4\n\027IsMasterRunningResponse\022\031\n\021is_m" +
+      "aster_running\030\001 \002(\010\"I\n\024ExecProcedureRequ" +
+      "est\0221\n\tprocedure\030\001 \002(\0132\036.hbase.pb.Proced" +
+      "ureDescription\"F\n\025ExecProcedureResponse\022" +
+      "\030\n\020expected_timeout\030\001 \001(\003\022\023\n\013return_data" +
+      "\030\002 \001(\014\"K\n\026IsProcedureDoneRequest\0221\n\tproc" +
+      "edure\030\001 \001(\0132\036.hbase.pb.ProcedureDescript" +
+      "ion\"`\n\027IsProcedureDoneResponse\022\023\n\004done\030\001",
+      " \001(\010:\005false\0220\n\010snapshot\030\002 \001(\0132\036.hbase.pb" +
+      ".ProcedureDescription\",\n\031GetProcedureRes" +
+      "ultRequest\022\017\n\007proc_id\030\001 \002(\004\"\371\001\n\032GetProce" +
+      "dureResultResponse\0229\n\005state\030\001 \002(\0162*.hbas" +
+      "e.pb.GetProcedureResultResponse.State\022\022\n" +
+      "\nstart_time\030\002 \001(\004\022\023\n\013last_update\030\003 \001(\004\022\016" +
+      "\n\006result\030\004 \001(\014\0224\n\texception\030\005 \001(\0132!.hbas" +
+      "e.pb.ForeignExceptionMessage\"1\n\005State\022\r\n" +
+      "\tNOT_FOUND\020\000\022\013\n\007RUNNING\020\001\022\014\n\010FINISHED\020\002\"" +
+      "M\n\025AbortProcedureRequest\022\017\n\007proc_id\030\001 \002(",
+      "\004\022#\n\025mayInterruptIfRunning\030\002 \001(\010:\004true\"6" +
+      "\n\026AbortProcedureResponse\022\034\n\024is_procedure" +
+      "_aborted\030\001 \002(\010\"\027\n\025ListProceduresRequest\"" +
+      "@\n\026ListProceduresResponse\022&\n\tprocedure\030\001" +
+      " \003(\0132\023.hbase.pb.Procedure\"\315\001\n\017SetQuotaRe" +
+      "quest\022\021\n\tuser_name\030\001 \001(\t\022\022\n\nuser_group\030\002" +
+      " \001(\t\022\021\n\tnamespace\030\003 \001(\t\022\'\n\ntable_name\030\004 " +
+      "\001(\0132\023.hbase.pb.TableName\022\022\n\nremove_all\030\005" +
+      " \001(\010\022\026\n\016bypass_globals\030\006 \001(\010\022+\n\010throttle" +
+      "\030\007 \001(\0132\031.hbase.pb.ThrottleRequest\"\022\n\020Set",
+      "QuotaResponse\"J\n\037MajorCompactionTimestam" +
+      "pRequest\022\'\n\ntable_name\030\001 \002(\0132\023.hbase.pb." +
+      "TableName\"U\n(MajorCompactionTimestampFor" +
+      "RegionRequest\022)\n\006region\030\001 \002(\0132\031.hbase.pb" +
+      ".RegionSpecifier\"@\n MajorCompactionTimes" +
+      "tampResponse\022\034\n\024compaction_timestamp\030\001 \002" +
+      "(\003\"\035\n\033SecurityCapabilitiesRequest\"\354\001\n\034Se" +
+      "curityCapabilitiesResponse\022G\n\014capabiliti" +
+      "es\030\001 \003(\01621.hbase.pb.SecurityCapabilities" +
+      "Response.Capability\"\202\001\n\nCapability\022\031\n\025SI",
+      "MPLE_AUTHENTICATION\020\000\022\031\n\025SECURE_AUTHENTI" +
+      "CATION\020\001\022\021\n\rAUTHORIZATION\020\002\022\026\n\022CELL_AUTH" +
+      "ORIZATION\020\003\022\023\n\017CELL_VISIBILITY\020\004*(\n\020Mast" +
+      "erSwitchType\022\t\n\005SPLIT\020\000\022\t\n\005MERGE\020\0012\351\'\n\rM" +
+      "asterService\022e\n\024GetSchemaAlterStatus\022%.h" +
+      "base.pb.GetSchemaAlterStatusRequest\032&.hb" +
+      "ase.pb.GetSchemaAlterStatusResponse\022b\n\023G" +
+      "etTableDescriptors\022$.hbase.pb.GetTableDe" +
+      "scriptorsRequest\032%.hbase.pb.GetTableDesc" +
+      "riptorsResponse\022P\n\rGetTableNames\022\036.hbase",
+      ".pb.GetTableNamesRequest\032\037.hbase.pb.GetT" +
+      "ableNamesResponse\022Y\n\020GetClusterStatus\022!." +
+      "hbase.pb.GetClusterStatusRequest\032\".hbase" +
+      ".pb.GetClusterStatusResponse\022V\n\017IsMaster" +
+      "Running\022 .hbase.pb.IsMasterRunningReques" +
+      "t\032!.hbase.pb.IsMasterRunningResponse\022D\n\t" +
+      "AddColumn\022\032.hbase.pb.AddColumnRequest\032\033." +
+      "hbase.pb.AddColumnResponse\022M\n\014DeleteColu" +
+      "mn\022\035.hbase.pb.DeleteColumnRequest\032\036.hbas" +
+      "e.pb.DeleteColumnResponse\022M\n\014ModifyColum",
+      "n\022\035.hbase.pb.ModifyColumnRequest\032\036.hbase" +
+      ".pb.ModifyColumnResponse\022G\n\nMoveRegion\022\033" +
+      ".hbase.pb.MoveRegionRequest\032\034.hbase.pb.M" +
+      "oveRegionResponse\022k\n\026DispatchMergingRegi" +
+      "ons\022\'.hbase.pb.DispatchMergingRegionsReq" +
+      "uest\032(.hbase.pb.DispatchMergingRegionsRe" +
+      "sponse\022M\n\014AssignRegion\022\035.hbase.pb.Assign" +
+      "RegionRequest\032\036.hbase.pb.AssignRegionRes" +
+      "ponse\022S\n\016UnassignRegion\022\037.hbase.pb.Unass" +
+      "ignRegionRequest\032 .hbase.pb.UnassignRegi",
+      "onResponse\022P\n\rOfflineRegion\022\036.hbase.pb.O" +
+      "fflineRegionRequest\032\037.hbase.pb.OfflineRe" +
+      "gionResponse\022J\n\013DeleteTable\022\034.hbase.pb.D" +
+      "eleteTableRequest\032\035.hbase.pb.DeleteTable" +
+      "Response\022P\n\rtruncateTable\022\036.hbase.pb.Tru" +
+      "ncateTableRequest\032\037.hbase.pb.TruncateTab" +
+      "leResponse\022J\n\013EnableTable\022\034.hbase.pb.Ena" +
+      "bleTableRequest\032\035.hbase.pb.EnableTableRe" +
+      "sponse\022M\n\014DisableTable\022\035.hbase.pb.Disabl" +
+      "eTableRequest\032\036.hbase.pb.DisableTableRes",
+      "ponse\022J\n\013ModifyTable\022\034.hbase.pb.ModifyTa" +
+      "bleRequest\032\035.hbase.pb.ModifyTableRespons" +
+      "e\022J\n\013CreateTable\022\034.hbase.pb.CreateTableR" +
+      "equest\032\035.hbase.pb.CreateTableResponse\022A\n" +
+      "\010Shutdown\022\031.hbase.pb.ShutdownRequest\032\032.h" +
+      "base.pb.ShutdownResponse\022G\n\nStopMaster\022\033" +
+      ".hbase.pb.StopMasterRequest\032\034.hbase.pb.S" +
+      "topMasterResponse\022>\n\007Balance\022\030.hbase.pb." +
+      "BalanceRequest\032\031.hbase.pb.BalanceRespons" +
+      "e\022_\n\022SetBalancerRunning\022#.hbase.pb.SetBa",
+      "lancerRunningRequest\032$.hbase.pb.SetBalan" +
+      "cerRunningResponse\022\\\n\021IsBalancerEnabled\022" +
+      "\".hbase.pb.IsBalancerEnabledRequest\032#.hb" +
+      "ase.pb.IsBalancerEnabledResponse\022k\n\026SetS" +
+      "plitOrMergeEnabled\022\'.hbase.pb.SetSplitOr" +
+      "MergeEnabledRequest\032(.hbase.pb.SetSplitO" +
+      "rMergeEnabledResponse\022h\n\025IsSplitOrMergeE" +
+      "nabled\022&.hbase.pb.IsSplitOrMergeEnabledR" +
+      "equest\032\'.hbase.pb.IsSplitOrMergeEnabledR" +
+      "esponse\022D\n\tNormalize\022\032.hbase.pb.Normaliz",
+      "eRequest\032\033.hbase.pb.NormalizeResponse\022e\n" +
+      "\024SetNormalizerRunning\022%.hbase.pb.SetNorm" +
+      "alizerRunningRequest\032&.hbase.pb.SetNorma" +
+      "lizerRunningResponse\022b\n\023IsNormalizerEnab" +
+      "led\022$.hbase.pb.IsNormalizerEnabledReques" +
+      "t\032%.hbase.pb.IsNormalizerEnabledResponse" +
+      "\022S\n\016RunCatalogScan\022\037.hbase.pb.RunCatalog" +
+      "ScanRequest\032 .hbase.pb.RunCatalogScanRes" +
+      "ponse\022e\n\024EnableCatalogJanitor\022%.hbase.pb" +
+      ".EnableCatalogJanitorRequest\032&.hbase.pb.",
+      "EnableCatalogJanitorResponse\022n\n\027IsCatalo" +
+      "gJanitorEnabled\022(.hbase.pb.IsCatalogJani" +
+      "torEnabledRequest\032).hbase.pb.IsCatalogJa" +
+      "nitorEnabledResponse\022^\n\021ExecMasterServic" +
+      "e\022#.hbase.pb.CoprocessorServiceRequest\032$" +
+      ".hbase.pb.CoprocessorServiceResponse\022A\n\010" +
+      "Snapshot\022\031.hbase.pb.SnapshotRequest\032\032.hb" +
+      "ase.pb.SnapshotResponse\022h\n\025GetCompletedS" +
+      "napshots\022&.hbase.pb.GetCompletedSnapshot" +
+      "sRequest\032\'.hbase.pb.GetCompletedSnapshot",
+      "sResponse\022S\n\016DeleteSnapshot\022\037.hbase.pb.D" +
+      "eleteSnapshotRequest\032 .hbase.pb.DeleteSn" +
+      "apshotResponse\022S\n\016IsSnapshotDone\022\037.hbase" +
+      ".pb.IsSnapshotDoneRequest\032 .hbase.pb.IsS" +
+      "napshotDoneResponse\022V\n\017RestoreSnapshot\022 " +
+      ".hbase.pb.RestoreSnapshotRequest\032!.hbase" +
+      ".pb.RestoreSnapshotResponse\022P\n\rExecProce" +
+      "dure\022\036.hbase.pb.ExecProcedureRequest\032\037.h" +
+      "base.pb.ExecProcedureResponse\022W\n\024ExecPro" +
+      "cedureWithRet\022\036.hbase.pb.ExecProcedureRe",
+      "quest\032\037.hbase.pb.ExecProcedureResponse\022V" +
+      "\n\017IsProcedureDone\022 .hbase.pb.IsProcedure" +
+      "DoneRequest\032!.hbase.pb.IsProcedureDoneRe" +
+      "sponse\022V\n\017ModifyNamespace\022 .hbase.pb.Mod" +
+      "ifyNamespaceRequest\032!.hbase.pb.ModifyNam" +
+      "espaceResponse\022V\n\017CreateNamespace\022 .hbas" +
+      "e.pb.CreateNamespaceRequest\032!.hbase.pb.C" +
+      "reateNamespaceResponse\022V\n\017DeleteNamespac" +
+      "e\022 .hbase.pb.DeleteNamespaceRequest\032!.hb" +
+      "ase.pb.DeleteNamespaceResponse\022k\n\026GetNam",
+      "espaceDescriptor\022\'.hbase.pb.GetNamespace" +
+      "DescriptorRequest\032(.hbase.pb.GetNamespac" +
+      "eDescriptorResponse\022q\n\030ListNamespaceDesc" +
+      "riptors\022).hbase.pb.ListNamespaceDescript" +
+      "orsRequest\032*.hbase.pb.ListNamespaceDescr" +
+      "iptorsResponse\022\206\001\n\037ListTableDescriptorsB" +
+      "yNamespace\0220.hbase.pb.ListTableDescripto" +
+      "rsByNamespaceRequest\0321.hbase.pb.ListTabl" +
+      "eDescriptorsByNamespaceResponse\022t\n\031ListT" +
+      "ableNamesByNamespace\022*.hbase.pb.ListTabl",
+      "eNamesByNamespaceRequest\032+.hbase.pb.List" +
+      "TableNamesByNamespaceResponse\022P\n\rGetTabl" +
+      "eState\022\036.hbase.pb.GetTableStateRequest\032\037" +
+      ".hbase.pb.GetTableStateResponse\022A\n\010SetQu" +
+      "ota\022\031.hbase.pb.SetQuotaRequest\032\032.hbase.p" +
+      "b.SetQuotaResponse\022x\n\037getLastMajorCompac" +
+      "tionTimestamp\022).hbase.pb.MajorCompaction" +
+      "TimestampRequest\032*.hbase.pb.MajorCompact" +
+      "ionTimestampResponse\022\212\001\n(getLastMajorCom" +
+      "pactionTimestampForRegion\0222.hbase.pb.Maj",
+      "orCompactionTimestampForRegionRequest\032*." +
+      "hbase.pb.MajorCompactionTimestampRespons" +
+      "e\022_\n\022getProcedureResult\022#.hbase.pb.GetPr" +
+      "ocedureResultRequest\032$.hbase.pb.GetProce" +
+      "dureResultResponse\022h\n\027getSecurityCapabil" +
+      "ities\022%.hbase.pb.SecurityCapabilitiesReq" +
+      "uest\032&.hbase.pb.SecurityCapabilitiesResp" +
+      "onse\022S\n\016AbortProcedure\022\037.hbase.pb.AbortP" +
+      "rocedureRequest\032 .hbase.pb.AbortProcedur" +
+      "eResponse\022S\n\016ListProcedures\022\037.hbase.pb.L",
+      "istProceduresRequest\032 .hbase.pb.ListProc" +
+      "eduresResponseBB\n*org.apache.hadoop.hbas" +
+      "e.protobuf.generatedB\014MasterProtosH\001\210\001\001\240" +
+      "\001\001"
     };
     com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
       new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
@@ -65271,13 +65473,13 @@ public final class MasterProtos {
           internal_static_hbase_pb_RestoreSnapshotRequest_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_RestoreSnapshotRequest_descriptor,
-              new java.lang.String[] { "Snapshot", });
+              new java.lang.String[] { "Snapshot", "NonceGroup", "Nonce", });
           internal_static_hbase_pb_RestoreSnapshotResponse_descriptor =
             getDescriptor().getMessageTypes().get(75);
           internal_static_hbase_pb_RestoreSnapshotResponse_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_RestoreSnapshotResponse_descriptor,
-              new java.lang.String[] { });
+              new java.lang.String[] { "ProcId", });
           internal_static_hbase_pb_IsSnapshotDoneRequest_descriptor =
             getDescriptor().getMessageTypes().get(76);
           internal_static_hbase_pb_IsSnapshotDoneRequest_fieldAccessorTable = new
diff --git hbase-protocol/src/main/protobuf/Master.proto hbase-protocol/src/main/protobuf/Master.proto
index 79bb862..1f7a3b7 100644
--- hbase-protocol/src/main/protobuf/Master.proto
+++ hbase-protocol/src/main/protobuf/Master.proto
@@ -370,9 +370,12 @@ message DeleteSnapshotResponse {
 
 message RestoreSnapshotRequest {
   required SnapshotDescription snapshot = 1;
+  optional uint64 nonce_group = 2 [default = 0];
+  optional uint64 nonce = 3 [default = 0];
 }
 
 message RestoreSnapshotResponse {
+  required uint64 proc_id = 1;
 }
 
 /* if you don't send the snapshot, then you will get it back
@@ -735,11 +738,6 @@ service MasterService {
   rpc RestoreSnapshot(RestoreSnapshotRequest) returns(RestoreSnapshotResponse);
 
   /**
-   * Determine if the snapshot restore is done yet.
-   */
-  rpc IsRestoreSnapshotDone(IsRestoreSnapshotDoneRequest) returns(IsRestoreSnapshotDoneResponse);
-
-  /**
    * Execute a distributed procedure.
    */
   rpc ExecProcedure(ExecProcedureRequest) returns(ExecProcedureResponse);
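
Note: with IsRestoreSnapshotDone removed, the nonce pair makes a retried restore
request idempotent, and the returned proc_id becomes the client's handle for
completion polling. A minimal sketch of driving the new message shapes; the
snapshot, controller, and blocking stub handles are assumptions, not part of
this patch:

    MasterProtos.RestoreSnapshotRequest request =
        MasterProtos.RestoreSnapshotRequest.newBuilder()
          .setSnapshot(snapshot)       // required SnapshotDescription
          .setNonceGroup(nonceGroup)   // optional, defaults to 0
          .setNonce(nonce)             // optional, defaults to 0
          .build();
    // throws com.google.protobuf.ServiceException on RPC failure
    MasterProtos.RestoreSnapshotResponse response =
        stub.restoreSnapshot(controller, request);
    long procId = response.getProcId(); // poll via getProcedureResult
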
diff --git hbase-protocol/src/main/protobuf/MasterProcedure.proto hbase-protocol/src/main/protobuf/MasterProcedure.proto
index 2d2aff4..87aae6a 100644
--- hbase-protocol/src/main/protobuf/MasterProcedure.proto
+++ hbase-protocol/src/main/protobuf/MasterProcedure.proto
@@ -222,6 +222,46 @@ message DisableTableStateData {
   required bool skip_table_state_check = 3;
 }
 
+message RestoreParentToChildRegionsPair {
+  required string parent_region_name = 1;
+  required string child1_region_name = 2;
+  required string child2_region_name = 3;
+}
+
+enum CloneSnapshotState {
+  CLONE_SNAPSHOT_PRE_OPERATION = 1;
+  CLONE_SNAPSHOT_WRITE_FS_LAYOUT = 2;
+  CLONE_SNAPSHOT_ADD_TO_META = 3;
+  CLONE_SNAPSHOT_ASSIGN_REGIONS = 4;
+  CLONE_SNAPSHOT_UPDATE_DESC_CACHE = 5;
+  CLONE_SNAPSHOT_POST_OPERATION = 6;
+}
+
+message CloneSnapshotStateData {
+  required UserInformation user_info = 1;
+  required SnapshotDescription snapshot = 2;
+  required TableSchema table_schema = 3;
+  repeated RegionInfo region_info = 4;
+  repeated RestoreParentToChildRegionsPair parent_to_child_regions_pair_list = 5;
+}
+
+enum RestoreSnapshotState {
+  RESTORE_SNAPSHOT_PRE_OPERATION = 1;
+  RESTORE_SNAPSHOT_UPDATE_TABLE_DESCRIPTOR = 2;
+  RESTORE_SNAPSHOT_WRITE_FS_LAYOUT = 3;
+  RESTORE_SNAPSHOT_UPDATE_META = 4;
+}
+
+message RestoreSnapshotStateData {
+  required UserInformation user_info = 1;
+  required SnapshotDescription snapshot = 2;
+  required TableSchema modified_table_schema = 3;
+  repeated RegionInfo region_info_for_restore = 4;
+  repeated RegionInfo region_info_for_remove = 5;
+  repeated RegionInfo region_info_for_add = 6;
+  repeated RestoreParentToChildRegionsPair parent_to_child_regions_pair_list = 7;
+}
+
 message ServerCrashStateData {
   required ServerName server_name = 1;
   optional bool distributed_log_replay = 2;
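
Note: these state-data messages are what let a clone/restore survive a master
failover; each procedure step persists its progress through them. An
illustrative round-trip, mirroring what serializeStateData()/deserializeStateData()
in the new procedures do; userInfo, snapshotDesc, tableSchema and the out/in
streams are placeholders:

    MasterProcedureProtos.CloneSnapshotStateData data =
        MasterProcedureProtos.CloneSnapshotStateData.newBuilder()
          .setUserInfo(userInfo)
          .setSnapshot(snapshotDesc)
          .setTableSchema(tableSchema)
          .build();
    data.writeDelimitedTo(out);    // persisted by the procedure store
    MasterProcedureProtos.CloneSnapshotStateData restored =
        MasterProcedureProtos.CloneSnapshotStateData.parseDelimitedFrom(in);
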
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
index 6a60c2c..b668cfc 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
@@ -916,33 +916,6 @@ public class MasterRpcServices extends RSRpcServices
   }
 
   /**
-   * Returns the status of the requested snapshot restore/clone operation.
-   * This method is not exposed to the user, it is just used internally by HBaseAdmin
-   * to verify if the restore is completed.
-   *
-   * No exceptions are thrown if the restore is not running, the result will be "done".
-   *
-   * @return done true if the restore/clone operation is completed.
-   * @throws ServiceException if the operation failed.
-   */
-  @Override
-  public IsRestoreSnapshotDoneResponse isRestoreSnapshotDone(RpcController controller,
-      IsRestoreSnapshotDoneRequest request) throws ServiceException {
-    try {
-      master.checkInitialized();
-      SnapshotDescription snapshot = request.getSnapshot();
-      IsRestoreSnapshotDoneResponse.Builder builder = IsRestoreSnapshotDoneResponse.newBuilder();
-      boolean done = master.snapshotManager.isRestoreDone(snapshot);
-      builder.setDone(done);
-      return builder.build();
-    } catch (ForeignException e) {
-      throw new ServiceException(e.getCause());
-    } catch (IOException e) {
-      throw new ServiceException(e);
-    }
-  }
-
-  /**
    * Checks if the specified snapshot is done.
    * @return true if the snapshot is in file system ready to use,
    *   false if the snapshot is in the process of completing
@@ -1215,8 +1188,9 @@ public class MasterRpcServices extends RSRpcServices
       TableName dstTable = TableName.valueOf(request.getSnapshot().getTable());
       master.getNamespace(dstTable.getNamespaceAsString());
       SnapshotDescription reqSnapshot = request.getSnapshot();
-      master.snapshotManager.restoreSnapshot(reqSnapshot);
-      return RestoreSnapshotResponse.newBuilder().build();
+      long procId = master.snapshotManager.restoreOrCloneSnapshot(
+        reqSnapshot, request.getNonceGroup(), request.getNonce());
+      return RestoreSnapshotResponse.newBuilder().setProcId(procId).build();
     } catch (ForeignException e) {
       throw new ServiceException(e.getCause());
     } catch (IOException e) {
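
Note: the restore RPC now just submits a procedure and returns its id; a client
checks completion with the existing GetProcedureResult call instead of the
removed IsRestoreSnapshotDone round-trip. A hedged sketch, where stub is an
assumed MasterService blocking stub:

    MasterProtos.GetProcedureResultRequest req =
        MasterProtos.GetProcedureResultRequest.newBuilder()
          .setProcId(procId)
          .build();
    // throws com.google.protobuf.ServiceException on RPC failure
    MasterProtos.GetProcedureResultResponse res = stub.getProcedureResult(null, req);
    if (res.getState() == MasterProtos.GetProcedureResultResponse.State.FINISHED
        && res.hasException()) {
      // unwrap the ForeignExceptionMessage and surface it to the caller
    }
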
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CloneSnapshotProcedure.java hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CloneSnapshotProcedure.java
new file mode 100644
index 0000000..9477177
--- /dev/null
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CloneSnapshotProcedure.java
@@ -0,0 +1,522 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.master.procedure;
+
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.io.IOException;
+import java.security.PrivilegedExceptionAction;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.MetaTableAccessor;
+import org.apache.hadoop.hbase.TableDescriptor;
+import org.apache.hadoop.hbase.TableExistsException;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.errorhandling.ForeignException;
+import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher;
+import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
+import org.apache.hadoop.hbase.master.MasterFileSystem;
+import org.apache.hadoop.hbase.master.MetricsSnapshot;
+import org.apache.hadoop.hbase.master.procedure.CreateTableProcedure.CreateHdfsRegions;
+import org.apache.hadoop.hbase.monitoring.MonitoredTask;
+import org.apache.hadoop.hbase.monitoring.TaskMonitor;
+import org.apache.hadoop.hbase.procedure2.StateMachineProcedure;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CloneSnapshotState;
+import org.apache.hadoop.hbase.util.FSTableDescriptors;
+import org.apache.hadoop.hbase.util.FSUtils;
+import org.apache.hadoop.hbase.util.Pair;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
+import org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils;
+import org.apache.hadoop.hbase.snapshot.RestoreSnapshotException;
+import org.apache.hadoop.hbase.snapshot.RestoreSnapshotHelper;
+import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
+import org.apache.hadoop.hbase.snapshot.SnapshotManifest;
+import org.apache.hadoop.security.UserGroupInformation;
+
+import com.google.common.base.Preconditions;
+
+@InterfaceAudience.Private
+public class CloneSnapshotProcedure
+    extends StateMachineProcedure<MasterProcedureEnv, CloneSnapshotState>
+    implements TableProcedureInterface {
+  private static final Log LOG = LogFactory.getLog(CloneSnapshotProcedure.class);
+
+  private final AtomicBoolean aborted = new AtomicBoolean(false);
+
+  private UserGroupInformation user;
+  private HTableDescriptor hTableDescriptor;
+  private SnapshotDescription snapshot;
+  private List<HRegionInfo> newRegions = null;
+  private Map<String, Pair<String, String>> parentsToChildrenPairMap =
+    new HashMap<String, Pair<String, String>>();
+
+  // Monitor
+  private MonitoredTask monitorStatus = null;
+
+  private Boolean traceEnabled = null;
+
+  /**
+   * Constructor (for failover)
+   */
+  public CloneSnapshotProcedure() {
+  }
+
+  /**
+   * Constructor
+   * @param env MasterProcedureEnv
+   * @param hTableDescriptor the table to operate on
+   * @param snapshot snapshot to clone from
+   * @throws IOException
+   */
+  public CloneSnapshotProcedure(
+      final MasterProcedureEnv env,
+      final HTableDescriptor hTableDescriptor,
+      final SnapshotDescription snapshot)
+      throws IOException {
+    this.hTableDescriptor = hTableDescriptor;
+    this.snapshot = snapshot;
+    this.user = env.getRequestUser().getUGI();
+    this.setOwner(this.user.getShortUserName());
+
+    getMonitorStatus();
+  }
+
+  /**
+   * Set up monitor status if it is not created.
+   */
+  private MonitoredTask getMonitorStatus() {
+    if (monitorStatus == null) {
+      monitorStatus = TaskMonitor.get().createStatus("Cloning snapshot '" + snapshot.getName() +
+        "' to table " + getTableName());
+    }
+    return monitorStatus;
+  }
+
+  @Override
+  protected Flow executeFromState(final MasterProcedureEnv env, final CloneSnapshotState state)
+      throws InterruptedException {
+    if (isTraceEnabled()) {
+      LOG.trace(this + " execute state=" + state);
+    }
+    try {
+      switch (state) {
+        case CLONE_SNAPSHOT_PRE_OPERATION:
+          // Verify if we can clone the table
+          prepareClone(env);
+
+          preCloneSnapshot(env);
+          setNextState(CloneSnapshotState.CLONE_SNAPSHOT_WRITE_FS_LAYOUT);
+          break;
+        case CLONE_SNAPSHOT_WRITE_FS_LAYOUT:
+          newRegions = createFilesystemLayout(env, hTableDescriptor, newRegions);
+          setNextState(CloneSnapshotState.CLONE_SNAPSHOT_ADD_TO_META);
+          break;
+        case CLONE_SNAPSHOT_ADD_TO_META:
+          addRegionsToMeta(env);
+          setNextState(CloneSnapshotState.CLONE_SNAPSHOT_ASSIGN_REGIONS);
+          break;
+        case CLONE_SNAPSHOT_ASSIGN_REGIONS:
+          CreateTableProcedure.assignRegions(env, getTableName(), newRegions);
+          setNextState(CloneSnapshotState.CLONE_SNAPSHOT_UPDATE_DESC_CACHE);
+          break;
+        case CLONE_SNAPSHOT_UPDATE_DESC_CACHE:
+          CreateTableProcedure.updateTableDescCache(env, getTableName());
+          setNextState(CloneSnapshotState.CLONE_SNAPSHOT_POST_OPERATION);
+          break;
+        case CLONE_SNAPSHOT_POST_OPERATION:
+          postCloneSnapshot(env);
+
+          MetricsSnapshot metricsSnapshot = new MetricsSnapshot();
+          metricsSnapshot.addSnapshotClone(
+            getMonitorStatus().getCompletionTimestamp() - getMonitorStatus().getStartTime());
+          getMonitorStatus().markComplete("Clone snapshot '"+ snapshot.getName() +"' completed!");
+          return Flow.NO_MORE_STATE;
+        default:
+          throw new UnsupportedOperationException("unhandled state=" + state);
+      }
+    } catch (IOException e) {
+      LOG.error("Error trying to create table=" + getTableName() + " state=" + state, e);
+      setFailure("master-create-table", e);
+    }
+    return Flow.HAS_MORE_STATE;
+  }
+
+  @Override
+  protected void rollbackState(final MasterProcedureEnv env, final CloneSnapshotState state)
+      throws IOException {
+    if (isTraceEnabled()) {
+      LOG.trace(this + " rollback state=" + state);
+    }
+    try {
+      switch (state) {
+        case CLONE_SNAPSHOT_POST_OPERATION:
+          // TODO-MAYBE: call the deleteTable coprocessor event?
+          break;
+        case CLONE_SNAPSHOT_UPDATE_DESC_CACHE:
+          DeleteTableProcedure.deleteTableDescriptorCache(env, getTableName());
+          break;
+        case CLONE_SNAPSHOT_ASSIGN_REGIONS:
+          DeleteTableProcedure.deleteAssignmentState(env, getTableName());
+          break;
+        case CLONE_SNAPSHOT_ADD_TO_META:
+          DeleteTableProcedure.deleteFromMeta(env, getTableName(), newRegions);
+          break;
+        case CLONE_SNAPSHOT_WRITE_FS_LAYOUT:
+          DeleteTableProcedure.deleteFromFs(env, getTableName(), newRegions, false);
+          break;
+        case CLONE_SNAPSHOT_PRE_OPERATION:
+          DeleteTableProcedure.deleteTableStates(env, getTableName());
+          // TODO-MAYBE: call the deleteTable coprocessor event?
+          break;
+        default:
+          throw new UnsupportedOperationException("unhandled state=" + state);
+      }
+    } catch (IOException e) {
+      // This will be retried. Unless there is a bug in the code,
+      // this should be just a "temporary error" (e.g. network down)
+      LOG.warn("Failed rollback attempt step=" + state + " table=" + getTableName(), e);
+      throw e;
+    }
+  }
+
+  @Override
+  protected CloneSnapshotState getState(final int stateId) {
+    return CloneSnapshotState.valueOf(stateId);
+  }
+
+  @Override
+  protected int getStateId(final CloneSnapshotState state) {
+    return state.getNumber();
+  }
+
+  @Override
+  protected CloneSnapshotState getInitialState() {
+    return CloneSnapshotState.CLONE_SNAPSHOT_PRE_OPERATION;
+  }
+
+  @Override
+  protected void setNextState(final CloneSnapshotState state) {
+    if (aborted.get()) {
+      setAbortFailure("clone-snapshot", "abort requested");
+    } else {
+      super.setNextState(state);
+    }
+  }
+
+  @Override
+  public TableName getTableName() {
+    return hTableDescriptor.getTableName();
+  }
+
+  @Override
+  public TableOperationType getTableOperationType() {
+    return TableOperationType.CREATE; // Clone is creating a table
+  }
+
+  @Override
+  public boolean abort(final MasterProcedureEnv env) {
+    aborted.set(true);
+    return true;
+  }
+
+  @Override
+  public void toStringClassDetails(StringBuilder sb) {
+    sb.append(getClass().getSimpleName());
+    sb.append(" (table=");
+    sb.append(getTableName());
+    sb.append(" snapshot=");
+    sb.append(snapshot);
+    sb.append(")");
+  }
+
+  @Override
+  public void serializeStateData(final OutputStream stream) throws IOException {
+    super.serializeStateData(stream);
+
+    MasterProcedureProtos.CloneSnapshotStateData.Builder cloneSnapshotMsg =
+      MasterProcedureProtos.CloneSnapshotStateData.newBuilder()
+        .setUserInfo(MasterProcedureUtil.toProtoUserInfo(this.user))
+        .setSnapshot(this.snapshot)
+        .setTableSchema(hTableDescriptor.convert());
+    if (newRegions != null) {
+      for (HRegionInfo hri: newRegions) {
+        cloneSnapshotMsg.addRegionInfo(HRegionInfo.convert(hri));
+      }
+    }
+    if (!parentsToChildrenPairMap.isEmpty()) {
+      final Iterator<Map.Entry<String, Pair<String, String>>> it =
+        parentsToChildrenPairMap.entrySet().iterator();
+      while (it.hasNext()) {
+        final Map.Entry<String, Pair<String, String>> entry = it.next();
+
+        MasterProcedureProtos.RestoreParentToChildRegionsPair.Builder parentToChildrenPair =
+          MasterProcedureProtos.RestoreParentToChildRegionsPair.newBuilder()
+          .setParentRegionName(entry.getKey())
+          .setChild1RegionName(entry.getValue().getFirst())
+          .setChild2RegionName(entry.getValue().getSecond());
+        cloneSnapshotMsg.addParentToChildRegionsPairList(parentToChildrenPair);
+      }
+    }
+    cloneSnapshotMsg.build().writeDelimitedTo(stream);
+  }
+
+  @Override
+  public void deserializeStateData(final InputStream stream) throws IOException {
+    super.deserializeStateData(stream);
+
+    MasterProcedureProtos.CloneSnapshotStateData cloneSnapshotMsg =
+      MasterProcedureProtos.CloneSnapshotStateData.parseDelimitedFrom(stream);
+    user = MasterProcedureUtil.toUserInfo(cloneSnapshotMsg.getUserInfo());
+    snapshot = cloneSnapshotMsg.getSnapshot();
+    hTableDescriptor = HTableDescriptor.convert(cloneSnapshotMsg.getTableSchema());
+    if (cloneSnapshotMsg.getRegionInfoCount() == 0) {
+      newRegions = null;
+    } else {
+      newRegions = new ArrayList<HRegionInfo>(cloneSnapshotMsg.getRegionInfoCount());
+      for (HBaseProtos.RegionInfo hri: cloneSnapshotMsg.getRegionInfoList()) {
+        newRegions.add(HRegionInfo.convert(hri));
+      }
+    }
+    if (cloneSnapshotMsg.getParentToChildRegionsPairListCount() > 0) {
+      parentsToChildrenPairMap = new HashMap<String, Pair<String, String>>();
+      for (MasterProcedureProtos.RestoreParentToChildRegionsPair parentToChildrenPair:
+        cloneSnapshotMsg.getParentToChildRegionsPairListList()) {
+        parentsToChildrenPairMap.put(
+          parentToChildrenPair.getParentRegionName(),
+          new Pair<String, String>(
+            parentToChildrenPair.getChild1RegionName(),
+            parentToChildrenPair.getChild2RegionName()));
+      }
+    }
+    // Make sure that the monitor status is set up
+    getMonitorStatus();
+  }
+
+  @Override
+  protected boolean acquireLock(final MasterProcedureEnv env) {
+    if (env.waitInitialized(this)) {
+      return false;
+    }
+    return env.getProcedureQueue().tryAcquireTableExclusiveLock(this, getTableName());
+  }
+
+  @Override
+  protected void releaseLock(final MasterProcedureEnv env) {
+    env.getProcedureQueue().releaseTableExclusiveLock(this, getTableName());
+  }
+
+  /**
+   * Action before any real action of cloning from snapshot.
+   * @param env MasterProcedureEnv
+   * @throws IOException
+   */
+  private void prepareClone(final MasterProcedureEnv env) throws IOException {
+    final TableName tableName = getTableName();
+    if (MetaTableAccessor.tableExists(env.getMasterServices().getConnection(), tableName)) {
+      throw new TableExistsException(getTableName());
+    }
+  }
+
+  /**
+   * Action before cloning from snapshot.
+   * @param env MasterProcedureEnv
+   * @throws IOException
+   * @throws InterruptedException
+   */
+  private void preCloneSnapshot(final MasterProcedureEnv env)
+      throws IOException, InterruptedException {
+    if (!getTableName().isSystemTable()) {
+      // Check and update namespace quota
+      final MasterFileSystem mfs = env.getMasterServices().getMasterFileSystem();
+
+      SnapshotManifest manifest = SnapshotManifest.open(
+        env.getMasterConfiguration(),
+        mfs.getFileSystem(),
+        SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshot, mfs.getRootDir()),
+        snapshot);
+
+      ProcedureSyncWait.getMasterQuotaManager(env)
+        .checkNamespaceTableAndRegionQuota(getTableName(), manifest.getRegionManifestsMap().size());
+    }
+
+    final MasterCoprocessorHost cpHost = env.getMasterCoprocessorHost();
+    if (cpHost != null) {
+      user.doAs(new PrivilegedExceptionAction<Void>() {
+        @Override
+        public Void run() throws Exception {
+          cpHost.preCreateTableHandler(hTableDescriptor, null);
+          return null;
+        }
+      });
+    }
+  }
+
+  /**
+   * Action after cloning from snapshot.
+   * @param env MasterProcedureEnv
+   * @throws IOException
+   * @throws InterruptedException
+   */
+  private void postCloneSnapshot(final MasterProcedureEnv env)
+      throws IOException, InterruptedException {
+    final MasterCoprocessorHost cpHost = env.getMasterCoprocessorHost();
+    if (cpHost != null) {
+      final HRegionInfo[] regions = (newRegions == null) ? null :
+        newRegions.toArray(new HRegionInfo[newRegions.size()]);
+      user.doAs(new PrivilegedExceptionAction<Void>() {
+        @Override
+        public Void run() throws Exception {
+          cpHost.postCreateTableHandler(hTableDescriptor, regions);
+          return null;
+        }
+      });
+    }
+  }
+
+  /**
+   * Create regions in file system.
+   * @param env MasterProcedureEnv
+   * @throws IOException
+   */
+  private List<HRegionInfo> createFilesystemLayout(
+    final MasterProcedureEnv env,
+    final HTableDescriptor hTableDescriptor,
+    final List<HRegionInfo> newRegions) throws IOException {
+    return createFsLayout(env, hTableDescriptor, newRegions, new CreateHdfsRegions() {
+      @Override
+      public List<HRegionInfo> createHdfsRegions(
+        final MasterProcedureEnv env,
+        final Path tableRootDir, final TableName tableName,
+        final List<HRegionInfo> newRegions) throws IOException {
+
+        final MasterFileSystem mfs = env.getMasterServices().getMasterFileSystem();
+        final FileSystem fs = mfs.getFileSystem();
+        final Path rootDir = mfs.getRootDir();
+        final Configuration conf = env.getMasterConfiguration();
+        final ForeignExceptionDispatcher monitorException = new ForeignExceptionDispatcher();
+
+        getMonitorStatus().setStatus("Clone snapshot - creating regions for table: " + tableName);
+
+        try {
+          // 1. Execute the on-disk Clone
+          Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshot, rootDir);
+          SnapshotManifest manifest = SnapshotManifest.open(conf, fs, snapshotDir, snapshot);
+          RestoreSnapshotHelper restoreHelper = new RestoreSnapshotHelper(
+            conf, fs, manifest, hTableDescriptor, tableRootDir, monitorException, monitorStatus);
+          RestoreSnapshotHelper.RestoreMetaChanges metaChanges = restoreHelper.restoreHdfsRegions();
+
+          // A clone operation should not have regions to restore or remove
+          Preconditions.checkArgument(
+            !metaChanges.hasRegionsToRestore(), "A clone should not have regions to restore");
+          Preconditions.checkArgument(
+            !metaChanges.hasRegionsToRemove(), "A clone should not have regions to remove");
+
+          // At this point the clone is complete. Next step is enabling the table.
+          String msg =
+            "Clone snapshot="+ snapshot.getName() +" on table=" + tableName + " completed!";
+          LOG.info(msg);
+          monitorStatus.setStatus(msg + " Waiting for table to be enabled...");
+
+          // 2. Let the next step add the regions to meta
+          return metaChanges.getRegionsToAdd();
+        } catch (Exception e) {
+          String msg = "clone snapshot=" + ClientSnapshotDescriptionUtils.toString(snapshot) +
+            " failed because " + e.getMessage();
+          LOG.error(msg, e);
+          IOException rse = new RestoreSnapshotException(msg, e, snapshot);
+
+          // these handlers aren't futures so we need to register the error here.
+          monitorException.receive(new ForeignException("Master CloneSnapshotProcedure", rse));
+          throw rse;
+        }
+      }
+    });
+  }
+
+  /**
+   * Create region layout in file system.
+   * @param env MasterProcedureEnv
+   * @throws IOException
+   */
+  private List<HRegionInfo> createFsLayout(
+    final MasterProcedureEnv env,
+    final HTableDescriptor hTableDescriptor,
+    List<HRegionInfo> newRegions,
+    final CreateHdfsRegions hdfsRegionHandler) throws IOException {
+    final MasterFileSystem mfs = env.getMasterServices().getMasterFileSystem();
+    final Path tempdir = mfs.getTempDir();
+
+    // 1. Create Table Descriptor
+    // Using a copy of the descriptor; the table will be created as enabling first
+    TableDescriptor underConstruction = new TableDescriptor(hTableDescriptor);
+    final Path tempTableDir = FSUtils.getTableDir(tempdir, hTableDescriptor.getTableName());
+    ((FSTableDescriptors)(env.getMasterServices().getTableDescriptors()))
+      .createTableDescriptorForTableDirectory(tempTableDir, underConstruction, false);
+
+    // 2. Create Regions
+    newRegions = hdfsRegionHandler.createHdfsRegions(
+      env, tempdir, hTableDescriptor.getTableName(), newRegions);
+
+    // 3. Move Table temp directory to the hbase root location
+    CreateTableProcedure.moveTempDirectoryToHBaseRoot(env, hTableDescriptor, tempTableDir);
+
+    return newRegions;
+  }
+
+  /**
+   * Add regions to hbase:meta table.
+   * @param env MasterProcedureEnv
+   * @throws IOException
+   */
+  private void addRegionsToMeta(final MasterProcedureEnv env) throws IOException {
+    newRegions = CreateTableProcedure.addTableToMeta(env, hTableDescriptor, newRegions);
+
+    RestoreSnapshotHelper.RestoreMetaChanges metaChanges =
+        new RestoreSnapshotHelper.RestoreMetaChanges(
+          hTableDescriptor, parentsToChildrenPairMap);
+    metaChanges.updateMetaParentRegions(env.getMasterServices().getConnection(), newRegions);
+  }
+
+  /**
+   * The procedure could be restarted from a different machine. If the variable is null, we need to
+   * retrieve it.
+   * @return traceEnabled
+   */
+  private Boolean isTraceEnabled() {
+    if (traceEnabled == null) {
+      traceEnabled = LOG.isTraceEnabled();
+    }
+    return traceEnabled;
+  }
+}
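
Note: CloneSnapshotProcedure is a table-exclusive state machine, so the master
only has to submit it and hand the proc id back to the client. A sketch of the
expected submission path, under the assumption that SnapshotManager holds a
nonce-aware ProcedureExecutor; the names here are illustrative:

    long procId = master.getMasterProcedureExecutor().submitProcedure(
        new CloneSnapshotProcedure(procEnv, hTableDescriptor, snapshot),
        nonceGroup, nonce);
    // procId flows back to the client through RestoreSnapshotResponse.proc_id
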
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.java hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.java
index 8ce8335..f262edb 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.java
@@ -299,7 +299,8 @@ public class CreateTableProcedure
       throws IOException, InterruptedException {
     if (!getTableName().isSystemTable()) {
       ProcedureSyncWait.getMasterQuotaManager(env)
-        .checkNamespaceTableAndRegionQuota(getTableName(), newRegions.size());
+        .checkNamespaceTableAndRegionQuota(
+          getTableName(), (newRegions != null ? newRegions.size() : 0));
     }
 
     final MasterCoprocessorHost cpHost = env.getMasterCoprocessorHost();
@@ -373,6 +374,16 @@ public class CreateTableProcedure
       hTableDescriptor.getTableName(), newRegions);
 
     // 3. Move Table temp directory to the hbase root location
+    moveTempDirectoryToHBaseRoot(env, hTableDescriptor, tempTableDir);
+
+    return newRegions;
+  }
+
+  protected static void moveTempDirectoryToHBaseRoot(
+    final MasterProcedureEnv env,
+    final HTableDescriptor hTableDescriptor,
+    final Path tempTableDir) throws IOException {
+    final MasterFileSystem mfs = env.getMasterServices().getMasterFileSystem();
     final Path tableDir = FSUtils.getTableDir(mfs.getRootDir(), hTableDescriptor.getTableName());
     FileSystem fs = mfs.getFileSystem();
     if (!fs.delete(tableDir, true) && fs.exists(tableDir)) {
@@ -382,7 +393,6 @@ public class CreateTableProcedure
       throw new IOException("Unable to move table from temp=" + tempTableDir +
         " to hbase root=" + tableDir);
     }
-    return newRegions;
   }
 
  protected static List<HRegionInfo> addTableToMeta(final MasterProcedureEnv env,
@@ -446,7 +456,7 @@ public class CreateTableProcedure
   /**
    * Add the specified set of regions to the hbase:meta table.
    */
-  protected static void addRegionsToMeta(final MasterProcedureEnv env,
+  private static void addRegionsToMeta(final MasterProcedureEnv env,
       final HTableDescriptor hTableDescriptor,
      final List<HRegionInfo> regionInfos) throws IOException {
     MetaTableAccessor.addRegionsToMeta(env.getMasterServices().getConnection(),
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterDDLOperationHelper.java hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterDDLOperationHelper.java
index abfb776..19bd015 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterDDLOperationHelper.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterDDLOperationHelper.java
@@ -165,4 +165,17 @@ public final class MasterDDLOperationHelper {
     }
     return done;
   }
+
+  /**
+   * Get the region info list of a table from meta if it is not already known by the caller.
+   **/
+  public static List<HRegionInfo> getRegionInfoList(
+    final MasterProcedureEnv env,
+    final TableName tableName,
+    List<HRegionInfo> regionInfoList) throws IOException {
+    if (regionInfoList == null) {
+      regionInfoList = ProcedureSyncWait.getRegionsFromMeta(env, tableName);
+    }
+    return regionInfoList;
+  }
 }
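
Note: getRegionInfoList() enables a lazy-fetch pattern: a caller that may
already know the table's regions passes its cached list and only hits
hbase:meta when the list is still null. Illustrative use, assuming env and
tableName are in scope:

    List<HRegionInfo> regions = null; // unknown on first use
    regions = MasterDDLOperationHelper.getRegionInfoList(env, tableName, regions);
    // later calls pass the cached list back and skip the meta scan
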
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RestoreSnapshotProcedure.java hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RestoreSnapshotProcedure.java
new file mode 100644
index 0000000..1dc8944
--- /dev/null
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RestoreSnapshotProcedure.java
@@ -0,0 +1,526 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.master.procedure;
+
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.DoNotRetryIOException;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.MetaTableAccessor;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.TableNotFoundException;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.errorhandling.ForeignException;
+import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher;
+import org.apache.hadoop.hbase.master.MasterFileSystem;
+import org.apache.hadoop.hbase.master.MetricsSnapshot;
+import org.apache.hadoop.hbase.master.RegionStates;
+import org.apache.hadoop.hbase.monitoring.MonitoredTask;
+import org.apache.hadoop.hbase.monitoring.TaskMonitor;
+import org.apache.hadoop.hbase.procedure2.StateMachineProcedure;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.RestoreSnapshotState;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
+import org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils;
+import org.apache.hadoop.hbase.snapshot.RestoreSnapshotHelper;
+import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
+import org.apache.hadoop.hbase.snapshot.SnapshotManifest;
+import org.apache.hadoop.hbase.util.Pair;
+import org.apache.hadoop.security.UserGroupInformation;
+
+@InterfaceAudience.Private
+public class RestoreSnapshotProcedure
+    extends StateMachineProcedure<MasterProcedureEnv, RestoreSnapshotState>
+    implements TableProcedureInterface {
+  private static final Log LOG = LogFactory.getLog(RestoreSnapshotProcedure.class);
+
+  private final AtomicBoolean aborted = new AtomicBoolean(false);
+
+  private HTableDescriptor modifiedHTableDescriptor;
+  private List<HRegionInfo> regionsToRestore = null;
+  private List<HRegionInfo> regionsToRemove = null;
+  private List<HRegionInfo> regionsToAdd = null;
+  private Map<String, Pair<String, String>> parentsToChildrenPairMap =
+    new HashMap<String, Pair<String, String>>();
+
+  private UserGroupInformation user;
+  private SnapshotDescription snapshot;
+
+  // Monitor
+  private MonitoredTask monitorStatus = null;
+
+  private Boolean traceEnabled = null;
+
+  /**
+   * Constructor (for failover)
+   */
+  public RestoreSnapshotProcedure() {
+  }
+
+  /**
+   * Constructor
+   * @param env MasterProcedureEnv
+   * @param hTableDescriptor the table to operate on
+   * @param snapshot snapshot to restore from
+   * @throws IOException
+   */
+  public RestoreSnapshotProcedure(
+      final MasterProcedureEnv env,
+      final HTableDescriptor hTableDescriptor,
+      final SnapshotDescription snapshot)
+      throws IOException {
+    // This is the new schema we are going to write out as this modification.
+    this.modifiedHTableDescriptor = hTableDescriptor;
+    // Snapshot information
+    this.snapshot = snapshot;
+    // User and owner information
+    this.user = env.getRequestUser().getUGI();
+    this.setOwner(this.user.getShortUserName());
+
+    // Monitor
+    getMonitorStatus();
+  }
+
+  /**
+   * Set up monitor status if it is not created.
+   */
+  private MonitoredTask getMonitorStatus() {
+    if (monitorStatus == null) {
+      monitorStatus = TaskMonitor.get().createStatus("Restoring snapshot '" + snapshot.getName()
+        + "' to table " + getTableName());
+    }
+    return monitorStatus;
+  }
+
+  @Override
+  protected Flow executeFromState(final MasterProcedureEnv env, final RestoreSnapshotState state)
+      throws InterruptedException {
+    if (isTraceEnabled()) {
+      LOG.trace(this + " execute state=" + state);
+    }
+
+    // Make sure that the monitor status is set up
+    getMonitorStatus();
+
+    try {
+      switch (state) {
+        case RESTORE_SNAPSHOT_PRE_OPERATION:
+          // Verify if we can restore the table
+          prepareRestore(env);
+          setNextState(RestoreSnapshotState.RESTORE_SNAPSHOT_UPDATE_TABLE_DESCRIPTOR);
+          break;
+        case RESTORE_SNAPSHOT_UPDATE_TABLE_DESCRIPTOR:
+          updateTableDescriptor(env);
+          setNextState(RestoreSnapshotState.RESTORE_SNAPSHOT_WRITE_FS_LAYOUT);
+          break;
+        case RESTORE_SNAPSHOT_WRITE_FS_LAYOUT:
+          restoreSnapshot(env);
+          setNextState(RestoreSnapshotState.RESTORE_SNAPSHOT_UPDATE_META);
+          break;
+        case RESTORE_SNAPSHOT_UPDATE_META:
+          updateMETA(env);
+          return Flow.NO_MORE_STATE;
+        default:
+          throw new UnsupportedOperationException("unhandled state=" + state);
+      }
+    } catch (IOException e) {
+      LOG.error("Error trying to restore snapshot=" + getTableName() + " state=" + state, e);
+      setFailure("master-restore-snapshot", e);
+    }
+    return Flow.HAS_MORE_STATE;
+  }
+
+  @Override
+  protected void rollbackState(final MasterProcedureEnv env, final RestoreSnapshotState state)
+      throws IOException {
+    if (isTraceEnabled()) {
+      LOG.trace(this + " rollback state=" + state);
+    }
+
+    if (state == RestoreSnapshotState.RESTORE_SNAPSHOT_PRE_OPERATION) {
+      // nothing to rollback
+      return;
+    }
+
+    // The restore snapshot procedure does not have a rollback; its execution is expected to
+    // eventually succeed.
+    throw new UnsupportedOperationException("unhandled state=" + state);
+  }
+
+  @Override
+  protected RestoreSnapshotState getState(final int stateId) {
+    return RestoreSnapshotState.valueOf(stateId);
+  }
+
+  @Override
+  protected int getStateId(final RestoreSnapshotState state) {
+    return state.getNumber();
+  }
+
+  @Override
+  protected RestoreSnapshotState getInitialState() {
+    return RestoreSnapshotState.RESTORE_SNAPSHOT_PRE_OPERATION;
+  }
+
+  @Override
+  protected void setNextState(final RestoreSnapshotState state) {
+    if (aborted.get()) {
+      setAbortFailure("restore-snapshot", "abort requested");
+    } else {
+      super.setNextState(state);
+    }
+  }
+
+  @Override
+  public TableName getTableName() {
+    return modifiedHTableDescriptor.getTableName();
+  }
+
+  @Override
+  public TableOperationType getTableOperationType() {
+    return TableOperationType.EDIT; // Restore is modifying a table
+  }
+
+  @Override
+  public boolean abort(final MasterProcedureEnv env) {
+    aborted.set(true);
+    return true;
+  }
+
+  @Override
+  public void toStringClassDetails(StringBuilder sb) {
+    sb.append(getClass().getSimpleName());
+    sb.append(" (table=");
+    sb.append(getTableName());
+    sb.append(" snapshot=");
+    sb.append(snapshot);
+    sb.append(")");
+  }
+
+  @Override
+  public void serializeStateData(final OutputStream stream) throws IOException {
+    super.serializeStateData(stream);
+
+    MasterProcedureProtos.RestoreSnapshotStateData.Builder restoreSnapshotMsg =
+      MasterProcedureProtos.RestoreSnapshotStateData.newBuilder()
+        .setUserInfo(MasterProcedureUtil.toProtoUserInfo(this.user))
+        .setSnapshot(this.snapshot)
+        .setModifiedTableSchema(modifiedHTableDescriptor.convert());
+
+    if (regionsToRestore != null) {
+      for (HRegionInfo hri: regionsToRestore) {
+        restoreSnapshotMsg.addRegionInfoForRestore(HRegionInfo.convert(hri));
+      }
+    }
+    if (regionsToRemove != null) {
+      for (HRegionInfo hri: regionsToRemove) {
+        restoreSnapshotMsg.addRegionInfoForRemove(HRegionInfo.convert(hri));
+      }
+    }
+    if (regionsToAdd != null) {
+      for (HRegionInfo hri: regionsToAdd) {
+        restoreSnapshotMsg.addRegionInfoForAdd(HRegionInfo.convert(hri));
+      }
+    }
+    if (!parentsToChildrenPairMap.isEmpty()) {
+      final Iterator<Map.Entry<String, Pair<String, String>>> it =
+        parentsToChildrenPairMap.entrySet().iterator();
+      while (it.hasNext()) {
+        final Map.Entry<String, Pair<String, String>> entry = it.next();
+
+        MasterProcedureProtos.RestoreParentToChildRegionsPair.Builder parentToChildrenPair =
+          MasterProcedureProtos.RestoreParentToChildRegionsPair.newBuilder()
+          .setParentRegionName(entry.getKey())
+          .setChild1RegionName(entry.getValue().getFirst())
+          .setChild2RegionName(entry.getValue().getSecond());
+        restoreSnapshotMsg.addParentToChildRegionsPairList(parentToChildrenPair);
+      }
+    }
+    restoreSnapshotMsg.build().writeDelimitedTo(stream);
+  }
+
+  @Override
+  public void deserializeStateData(final InputStream stream) throws IOException {
+    super.deserializeStateData(stream);
+
+    MasterProcedureProtos.RestoreSnapshotStateData restoreSnapshotMsg =
+      MasterProcedureProtos.RestoreSnapshotStateData.parseDelimitedFrom(stream);
+    user = MasterProcedureUtil.toUserInfo(restoreSnapshotMsg.getUserInfo());
+    snapshot = restoreSnapshotMsg.getSnapshot();
+    modifiedHTableDescriptor =
+      HTableDescriptor.convert(restoreSnapshotMsg.getModifiedTableSchema());
+
+    if (restoreSnapshotMsg.getRegionInfoForRestoreCount() == 0) {
+      regionsToRestore = null;
+    } else {
+      regionsToRestore =
+        new ArrayList<HRegionInfo>(restoreSnapshotMsg.getRegionInfoForRestoreCount());
+      for (HBaseProtos.RegionInfo hri: restoreSnapshotMsg.getRegionInfoForRestoreList()) {
+        regionsToRestore.add(HRegionInfo.convert(hri));
+      }
+    }
+    if (restoreSnapshotMsg.getRegionInfoForRemoveCount() == 0) {
+      regionsToRemove = null;
+    } else {
+      regionsToRemove =
+        new ArrayList<HRegionInfo>(restoreSnapshotMsg.getRegionInfoForRemoveCount());
+      for (HBaseProtos.RegionInfo hri: restoreSnapshotMsg.getRegionInfoForRemoveList()) {
+        regionsToRemove.add(HRegionInfo.convert(hri));
+      }
+    }
+    if (restoreSnapshotMsg.getRegionInfoForAddCount() == 0) {
+      regionsToAdd = null;
+    } else {
+      regionsToAdd = new ArrayList<HRegionInfo>(restoreSnapshotMsg.getRegionInfoForAddCount());
+      for (HBaseProtos.RegionInfo hri: restoreSnapshotMsg.getRegionInfoForAddList()) {
+        regionsToAdd.add(HRegionInfo.convert(hri));
+      }
+    }
+    if (restoreSnapshotMsg.getParentToChildRegionsPairListCount() > 0) {
+      for (MasterProcedureProtos.RestoreParentToChildRegionsPair parentToChildrenPair:
+        restoreSnapshotMsg.getParentToChildRegionsPairListList()) {
+        parentsToChildrenPairMap.put(
+          parentToChildrenPair.getParentRegionName(),
+          new Pair<String, String>(
+            parentToChildrenPair.getChild1RegionName(),
+            parentToChildrenPair.getChild2RegionName()));
+      }
+    }
+  }
+
+  @Override
+  protected boolean acquireLock(final MasterProcedureEnv env) {
+    if (env.waitInitialized(this)) {
+      return false;
+    }
+    return env.getProcedureQueue().tryAcquireTableExclusiveLock(this, getTableName());
+  }
+
+  @Override
+  protected void releaseLock(final MasterProcedureEnv env) {
+    env.getProcedureQueue().releaseTableExclusiveLock(this, getTableName());
+  }
+
+  /**
+   * Action before any real action of restoring from snapshot.
+   * @param env MasterProcedureEnv
+   * @throws IOException
+   */
+  private void prepareRestore(final MasterProcedureEnv env) throws IOException {
+    final TableName tableName = getTableName();
+    // Checks whether the table exists
+    if (!MetaTableAccessor.tableExists(env.getMasterServices().getConnection(), tableName)) {
+      throw new TableNotFoundException(tableName);
+    }
+
+    // Check whether table is disabled.
+    env.getMasterServices().checkTableModifiable(tableName);
+
+    // Check that we have at least 1 CF
+    if (modifiedHTableDescriptor.getColumnFamilies().length == 0) {
+      throw new DoNotRetryIOException("Table " + getTableName().toString() +
+        " should have at least one column family.");
+    }
+
+    if (!getTableName().isSystemTable()) {
+      // Table already exists. Check and update the region quota for this table's namespace.
+      final MasterFileSystem mfs = env.getMasterServices().getMasterFileSystem();
+      SnapshotManifest manifest = SnapshotManifest.open(
+        env.getMasterConfiguration(),
+        mfs.getFileSystem(),
+        SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshot, mfs.getRootDir()),
+        snapshot);
+      int snapshotRegionCount = manifest.getRegionManifestsMap().size();
+      int tableRegionCount =
+          ProcedureSyncWait.getMasterQuotaManager(env).getRegionCountOfTable(tableName);
+
+      if (snapshotRegionCount > 0 && tableRegionCount != snapshotRegionCount) {
+        ProcedureSyncWait.getMasterQuotaManager(env).checkAndUpdateNamespaceRegionQuota(
+          tableName, snapshotRegionCount);
+      }
+    }
+  }
+
+  /**
+   * Update descriptor
+   * @param env MasterProcedureEnv
+   * @throws IOException
+   **/
+  private void updateTableDescriptor(final MasterProcedureEnv env) throws IOException {
+    env.getMasterServices().getTableDescriptors().add(modifiedHTableDescriptor);
+  }
+
+  /**
+   * Execute the on-disk Restore
+   * @param env MasterProcedureEnv
+   * @throws IOException
+   **/
+  private void restoreSnapshot(final MasterProcedureEnv env) throws IOException {
+    MasterFileSystem fileSystemManager = env.getMasterServices().getMasterFileSystem();
+    FileSystem fs = fileSystemManager.getFileSystem();
+    Path rootDir = fileSystemManager.getRootDir();
+    final ForeignExceptionDispatcher monitorException = new ForeignExceptionDispatcher();
+
+    LOG.info("Starting restore snapshot=" + ClientSnapshotDescriptionUtils.toString(snapshot));
+    try {
+      Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshot, rootDir);
+      SnapshotManifest manifest = SnapshotManifest.open(
+        env.getMasterServices().getConfiguration(), fs, snapshotDir, snapshot);
+      RestoreSnapshotHelper restoreHelper = new RestoreSnapshotHelper(
+        env.getMasterServices().getConfiguration(),
+        fs,
+        manifest,
+        modifiedHTableDescriptor,
+        rootDir,
+        monitorException,
+        getMonitorStatus());
+
+      RestoreSnapshotHelper.RestoreMetaChanges metaChanges = restoreHelper.restoreHdfsRegions();
+      regionsToRestore = metaChanges.getRegionsToRestore();
+      regionsToRemove = metaChanges.getRegionsToRemove();
+      regionsToAdd = metaChanges.getRegionsToAdd();
+      parentsToChildrenPairMap = metaChanges.getParentToChildrenPairMap();
+    } catch (IOException e) {
+      String msg = "restore snapshot=" + ClientSnapshotDescriptionUtils.toString(snapshot)
+        + " failed in on-disk restore. Try re-running the restore command.";
+      LOG.error(msg, e);
+      monitorException.receive(
+        new ForeignException(env.getMasterServices().getServerName().toString(), e));
+      throw new IOException(msg, e);
+    }
+  }
+
+  /**
+   * Apply changes to hbase:meta
+   * @param env MasterProcedureEnv
+   * @throws IOException
+   **/
+  private void updateMETA(final MasterProcedureEnv env) throws IOException {
+    try {
+      Connection conn = env.getMasterServices().getConnection();
+
+      // 1. Forces all the RegionStates to be offline
+      //
+      // The AssignmentManager keeps all the region states around
+      // with no possibility to remove them, until the master is restarted.
+      // This means that a region marked as SPLIT before the restore will never be assigned again.
+      // To avoid having all states around all the regions are switched to the OFFLINE state,
+      // which is the same state that the regions will be after a delete table.
+      forceRegionsOffline(env, regionsToAdd);
+      forceRegionsOffline(env, regionsToRestore);
+      forceRegionsOffline(env, regionsToRemove);
+
+      getMonitorStatus().setStatus("Preparing to restore each region");
+
+      // 2. Applies changes to hbase:meta
+      // (2.1). Removes the current set of regions from META
+      //
+      // By also removing the regions to restore (the ones present both in the snapshot
+      // and in the current state) we ensure that no extra fields are present in META,
+      // e.g. with a simple addRegionToMeta() the splitA and splitB attributes are
+      // not overwritten/removed, so you would end up with stale information
+      // that is no longer correct after the restore.
+      if (regionsToRemove != null) {
+        MetaTableAccessor.deleteRegions(conn, regionsToRemove);
+      }
+
+      // (2.2). Add the new set of regions to META
+      //
+      // At this point the old regions are no longer present in META,
+      // and the set of regions present in the snapshot will be written to META.
+      // All the information in hbase:meta comes from the .regioninfo of each region present
+      // in the snapshot folder.
+      if (regionsToAdd != null) {
+        MetaTableAccessor.addRegionsToMeta(
+          conn,
+          regionsToAdd,
+          modifiedHTableDescriptor.getRegionReplication());
+      }
+
+      if (regionsToRestore != null) {
+        MetaTableAccessor.overwriteRegions(
+          conn,
+          regionsToRestore,
+          modifiedHTableDescriptor.getRegionReplication());
+      }
+
+      RestoreSnapshotHelper.RestoreMetaChanges metaChanges =
+        new RestoreSnapshotHelper.RestoreMetaChanges(
+          modifiedHTableDescriptor, parentsToChildrenPairMap);
+      metaChanges.updateMetaParentRegions(conn, regionsToAdd);
+
+      // At this point the restore is complete.
+      LOG.info("Restore snapshot=" + ClientSnapshotDescriptionUtils.toString(snapshot) +
+        " on table=" + getTableName() + " completed!");
+    } catch (IOException e) {
+      final ForeignExceptionDispatcher monitorException = new ForeignExceptionDispatcher();
+      String msg = "restore snapshot=" + ClientSnapshotDescriptionUtils.toString(snapshot)
+          + " failed in meta update. Try re-running the restore command.";
+      LOG.error(msg, e);
+      monitorException.receive(
+        new ForeignException(env.getMasterServices().getServerName().toString(), e));
+      throw new IOException(msg, e);
+    }
+
+    monitorStatus.markComplete("Restore snapshot '" + snapshot.getName() + "'!");
+    MetricsSnapshot metricsSnapshot = new MetricsSnapshot();
+    metricsSnapshot.addSnapshotRestore(
+      monitorStatus.getCompletionTimestamp() - monitorStatus.getStartTime());
+  }
+
+  /**
+   * Make sure the region states of the regions in the list are in OFFLINE state.
+   * @param env MasterProcedureEnv
+   * @param hris region info list
+   **/
+  private void forceRegionsOffline(final MasterProcedureEnv env, final List<HRegionInfo> hris) {
+    RegionStates states = env.getMasterServices().getAssignmentManager().getRegionStates();
+    if (hris != null) {
+      for (HRegionInfo hri: hris) {
+        states.regionOffline(hri);
+      }
+    }
+  }
+
+  /**
+   * The procedure could be restarted from a different machine. If the variable is null, we need to
+   * retrieve it.
+   * @return traceEnabled
+   */
+  private Boolean isTraceEnabled() {
+    if (traceEnabled == null) {
+      traceEnabled = LOG.isTraceEnabled();
+    }
+    return traceEnabled;
+  }
+}
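
On the client side, the procedure above is what Admin.restoreSnapshotAsync() ultimately drives. A minimal usage sketch, assuming the returned Future is parameterized as Future<Void> and that a Connection named conn already exists; the table and snapshot names are made up:

    import java.util.concurrent.Future;
    import java.util.concurrent.TimeUnit;

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;

    public class RestoreSnapshotExample {
      public static void restore(final Connection conn) throws Exception {
        try (Admin admin = conn.getAdmin()) {
          final TableName table = TableName.valueOf("t1");
          if (admin.isTableEnabled(table)) {
            admin.disableTable(table);  // restore requires a disabled table
          }
          // Submits a RestoreSnapshotProcedure on the master and returns immediately.
          Future<Void> op = admin.restoreSnapshotAsync("t1-snap");
          // The caller bounds the wait; get() may throw TimeoutException or an
          // ExecutionException wrapping the remote failure.
          op.get(5, TimeUnit.MINUTES);
        }
      }
    }
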
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/CloneSnapshotHandler.java hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/CloneSnapshotHandler.java
deleted file mode 100644
index 2a6dca8..0000000
--- hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/CloneSnapshotHandler.java
+++ /dev/null
@@ -1,195 +0,0 @@
-/**
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.master.snapshot;
-
-import java.io.IOException;
-import java.util.List;
-import java.util.concurrent.CancellationException;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.NotAllMetaRegionsOnlineException;
-import org.apache.hadoop.hbase.TableExistsException;
-import org.apache.hadoop.hbase.errorhandling.ForeignException;
-import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher;
-import org.apache.hadoop.hbase.master.MasterServices;
-import org.apache.hadoop.hbase.master.MetricsSnapshot;
-import org.apache.hadoop.hbase.master.SnapshotSentinel;
-import org.apache.hadoop.hbase.master.handler.CreateTableHandler;
-import org.apache.hadoop.hbase.monitoring.MonitoredTask;
-import org.apache.hadoop.hbase.monitoring.TaskMonitor;
-import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
-import org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils;
-import org.apache.hadoop.hbase.snapshot.RestoreSnapshotException;
-import org.apache.hadoop.hbase.snapshot.RestoreSnapshotHelper;
-import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
-import org.apache.hadoop.hbase.snapshot.SnapshotManifest;
-
-import com.google.common.base.Preconditions;
-
-/**
- * Handler to Clone a snapshot.
- *
- * <p>Uses {@link RestoreSnapshotHelper} to create a new table with the same
- * content of the specified snapshot.
- */
-@InterfaceAudience.Private
-public class CloneSnapshotHandler extends CreateTableHandler implements SnapshotSentinel {
-  private static final Log LOG = LogFactory.getLog(CloneSnapshotHandler.class);
-
-  private final static String NAME = "Master CloneSnapshotHandler";
-
-  private final SnapshotDescription snapshot;
-
-  private final ForeignExceptionDispatcher monitor;
-  private final MetricsSnapshot metricsSnapshot = new MetricsSnapshot();
-  private final MonitoredTask status;
-
-  private RestoreSnapshotHelper.RestoreMetaChanges metaChanges;
-
-  private volatile boolean stopped = false;
-
-  public CloneSnapshotHandler(final MasterServices masterServices,
-      final SnapshotDescription snapshot, final HTableDescriptor hTableDescriptor) {
-    super(masterServices, masterServices.getMasterFileSystem(), hTableDescriptor,
-      masterServices.getConfiguration(), null, masterServices);
-
-    // Snapshot information
-    this.snapshot = snapshot;
-
-    // Monitor
-    this.monitor = new ForeignExceptionDispatcher();
-    this.status = TaskMonitor.get().createStatus("Cloning snapshot '" + snapshot.getName() +
-      "' to table " + hTableDescriptor.getTableName());
-  }
-
-  @Override
-  public CloneSnapshotHandler prepare() throws NotAllMetaRegionsOnlineException,
-      TableExistsException, IOException {
-    return (CloneSnapshotHandler) super.prepare();
-  }
-
-  /**
-   * Create the on-disk regions, using the tableRootDir provided by the CreateTableHandler.
-   * The cloned table will be created in a temp directory, and then the CreateTableHandler
-   * will be responsible to add the regions returned by this method to hbase:meta and do the assignment.
-   */
-  @Override
-  protected List<HRegionInfo> handleCreateHdfsRegions(final Path tableRootDir,
-      final TableName tableName) throws IOException {
-    status.setStatus("Creating regions for table: " + tableName);
-    FileSystem fs = fileSystemManager.getFileSystem();
-    Path rootDir = fileSystemManager.getRootDir();
-
-    try {
-      // 1. Execute the on-disk Clone
-      Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshot, rootDir);
-      SnapshotManifest manifest = SnapshotManifest.open(conf, fs, snapshotDir, snapshot);
-      RestoreSnapshotHelper restoreHelper = new RestoreSnapshotHelper(conf, fs,
-        manifest, hTableDescriptor, tableRootDir, monitor, status);
-      metaChanges = restoreHelper.restoreHdfsRegions();
-
-      // Clone operation should not have stuff to restore or remove
-      Preconditions.checkArgument(!metaChanges.hasRegionsToRestore(),
-        "A clone should not have regions to restore");
-      Preconditions.checkArgument(!metaChanges.hasRegionsToRemove(),
-        "A clone should not have regions to remove");
-
-      // At this point the clone is complete. Next step is enabling the table.
-      String msg = "Clone snapshot="+ snapshot.getName() +" on table=" + tableName + " completed!";
-      LOG.info(msg);
-      status.setStatus(msg + " Waiting for table to be enabled...");
-
-      // 2. let the CreateTableHandler add the regions to meta
-      return metaChanges.getRegionsToAdd();
-    } catch (Exception e) {
-      String msg = "clone snapshot=" + ClientSnapshotDescriptionUtils.toString(snapshot) +
-        " failed because " + e.getMessage();
-      LOG.error(msg, e);
-      IOException rse = new RestoreSnapshotException(msg, e, snapshot);
-
-      // these handlers aren't futures so we need to register the error here.
-      this.monitor.receive(new ForeignException(NAME, rse));
-      throw rse;
-    }
-  }
-
-  @Override
-  protected void addRegionsToMeta(final List<HRegionInfo> regionInfos,
-      int regionReplication)
-      throws IOException {
-    super.addRegionsToMeta(regionInfos, regionReplication);
-    metaChanges.updateMetaParentRegions(this.server.getConnection(), regionInfos);
-  }
-
-  @Override
-  protected void completed(final Throwable exception) {
-    this.stopped = true;
-    if (exception != null) {
-      status.abort("Snapshot '" + snapshot.getName() + "' clone failed because " +
-        exception.getMessage());
-    } else {
-      status.markComplete("Snapshot '"+ snapshot.getName() +"' clone completed and table enabled!");
-    }
-    metricsSnapshot.addSnapshotClone(status.getCompletionTimestamp() - status.getStartTime());
-    super.completed(exception);
-  }
-
-  @Override
-  public boolean isFinished() {
-    return this.stopped;
-  }
-
-  @Override
-  public long getCompletionTimestamp() {
-    return this.status.getCompletionTimestamp();
-  }
-
-  @Override
-  public SnapshotDescription getSnapshot() {
-    return snapshot;
-  }
-
-  @Override
-  public void cancel(String why) {
-    if (this.stopped) return;
-    this.stopped = true;
-    String msg = "Stopping clone snapshot=" + snapshot + " because: " + why;
-    LOG.info(msg);
-    status.abort(msg);
-    this.monitor.receive(new ForeignException(NAME, new CancellationException(why)));
-  }
-
-  @Override
-  public ForeignException getExceptionIfFailed() {
-    return this.monitor.getException();
-  }
-
-  @Override
-  public void rethrowExceptionIfFailed() throws ForeignException {
-    monitor.rethrowException();
-  }
-}
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/RestoreSnapshotHandler.java hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/RestoreSnapshotHandler.java
deleted file mode 100644
index 56faf76..0000000
--- hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/RestoreSnapshotHandler.java
+++ /dev/null
@@ -1,245 +0,0 @@
-/**
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.master.snapshot;
-
-import java.io.IOException;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.concurrent.CancellationException;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.MetaTableAccessor;
-import org.apache.hadoop.hbase.client.Connection;
-import org.apache.hadoop.hbase.errorhandling.ForeignException;
-import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher;
-import org.apache.hadoop.hbase.executor.EventType;
-import org.apache.hadoop.hbase.master.AssignmentManager;
-import org.apache.hadoop.hbase.master.MasterFileSystem;
-import org.apache.hadoop.hbase.master.MasterServices;
-import org.apache.hadoop.hbase.master.MetricsSnapshot;
-import org.apache.hadoop.hbase.master.RegionStates;
-import org.apache.hadoop.hbase.master.SnapshotSentinel;
-import org.apache.hadoop.hbase.master.handler.TableEventHandler;
-import org.apache.hadoop.hbase.monitoring.MonitoredTask;
-import org.apache.hadoop.hbase.monitoring.TaskMonitor;
-import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
-import org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils;
-import org.apache.hadoop.hbase.snapshot.RestoreSnapshotException;
-import org.apache.hadoop.hbase.snapshot.RestoreSnapshotHelper;
-import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
-import org.apache.hadoop.hbase.snapshot.SnapshotManifest;
-
-/**
- * Handler to Restore a snapshot.
- *
- * <p>Uses {@link RestoreSnapshotHelper} to replace the table content with the
- * data available in the snapshot.
- */
-@InterfaceAudience.Private
-public class RestoreSnapshotHandler extends TableEventHandler implements SnapshotSentinel {
-  private static final Log LOG = LogFactory.getLog(RestoreSnapshotHandler.class);
-
-  private final HTableDescriptor hTableDescriptor;
-  private final SnapshotDescription snapshot;
-
-  private final ForeignExceptionDispatcher monitor;
-  private final MetricsSnapshot metricsSnapshot = new MetricsSnapshot();
-  private final MonitoredTask status;
-
-  private volatile boolean stopped = false;
-
-  public RestoreSnapshotHandler(final MasterServices masterServices,
-      final SnapshotDescription snapshot, final HTableDescriptor htd) throws IOException {
-    super(EventType.C_M_RESTORE_SNAPSHOT, htd.getTableName(), masterServices, masterServices);
-
-    // Snapshot information
-    this.snapshot = snapshot;
-
-    // Monitor
-    this.monitor = new ForeignExceptionDispatcher();
-
-    // Check table exists.
-    getTableDescriptor();
-
-    // This is the new schema we are going to write out as this modification.
-    this.hTableDescriptor = htd;
-
-    this.status = TaskMonitor.get().createStatus(
-      "Restoring snapshot '" + snapshot.getName() + "' to table "
-        + hTableDescriptor.getTableName());
-  }
-
-  @Override
-  public RestoreSnapshotHandler prepare() throws IOException {
-    return (RestoreSnapshotHandler) super.prepare();
-  }
-
-  /**
-   * The restore table is executed in place.
-   *  - The on-disk data will be restored - reference files are put in place without moving data
-   *  -  [if something fail here: you need to delete the table and re-run the restore]
-   *  - hbase:meta will be updated
-   *  -  [if something fail here: you need to run hbck to fix hbase:meta entries]
-   * The passed in list gets changed in this method
-   */
-  @Override
-  protected void handleTableOperation(List<HRegionInfo> hris) throws IOException {
-    MasterFileSystem fileSystemManager = masterServices.getMasterFileSystem();
-    Connection conn = masterServices.getConnection();
-    FileSystem fs = fileSystemManager.getFileSystem();
-    Path rootDir = fileSystemManager.getRootDir();
-    TableName tableName = hTableDescriptor.getTableName();
-
-    try {
-      // 1. Update descriptor
-      this.masterServices.getTableDescriptors().add(hTableDescriptor);
-
-      // 2. Execute the on-disk Restore
-      LOG.debug("Starting restore snapshot=" + ClientSnapshotDescriptionUtils.toString(snapshot));
-      Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshot, rootDir);
-      SnapshotManifest manifest = SnapshotManifest.open(masterServices.getConfiguration(), fs,
-        snapshotDir, snapshot);
-      RestoreSnapshotHelper restoreHelper = new RestoreSnapshotHelper(
-        masterServices.getConfiguration(), fs, manifest,
-        this.hTableDescriptor, rootDir, monitor, status);
-      RestoreSnapshotHelper.RestoreMetaChanges metaChanges = restoreHelper.restoreHdfsRegions();
-
-      // 3. Forces all the RegionStates to be offline
-      //
-      // The AssignmentManager keeps all the region states around
-      // with no possibility to remove them, until the master is restarted.
-      // This means that a region marked as SPLIT before the restore will never be assigned again.
-      // To avoid having all states around all the regions are switched to the OFFLINE state,
-      // which is the same state that the regions will be after a delete table.
-      forceRegionsOffline(metaChanges);
-
-      // 4. Applies changes to hbase:meta
-      status.setStatus("Preparing to restore each region");
-
-      // 4.1 Removes the current set of regions from META
-      //
-      // By removing also the regions to restore (the ones present both in the snapshot
-      // and in the current state) we ensure that no extra fields are present in META
-      // e.g. with a simple add addRegionToMeta() the splitA and splitB attributes
-      // not overwritten/removed, so you end up with old informations
-      // that are not correct after the restore.
-      List<HRegionInfo> hrisToRemove = new LinkedList<HRegionInfo>();
-      if (metaChanges.hasRegionsToRemove()) hrisToRemove.addAll(metaChanges.getRegionsToRemove());
-      MetaTableAccessor.deleteRegions(conn, hrisToRemove);
-
-      // 4.2 Add the new set of regions to META
-      //
-      // At this point the old regions are no longer present in META.
-      // and the set of regions present in the snapshot will be written to META.
-      // All the information in hbase:meta are coming from the .regioninfo of each region present
-      // in the snapshot folder.
-      hris.clear();
-      if (metaChanges.hasRegionsToAdd()) hris.addAll(metaChanges.getRegionsToAdd());
-      MetaTableAccessor.addRegionsToMeta(conn, hris, hTableDescriptor.getRegionReplication());
-      if (metaChanges.hasRegionsToRestore()) {
-        MetaTableAccessor.overwriteRegions(conn, metaChanges.getRegionsToRestore(),
-          hTableDescriptor.getRegionReplication());
-      }
-      metaChanges.updateMetaParentRegions(this.server.getConnection(), hris);
-
-      // At this point the restore is complete. Next step is enabling the table.
-      LOG.info("Restore snapshot=" + ClientSnapshotDescriptionUtils.toString(snapshot) +
-        " on table=" + tableName + " completed!");
-    } catch (IOException e) {
-      String msg = "restore snapshot=" + ClientSnapshotDescriptionUtils.toString(snapshot)
-          + " failed. Try re-running the restore command.";
-      LOG.error(msg, e);
-      monitor.receive(new ForeignException(masterServices.getServerName().toString(), e));
-      throw new RestoreSnapshotException(msg, e);
-    }
-  }
-
-  private void forceRegionsOffline(final RestoreSnapshotHelper.RestoreMetaChanges metaChanges) {
-    forceRegionsOffline(metaChanges.getRegionsToAdd());
-    forceRegionsOffline(metaChanges.getRegionsToRestore());
-    forceRegionsOffline(metaChanges.getRegionsToRemove());
-  }
-
-  private void forceRegionsOffline(final List<HRegionInfo> hris) {
-    AssignmentManager am = this.masterServices.getAssignmentManager();
-    RegionStates states = am.getRegionStates();
-    if (hris != null) {
-      for (HRegionInfo hri: hris) {
-        states.regionOffline(hri);
-      }
-    }
-  }
-
-  @Override
-  protected void completed(final Throwable exception) {
-    this.stopped = true;
-    if (exception != null) {
-      status.abort("Restore snapshot '" + snapshot.getName() + "' failed because " +
-        exception.getMessage());
-    } else {
-      status.markComplete("Restore snapshot '"+ snapshot.getName() +"'!");
-    }
-    metricsSnapshot.addSnapshotRestore(status.getCompletionTimestamp() - status.getStartTime());
-    super.completed(exception);
-  }
-
-  @Override
-  public boolean isFinished() {
-    return this.stopped;
-  }
-
-  @Override
-  public long getCompletionTimestamp() {
-    return this.status.getCompletionTimestamp();
-  }
-
-  @Override
-  public SnapshotDescription getSnapshot() {
-    return snapshot;
-  }
-
-  @Override
-  public void cancel(String why) {
-    if (this.stopped) return;
-    this.stopped = true;
-    String msg = "Stopping restore snapshot=" + ClientSnapshotDescriptionUtils.toString(snapshot)
-        + " because: " + why;
-    LOG.info(msg);
-    CancellationException ce = new CancellationException(why);
-    this.monitor.receive(new ForeignException(masterServices.getServerName().toString(), ce));
-  }
-
-  @Override
-  public ForeignException getExceptionIfFailed() {
-    return this.monitor.getException();
-  }
-
-  @Override
-  public void rethrowExceptionIfFailed() throws ForeignException {
-    monitor.rethrowException();
-  }
-}
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java
index db718ee..50a1832 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java
@@ -56,16 +56,19 @@ import org.apache.hadoop.hbase.master.MetricsMaster;
 import org.apache.hadoop.hbase.master.SnapshotSentinel;
 import org.apache.hadoop.hbase.master.cleaner.HFileCleaner;
 import org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner;
+import org.apache.hadoop.hbase.master.procedure.CloneSnapshotProcedure;
+import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
+import org.apache.hadoop.hbase.master.procedure.RestoreSnapshotProcedure;
 import org.apache.hadoop.hbase.procedure.MasterProcedureManager;
 import org.apache.hadoop.hbase.procedure.Procedure;
 import org.apache.hadoop.hbase.procedure.ProcedureCoordinator;
 import org.apache.hadoop.hbase.procedure.ProcedureCoordinatorRpcs;
 import org.apache.hadoop.hbase.procedure.ZKProcedureCoordinatorRpcs;
+import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.Type; -import org.apache.hadoop.hbase.quotas.QuotaExceededException; import org.apache.hadoop.hbase.security.AccessDeniedException; import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils; @@ -145,12 +148,14 @@ public class SnapshotManager extends MasterProcedureManager implements Stoppable private Map snapshotHandlers = new HashMap(); - // Restore Sentinels map, with table name as key. + // Restore map, with table name as key, procedure ID as value. // The map is always accessed and modified under the object lock using synchronized. - // restoreSnapshot()/cloneSnapshot() will insert an Handler in the table. - // isRestoreDone() will remove the handler requested if the operation is finished. - private Map restoreHandlers = - new HashMap(); + // restoreSnapshot()/cloneSnapshot() will insert a procedure ID in the map. + // + // TODO: just as the Apache HBase 1.x implementation, this map would not survive master + // restart/failover. This is just a stopgap implementation until implementation of taking + // snapshot using Procedure-V2. + private Map restoreTableToProcIdMap = new HashMap(); private Path rootDir; private ExecutorService executorService; @@ -426,11 +431,9 @@ public class SnapshotManager extends MasterProcedureManager implements Stoppable // make sure we aren't running a restore on the same table if (isRestoringTable(snapshotTable)) { - SnapshotSentinel handler = restoreHandlers.get(snapshotTable); throw new SnapshotCreationException("Rejected taking " + ClientSnapshotDescriptionUtils.toString(snapshot) - + " because we are already have a restore in progress on the same snapshot " - + ClientSnapshotDescriptionUtils.toString(handler.getSnapshot()), snapshot); + + " because we are already have a restore in progress on the same snapshot."); } try { @@ -647,14 +650,61 @@ public class SnapshotManager extends MasterProcedureManager implements Stoppable } /** + * Clone the specified snapshot. + * The clone will fail if the destination table has a snapshot or restore in progress. 
+ * + * @param reqSnapshot Snapshot Descriptor from request + * @param tableName table to clone + * @param snapshot Snapshot Descriptor + * @param snapshotTableDesc Table Descriptor + * @param nonceGroup unique value to prevent duplicated RPC + * @param nonce unique value to prevent duplicated RPC + * @return procId the ID of the clone snapshot procedure + * @throws IOException + */ + private long cloneSnapshot( + final SnapshotDescription reqSnapshot, + final TableName tableName, + final SnapshotDescription snapshot, + final HTableDescriptor snapshotTableDesc, + final long nonceGroup, + final long nonce) throws IOException { + MasterCoprocessorHost cpHost = master.getMasterCoprocessorHost(); + HTableDescriptor htd = new HTableDescriptor(tableName, snapshotTableDesc); + if (cpHost != null) { + cpHost.preCloneSnapshot(reqSnapshot, htd); + } + long procId; + try { + procId = cloneSnapshot(snapshot, htd, nonceGroup, nonce); + } catch (IOException e) { + LOG.error("Exception occurred while cloning the snapshot " + snapshot.getName() + + " as table " + tableName.getNameAsString(), e); + throw e; + } + LOG.info("Clone snapshot=" + snapshot.getName() + " as table=" + tableName); + + if (cpHost != null) { + cpHost.postCloneSnapshot(reqSnapshot, htd); + } + return procId; + } + + /** * Clone the specified snapshot into a new table. * The operation will fail if the destination table has a snapshot or restore in progress. * * @param snapshot Snapshot Descriptor * @param hTableDescriptor Table Descriptor of the table to create + * @param nonceGroup unique value to prevent duplicated RPC + * @param nonce unique value to prevent duplicated RPC + * @return procId the ID of the clone snapshot procedure */ - synchronized void cloneSnapshot(final SnapshotDescription snapshot, - final HTableDescriptor hTableDescriptor) throws HBaseSnapshotException { + synchronized long cloneSnapshot( + final SnapshotDescription snapshot, + final HTableDescriptor hTableDescriptor, + final long nonceGroup, + final long nonce) throws HBaseSnapshotException { TableName tableName = hTableDescriptor.getTableName(); // make sure we aren't running a snapshot on the same table @@ -668,27 +718,34 @@ public class SnapshotManager extends MasterProcedureManager implements Stoppable } try { - CloneSnapshotHandler handler = - new CloneSnapshotHandler(master, snapshot, hTableDescriptor).prepare(); - this.executorService.submit(handler); - this.restoreHandlers.put(tableName, handler); + long procId = master.getMasterProcedureExecutor().submitProcedure( + new CloneSnapshotProcedure( + master.getMasterProcedureExecutor().getEnvironment(), hTableDescriptor, snapshot), + nonceGroup, + nonce); + this.restoreTableToProcIdMap.put(tableName, procId); + return procId; } catch (Exception e) { - String msg = "Couldn't clone the snapshot=" + ClientSnapshotDescriptionUtils.toString(snapshot) + - " on table=" + tableName; + String msg = "Couldn't clone the snapshot=" + + ClientSnapshotDescriptionUtils.toString(snapshot) + " on table=" + tableName; LOG.error(msg, e); throw new RestoreSnapshotException(msg, e); } } /** - * Restore the specified snapshot + * Restore or Clone the specified snapshot * @param reqSnapshot + * @param nonceGroup unique value to prevent duplicated RPC + * @param nonce unique value to prevent duplicated RPC * @throws IOException */ - public void restoreSnapshot(SnapshotDescription reqSnapshot) throws IOException { + public long restoreOrCloneSnapshot( + SnapshotDescription reqSnapshot, + final long nonceGroup, + final long nonce) 
throws IOException { FileSystem fs = master.getMasterFileSystem().getFileSystem(); Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(reqSnapshot, rootDir); - MasterCoprocessorHost cpHost = master.getMasterCoprocessorHost(); // check if the snapshot exists if (!fs.exists(snapshotDir)) { @@ -712,109 +769,66 @@ public class SnapshotManager extends MasterProcedureManager implements Stoppable SnapshotReferenceUtil.verifySnapshot(master.getConfiguration(), fs, manifest); // Execute the restore/clone operation + long procId; if (MetaTableAccessor.tableExists(master.getConnection(), tableName)) { - if (master.getTableStateManager().isTableState( - TableName.valueOf(snapshot.getTable()), TableState.State.ENABLED)) { - throw new UnsupportedOperationException("Table '" + - TableName.valueOf(snapshot.getTable()) + "' must be disabled in order to " + - "perform a restore operation" + - "."); - } - - // call coproc pre hook - if (cpHost != null) { - cpHost.preRestoreSnapshot(reqSnapshot, snapshotTableDesc); - } - - int tableRegionCount = -1; - try { - // Table already exist. Check and update the region quota for this table namespace. - // The region quota may not be updated correctly if there are concurrent restore snapshot - // requests for the same table - - tableRegionCount = getRegionCountOfTable(tableName); - int snapshotRegionCount = manifest.getRegionManifestsMap().size(); - - // Update region quota when snapshotRegionCount is larger. If we updated the region count - // to a smaller value before retoreSnapshot and the retoreSnapshot fails, we may fail to - // reset the region count to its original value if the region quota is consumed by other - // tables in the namespace - if (tableRegionCount > 0 && tableRegionCount < snapshotRegionCount) { - checkAndUpdateNamespaceRegionQuota(snapshotRegionCount, tableName); - } - restoreSnapshot(snapshot, snapshotTableDesc); - // Update the region quota if snapshotRegionCount is smaller. This step should not fail - // because we have reserved enough region quota before hand - if (tableRegionCount > 0 && tableRegionCount > snapshotRegionCount) { - checkAndUpdateNamespaceRegionQuota(snapshotRegionCount, tableName); - } - } catch (QuotaExceededException e) { - LOG.error("Region quota exceeded while restoring the snapshot " + snapshot.getName() - + " as table " + tableName.getNameAsString(), e); - // If QEE is thrown before restoreSnapshot, quota information is not updated, so we - // should throw the exception directly. 
If QEE is thrown after restoreSnapshot, there - // must be unexpected reasons, we also throw the exception directly - throw e; - } catch (IOException e) { - if (tableRegionCount > 0) { - // reset the region count for table - checkAndUpdateNamespaceRegionQuota(tableRegionCount, tableName); - } - LOG.error("Exception occurred while restoring the snapshot " + snapshot.getName() - + " as table " + tableName.getNameAsString(), e); - throw e; - } - LOG.info("Restore snapshot=" + snapshot.getName() + " as table=" + tableName); - - if (cpHost != null) { - cpHost.postRestoreSnapshot(reqSnapshot, snapshotTableDesc); - } + procId = restoreSnapshot( + reqSnapshot, tableName, snapshot, snapshotTableDesc, nonceGroup, nonce); } else { - HTableDescriptor htd = new HTableDescriptor(tableName, snapshotTableDesc); - if (cpHost != null) { - cpHost.preCloneSnapshot(reqSnapshot, htd); - } - try { - checkAndUpdateNamespaceQuota(manifest, tableName); - cloneSnapshot(snapshot, htd); - } catch (IOException e) { - this.master.getMasterQuotaManager().removeTableFromNamespaceQuota(tableName); - LOG.error("Exception occurred while cloning the snapshot " + snapshot.getName() - + " as table " + tableName.getNameAsString(), e); - throw e; - } - LOG.info("Clone snapshot=" + snapshot.getName() + " as table=" + tableName); - - if (cpHost != null) { - cpHost.postCloneSnapshot(reqSnapshot, htd); - } + procId = cloneSnapshot( + reqSnapshot, tableName, snapshot, snapshotTableDesc, nonceGroup, nonce); } + return procId; } - private void checkAndUpdateNamespaceQuota(SnapshotManifest manifest, TableName tableName) - throws IOException { - if (this.master.getMasterQuotaManager().isQuotaEnabled()) { - this.master.getMasterQuotaManager().checkNamespaceTableAndRegionQuota(tableName, - manifest.getRegionManifestsMap().size()); + /** + * Restore the specified snapshot. + * The restore will fail if the destination table has a snapshot or restore in progress. 
+ * + * @param reqSnapshot Snapshot Descriptor from request + * @param tableName table to restore + * @param snapshot Snapshot Descriptor + * @param snapshotTableDesc Table Descriptor + * @param nonceGroup unique value to prevent duplicated RPC + * @param nonce unique value to prevent duplicated RPC + * @return procId the ID of the restore snapshot procedure + * @throws IOException + */ + private long restoreSnapshot( + final SnapshotDescription reqSnapshot, + final TableName tableName, + final SnapshotDescription snapshot, + final HTableDescriptor snapshotTableDesc, + final long nonceGroup, + final long nonce) throws IOException { + MasterCoprocessorHost cpHost = master.getMasterCoprocessorHost(); + + if (master.getTableStateManager().isTableState( + TableName.valueOf(snapshot.getTable()), TableState.State.ENABLED)) { + throw new UnsupportedOperationException("Table '" + + TableName.valueOf(snapshot.getTable()) + "' must be disabled in order to " + + "perform a restore operation."); } - } - private void checkAndUpdateNamespaceRegionQuota(int updatedRegionCount, TableName tableName) - throws IOException { - if (this.master.getMasterQuotaManager().isQuotaEnabled()) { - this.master.getMasterQuotaManager().checkAndUpdateNamespaceRegionQuota(tableName, - updatedRegionCount); + // call Coprocessor pre hook + if (cpHost != null) { + cpHost.preRestoreSnapshot(reqSnapshot, snapshotTableDesc); } - } - /** - * @return cached region count, or -1 if quota manager is disabled or table status not found - */ - private int getRegionCountOfTable(TableName tableName) throws IOException { - if (this.master.getMasterQuotaManager().isQuotaEnabled()) { - return this.master.getMasterQuotaManager().getRegionCountOfTable(tableName); + long procId; + try { + procId = restoreSnapshot(snapshot, snapshotTableDesc, nonceGroup, nonce); + } catch (IOException e) { + LOG.error("Exception occurred while restoring the snapshot " + snapshot.getName() + + " as table " + tableName.getNameAsString(), e); + throw e; + } + LOG.info("Restore snapshot=" + snapshot.getName() + " as table=" + tableName); + + if (cpHost != null) { + cpHost.postRestoreSnapshot(reqSnapshot, snapshotTableDesc); } - return -1; + + return procId; } /** @@ -823,9 +837,15 @@ public class SnapshotManager extends MasterProcedureManager implements Stoppable * * @param snapshot Snapshot Descriptor * @param hTableDescriptor Table Descriptor + * @param nonceGroup unique value to prevent duplicated RPC + * @param nonce unique value to prevent duplicated RPC + * @return procId the ID of the restore snapshot procedure */ - private synchronized void restoreSnapshot(final SnapshotDescription snapshot, - final HTableDescriptor hTableDescriptor) throws HBaseSnapshotException { + private synchronized long restoreSnapshot( + final SnapshotDescription snapshot, + final HTableDescriptor hTableDescriptor, + final long nonceGroup, + final long nonce) throws HBaseSnapshotException { TableName tableName = hTableDescriptor.getTableName(); // make sure we aren't running a snapshot on the same table @@ -839,10 +859,13 @@ public class SnapshotManager extends MasterProcedureManager implements Stoppable } try { - RestoreSnapshotHandler handler = - new RestoreSnapshotHandler(master, snapshot, hTableDescriptor).prepare(); - this.executorService.submit(handler); - restoreHandlers.put(tableName, handler); + long procId = master.getMasterProcedureExecutor().submitProcedure( + new RestoreSnapshotProcedure( + master.getMasterProcedureExecutor().getEnvironment(), hTableDescriptor, snapshot), 
+ nonceGroup, + nonce); + this.restoreTableToProcIdMap.put(tableName, procId); + return procId; } catch (Exception e) { String msg = "Couldn't restore the snapshot=" + ClientSnapshotDescriptionUtils.toString( snapshot) + @@ -859,50 +882,18 @@ public class SnapshotManager extends MasterProcedureManager implements Stoppable * @return true if there is a restore in progress of the specified table. */ private synchronized boolean isRestoringTable(final TableName tableName) { - SnapshotSentinel sentinel = this.restoreHandlers.get(tableName); - return(sentinel != null && !sentinel.isFinished()); - } - - /** - * Returns the status of a restore operation. - * If the in-progress restore is failed throws the exception that caused the failure. - * - * @param snapshot - * @return false if in progress, true if restore is completed or not requested. - * @throws IOException if there was a failure during the restore - */ - public boolean isRestoreDone(final SnapshotDescription snapshot) throws IOException { - // check to see if the sentinel exists, - // and if the task is complete removes it from the in-progress restore map. - SnapshotSentinel sentinel = removeSentinelIfFinished(this.restoreHandlers, snapshot); - - // stop tracking "abandoned" handlers - cleanupSentinels(); - - if (sentinel == null) { - // there is no sentinel so restore is not in progress. - return true; + Long procId = this.restoreTableToProcIdMap.get(tableName); + if (procId == null) { + return false; } - - LOG.debug("Verify snapshot=" + snapshot.getName() + " against=" - + sentinel.getSnapshot().getName() + " table=" + - TableName.valueOf(snapshot.getTable())); - - // If the restore is failed, rethrow the exception - sentinel.rethrowExceptionIfFailed(); - - // check to see if we are done - if (sentinel.isFinished()) { - LOG.debug("Restore snapshot=" + ClientSnapshotDescriptionUtils.toString(snapshot) + - " has completed. 
Notifying the client."); + ProcedureExecutor procExec = master.getMasterProcedureExecutor(); + if (procExec.isRunning() && !procExec.isFinished(procId)) { return true; + } else { + this.restoreTableToProcIdMap.remove(tableName); + return false; } - if (LOG.isDebugEnabled()) { - LOG.debug("Sentinel is not yet finished with restoring snapshot=" + - ClientSnapshotDescriptionUtils.toString(snapshot)); - } - return false; } /** @@ -947,7 +938,7 @@ public class SnapshotManager extends MasterProcedureManager implements Stoppable */ private void cleanupSentinels() { cleanupSentinels(this.snapshotHandlers); - cleanupSentinels(this.restoreHandlers); + cleanupCompletedRestoreInMap(); } /** @@ -970,6 +961,21 @@ public class SnapshotManager extends MasterProcedureManager implements Stoppable } } + /** + * Remove the procedures that are marked as finished + */ + private synchronized void cleanupCompletedRestoreInMap() { + ProcedureExecutor procExec = master.getMasterProcedureExecutor(); + Iterator> it = restoreTableToProcIdMap.entrySet().iterator(); + while (it.hasNext()) { + Map.Entry entry = it.next(); + Long procId = entry.getValue(); + if (procExec.isRunning() && procExec.isFinished(procId)) { + it.remove(); + } + } + } + // // Implementing Stoppable interface // @@ -985,10 +991,6 @@ public class SnapshotManager extends MasterProcedureManager implements Stoppable snapshotHandler.cancel(why); } - // pass the stop onto all the restore handlers - for (SnapshotSentinel restoreHandler: this.restoreHandlers.values()) { - restoreHandler.cancel(why); - } try { if (coordinator != null) { coordinator.close(); diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotHelper.java hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotHelper.java index 0f14f70..a8542de 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotHelper.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotHelper.java @@ -175,7 +175,7 @@ public class RestoreSnapshotHelper { } private RestoreMetaChanges restoreHdfsRegions(final ThreadPoolExecutor exec) throws IOException { - LOG.debug("starting restore"); + LOG.info("starting restore table regions using snapshot=" + snapshotDesc); Map regionManifests = snapshotManifest.getRegionManifestsMap(); if (regionManifests == null) { @@ -251,6 +251,8 @@ public class RestoreSnapshotHelper { status.setStatus("Finished cloning regions."); } + LOG.info("finishing restore table regions using snapshot=" + snapshotDesc); + return metaChanges; } @@ -265,7 +267,7 @@ public class RestoreSnapshotHelper { private List regionsToRemove = null; private List regionsToAdd = null; - RestoreMetaChanges(HTableDescriptor htd, Map > parentsMap) { + public RestoreMetaChanges(HTableDescriptor htd, Map > parentsMap) { this.parentsMap = parentsMap; this.htd = htd; } @@ -275,6 +277,14 @@ public class RestoreSnapshotHelper { } /** + * Returns the map of parent-children_pair. 
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotHelper.java hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotHelper.java
index 0f14f70..a8542de 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotHelper.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotHelper.java
@@ -175,7 +175,7 @@ public class RestoreSnapshotHelper {
   }
 
   private RestoreMetaChanges restoreHdfsRegions(final ThreadPoolExecutor exec) throws IOException {
-    LOG.debug("starting restore");
+    LOG.info("starting restore table regions using snapshot=" + snapshotDesc);
 
     Map<String, SnapshotRegionManifest> regionManifests = snapshotManifest.getRegionManifestsMap();
     if (regionManifests == null) {
@@ -251,6 +251,8 @@ public class RestoreSnapshotHelper {
       status.setStatus("Finished cloning regions.");
     }
 
+    LOG.info("finishing restore table regions using snapshot=" + snapshotDesc);
+
     return metaChanges;
   }
 
@@ -265,7 +267,7 @@ public class RestoreSnapshotHelper {
     private List<HRegionInfo> regionsToRemove = null;
     private List<HRegionInfo> regionsToAdd = null;
 
-    RestoreMetaChanges(HTableDescriptor htd, Map<String, Pair<String, String> > parentsMap) {
+    public RestoreMetaChanges(HTableDescriptor htd, Map<String, Pair<String, String> > parentsMap) {
       this.parentsMap = parentsMap;
       this.htd = htd;
     }
@@ -275,6 +277,14 @@ public class RestoreSnapshotHelper {
     }
 
     /**
+     * Returns the map of parent region names to the pair of children regions.
+     * @return the map
+     */
+    public Map<String, Pair<String, String>> getParentToChildrenPairMap() {
+      return this.parentsMap;
+    }
+
+    /**
      * @return true if there're new regions
      */
     public boolean hasRegionsToAdd() {
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotCloneIndependence.java hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotCloneIndependence.java
index 0a3093b..002e04e 100644
--- hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotCloneIndependence.java
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotCloneIndependence.java
@@ -44,7 +44,6 @@ import org.junit.Before;
 import org.junit.BeforeClass;
 import org.junit.Ignore;
 import org.junit.Test;
-import org.junit.Ignore;
 import org.junit.experimental.categories.Category;
 
 /**
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureTestingUtility.java hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureTestingUtility.java
index 9731aa4..430a017 100644
--- hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureTestingUtility.java
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureTestingUtility.java
@@ -129,8 +129,8 @@ public class MasterProcedureTestingUtility {
     assertEquals(family.length, htd.getFamilies().size());
   }
 
-  public static void validateTableDeletion(final HMaster master, final TableName tableName,
-      final HRegionInfo[] regions, String... family) throws IOException {
+  public static void validateTableDeletion(
+      final HMaster master, final TableName tableName) throws IOException {
     // check filesystem
     final FileSystem fs = master.getMasterFileSystem().getFileSystem();
     final Path tableDir = FSUtils.getTableDir(master.getMasterFileSystem().getRootDir(), tableName);
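
RestoreMetaChanges now exposes the parent-to-daughters mapping computed while restoring regions, presumably so the new restore procedure can fix up split parents in meta. A hedged sketch of consuming the map (the class name and the printing are illustrative, not part of this patch):

import java.util.Map;
import org.apache.hadoop.hbase.util.Pair;

public class ParentsMapExample {
  // parentsMap: parent region name -> pair of daughter region names, as
  // returned by RestoreMetaChanges#getParentToChildrenPairMap().
  static void dump(Map<String, Pair<String, String>> parentsMap) {
    for (Map.Entry<String, Pair<String, String>> entry : parentsMap.entrySet()) {
      System.out.println("split parent " + entry.getKey() + " -> daughters "
          + entry.getValue().getFirst() + ", " + entry.getValue().getSecond());
    }
  }
}
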
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCloneSnapshotProcedure.java hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCloneSnapshotProcedure.java
new file mode 100644
index 0000000..aeafbf8
--- /dev/null
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCloneSnapshotProcedure.java
@@ -0,0 +1,239 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.master.procedure;
+
+import java.util.List;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.ProcedureInfo;
+import org.apache.hadoop.hbase.TableExistsException;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
+import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CloneSnapshotState;
+import org.apache.hadoop.hbase.snapshot.SnapshotTestingUtils;
+import org.apache.hadoop.hbase.testclassification.MasterTests;
+import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import static org.junit.Assert.assertTrue;
+
+@Category({MasterTests.class, MediumTests.class})
+public class TestCloneSnapshotProcedure {
+  private static final Log LOG = LogFactory.getLog(TestCloneSnapshotProcedure.class);
+
+  protected static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
+
+  protected final byte[] CF = Bytes.toBytes("cf1");
+
+  private static long nonceGroup = HConstants.NO_NONCE;
+  private static long nonce = HConstants.NO_NONCE;
+
+  private static SnapshotDescription snapshot = null;
+
+  private static void setupConf(Configuration conf) {
+    conf.setInt(MasterProcedureConstants.MASTER_PROCEDURE_THREADS, 1);
+  }
+
+  @BeforeClass
+  public static void setupCluster() throws Exception {
+    setupConf(UTIL.getConfiguration());
+    UTIL.startMiniCluster(1);
+  }
+
+  @AfterClass
+  public static void cleanupTest() throws Exception {
+    try {
+      UTIL.shutdownMiniCluster();
+    } catch (Exception e) {
+      LOG.warn("failure shutting down cluster", e);
+    }
+  }
+
+  @Before
+  public void setup() throws Exception {
+    resetProcExecutorTestingKillFlag();
+    nonceGroup =
+        MasterProcedureTestingUtility.generateNonceGroup(UTIL.getHBaseCluster().getMaster());
+    nonce = MasterProcedureTestingUtility.generateNonce(UTIL.getHBaseCluster().getMaster());
+  }
+
+  @After
+  public void tearDown() throws Exception {
+    resetProcExecutorTestingKillFlag();
+  }
+
+  private void resetProcExecutorTestingKillFlag() {
+    final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
+    ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, false);
+    assertTrue("expected executor to be running", procExec.isRunning());
+  }
+
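+  // Lazily creates, loads and snapshots the source table exactly once; every
+  // test case in this class reuses the resulting SnapshotDescription.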
+  private SnapshotDescription getSnapshot() throws Exception {
+    if (snapshot == null) {
+      final TableName snapshotTableName = TableName.valueOf("testCloneSnapshot");
+      long tid = System.currentTimeMillis();
+      final byte[] snapshotName = Bytes.toBytes("snapshot-" + tid);
+
+      Admin admin = UTIL.getHBaseAdmin();
+      // create Table
+      SnapshotTestingUtils.createTable(UTIL, snapshotTableName, getNumReplicas(), CF);
+      // Load data
+      SnapshotTestingUtils.loadData(UTIL, snapshotTableName, 500, CF);
+      admin.disableTable(snapshotTableName);
+      // take a snapshot
+      admin.snapshot(snapshotName, snapshotTableName);
+      admin.enableTable(snapshotTableName);
+
+      List<SnapshotDescription> snapshotList = admin.listSnapshots();
+      snapshot = snapshotList.get(0);
+    }
+    return snapshot;
+  }
+
+  private int getNumReplicas() {
+    return 1;
+  }
+
+  public static HTableDescriptor createHTableDescriptor(
+      final TableName tableName, final byte[] ... family) {
+    HTableDescriptor htd = new HTableDescriptor(tableName);
+    for (int i = 0; i < family.length; ++i) {
+      htd.addFamily(new HColumnDescriptor(family[i]));
+    }
+    return htd;
+  }
+
+  @Test(timeout=60000)
+  public void testCloneSnapshot() throws Exception {
+    final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
+    final TableName clonedTableName = TableName.valueOf("testCloneSnapshot2");
+    final HTableDescriptor htd = createHTableDescriptor(clonedTableName, CF);
+
+    long procId = ProcedureTestingUtility.submitAndWait(
+      procExec, new CloneSnapshotProcedure(procExec.getEnvironment(), htd, getSnapshot()));
+    ProcedureTestingUtility.assertProcNotFailed(procExec.getResult(procId));
+    MasterProcedureTestingUtility.validateTableIsEnabled(
+      UTIL.getHBaseCluster().getMaster(),
+      clonedTableName);
+  }
+
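+  // Submitting the same procedure twice with the same (nonceGroup, nonce) must
+  // collapse into a single execution: both waits succeed and the procIds match.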
+  @Test(timeout = 60000)
+  public void testCloneSnapshotTwiceWithSameNonce() throws Exception {
+    final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
+    final TableName clonedTableName = TableName.valueOf("testCloneSnapshotTwiceWithSameNonce");
+    final HTableDescriptor htd = createHTableDescriptor(clonedTableName, CF);
+
+    long procId1 = procExec.submitProcedure(
+      new CloneSnapshotProcedure(procExec.getEnvironment(), htd, getSnapshot()), nonceGroup, nonce);
+    long procId2 = procExec.submitProcedure(
+      new CloneSnapshotProcedure(procExec.getEnvironment(), htd, getSnapshot()), nonceGroup, nonce);
+
+    // Wait for the completion
+    ProcedureTestingUtility.waitProcedure(procExec, procId1);
+    ProcedureTestingUtility.assertProcNotFailed(procExec, procId1);
+    // The second proc should succeed too - because it is the same proc.
+    ProcedureTestingUtility.waitProcedure(procExec, procId2);
+    ProcedureTestingUtility.assertProcNotFailed(procExec, procId2);
+    assertTrue(procId1 == procId2);
+  }
+
+  @Test(timeout=60000)
+  public void testCloneSnapshotToSameTable() throws Exception {
+    final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
+    final TableName clonedTableName = TableName.valueOf(getSnapshot().getTable());
+    final HTableDescriptor htd = createHTableDescriptor(clonedTableName, CF);
+
+    long procId = ProcedureTestingUtility.submitAndWait(
+      procExec, new CloneSnapshotProcedure(procExec.getEnvironment(), htd, getSnapshot()));
+    ProcedureInfo result = procExec.getResult(procId);
+    assertTrue(result.isFailed());
+    LOG.debug("Clone snapshot failed with exception: " + result.getExceptionFullMessage());
+    assertTrue(
+      ProcedureTestingUtility.getExceptionCause(result) instanceof TableExistsException);
+  }
+
+  @Test(timeout=60000)
+  public void testRecoveryAndDoubleExecution() throws Exception {
+    final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
+    final TableName clonedTableName = TableName.valueOf("testRecoveryAndDoubleExecution");
+    final HTableDescriptor htd = createHTableDescriptor(clonedTableName, CF);
+
+    ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true);
+
+    // Start the Clone snapshot procedure && kill the executor
+    long procId = procExec.submitProcedure(
+      new CloneSnapshotProcedure(procExec.getEnvironment(), htd, getSnapshot()), nonceGroup, nonce);
+
+    // Restart the executor and execute the step twice
+    int numberOfSteps = CloneSnapshotState.values().length;
+    MasterProcedureTestingUtility.testRecoveryAndDoubleExecution(
+      procExec,
+      procId,
+      numberOfSteps,
+      CloneSnapshotState.values());
+
+    MasterProcedureTestingUtility.validateTableIsEnabled(
+      UTIL.getHBaseCluster().getMaster(),
+      clonedTableName);
+  }
+
+  @Test(timeout = 60000)
+  public void testRollbackAndDoubleExecution() throws Exception {
+    final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
+    final TableName clonedTableName = TableName.valueOf("testRollbackAndDoubleExecution");
+    final HTableDescriptor htd = createHTableDescriptor(clonedTableName, CF);
+
+    ProcedureTestingUtility.waitNoProcedureRunning(procExec);
+    ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true);
+
+    // Start the Clone snapshot procedure && kill the executor
+    long procId = procExec.submitProcedure(
+      new CloneSnapshotProcedure(procExec.getEnvironment(), htd, getSnapshot()), nonceGroup, nonce);
+
+    int numberOfSteps = CloneSnapshotState.values().length - 2; // failing in the middle of proc
+    MasterProcedureTestingUtility.testRollbackAndDoubleExecution(
+      procExec,
+      procId,
+      numberOfSteps,
+      CloneSnapshotState.values());
+
+    MasterProcedureTestingUtility.validateTableDeletion(
+      UTIL.getHBaseCluster().getMaster(), clonedTableName);
+  }
+
+  private ProcedureExecutor<MasterProcedureEnv> getMasterProcedureExecutor() {
+    return UTIL.getHBaseCluster().getMaster().getMasterProcedureExecutor();
+  }
+}
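
The two fault-injection tests above inject a kill before every procedure-store update: a step runs, the executor "dies" before persisting, and on restart the same step runs again. A self-contained toy model of that replay behavior (plain Java, no HBase types; the step names are illustrative):

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

public class KillBeforePersistModel {
  public static void main(String[] args) {
    List<String> steps = Arrays.asList(
        "PRE_OPERATION", "WRITE_FS_LAYOUT", "ADD_TO_META", "ASSIGN_REGIONS", "POST_OPERATION");
    List<String> executed = new ArrayList<String>();
    int persisted = 0;       // next step according to the procedure store
    boolean killNext = true; // toggled kill flag, like setKillAndToggleBeforeStoreUpdate
    while (persisted < steps.size()) {
      executed.add(steps.get(persisted)); // the step's side effects happen...
      if (killNext) {
        killNext = false; // "crash" before the store update; on restart the
        continue;         // store still points at the same step, so it reruns
      }
      persisted++;        // ...and only now does the store advance
      killNext = true;
    }
    // Every step appears twice, which is why each state must be idempotent.
    System.out.println(executed);
  }
}
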
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCreateTableProcedure.java hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCreateTableProcedure.java
index 8f6e11a..5cec469 100644
--- hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCreateTableProcedure.java
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCreateTableProcedure.java
@@ -253,7 +253,7 @@ public class TestCreateTableProcedure {
       procExec, procId, 4, CreateTableState.values());
 
     MasterProcedureTestingUtility.validateTableDeletion(
-      UTIL.getHBaseCluster().getMaster(), tableName, regions, "f1", "f2");
+      UTIL.getHBaseCluster().getMaster(), tableName);
 
     // are we able to create the table after a rollback?
     resetProcExecutorTestingKillFlag();
@@ -310,7 +310,7 @@ public class TestCreateTableProcedure {
       procExec, procId, 4, CreateTableState.values());
     TableName tableName = htd.getTableName();
     MasterProcedureTestingUtility.validateTableDeletion(
-      UTIL.getHBaseCluster().getMaster(), tableName, regions, "f1", "f2");
+      UTIL.getHBaseCluster().getMaster(), tableName);
 
     // are we able to create the table after a rollback?
     resetProcExecutorTestingKillFlag();
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestDeleteTableProcedure.java hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestDeleteTableProcedure.java
index 4f62537..7eb12cd 100644
--- hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestDeleteTableProcedure.java
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestDeleteTableProcedure.java
@@ -139,7 +139,7 @@ public class TestDeleteTableProcedure {
     // First delete should succeed
     ProcedureTestingUtility.assertProcNotFailed(procExec, procId1);
     MasterProcedureTestingUtility.validateTableDeletion(
-      UTIL.getHBaseCluster().getMaster(), tableName, regions, "f");
+      UTIL.getHBaseCluster().getMaster(), tableName);
 
     // Second delete should fail with TableNotFound
     ProcedureInfo result = procExec.getResult(procId2);
@@ -171,7 +171,7 @@ public class TestDeleteTableProcedure {
     // First delete should succeed
     ProcedureTestingUtility.assertProcNotFailed(procExec, procId1);
     MasterProcedureTestingUtility.validateTableDeletion(
-      UTIL.getHBaseCluster().getMaster(), tableName, regions, "f");
+      UTIL.getHBaseCluster().getMaster(), tableName);
 
     // Second delete should not fail, because it is the same delete
     ProcedureTestingUtility.assertProcNotFailed(procExec, procId2);
@@ -205,7 +205,7 @@ public class TestDeleteTableProcedure {
       new DeleteTableProcedure(procExec.getEnvironment(), tableName));
     ProcedureTestingUtility.assertProcNotFailed(procExec, procId);
     MasterProcedureTestingUtility.validateTableDeletion(
-      UTIL.getHBaseCluster().getMaster(), tableName, regions, "f1", "f2");
+      UTIL.getHBaseCluster().getMaster(), tableName);
   }
 
   @Test(timeout=60000)
@@ -233,7 +233,7 @@ public class TestDeleteTableProcedure {
       procExec, procId, 6, DeleteTableState.values());
 
     MasterProcedureTestingUtility.validateTableDeletion(
-      UTIL.getHBaseCluster().getMaster(), tableName, regions, "f1", "f2");
+      UTIL.getHBaseCluster().getMaster(), tableName);
   }
 
   private ProcedureExecutor<MasterProcedureEnv> getMasterProcedureExecutor() {
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterFailoverWithProcedures.java hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterFailoverWithProcedures.java
index e880cd1..6098d40 100644
--- hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterFailoverWithProcedures.java
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterFailoverWithProcedures.java
@@ -301,7 +301,7 @@ public class TestMasterFailoverWithProcedures {
     testRecoveryAndDoubleExecution(UTIL, procId, step, DeleteTableState.values());
 
     MasterProcedureTestingUtility.validateTableDeletion(
-      UTIL.getHBaseCluster().getMaster(), tableName, regions, "f1", "f2");
+      UTIL.getHBaseCluster().getMaster(), tableName);
   }
 
   // ==========================================================================
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestProcedureAdmin.java hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestProcedureAdmin.java
index 49143f1..17b6e09 100644
--- hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestProcedureAdmin.java
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestProcedureAdmin.java
@@ -142,7 +142,7 @@ public class TestProcedureAdmin {
     ProcedureTestingUtility.assertProcNotFailed(procExec, procId);
     // Validate the delete table procedure was not aborted
     MasterProcedureTestingUtility.validateTableDeletion(
-      UTIL.getHBaseCluster().getMaster(), tableName, regions, "f");
+      UTIL.getHBaseCluster().getMaster(), tableName);
   }
 
   @Test(timeout=60000)
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestRestoreSnapshotProcedure.java hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestRestoreSnapshotProcedure.java
new file mode 100644
index 0000000..44d6988
--- /dev/null
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestRestoreSnapshotProcedure.java
@@ -0,0 +1,291 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.master.procedure;
+
+import java.io.IOException;
+import java.util.List;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.ProcedureInfo;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.TableNotDisabledException;
+import org.apache.hadoop.hbase.TableNotFoundException;
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
+import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.RestoreSnapshotState;
+import org.apache.hadoop.hbase.snapshot.SnapshotTestingUtils;
+import org.apache.hadoop.hbase.testclassification.MasterTests;
+import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotEquals;
+import static org.junit.Assert.assertTrue;
+
+@Category({MasterTests.class, MediumTests.class})
+public class TestRestoreSnapshotProcedure {
+  private static final Log LOG = LogFactory.getLog(TestRestoreSnapshotProcedure.class);
+
+  protected static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
+
+  protected final TableName snapshotTableName = TableName.valueOf("testRestoreSnapshot");
+  protected final byte[] CF1 = Bytes.toBytes("cf1");
+  protected final byte[] CF2 = Bytes.toBytes("cf2");
+  protected final byte[] CF3 = Bytes.toBytes("cf3");
+  protected final byte[] CF4 = Bytes.toBytes("cf4");
+  protected final int rowCountCF1 = 10;
+  protected final int rowCountCF2 = 40;
+  protected final int rowCountCF3 = 40;
+  protected final int rowCountCF4 = 40;
+  protected final int rowCountCF1addition = 10;
+
+  private static long nonceGroup = HConstants.NO_NONCE;
+  private static long nonce = HConstants.NO_NONCE;
+
+  private SnapshotDescription snapshot = null;
+  private HTableDescriptor snapshotHTD = null;
+
+  private static void setupConf(Configuration conf) {
+    conf.setInt(MasterProcedureConstants.MASTER_PROCEDURE_THREADS, 1);
+  }
+
+  @BeforeClass
+  public static void setupCluster() throws Exception {
+    setupConf(UTIL.getConfiguration());
+    UTIL.startMiniCluster(1);
+  }
+
+  @AfterClass
+  public static void cleanupTest() throws Exception {
+    try {
+      UTIL.shutdownMiniCluster();
+    } catch (Exception e) {
+      LOG.warn("failure shutting down cluster", e);
+    }
+  }
+
+  @Before
+  public void setup() throws Exception {
+    resetProcExecutorTestingKillFlag();
+    nonceGroup =
+        MasterProcedureTestingUtility.generateNonceGroup(UTIL.getHBaseCluster().getMaster());
+    nonce = MasterProcedureTestingUtility.generateNonce(UTIL.getHBaseCluster().getMaster());
+
+    setupSnapshotAndUpdateTable();
+  }
+
+  @After
+  public void tearDown() throws Exception {
+    resetProcExecutorTestingKillFlag();
+    UTIL.deleteTable(snapshotTableName);
+    SnapshotTestingUtils.deleteAllSnapshots(UTIL.getHBaseAdmin());
+    SnapshotTestingUtils.deleteArchiveDirectory(UTIL);
+  }
+
+  private void resetProcExecutorTestingKillFlag() {
+    final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
+    ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, false);
+    assertTrue("expected executor to be running", procExec.isRunning());
+  }
+
+  private int getNumReplicas() {
+    return 1;
+  }
+
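+  // Fixture: create the table with CF1/CF2 and snapshot it, then mutate the
+  // schema (add CF3/CF4, drop CF2) and load more rows, so a successful restore
+  // must roll back both the schema and the row count.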
+  private void setupSnapshotAndUpdateTable() throws Exception {
+    long tid = System.currentTimeMillis();
+    final byte[] snapshotName = Bytes.toBytes("snapshot-" + tid);
+    Admin admin = UTIL.getHBaseAdmin();
+    // create Table
+    SnapshotTestingUtils.createTable(UTIL, snapshotTableName, getNumReplicas(), CF1, CF2);
+    // Load data
+    SnapshotTestingUtils.loadData(UTIL, snapshotTableName, rowCountCF1, CF1);
+    SnapshotTestingUtils.loadData(UTIL, snapshotTableName, rowCountCF2, CF2);
+    SnapshotTestingUtils.verifyRowCount(UTIL, snapshotTableName, rowCountCF1 + rowCountCF2);
+
+    snapshotHTD = admin.getTableDescriptor(snapshotTableName);
+
+    admin.disableTable(snapshotTableName);
+    // take a snapshot
+    admin.snapshot(snapshotName, snapshotTableName);
+
+    List<SnapshotDescription> snapshotList = admin.listSnapshots();
+    snapshot = snapshotList.get(0);
+
+    // modify the table
+    HColumnDescriptor columnFamilyDescriptor3 = new HColumnDescriptor(CF3);
+    HColumnDescriptor columnFamilyDescriptor4 = new HColumnDescriptor(CF4);
+    admin.addColumnFamily(snapshotTableName, columnFamilyDescriptor3);
+    admin.addColumnFamily(snapshotTableName, columnFamilyDescriptor4);
+    admin.deleteColumnFamily(snapshotTableName, CF2);
+    // enable table and insert data
+    admin.enableTable(snapshotTableName);
+    SnapshotTestingUtils.loadData(UTIL, snapshotTableName, rowCountCF3, CF3);
+    SnapshotTestingUtils.loadData(UTIL, snapshotTableName, rowCountCF4, CF4);
+    SnapshotTestingUtils.loadData(UTIL, snapshotTableName, rowCountCF1addition, CF1);
+    HTableDescriptor currentHTD = admin.getTableDescriptor(snapshotTableName);
+    assertTrue(currentHTD.hasFamily(CF1));
+    assertFalse(currentHTD.hasFamily(CF2));
+    assertTrue(currentHTD.hasFamily(CF3));
+    assertTrue(currentHTD.hasFamily(CF4));
+    assertNotEquals(currentHTD.getFamiliesKeys().size(), snapshotHTD.getFamiliesKeys().size());
+    SnapshotTestingUtils.verifyRowCount(
+      UTIL, snapshotTableName, rowCountCF1 + rowCountCF3 + rowCountCF4 + rowCountCF1addition);
+    admin.disableTable(snapshotTableName);
+  }
+
+  private static HTableDescriptor createHTableDescriptor(
+      final TableName tableName, final byte[] ... family) {
+    HTableDescriptor htd = new HTableDescriptor(tableName);
+    for (int i = 0; i < family.length; ++i) {
+      htd.addFamily(new HColumnDescriptor(family[i]));
+    }
+    return htd;
+  }
+
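+  // Happy path: run the RestoreSnapshotProcedure to completion and verify the
+  // table reverts to the snapshot schema and row count.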
+  @Test(timeout=600000)
+  public void testRestoreSnapshot() throws Exception {
+    final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
+
+    long procId = ProcedureTestingUtility.submitAndWait(
+      procExec,
+      new RestoreSnapshotProcedure(procExec.getEnvironment(), snapshotHTD, snapshot));
+    ProcedureTestingUtility.assertProcNotFailed(procExec.getResult(procId));
+
+    validateSnapshotRestore();
+  }
+
+  @Test(timeout = 60000)
+  public void testRestoreSnapshotTwiceWithSameNonce() throws Exception {
+    final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
+
+    long procId1 = procExec.submitProcedure(
+      new RestoreSnapshotProcedure(procExec.getEnvironment(), snapshotHTD, snapshot),
+      nonceGroup,
+      nonce);
+    long procId2 = procExec.submitProcedure(
+      new RestoreSnapshotProcedure(procExec.getEnvironment(), snapshotHTD, snapshot),
+      nonceGroup,
+      nonce);
+
+    // Wait for the completion
+    ProcedureTestingUtility.waitProcedure(procExec, procId1);
+    ProcedureTestingUtility.assertProcNotFailed(procExec, procId1);
+    // The second proc should succeed too - because it is the same proc.
+    ProcedureTestingUtility.waitProcedure(procExec, procId2);
+    ProcedureTestingUtility.assertProcNotFailed(procExec, procId2);
+    assertTrue(procId1 == procId2);
+
+    validateSnapshotRestore();
+  }
+
+  @Test(timeout=60000)
+  public void testRestoreSnapshotToDifferentTable() throws Exception {
+    final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
+    final TableName restoredTableName = TableName.valueOf("testRestoreSnapshotToDifferentTable");
+    final HTableDescriptor newHTD = createHTableDescriptor(restoredTableName, CF1, CF2);
+
+    long procId = ProcedureTestingUtility.submitAndWait(
+      procExec, new RestoreSnapshotProcedure(procExec.getEnvironment(), newHTD, snapshot));
+    ProcedureInfo result = procExec.getResult(procId);
+    assertTrue(result.isFailed());
+    LOG.debug("Restore snapshot failed with exception: " + result.getExceptionFullMessage());
+    assertTrue(
+      ProcedureTestingUtility.getExceptionCause(result) instanceof TableNotFoundException);
+  }
+
+  @Test(timeout=60000)
+  public void testRestoreSnapshotToEnabledTable() throws Exception {
+    final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
+
+    try {
+      UTIL.getHBaseAdmin().enableTable(snapshotTableName);
+
+      long procId = ProcedureTestingUtility.submitAndWait(
+        procExec,
+        new RestoreSnapshotProcedure(procExec.getEnvironment(), snapshotHTD, snapshot));
+      ProcedureInfo result = procExec.getResult(procId);
+      assertTrue(result.isFailed());
+      LOG.debug("Restore snapshot failed with exception: " + result.getExceptionFullMessage());
+      assertTrue(
+        ProcedureTestingUtility.getExceptionCause(result) instanceof TableNotDisabledException);
+    } finally {
+      UTIL.getHBaseAdmin().disableTable(snapshotTableName);
+    }
+  }
+
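+  // Kill the executor before every procedure-store update, restart it, and let
+  // each RestoreSnapshotState step execute twice; every step must be idempotent.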
+  @Test(timeout=60000)
+  public void testRecoveryAndDoubleExecution() throws Exception {
+    final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
+
+    ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true);
+
+    // Start the Restore snapshot procedure && kill the executor
+    long procId = procExec.submitProcedure(
+      new RestoreSnapshotProcedure(procExec.getEnvironment(), snapshotHTD, snapshot),
+      nonceGroup,
+      nonce);
+
+    // Restart the executor and execute the step twice
+    int numberOfSteps = RestoreSnapshotState.values().length;
+    MasterProcedureTestingUtility.testRecoveryAndDoubleExecution(
+      procExec,
+      procId,
+      numberOfSteps,
+      RestoreSnapshotState.values());
+
+    resetProcExecutorTestingKillFlag();
+    validateSnapshotRestore();
+  }
+
+  private void validateSnapshotRestore() throws IOException {
+    try {
+      UTIL.getHBaseAdmin().enableTable(snapshotTableName);
+
+      HTableDescriptor currentHTD = UTIL.getHBaseAdmin().getTableDescriptor(snapshotTableName);
+      assertTrue(currentHTD.hasFamily(CF1));
+      assertTrue(currentHTD.hasFamily(CF2));
+      assertFalse(currentHTD.hasFamily(CF3));
+      assertFalse(currentHTD.hasFamily(CF4));
+      assertEquals(currentHTD.getFamiliesKeys().size(), snapshotHTD.getFamiliesKeys().size());
+      SnapshotTestingUtils.verifyRowCount(UTIL, snapshotTableName, rowCountCF1 + rowCountCF2);
+    } finally {
+      UTIL.getHBaseAdmin().disableTable(snapshotTableName);
+    }
+  }
+
+  private ProcedureExecutor<MasterProcedureEnv> getMasterProcedureExecutor() {
+    return UTIL.getHBaseCluster().getMaster().getMasterProcedureExecutor();
+  }
+}
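
Taken together, the patch lets clients drive clone the same way as restore: submit, then wait on the returned Future. A minimal client-side sketch of the intended call pattern for the new clone API (illustrative only, not part of this patch; the class name, table name and timeout are made up):

import java.io.IOException;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;

public class CloneSnapshotAsyncExample {
  static void cloneAndWait(Admin admin, String snapshotName, TableName target) throws Exception {
    Future<?> future = admin.cloneSnapshotAsync(snapshotName, target);
    try {
      future.get(5, TimeUnit.MINUTES);
    } catch (TimeoutException e) {
      // The clone is still running on the master; the procedure itself is not
      // cancelled by the timed-out wait.
    } catch (ExecutionException e) {
      throw new IOException("clone of " + snapshotName + " failed", e.getCause());
    }
  }
}
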