done);
+
+ /**
* rpc Balance(.hbase.pb.BalanceRequest) returns (.hbase.pb.BalanceResponse);
*
*
@@ -61160,6 +61953,14 @@ public final class MasterProtos {
}
@java.lang.Override
+ public void isMasterInMaintenanceMode(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeRequest request,
+ com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse> done) {
+ impl.isMasterInMaintenanceMode(controller, request, done);
+ }
+
+ @java.lang.Override
public void balance(
com.google.protobuf.RpcController controller,
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest request,
@@ -61512,76 +62313,78 @@ public final class MasterProtos {
case 20:
return impl.stopMaster(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.StopMasterRequest)request);
case 21:
- return impl.balance(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest)request);
+ return impl.isMasterInMaintenanceMode(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeRequest)request);
case 22:
- return impl.setBalancerRunning(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningRequest)request);
+ return impl.balance(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest)request);
case 23:
- return impl.isBalancerEnabled(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsBalancerEnabledRequest)request);
+ return impl.setBalancerRunning(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningRequest)request);
case 24:
- return impl.setSplitOrMergeEnabled(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSplitOrMergeEnabledRequest)request);
+ return impl.isBalancerEnabled(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsBalancerEnabledRequest)request);
case 25:
- return impl.isSplitOrMergeEnabled(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSplitOrMergeEnabledRequest)request);
+ return impl.setSplitOrMergeEnabled(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSplitOrMergeEnabledRequest)request);
case 26:
- return impl.releaseSplitOrMergeLockAndRollback(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackRequest)request);
+ return impl.isSplitOrMergeEnabled(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSplitOrMergeEnabledRequest)request);
case 27:
- return impl.normalize(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.NormalizeRequest)request);
+ return impl.releaseSplitOrMergeLockAndRollback(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackRequest)request);
case 28:
- return impl.setNormalizerRunning(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetNormalizerRunningRequest)request);
+ return impl.normalize(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.NormalizeRequest)request);
case 29:
- return impl.isNormalizerEnabled(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsNormalizerEnabledRequest)request);
+ return impl.setNormalizerRunning(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetNormalizerRunningRequest)request);
case 30:
- return impl.runCatalogScan(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RunCatalogScanRequest)request);
+ return impl.isNormalizerEnabled(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsNormalizerEnabledRequest)request);
case 31:
- return impl.enableCatalogJanitor(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableCatalogJanitorRequest)request);
+ return impl.runCatalogScan(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RunCatalogScanRequest)request);
case 32:
- return impl.isCatalogJanitorEnabled(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledRequest)request);
+ return impl.enableCatalogJanitor(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableCatalogJanitorRequest)request);
case 33:
- return impl.execMasterService(controller, (org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceRequest)request);
+ return impl.isCatalogJanitorEnabled(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledRequest)request);
case 34:
- return impl.snapshot(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SnapshotRequest)request);
+ return impl.execMasterService(controller, (org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceRequest)request);
case 35:
- return impl.getCompletedSnapshots(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCompletedSnapshotsRequest)request);
+ return impl.snapshot(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SnapshotRequest)request);
case 36:
- return impl.deleteSnapshot(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteSnapshotRequest)request);
+ return impl.getCompletedSnapshots(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCompletedSnapshotsRequest)request);
case 37:
- return impl.isSnapshotDone(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneRequest)request);
+ return impl.deleteSnapshot(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteSnapshotRequest)request);
case 38:
- return impl.restoreSnapshot(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotRequest)request);
+ return impl.isSnapshotDone(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneRequest)request);
case 39:
- return impl.execProcedure(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest)request);
+ return impl.restoreSnapshot(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotRequest)request);
case 40:
- return impl.execProcedureWithRet(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest)request);
+ return impl.execProcedure(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest)request);
case 41:
- return impl.isProcedureDone(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest)request);
+ return impl.execProcedureWithRet(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest)request);
case 42:
- return impl.modifyNamespace(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyNamespaceRequest)request);
+ return impl.isProcedureDone(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest)request);
case 43:
- return impl.createNamespace(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateNamespaceRequest)request);
+ return impl.modifyNamespace(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyNamespaceRequest)request);
case 44:
- return impl.deleteNamespace(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteNamespaceRequest)request);
+ return impl.createNamespace(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateNamespaceRequest)request);
case 45:
- return impl.getNamespaceDescriptor(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNamespaceDescriptorRequest)request);
+ return impl.deleteNamespace(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteNamespaceRequest)request);
case 46:
- return impl.listNamespaceDescriptors(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespaceDescriptorsRequest)request);
+ return impl.getNamespaceDescriptor(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNamespaceDescriptorRequest)request);
case 47:
- return impl.listTableDescriptorsByNamespace(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceRequest)request);
+ return impl.listNamespaceDescriptors(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespaceDescriptorsRequest)request);
case 48:
- return impl.listTableNamesByNamespace(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceRequest)request);
+ return impl.listTableDescriptorsByNamespace(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceRequest)request);
case 49:
- return impl.getTableState(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest)request);
+ return impl.listTableNamesByNamespace(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceRequest)request);
case 50:
- return impl.setQuota(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest)request);
+ return impl.getTableState(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest)request);
case 51:
- return impl.getLastMajorCompactionTimestamp(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest)request);
+ return impl.setQuota(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest)request);
case 52:
- return impl.getLastMajorCompactionTimestampForRegion(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest)request);
+ return impl.getLastMajorCompactionTimestamp(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest)request);
case 53:
- return impl.getProcedureResult(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest)request);
+ return impl.getLastMajorCompactionTimestampForRegion(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest)request);
case 54:
- return impl.getSecurityCapabilities(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest)request);
+ return impl.getProcedureResult(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest)request);
case 55:
- return impl.abortProcedure(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest)request);
+ return impl.getSecurityCapabilities(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest)request);
case 56:
+ return impl.abortProcedure(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest)request);
+ case 57:
return impl.listProcedures(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest)request);
default:
throw new java.lang.AssertionError("Can't get here.");
@@ -61640,76 +62443,78 @@ public final class MasterProtos {
case 20:
return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.StopMasterRequest.getDefaultInstance();
case 21:
- return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest.getDefaultInstance();
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeRequest.getDefaultInstance();
case 22:
- return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningRequest.getDefaultInstance();
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest.getDefaultInstance();
case 23:
- return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsBalancerEnabledRequest.getDefaultInstance();
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningRequest.getDefaultInstance();
case 24:
- return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSplitOrMergeEnabledRequest.getDefaultInstance();
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsBalancerEnabledRequest.getDefaultInstance();
case 25:
- return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSplitOrMergeEnabledRequest.getDefaultInstance();
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSplitOrMergeEnabledRequest.getDefaultInstance();
case 26:
- return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackRequest.getDefaultInstance();
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSplitOrMergeEnabledRequest.getDefaultInstance();
case 27:
- return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.NormalizeRequest.getDefaultInstance();
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackRequest.getDefaultInstance();
case 28:
- return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetNormalizerRunningRequest.getDefaultInstance();
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.NormalizeRequest.getDefaultInstance();
case 29:
- return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsNormalizerEnabledRequest.getDefaultInstance();
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetNormalizerRunningRequest.getDefaultInstance();
case 30:
- return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RunCatalogScanRequest.getDefaultInstance();
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsNormalizerEnabledRequest.getDefaultInstance();
case 31:
- return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableCatalogJanitorRequest.getDefaultInstance();
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RunCatalogScanRequest.getDefaultInstance();
case 32:
- return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledRequest.getDefaultInstance();
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableCatalogJanitorRequest.getDefaultInstance();
case 33:
- return org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceRequest.getDefaultInstance();
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledRequest.getDefaultInstance();
case 34:
- return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SnapshotRequest.getDefaultInstance();
+ return org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceRequest.getDefaultInstance();
case 35:
- return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCompletedSnapshotsRequest.getDefaultInstance();
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SnapshotRequest.getDefaultInstance();
case 36:
- return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteSnapshotRequest.getDefaultInstance();
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCompletedSnapshotsRequest.getDefaultInstance();
case 37:
- return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneRequest.getDefaultInstance();
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteSnapshotRequest.getDefaultInstance();
case 38:
- return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotRequest.getDefaultInstance();
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneRequest.getDefaultInstance();
case 39:
- return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest.getDefaultInstance();
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotRequest.getDefaultInstance();
case 40:
return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest.getDefaultInstance();
case 41:
- return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest.getDefaultInstance();
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest.getDefaultInstance();
case 42:
- return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyNamespaceRequest.getDefaultInstance();
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest.getDefaultInstance();
case 43:
- return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateNamespaceRequest.getDefaultInstance();
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyNamespaceRequest.getDefaultInstance();
case 44:
- return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteNamespaceRequest.getDefaultInstance();
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateNamespaceRequest.getDefaultInstance();
case 45:
- return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNamespaceDescriptorRequest.getDefaultInstance();
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteNamespaceRequest.getDefaultInstance();
case 46:
- return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespaceDescriptorsRequest.getDefaultInstance();
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNamespaceDescriptorRequest.getDefaultInstance();
case 47:
- return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceRequest.getDefaultInstance();
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespaceDescriptorsRequest.getDefaultInstance();
case 48:
- return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceRequest.getDefaultInstance();
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceRequest.getDefaultInstance();
case 49:
- return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest.getDefaultInstance();
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceRequest.getDefaultInstance();
case 50:
- return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest.getDefaultInstance();
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest.getDefaultInstance();
case 51:
- return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest.getDefaultInstance();
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest.getDefaultInstance();
case 52:
- return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest.getDefaultInstance();
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest.getDefaultInstance();
case 53:
- return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest.getDefaultInstance();
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest.getDefaultInstance();
case 54:
- return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest.getDefaultInstance();
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest.getDefaultInstance();
case 55:
- return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest.getDefaultInstance();
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest.getDefaultInstance();
case 56:
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest.getDefaultInstance();
+ case 57:
return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest.getDefaultInstance();
default:
throw new java.lang.AssertionError("Can't get here.");
@@ -61768,76 +62573,78 @@ public final class MasterProtos {
case 20:
return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.StopMasterResponse.getDefaultInstance();
case 21:
- return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse.getDefaultInstance();
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse.getDefaultInstance();
case 22:
- return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningResponse.getDefaultInstance();
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse.getDefaultInstance();
case 23:
- return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsBalancerEnabledResponse.getDefaultInstance();
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningResponse.getDefaultInstance();
case 24:
- return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSplitOrMergeEnabledResponse.getDefaultInstance();
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsBalancerEnabledResponse.getDefaultInstance();
case 25:
- return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSplitOrMergeEnabledResponse.getDefaultInstance();
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSplitOrMergeEnabledResponse.getDefaultInstance();
case 26:
- return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackResponse.getDefaultInstance();
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSplitOrMergeEnabledResponse.getDefaultInstance();
case 27:
- return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.NormalizeResponse.getDefaultInstance();
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackResponse.getDefaultInstance();
case 28:
- return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetNormalizerRunningResponse.getDefaultInstance();
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.NormalizeResponse.getDefaultInstance();
case 29:
- return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsNormalizerEnabledResponse.getDefaultInstance();
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetNormalizerRunningResponse.getDefaultInstance();
case 30:
- return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RunCatalogScanResponse.getDefaultInstance();
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsNormalizerEnabledResponse.getDefaultInstance();
case 31:
- return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableCatalogJanitorResponse.getDefaultInstance();
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RunCatalogScanResponse.getDefaultInstance();
case 32:
- return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledResponse.getDefaultInstance();
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableCatalogJanitorResponse.getDefaultInstance();
case 33:
- return org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceResponse.getDefaultInstance();
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledResponse.getDefaultInstance();
case 34:
- return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SnapshotResponse.getDefaultInstance();
+ return org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceResponse.getDefaultInstance();
case 35:
- return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCompletedSnapshotsResponse.getDefaultInstance();
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SnapshotResponse.getDefaultInstance();
case 36:
- return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteSnapshotResponse.getDefaultInstance();
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCompletedSnapshotsResponse.getDefaultInstance();
case 37:
- return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneResponse.getDefaultInstance();
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteSnapshotResponse.getDefaultInstance();
case 38:
- return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotResponse.getDefaultInstance();
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneResponse.getDefaultInstance();
case 39:
- return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse.getDefaultInstance();
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotResponse.getDefaultInstance();
case 40:
return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse.getDefaultInstance();
case 41:
- return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse.getDefaultInstance();
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse.getDefaultInstance();
case 42:
- return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyNamespaceResponse.getDefaultInstance();
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse.getDefaultInstance();
case 43:
- return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateNamespaceResponse.getDefaultInstance();
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyNamespaceResponse.getDefaultInstance();
case 44:
- return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteNamespaceResponse.getDefaultInstance();
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateNamespaceResponse.getDefaultInstance();
case 45:
- return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNamespaceDescriptorResponse.getDefaultInstance();
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteNamespaceResponse.getDefaultInstance();
case 46:
- return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespaceDescriptorsResponse.getDefaultInstance();
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNamespaceDescriptorResponse.getDefaultInstance();
case 47:
- return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceResponse.getDefaultInstance();
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespaceDescriptorsResponse.getDefaultInstance();
case 48:
- return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceResponse.getDefaultInstance();
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceResponse.getDefaultInstance();
case 49:
- return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse.getDefaultInstance();
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceResponse.getDefaultInstance();
case 50:
- return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse.getDefaultInstance();
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse.getDefaultInstance();
case 51:
- return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.getDefaultInstance();
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse.getDefaultInstance();
case 52:
return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.getDefaultInstance();
case 53:
- return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.getDefaultInstance();
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.getDefaultInstance();
case 54:
- return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse.getDefaultInstance();
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.getDefaultInstance();
case 55:
- return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse.getDefaultInstance();
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse.getDefaultInstance();
case 56:
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse.getDefaultInstance();
+ case 57:
return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse.getDefaultInstance();
default:
throw new java.lang.AssertionError("Can't get here.");
@@ -62108,6 +62915,19 @@ public final class MasterProtos {
com.google.protobuf.RpcCallback done);
/**
+ * <code>rpc IsMasterInMaintenanceMode(.hbase.pb.IsInMaintenanceModeRequest) returns (.hbase.pb.IsInMaintenanceModeResponse);</code>
+ *
+ * <pre>
+ **
+ * Query whether the Master is in maintenance mode.
+ * </pre>
+ */
+ public abstract void isMasterInMaintenanceMode(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeRequest request,
+ com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse> done);
+
+ /**
* rpc Balance(.hbase.pb.BalanceRequest) returns (.hbase.pb.BalanceResponse);
*
*
@@ -62688,181 +63508,186 @@ public final class MasterProtos {
done));
return;
case 21:
+ this.isMasterInMaintenanceMode(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeRequest)request,
+ com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse>specializeCallback(
+ done));
+ return;
+ case 22:
this.balance(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest)request,
com.google.protobuf.RpcUtil.specializeCallback(
done));
return;
- case 22:
+ case 23:
this.setBalancerRunning(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningRequest)request,
com.google.protobuf.RpcUtil.specializeCallback(
done));
return;
- case 23:
+ case 24:
this.isBalancerEnabled(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsBalancerEnabledRequest)request,
com.google.protobuf.RpcUtil.specializeCallback(
done));
return;
- case 24:
+ case 25:
this.setSplitOrMergeEnabled(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSplitOrMergeEnabledRequest)request,
com.google.protobuf.RpcUtil.specializeCallback(
done));
return;
- case 25:
+ case 26:
this.isSplitOrMergeEnabled(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSplitOrMergeEnabledRequest)request,
com.google.protobuf.RpcUtil.specializeCallback(
done));
return;
- case 26:
+ case 27:
this.releaseSplitOrMergeLockAndRollback(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackRequest)request,
com.google.protobuf.RpcUtil.specializeCallback(
done));
return;
- case 27:
+ case 28:
this.normalize(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.NormalizeRequest)request,
com.google.protobuf.RpcUtil.specializeCallback(
done));
return;
- case 28:
+ case 29:
this.setNormalizerRunning(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetNormalizerRunningRequest)request,
com.google.protobuf.RpcUtil.specializeCallback(
done));
return;
- case 29:
+ case 30:
this.isNormalizerEnabled(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsNormalizerEnabledRequest)request,
com.google.protobuf.RpcUtil.specializeCallback(
done));
return;
- case 30:
+ case 31:
this.runCatalogScan(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RunCatalogScanRequest)request,
com.google.protobuf.RpcUtil.specializeCallback(
done));
return;
- case 31:
+ case 32:
this.enableCatalogJanitor(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableCatalogJanitorRequest)request,
com.google.protobuf.RpcUtil.specializeCallback(
done));
return;
- case 32:
+ case 33:
this.isCatalogJanitorEnabled(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledRequest)request,
com.google.protobuf.RpcUtil.specializeCallback(
done));
return;
- case 33:
+ case 34:
this.execMasterService(controller, (org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceRequest)request,
com.google.protobuf.RpcUtil.specializeCallback(
done));
return;
- case 34:
+ case 35:
this.snapshot(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SnapshotRequest)request,
com.google.protobuf.RpcUtil.specializeCallback(
done));
return;
- case 35:
+ case 36:
this.getCompletedSnapshots(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCompletedSnapshotsRequest)request,
com.google.protobuf.RpcUtil.specializeCallback(
done));
return;
- case 36:
+ case 37:
this.deleteSnapshot(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteSnapshotRequest)request,
com.google.protobuf.RpcUtil.specializeCallback(
done));
return;
- case 37:
+ case 38:
this.isSnapshotDone(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneRequest)request,
com.google.protobuf.RpcUtil.specializeCallback(
done));
return;
- case 38:
+ case 39:
this.restoreSnapshot(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotRequest)request,
com.google.protobuf.RpcUtil.specializeCallback(
done));
return;
- case 39:
+ case 40:
this.execProcedure(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest)request,
com.google.protobuf.RpcUtil.specializeCallback(
done));
return;
- case 40:
+ case 41:
this.execProcedureWithRet(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest)request,
com.google.protobuf.RpcUtil.specializeCallback(
done));
return;
- case 41:
+ case 42:
this.isProcedureDone(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest)request,
com.google.protobuf.RpcUtil.specializeCallback(
done));
return;
- case 42:
+ case 43:
this.modifyNamespace(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyNamespaceRequest)request,
com.google.protobuf.RpcUtil.specializeCallback(
done));
return;
- case 43:
+ case 44:
this.createNamespace(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateNamespaceRequest)request,
com.google.protobuf.RpcUtil.specializeCallback(
done));
return;
- case 44:
+ case 45:
this.deleteNamespace(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteNamespaceRequest)request,
com.google.protobuf.RpcUtil.specializeCallback(
done));
return;
- case 45:
+ case 46:
this.getNamespaceDescriptor(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNamespaceDescriptorRequest)request,
com.google.protobuf.RpcUtil.specializeCallback(
done));
return;
- case 46:
+ case 47:
this.listNamespaceDescriptors(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespaceDescriptorsRequest)request,
com.google.protobuf.RpcUtil.specializeCallback(
done));
return;
- case 47:
+ case 48:
this.listTableDescriptorsByNamespace(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceRequest)request,
com.google.protobuf.RpcUtil.specializeCallback(
done));
return;
- case 48:
+ case 49:
this.listTableNamesByNamespace(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceRequest)request,
com.google.protobuf.RpcUtil.specializeCallback(
done));
return;
- case 49:
+ case 50:
this.getTableState(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest)request,
com.google.protobuf.RpcUtil.specializeCallback(
done));
return;
- case 50:
+ case 51:
this.setQuota(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest)request,
com.google.protobuf.RpcUtil.specializeCallback(
done));
return;
- case 51:
+ case 52:
this.getLastMajorCompactionTimestamp(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest)request,
com.google.protobuf.RpcUtil.specializeCallback(
done));
return;
- case 52:
+ case 53:
this.getLastMajorCompactionTimestampForRegion(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest)request,
com.google.protobuf.RpcUtil.specializeCallback(
done));
return;
- case 53:
+ case 54:
this.getProcedureResult(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest)request,
com.google.protobuf.RpcUtil.specializeCallback(
done));
return;
- case 54:
+ case 55:
this.getSecurityCapabilities(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest)request,
com.google.protobuf.RpcUtil.specializeCallback(
done));
return;
- case 55:
+ case 56:
this.abortProcedure(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest)request,
com.google.protobuf.RpcUtil.specializeCallback(
done));
return;
- case 56:
+ case 57:
this.listProcedures(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest)request,
com.google.protobuf.RpcUtil.specializeCallback(
done));
@@ -62924,76 +63749,78 @@ public final class MasterProtos {
case 20:
return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.StopMasterRequest.getDefaultInstance();
case 21:
- return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest.getDefaultInstance();
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeRequest.getDefaultInstance();
case 22:
- return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningRequest.getDefaultInstance();
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest.getDefaultInstance();
case 23:
- return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsBalancerEnabledRequest.getDefaultInstance();
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningRequest.getDefaultInstance();
case 24:
- return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSplitOrMergeEnabledRequest.getDefaultInstance();
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsBalancerEnabledRequest.getDefaultInstance();
case 25:
- return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSplitOrMergeEnabledRequest.getDefaultInstance();
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSplitOrMergeEnabledRequest.getDefaultInstance();
case 26:
- return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackRequest.getDefaultInstance();
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSplitOrMergeEnabledRequest.getDefaultInstance();
case 27:
- return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.NormalizeRequest.getDefaultInstance();
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackRequest.getDefaultInstance();
case 28:
- return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetNormalizerRunningRequest.getDefaultInstance();
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.NormalizeRequest.getDefaultInstance();
case 29:
- return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsNormalizerEnabledRequest.getDefaultInstance();
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetNormalizerRunningRequest.getDefaultInstance();
case 30:
- return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RunCatalogScanRequest.getDefaultInstance();
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsNormalizerEnabledRequest.getDefaultInstance();
case 31:
- return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableCatalogJanitorRequest.getDefaultInstance();
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RunCatalogScanRequest.getDefaultInstance();
case 32:
- return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledRequest.getDefaultInstance();
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableCatalogJanitorRequest.getDefaultInstance();
case 33:
- return org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceRequest.getDefaultInstance();
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledRequest.getDefaultInstance();
case 34:
- return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SnapshotRequest.getDefaultInstance();
+ return org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceRequest.getDefaultInstance();
case 35:
- return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCompletedSnapshotsRequest.getDefaultInstance();
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SnapshotRequest.getDefaultInstance();
case 36:
- return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteSnapshotRequest.getDefaultInstance();
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCompletedSnapshotsRequest.getDefaultInstance();
case 37:
- return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneRequest.getDefaultInstance();
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteSnapshotRequest.getDefaultInstance();
case 38:
- return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotRequest.getDefaultInstance();
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneRequest.getDefaultInstance();
case 39:
- return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest.getDefaultInstance();
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotRequest.getDefaultInstance();
case 40:
return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest.getDefaultInstance();
case 41:
- return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest.getDefaultInstance();
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest.getDefaultInstance();
case 42:
- return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyNamespaceRequest.getDefaultInstance();
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest.getDefaultInstance();
case 43:
- return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateNamespaceRequest.getDefaultInstance();
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyNamespaceRequest.getDefaultInstance();
case 44:
- return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteNamespaceRequest.getDefaultInstance();
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateNamespaceRequest.getDefaultInstance();
case 45:
- return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNamespaceDescriptorRequest.getDefaultInstance();
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteNamespaceRequest.getDefaultInstance();
case 46:
- return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespaceDescriptorsRequest.getDefaultInstance();
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNamespaceDescriptorRequest.getDefaultInstance();
case 47:
- return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceRequest.getDefaultInstance();
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespaceDescriptorsRequest.getDefaultInstance();
case 48:
- return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceRequest.getDefaultInstance();
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceRequest.getDefaultInstance();
case 49:
- return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest.getDefaultInstance();
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceRequest.getDefaultInstance();
case 50:
- return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest.getDefaultInstance();
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest.getDefaultInstance();
case 51:
- return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest.getDefaultInstance();
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest.getDefaultInstance();
case 52:
- return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest.getDefaultInstance();
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest.getDefaultInstance();
case 53:
- return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest.getDefaultInstance();
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest.getDefaultInstance();
case 54:
- return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest.getDefaultInstance();
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest.getDefaultInstance();
case 55:
- return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest.getDefaultInstance();
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest.getDefaultInstance();
case 56:
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest.getDefaultInstance();
+ case 57:
return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest.getDefaultInstance();
default:
throw new java.lang.AssertionError("Can't get here.");
@@ -63052,76 +63879,78 @@ public final class MasterProtos {
case 20:
return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.StopMasterResponse.getDefaultInstance();
case 21:
- return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse.getDefaultInstance();
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse.getDefaultInstance();
case 22:
- return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningResponse.getDefaultInstance();
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse.getDefaultInstance();
case 23:
- return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsBalancerEnabledResponse.getDefaultInstance();
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningResponse.getDefaultInstance();
case 24:
- return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSplitOrMergeEnabledResponse.getDefaultInstance();
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsBalancerEnabledResponse.getDefaultInstance();
case 25:
- return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSplitOrMergeEnabledResponse.getDefaultInstance();
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSplitOrMergeEnabledResponse.getDefaultInstance();
case 26:
- return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackResponse.getDefaultInstance();
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSplitOrMergeEnabledResponse.getDefaultInstance();
case 27:
- return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.NormalizeResponse.getDefaultInstance();
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackResponse.getDefaultInstance();
case 28:
- return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetNormalizerRunningResponse.getDefaultInstance();
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.NormalizeResponse.getDefaultInstance();
case 29:
- return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsNormalizerEnabledResponse.getDefaultInstance();
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetNormalizerRunningResponse.getDefaultInstance();
case 30:
- return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RunCatalogScanResponse.getDefaultInstance();
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsNormalizerEnabledResponse.getDefaultInstance();
case 31:
- return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableCatalogJanitorResponse.getDefaultInstance();
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RunCatalogScanResponse.getDefaultInstance();
case 32:
- return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledResponse.getDefaultInstance();
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableCatalogJanitorResponse.getDefaultInstance();
case 33:
- return org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceResponse.getDefaultInstance();
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledResponse.getDefaultInstance();
case 34:
- return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SnapshotResponse.getDefaultInstance();
+ return org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceResponse.getDefaultInstance();
case 35:
- return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCompletedSnapshotsResponse.getDefaultInstance();
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SnapshotResponse.getDefaultInstance();
case 36:
- return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteSnapshotResponse.getDefaultInstance();
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCompletedSnapshotsResponse.getDefaultInstance();
case 37:
- return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneResponse.getDefaultInstance();
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteSnapshotResponse.getDefaultInstance();
case 38:
- return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotResponse.getDefaultInstance();
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneResponse.getDefaultInstance();
case 39:
- return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse.getDefaultInstance();
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotResponse.getDefaultInstance();
case 40:
return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse.getDefaultInstance();
case 41:
- return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse.getDefaultInstance();
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse.getDefaultInstance();
case 42:
- return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyNamespaceResponse.getDefaultInstance();
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse.getDefaultInstance();
case 43:
- return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateNamespaceResponse.getDefaultInstance();
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyNamespaceResponse.getDefaultInstance();
case 44:
- return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteNamespaceResponse.getDefaultInstance();
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateNamespaceResponse.getDefaultInstance();
case 45:
- return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNamespaceDescriptorResponse.getDefaultInstance();
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteNamespaceResponse.getDefaultInstance();
case 46:
- return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespaceDescriptorsResponse.getDefaultInstance();
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNamespaceDescriptorResponse.getDefaultInstance();
case 47:
- return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceResponse.getDefaultInstance();
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespaceDescriptorsResponse.getDefaultInstance();
case 48:
- return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceResponse.getDefaultInstance();
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceResponse.getDefaultInstance();
case 49:
- return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse.getDefaultInstance();
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceResponse.getDefaultInstance();
case 50:
- return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse.getDefaultInstance();
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse.getDefaultInstance();
case 51:
- return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.getDefaultInstance();
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse.getDefaultInstance();
case 52:
return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.getDefaultInstance();
case 53:
- return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.getDefaultInstance();
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.getDefaultInstance();
case 54:
- return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse.getDefaultInstance();
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.getDefaultInstance();
case 55:
- return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse.getDefaultInstance();
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse.getDefaultInstance();
case 56:
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse.getDefaultInstance();
+ case 57:
return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse.getDefaultInstance();
default:
throw new java.lang.AssertionError("Can't get here.");
@@ -63459,12 +64288,27 @@ public final class MasterProtos {
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.StopMasterResponse.getDefaultInstance()));
}
+ public void isMasterInMaintenanceMode(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeRequest request,
+ com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse> done) {
+ channel.callMethod(
+ getDescriptor().getMethods().get(21),
+ controller,
+ request,
+ org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse.getDefaultInstance(),
+ com.google.protobuf.RpcUtil.generalizeCallback(
+ done,
+ org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse.class,
+ org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse.getDefaultInstance()));
+ }
+
public void balance(
com.google.protobuf.RpcController controller,
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest request,
com.google.protobuf.RpcCallback done) {
channel.callMethod(
- getDescriptor().getMethods().get(21),
+ getDescriptor().getMethods().get(22),
controller,
request,
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse.getDefaultInstance(),
@@ -63479,7 +64323,7 @@ public final class MasterProtos {
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningRequest request,
com.google.protobuf.RpcCallback done) {
channel.callMethod(
- getDescriptor().getMethods().get(22),
+ getDescriptor().getMethods().get(23),
controller,
request,
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningResponse.getDefaultInstance(),
@@ -63494,7 +64338,7 @@ public final class MasterProtos {
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsBalancerEnabledRequest request,
com.google.protobuf.RpcCallback done) {
channel.callMethod(
- getDescriptor().getMethods().get(23),
+ getDescriptor().getMethods().get(24),
controller,
request,
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsBalancerEnabledResponse.getDefaultInstance(),
@@ -63509,7 +64353,7 @@ public final class MasterProtos {
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSplitOrMergeEnabledRequest request,
com.google.protobuf.RpcCallback done) {
channel.callMethod(
- getDescriptor().getMethods().get(24),
+ getDescriptor().getMethods().get(25),
controller,
request,
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSplitOrMergeEnabledResponse.getDefaultInstance(),
@@ -63524,7 +64368,7 @@ public final class MasterProtos {
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSplitOrMergeEnabledRequest request,
com.google.protobuf.RpcCallback done) {
channel.callMethod(
- getDescriptor().getMethods().get(25),
+ getDescriptor().getMethods().get(26),
controller,
request,
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSplitOrMergeEnabledResponse.getDefaultInstance(),
@@ -63539,7 +64383,7 @@ public final class MasterProtos {
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackRequest request,
com.google.protobuf.RpcCallback done) {
channel.callMethod(
- getDescriptor().getMethods().get(26),
+ getDescriptor().getMethods().get(27),
controller,
request,
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackResponse.getDefaultInstance(),
@@ -63554,7 +64398,7 @@ public final class MasterProtos {
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.NormalizeRequest request,
com.google.protobuf.RpcCallback done) {
channel.callMethod(
- getDescriptor().getMethods().get(27),
+ getDescriptor().getMethods().get(28),
controller,
request,
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.NormalizeResponse.getDefaultInstance(),
@@ -63569,7 +64413,7 @@ public final class MasterProtos {
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetNormalizerRunningRequest request,
com.google.protobuf.RpcCallback done) {
channel.callMethod(
- getDescriptor().getMethods().get(28),
+ getDescriptor().getMethods().get(29),
controller,
request,
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetNormalizerRunningResponse.getDefaultInstance(),
@@ -63584,7 +64428,7 @@ public final class MasterProtos {
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsNormalizerEnabledRequest request,
com.google.protobuf.RpcCallback done) {
channel.callMethod(
- getDescriptor().getMethods().get(29),
+ getDescriptor().getMethods().get(30),
controller,
request,
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsNormalizerEnabledResponse.getDefaultInstance(),
@@ -63599,7 +64443,7 @@ public final class MasterProtos {
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RunCatalogScanRequest request,
com.google.protobuf.RpcCallback done) {
channel.callMethod(
- getDescriptor().getMethods().get(30),
+ getDescriptor().getMethods().get(31),
controller,
request,
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RunCatalogScanResponse.getDefaultInstance(),
@@ -63614,7 +64458,7 @@ public final class MasterProtos {
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableCatalogJanitorRequest request,
com.google.protobuf.RpcCallback done) {
channel.callMethod(
- getDescriptor().getMethods().get(31),
+ getDescriptor().getMethods().get(32),
controller,
request,
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableCatalogJanitorResponse.getDefaultInstance(),
@@ -63629,7 +64473,7 @@ public final class MasterProtos {
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledRequest request,
com.google.protobuf.RpcCallback done) {
channel.callMethod(
- getDescriptor().getMethods().get(32),
+ getDescriptor().getMethods().get(33),
controller,
request,
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledResponse.getDefaultInstance(),
@@ -63644,7 +64488,7 @@ public final class MasterProtos {
org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceRequest request,
com.google.protobuf.RpcCallback done) {
channel.callMethod(
- getDescriptor().getMethods().get(33),
+ getDescriptor().getMethods().get(34),
controller,
request,
org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceResponse.getDefaultInstance(),
@@ -63659,7 +64503,7 @@ public final class MasterProtos {
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SnapshotRequest request,
com.google.protobuf.RpcCallback done) {
channel.callMethod(
- getDescriptor().getMethods().get(34),
+ getDescriptor().getMethods().get(35),
controller,
request,
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SnapshotResponse.getDefaultInstance(),
@@ -63674,7 +64518,7 @@ public final class MasterProtos {
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCompletedSnapshotsRequest request,
com.google.protobuf.RpcCallback done) {
channel.callMethod(
- getDescriptor().getMethods().get(35),
+ getDescriptor().getMethods().get(36),
controller,
request,
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCompletedSnapshotsResponse.getDefaultInstance(),
@@ -63689,7 +64533,7 @@ public final class MasterProtos {
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteSnapshotRequest request,
com.google.protobuf.RpcCallback done) {
channel.callMethod(
- getDescriptor().getMethods().get(36),
+ getDescriptor().getMethods().get(37),
controller,
request,
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteSnapshotResponse.getDefaultInstance(),
@@ -63704,7 +64548,7 @@ public final class MasterProtos {
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneRequest request,
com.google.protobuf.RpcCallback done) {
channel.callMethod(
- getDescriptor().getMethods().get(37),
+ getDescriptor().getMethods().get(38),
controller,
request,
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneResponse.getDefaultInstance(),
@@ -63719,7 +64563,7 @@ public final class MasterProtos {
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotRequest request,
com.google.protobuf.RpcCallback done) {
channel.callMethod(
- getDescriptor().getMethods().get(38),
+ getDescriptor().getMethods().get(39),
controller,
request,
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotResponse.getDefaultInstance(),
@@ -63734,7 +64578,7 @@ public final class MasterProtos {
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest request,
com.google.protobuf.RpcCallback done) {
channel.callMethod(
- getDescriptor().getMethods().get(39),
+ getDescriptor().getMethods().get(40),
controller,
request,
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse.getDefaultInstance(),
@@ -63749,7 +64593,7 @@ public final class MasterProtos {
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest request,
com.google.protobuf.RpcCallback done) {
channel.callMethod(
- getDescriptor().getMethods().get(40),
+ getDescriptor().getMethods().get(41),
controller,
request,
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse.getDefaultInstance(),
@@ -63764,7 +64608,7 @@ public final class MasterProtos {
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest request,
com.google.protobuf.RpcCallback done) {
channel.callMethod(
- getDescriptor().getMethods().get(41),
+ getDescriptor().getMethods().get(42),
controller,
request,
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse.getDefaultInstance(),
@@ -63779,7 +64623,7 @@ public final class MasterProtos {
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyNamespaceRequest request,
com.google.protobuf.RpcCallback done) {
channel.callMethod(
- getDescriptor().getMethods().get(42),
+ getDescriptor().getMethods().get(43),
controller,
request,
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyNamespaceResponse.getDefaultInstance(),
@@ -63794,7 +64638,7 @@ public final class MasterProtos {
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateNamespaceRequest request,
com.google.protobuf.RpcCallback done) {
channel.callMethod(
- getDescriptor().getMethods().get(43),
+ getDescriptor().getMethods().get(44),
controller,
request,
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateNamespaceResponse.getDefaultInstance(),
@@ -63809,7 +64653,7 @@ public final class MasterProtos {
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteNamespaceRequest request,
com.google.protobuf.RpcCallback done) {
channel.callMethod(
- getDescriptor().getMethods().get(44),
+ getDescriptor().getMethods().get(45),
controller,
request,
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteNamespaceResponse.getDefaultInstance(),
@@ -63824,7 +64668,7 @@ public final class MasterProtos {
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNamespaceDescriptorRequest request,
com.google.protobuf.RpcCallback done) {
channel.callMethod(
- getDescriptor().getMethods().get(45),
+ getDescriptor().getMethods().get(46),
controller,
request,
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNamespaceDescriptorResponse.getDefaultInstance(),
@@ -63839,7 +64683,7 @@ public final class MasterProtos {
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespaceDescriptorsRequest request,
com.google.protobuf.RpcCallback done) {
channel.callMethod(
- getDescriptor().getMethods().get(46),
+ getDescriptor().getMethods().get(47),
controller,
request,
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespaceDescriptorsResponse.getDefaultInstance(),
@@ -63854,7 +64698,7 @@ public final class MasterProtos {
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceRequest request,
com.google.protobuf.RpcCallback done) {
channel.callMethod(
- getDescriptor().getMethods().get(47),
+ getDescriptor().getMethods().get(48),
controller,
request,
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceResponse.getDefaultInstance(),
@@ -63869,7 +64713,7 @@ public final class MasterProtos {
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceRequest request,
com.google.protobuf.RpcCallback done) {
channel.callMethod(
- getDescriptor().getMethods().get(48),
+ getDescriptor().getMethods().get(49),
controller,
request,
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceResponse.getDefaultInstance(),
@@ -63884,7 +64728,7 @@ public final class MasterProtos {
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest request,
com.google.protobuf.RpcCallback done) {
channel.callMethod(
- getDescriptor().getMethods().get(49),
+ getDescriptor().getMethods().get(50),
controller,
request,
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse.getDefaultInstance(),
@@ -63899,7 +64743,7 @@ public final class MasterProtos {
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest request,
com.google.protobuf.RpcCallback done) {
channel.callMethod(
- getDescriptor().getMethods().get(50),
+ getDescriptor().getMethods().get(51),
controller,
request,
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse.getDefaultInstance(),
@@ -63914,7 +64758,7 @@ public final class MasterProtos {
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest request,
com.google.protobuf.RpcCallback done) {
channel.callMethod(
- getDescriptor().getMethods().get(51),
+ getDescriptor().getMethods().get(52),
controller,
request,
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.getDefaultInstance(),
@@ -63929,7 +64773,7 @@ public final class MasterProtos {
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest request,
com.google.protobuf.RpcCallback done) {
channel.callMethod(
- getDescriptor().getMethods().get(52),
+ getDescriptor().getMethods().get(53),
controller,
request,
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.getDefaultInstance(),
@@ -63944,7 +64788,7 @@ public final class MasterProtos {
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest request,
com.google.protobuf.RpcCallback done) {
channel.callMethod(
- getDescriptor().getMethods().get(53),
+ getDescriptor().getMethods().get(54),
controller,
request,
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.getDefaultInstance(),
@@ -63959,7 +64803,7 @@ public final class MasterProtos {
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest request,
com.google.protobuf.RpcCallback done) {
channel.callMethod(
- getDescriptor().getMethods().get(54),
+ getDescriptor().getMethods().get(55),
controller,
request,
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse.getDefaultInstance(),
@@ -63974,7 +64818,7 @@ public final class MasterProtos {
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest request,
com.google.protobuf.RpcCallback done) {
channel.callMethod(
- getDescriptor().getMethods().get(55),
+ getDescriptor().getMethods().get(56),
controller,
request,
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse.getDefaultInstance(),
@@ -63989,7 +64833,7 @@ public final class MasterProtos {
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest request,
com.google.protobuf.RpcCallback done) {
channel.callMethod(
- getDescriptor().getMethods().get(56),
+ getDescriptor().getMethods().get(57),
controller,
request,
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse.getDefaultInstance(),
@@ -64111,6 +64955,11 @@ public final class MasterProtos {
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.StopMasterRequest request)
throws com.google.protobuf.ServiceException;
+ public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse isMasterInMaintenanceMode(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeRequest request)
+ throws com.google.protobuf.ServiceException;
+
public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse balance(
com.google.protobuf.RpcController controller,
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest request)
@@ -64551,12 +65400,24 @@ public final class MasterProtos {
}
+ public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse isMasterInMaintenanceMode(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeRequest request)
+ throws com.google.protobuf.ServiceException {
+ return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse) channel.callBlockingMethod(
+ getDescriptor().getMethods().get(21),
+ controller,
+ request,
+ org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse.getDefaultInstance());
+ }
+
+
public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse balance(
com.google.protobuf.RpcController controller,
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest request)
throws com.google.protobuf.ServiceException {
return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse) channel.callBlockingMethod(
- getDescriptor().getMethods().get(21),
+ getDescriptor().getMethods().get(22),
controller,
request,
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse.getDefaultInstance());
@@ -64568,7 +65429,7 @@ public final class MasterProtos {
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningRequest request)
throws com.google.protobuf.ServiceException {
return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningResponse) channel.callBlockingMethod(
- getDescriptor().getMethods().get(22),
+ getDescriptor().getMethods().get(23),
controller,
request,
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningResponse.getDefaultInstance());
@@ -64580,7 +65441,7 @@ public final class MasterProtos {
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsBalancerEnabledRequest request)
throws com.google.protobuf.ServiceException {
return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsBalancerEnabledResponse) channel.callBlockingMethod(
- getDescriptor().getMethods().get(23),
+ getDescriptor().getMethods().get(24),
controller,
request,
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsBalancerEnabledResponse.getDefaultInstance());
@@ -64592,7 +65453,7 @@ public final class MasterProtos {
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSplitOrMergeEnabledRequest request)
throws com.google.protobuf.ServiceException {
return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSplitOrMergeEnabledResponse) channel.callBlockingMethod(
- getDescriptor().getMethods().get(24),
+ getDescriptor().getMethods().get(25),
controller,
request,
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSplitOrMergeEnabledResponse.getDefaultInstance());
@@ -64604,7 +65465,7 @@ public final class MasterProtos {
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSplitOrMergeEnabledRequest request)
throws com.google.protobuf.ServiceException {
return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSplitOrMergeEnabledResponse) channel.callBlockingMethod(
- getDescriptor().getMethods().get(25),
+ getDescriptor().getMethods().get(26),
controller,
request,
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSplitOrMergeEnabledResponse.getDefaultInstance());
@@ -64616,7 +65477,7 @@ public final class MasterProtos {
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackRequest request)
throws com.google.protobuf.ServiceException {
return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackResponse) channel.callBlockingMethod(
- getDescriptor().getMethods().get(26),
+ getDescriptor().getMethods().get(27),
controller,
request,
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackResponse.getDefaultInstance());
@@ -64628,7 +65489,7 @@ public final class MasterProtos {
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.NormalizeRequest request)
throws com.google.protobuf.ServiceException {
return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.NormalizeResponse) channel.callBlockingMethod(
- getDescriptor().getMethods().get(27),
+ getDescriptor().getMethods().get(28),
controller,
request,
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.NormalizeResponse.getDefaultInstance());
@@ -64640,7 +65501,7 @@ public final class MasterProtos {
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetNormalizerRunningRequest request)
throws com.google.protobuf.ServiceException {
return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetNormalizerRunningResponse) channel.callBlockingMethod(
- getDescriptor().getMethods().get(28),
+ getDescriptor().getMethods().get(29),
controller,
request,
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetNormalizerRunningResponse.getDefaultInstance());
@@ -64652,7 +65513,7 @@ public final class MasterProtos {
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsNormalizerEnabledRequest request)
throws com.google.protobuf.ServiceException {
return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsNormalizerEnabledResponse) channel.callBlockingMethod(
- getDescriptor().getMethods().get(29),
+ getDescriptor().getMethods().get(30),
controller,
request,
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsNormalizerEnabledResponse.getDefaultInstance());
@@ -64664,7 +65525,7 @@ public final class MasterProtos {
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RunCatalogScanRequest request)
throws com.google.protobuf.ServiceException {
return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RunCatalogScanResponse) channel.callBlockingMethod(
- getDescriptor().getMethods().get(30),
+ getDescriptor().getMethods().get(31),
controller,
request,
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RunCatalogScanResponse.getDefaultInstance());
@@ -64676,7 +65537,7 @@ public final class MasterProtos {
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableCatalogJanitorRequest request)
throws com.google.protobuf.ServiceException {
return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableCatalogJanitorResponse) channel.callBlockingMethod(
- getDescriptor().getMethods().get(31),
+ getDescriptor().getMethods().get(32),
controller,
request,
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableCatalogJanitorResponse.getDefaultInstance());
@@ -64688,7 +65549,7 @@ public final class MasterProtos {
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledRequest request)
throws com.google.protobuf.ServiceException {
return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledResponse) channel.callBlockingMethod(
- getDescriptor().getMethods().get(32),
+ getDescriptor().getMethods().get(33),
controller,
request,
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledResponse.getDefaultInstance());
@@ -64700,7 +65561,7 @@ public final class MasterProtos {
org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceRequest request)
throws com.google.protobuf.ServiceException {
return (org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceResponse) channel.callBlockingMethod(
- getDescriptor().getMethods().get(33),
+ getDescriptor().getMethods().get(34),
controller,
request,
org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceResponse.getDefaultInstance());
@@ -64712,7 +65573,7 @@ public final class MasterProtos {
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SnapshotRequest request)
throws com.google.protobuf.ServiceException {
return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SnapshotResponse) channel.callBlockingMethod(
- getDescriptor().getMethods().get(34),
+ getDescriptor().getMethods().get(35),
controller,
request,
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SnapshotResponse.getDefaultInstance());
@@ -64724,7 +65585,7 @@ public final class MasterProtos {
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCompletedSnapshotsRequest request)
throws com.google.protobuf.ServiceException {
return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCompletedSnapshotsResponse) channel.callBlockingMethod(
- getDescriptor().getMethods().get(35),
+ getDescriptor().getMethods().get(36),
controller,
request,
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCompletedSnapshotsResponse.getDefaultInstance());
@@ -64736,7 +65597,7 @@ public final class MasterProtos {
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteSnapshotRequest request)
throws com.google.protobuf.ServiceException {
return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteSnapshotResponse) channel.callBlockingMethod(
- getDescriptor().getMethods().get(36),
+ getDescriptor().getMethods().get(37),
controller,
request,
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteSnapshotResponse.getDefaultInstance());
@@ -64748,7 +65609,7 @@ public final class MasterProtos {
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneRequest request)
throws com.google.protobuf.ServiceException {
return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneResponse) channel.callBlockingMethod(
- getDescriptor().getMethods().get(37),
+ getDescriptor().getMethods().get(38),
controller,
request,
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneResponse.getDefaultInstance());
@@ -64760,7 +65621,7 @@ public final class MasterProtos {
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotRequest request)
throws com.google.protobuf.ServiceException {
return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotResponse) channel.callBlockingMethod(
- getDescriptor().getMethods().get(38),
+ getDescriptor().getMethods().get(39),
controller,
request,
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotResponse.getDefaultInstance());
@@ -64772,7 +65633,7 @@ public final class MasterProtos {
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest request)
throws com.google.protobuf.ServiceException {
return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse) channel.callBlockingMethod(
- getDescriptor().getMethods().get(39),
+ getDescriptor().getMethods().get(40),
controller,
request,
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse.getDefaultInstance());
@@ -64784,7 +65645,7 @@ public final class MasterProtos {
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest request)
throws com.google.protobuf.ServiceException {
return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse) channel.callBlockingMethod(
- getDescriptor().getMethods().get(40),
+ getDescriptor().getMethods().get(41),
controller,
request,
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse.getDefaultInstance());
@@ -64796,7 +65657,7 @@ public final class MasterProtos {
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest request)
throws com.google.protobuf.ServiceException {
return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse) channel.callBlockingMethod(
- getDescriptor().getMethods().get(41),
+ getDescriptor().getMethods().get(42),
controller,
request,
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse.getDefaultInstance());
@@ -64808,7 +65669,7 @@ public final class MasterProtos {
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyNamespaceRequest request)
throws com.google.protobuf.ServiceException {
return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyNamespaceResponse) channel.callBlockingMethod(
- getDescriptor().getMethods().get(42),
+ getDescriptor().getMethods().get(43),
controller,
request,
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyNamespaceResponse.getDefaultInstance());
@@ -64820,7 +65681,7 @@ public final class MasterProtos {
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateNamespaceRequest request)
throws com.google.protobuf.ServiceException {
return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateNamespaceResponse) channel.callBlockingMethod(
- getDescriptor().getMethods().get(43),
+ getDescriptor().getMethods().get(44),
controller,
request,
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateNamespaceResponse.getDefaultInstance());
@@ -64832,7 +65693,7 @@ public final class MasterProtos {
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteNamespaceRequest request)
throws com.google.protobuf.ServiceException {
return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteNamespaceResponse) channel.callBlockingMethod(
- getDescriptor().getMethods().get(44),
+ getDescriptor().getMethods().get(45),
controller,
request,
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteNamespaceResponse.getDefaultInstance());
@@ -64844,7 +65705,7 @@ public final class MasterProtos {
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNamespaceDescriptorRequest request)
throws com.google.protobuf.ServiceException {
return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNamespaceDescriptorResponse) channel.callBlockingMethod(
- getDescriptor().getMethods().get(45),
+ getDescriptor().getMethods().get(46),
controller,
request,
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNamespaceDescriptorResponse.getDefaultInstance());
@@ -64856,7 +65717,7 @@ public final class MasterProtos {
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespaceDescriptorsRequest request)
throws com.google.protobuf.ServiceException {
return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespaceDescriptorsResponse) channel.callBlockingMethod(
- getDescriptor().getMethods().get(46),
+ getDescriptor().getMethods().get(47),
controller,
request,
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespaceDescriptorsResponse.getDefaultInstance());
@@ -64868,7 +65729,7 @@ public final class MasterProtos {
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceRequest request)
throws com.google.protobuf.ServiceException {
return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceResponse) channel.callBlockingMethod(
- getDescriptor().getMethods().get(47),
+ getDescriptor().getMethods().get(48),
controller,
request,
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceResponse.getDefaultInstance());
@@ -64880,7 +65741,7 @@ public final class MasterProtos {
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceRequest request)
throws com.google.protobuf.ServiceException {
return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceResponse) channel.callBlockingMethod(
- getDescriptor().getMethods().get(48),
+ getDescriptor().getMethods().get(49),
controller,
request,
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceResponse.getDefaultInstance());
@@ -64892,7 +65753,7 @@ public final class MasterProtos {
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest request)
throws com.google.protobuf.ServiceException {
return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse) channel.callBlockingMethod(
- getDescriptor().getMethods().get(49),
+ getDescriptor().getMethods().get(50),
controller,
request,
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse.getDefaultInstance());
@@ -64904,7 +65765,7 @@ public final class MasterProtos {
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest request)
throws com.google.protobuf.ServiceException {
return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse) channel.callBlockingMethod(
- getDescriptor().getMethods().get(50),
+ getDescriptor().getMethods().get(51),
controller,
request,
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse.getDefaultInstance());
@@ -64916,7 +65777,7 @@ public final class MasterProtos {
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest request)
throws com.google.protobuf.ServiceException {
return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse) channel.callBlockingMethod(
- getDescriptor().getMethods().get(51),
+ getDescriptor().getMethods().get(52),
controller,
request,
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.getDefaultInstance());
@@ -64928,7 +65789,7 @@ public final class MasterProtos {
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest request)
throws com.google.protobuf.ServiceException {
return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse) channel.callBlockingMethod(
- getDescriptor().getMethods().get(52),
+ getDescriptor().getMethods().get(53),
controller,
request,
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.getDefaultInstance());
@@ -64940,7 +65801,7 @@ public final class MasterProtos {
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest request)
throws com.google.protobuf.ServiceException {
return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse) channel.callBlockingMethod(
- getDescriptor().getMethods().get(53),
+ getDescriptor().getMethods().get(54),
controller,
request,
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.getDefaultInstance());
@@ -64952,7 +65813,7 @@ public final class MasterProtos {
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest request)
throws com.google.protobuf.ServiceException {
return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse) channel.callBlockingMethod(
- getDescriptor().getMethods().get(54),
+ getDescriptor().getMethods().get(55),
controller,
request,
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse.getDefaultInstance());
@@ -64964,7 +65825,7 @@ public final class MasterProtos {
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest request)
throws com.google.protobuf.ServiceException {
return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse) channel.callBlockingMethod(
- getDescriptor().getMethods().get(55),
+ getDescriptor().getMethods().get(56),
controller,
request,
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse.getDefaultInstance());
@@ -64976,7 +65837,7 @@ public final class MasterProtos {
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest request)
throws com.google.protobuf.ServiceException {
return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse) channel.callBlockingMethod(
- getDescriptor().getMethods().get(56),
+ getDescriptor().getMethods().get(57),
controller,
request,
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse.getDefaultInstance());
@@ -65218,6 +66079,16 @@ public final class MasterProtos {
com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_hbase_pb_StopMasterResponse_fieldAccessorTable;
private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_hbase_pb_IsInMaintenanceModeRequest_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_hbase_pb_IsInMaintenanceModeRequest_fieldAccessorTable;
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_hbase_pb_IsInMaintenanceModeResponse_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_hbase_pb_IsInMaintenanceModeResponse_fieldAccessorTable;
+ private static com.google.protobuf.Descriptors.Descriptor
internal_static_hbase_pb_BalanceRequest_descriptor;
private static
com.google.protobuf.GeneratedMessage.FieldAccessorTable
@@ -65636,251 +66507,256 @@ public final class MasterProtos {
"aceResponse\022&\n\ttableName\030\001 \003(\0132\023.hbase.p" +
"b.TableName\"\021\n\017ShutdownRequest\"\022\n\020Shutdo" +
"wnResponse\"\023\n\021StopMasterRequest\"\024\n\022StopM" +
- "asterResponse\"\037\n\016BalanceRequest\022\r\n\005force" +
- "\030\001 \001(\010\"\'\n\017BalanceResponse\022\024\n\014balancer_ra" +
- "n\030\001 \002(\010\"<\n\031SetBalancerRunningRequest\022\n\n\002" +
- "on\030\001 \002(\010\022\023\n\013synchronous\030\002 \001(\010\"8\n\032SetBala" +
- "ncerRunningResponse\022\032\n\022prev_balance_valu",
- "e\030\001 \001(\010\"\032\n\030IsBalancerEnabledRequest\",\n\031I" +
- "sBalancerEnabledResponse\022\017\n\007enabled\030\001 \002(" +
- "\010\"\212\001\n\035SetSplitOrMergeEnabledRequest\022\017\n\007e" +
- "nabled\030\001 \002(\010\022\023\n\013synchronous\030\002 \001(\010\0220\n\014swi" +
- "tch_types\030\003 \003(\0162\032.hbase.pb.MasterSwitchT" +
- "ype\022\021\n\tskip_lock\030\004 \001(\010\"4\n\036SetSplitOrMerg" +
- "eEnabledResponse\022\022\n\nprev_value\030\001 \003(\010\"O\n\034" +
- "IsSplitOrMergeEnabledRequest\022/\n\013switch_t" +
- "ype\030\001 \002(\0162\032.hbase.pb.MasterSwitchType\"0\n" +
- "\035IsSplitOrMergeEnabledResponse\022\017\n\007enable",
- "d\030\001 \002(\010\"+\n)ReleaseSplitOrMergeLockAndRol" +
- "lbackRequest\",\n*ReleaseSplitOrMergeLockA" +
- "ndRollbackResponse\"\022\n\020NormalizeRequest\"+" +
- "\n\021NormalizeResponse\022\026\n\016normalizer_ran\030\001 " +
- "\002(\010\")\n\033SetNormalizerRunningRequest\022\n\n\002on" +
- "\030\001 \002(\010\"=\n\034SetNormalizerRunningResponse\022\035" +
- "\n\025prev_normalizer_value\030\001 \001(\010\"\034\n\032IsNorma" +
- "lizerEnabledRequest\".\n\033IsNormalizerEnabl" +
- "edResponse\022\017\n\007enabled\030\001 \002(\010\"\027\n\025RunCatalo" +
- "gScanRequest\"-\n\026RunCatalogScanResponse\022\023",
- "\n\013scan_result\030\001 \001(\005\"-\n\033EnableCatalogJani" +
- "torRequest\022\016\n\006enable\030\001 \002(\010\"2\n\034EnableCata" +
- "logJanitorResponse\022\022\n\nprev_value\030\001 \001(\010\" " +
- "\n\036IsCatalogJanitorEnabledRequest\"0\n\037IsCa" +
- "talogJanitorEnabledResponse\022\r\n\005value\030\001 \002" +
- "(\010\"B\n\017SnapshotRequest\022/\n\010snapshot\030\001 \002(\0132" +
- "\035.hbase.pb.SnapshotDescription\",\n\020Snapsh" +
- "otResponse\022\030\n\020expected_timeout\030\001 \002(\003\"\036\n\034" +
- "GetCompletedSnapshotsRequest\"Q\n\035GetCompl" +
- "etedSnapshotsResponse\0220\n\tsnapshots\030\001 \003(\013",
- "2\035.hbase.pb.SnapshotDescription\"H\n\025Delet" +
- "eSnapshotRequest\022/\n\010snapshot\030\001 \002(\0132\035.hba" +
- "se.pb.SnapshotDescription\"\030\n\026DeleteSnaps" +
- "hotResponse\"s\n\026RestoreSnapshotRequest\022/\n" +
- "\010snapshot\030\001 \002(\0132\035.hbase.pb.SnapshotDescr" +
- "iption\022\026\n\013nonce_group\030\002 \001(\004:\0010\022\020\n\005nonce\030" +
- "\003 \001(\004:\0010\"*\n\027RestoreSnapshotResponse\022\017\n\007p" +
- "roc_id\030\001 \002(\004\"H\n\025IsSnapshotDoneRequest\022/\n" +
- "\010snapshot\030\001 \001(\0132\035.hbase.pb.SnapshotDescr" +
- "iption\"^\n\026IsSnapshotDoneResponse\022\023\n\004done",
- "\030\001 \001(\010:\005false\022/\n\010snapshot\030\002 \001(\0132\035.hbase." +
- "pb.SnapshotDescription\"O\n\034IsRestoreSnaps" +
- "hotDoneRequest\022/\n\010snapshot\030\001 \001(\0132\035.hbase" +
- ".pb.SnapshotDescription\"4\n\035IsRestoreSnap" +
- "shotDoneResponse\022\023\n\004done\030\001 \001(\010:\005false\"F\n" +
- "\033GetSchemaAlterStatusRequest\022\'\n\ntable_na" +
- "me\030\001 \002(\0132\023.hbase.pb.TableName\"T\n\034GetSche" +
- "maAlterStatusResponse\022\035\n\025yet_to_update_r" +
- "egions\030\001 \001(\r\022\025\n\rtotal_regions\030\002 \001(\r\"\213\001\n\032" +
- "GetTableDescriptorsRequest\022(\n\013table_name",
- "s\030\001 \003(\0132\023.hbase.pb.TableName\022\r\n\005regex\030\002 " +
- "\001(\t\022!\n\022include_sys_tables\030\003 \001(\010:\005false\022\021" +
- "\n\tnamespace\030\004 \001(\t\"J\n\033GetTableDescriptors" +
- "Response\022+\n\014table_schema\030\001 \003(\0132\025.hbase.p" +
- "b.TableSchema\"[\n\024GetTableNamesRequest\022\r\n" +
- "\005regex\030\001 \001(\t\022!\n\022include_sys_tables\030\002 \001(\010" +
- ":\005false\022\021\n\tnamespace\030\003 \001(\t\"A\n\025GetTableNa" +
- "mesResponse\022(\n\013table_names\030\001 \003(\0132\023.hbase" +
- ".pb.TableName\"?\n\024GetTableStateRequest\022\'\n" +
- "\ntable_name\030\001 \002(\0132\023.hbase.pb.TableName\"B",
- "\n\025GetTableStateResponse\022)\n\013table_state\030\001" +
- " \002(\0132\024.hbase.pb.TableState\"\031\n\027GetCluster" +
- "StatusRequest\"K\n\030GetClusterStatusRespons" +
- "e\022/\n\016cluster_status\030\001 \002(\0132\027.hbase.pb.Clu" +
- "sterStatus\"\030\n\026IsMasterRunningRequest\"4\n\027" +
- "IsMasterRunningResponse\022\031\n\021is_master_run" +
- "ning\030\001 \002(\010\"I\n\024ExecProcedureRequest\0221\n\tpr" +
- "ocedure\030\001 \002(\0132\036.hbase.pb.ProcedureDescri" +
- "ption\"F\n\025ExecProcedureResponse\022\030\n\020expect" +
- "ed_timeout\030\001 \001(\003\022\023\n\013return_data\030\002 \001(\014\"K\n",
- "\026IsProcedureDoneRequest\0221\n\tprocedure\030\001 \001" +
- "(\0132\036.hbase.pb.ProcedureDescription\"`\n\027Is" +
- "ProcedureDoneResponse\022\023\n\004done\030\001 \001(\010:\005fal" +
- "se\0220\n\010snapshot\030\002 \001(\0132\036.hbase.pb.Procedur" +
- "eDescription\",\n\031GetProcedureResultReques" +
- "t\022\017\n\007proc_id\030\001 \002(\004\"\371\001\n\032GetProcedureResul" +
- "tResponse\0229\n\005state\030\001 \002(\0162*.hbase.pb.GetP" +
- "rocedureResultResponse.State\022\022\n\nstart_ti" +
- "me\030\002 \001(\004\022\023\n\013last_update\030\003 \001(\004\022\016\n\006result\030" +
- "\004 \001(\014\0224\n\texception\030\005 \001(\0132!.hbase.pb.Fore",
- "ignExceptionMessage\"1\n\005State\022\r\n\tNOT_FOUN" +
- "D\020\000\022\013\n\007RUNNING\020\001\022\014\n\010FINISHED\020\002\"M\n\025AbortP" +
- "rocedureRequest\022\017\n\007proc_id\030\001 \002(\004\022#\n\025mayI" +
- "nterruptIfRunning\030\002 \001(\010:\004true\"6\n\026AbortPr" +
- "ocedureResponse\022\034\n\024is_procedure_aborted\030" +
- "\001 \002(\010\"\027\n\025ListProceduresRequest\"@\n\026ListPr" +
- "oceduresResponse\022&\n\tprocedure\030\001 \003(\0132\023.hb" +
- "ase.pb.Procedure\"\315\001\n\017SetQuotaRequest\022\021\n\t" +
- "user_name\030\001 \001(\t\022\022\n\nuser_group\030\002 \001(\t\022\021\n\tn" +
- "amespace\030\003 \001(\t\022\'\n\ntable_name\030\004 \001(\0132\023.hba",
- "se.pb.TableName\022\022\n\nremove_all\030\005 \001(\010\022\026\n\016b" +
- "ypass_globals\030\006 \001(\010\022+\n\010throttle\030\007 \001(\0132\031." +
- "hbase.pb.ThrottleRequest\"\022\n\020SetQuotaResp" +
- "onse\"J\n\037MajorCompactionTimestampRequest\022" +
- "\'\n\ntable_name\030\001 \002(\0132\023.hbase.pb.TableName" +
- "\"U\n(MajorCompactionTimestampForRegionReq" +
- "uest\022)\n\006region\030\001 \002(\0132\031.hbase.pb.RegionSp" +
- "ecifier\"@\n MajorCompactionTimestampRespo" +
- "nse\022\034\n\024compaction_timestamp\030\001 \002(\003\"\035\n\033Sec" +
- "urityCapabilitiesRequest\"\354\001\n\034SecurityCap",
- "abilitiesResponse\022G\n\014capabilities\030\001 \003(\0162" +
- "1.hbase.pb.SecurityCapabilitiesResponse." +
- "Capability\"\202\001\n\nCapability\022\031\n\025SIMPLE_AUTH" +
- "ENTICATION\020\000\022\031\n\025SECURE_AUTHENTICATION\020\001\022" +
- "\021\n\rAUTHORIZATION\020\002\022\026\n\022CELL_AUTHORIZATION" +
- "\020\003\022\023\n\017CELL_VISIBILITY\020\004*(\n\020MasterSwitchT" +
- "ype\022\t\n\005SPLIT\020\000\022\t\n\005MERGE\020\0012\373(\n\rMasterServ" +
- "ice\022e\n\024GetSchemaAlterStatus\022%.hbase.pb.G" +
- "etSchemaAlterStatusRequest\032&.hbase.pb.Ge" +
- "tSchemaAlterStatusResponse\022b\n\023GetTableDe",
- "scriptors\022$.hbase.pb.GetTableDescriptors" +
- "Request\032%.hbase.pb.GetTableDescriptorsRe" +
- "sponse\022P\n\rGetTableNames\022\036.hbase.pb.GetTa" +
- "bleNamesRequest\032\037.hbase.pb.GetTableNames" +
- "Response\022Y\n\020GetClusterStatus\022!.hbase.pb." +
- "GetClusterStatusRequest\032\".hbase.pb.GetCl" +
- "usterStatusResponse\022V\n\017IsMasterRunning\022 " +
- ".hbase.pb.IsMasterRunningRequest\032!.hbase" +
- ".pb.IsMasterRunningResponse\022D\n\tAddColumn" +
- "\022\032.hbase.pb.AddColumnRequest\032\033.hbase.pb.",
- "AddColumnResponse\022M\n\014DeleteColumn\022\035.hbas" +
- "e.pb.DeleteColumnRequest\032\036.hbase.pb.Dele" +
- "teColumnResponse\022M\n\014ModifyColumn\022\035.hbase" +
- ".pb.ModifyColumnRequest\032\036.hbase.pb.Modif" +
- "yColumnResponse\022G\n\nMoveRegion\022\033.hbase.pb" +
- ".MoveRegionRequest\032\034.hbase.pb.MoveRegion" +
- "Response\022k\n\026DispatchMergingRegions\022\'.hba" +
- "se.pb.DispatchMergingRegionsRequest\032(.hb" +
- "ase.pb.DispatchMergingRegionsResponse\022M\n" +
- "\014AssignRegion\022\035.hbase.pb.AssignRegionReq",
- "uest\032\036.hbase.pb.AssignRegionResponse\022S\n\016" +
- "UnassignRegion\022\037.hbase.pb.UnassignRegion" +
- "Request\032 .hbase.pb.UnassignRegionRespons" +
- "e\022P\n\rOfflineRegion\022\036.hbase.pb.OfflineReg" +
- "ionRequest\032\037.hbase.pb.OfflineRegionRespo" +
- "nse\022J\n\013DeleteTable\022\034.hbase.pb.DeleteTabl" +
- "eRequest\032\035.hbase.pb.DeleteTableResponse\022" +
- "P\n\rtruncateTable\022\036.hbase.pb.TruncateTabl" +
- "eRequest\032\037.hbase.pb.TruncateTableRespons" +
- "e\022J\n\013EnableTable\022\034.hbase.pb.EnableTableR",
- "equest\032\035.hbase.pb.EnableTableResponse\022M\n" +
- "\014DisableTable\022\035.hbase.pb.DisableTableReq" +
- "uest\032\036.hbase.pb.DisableTableResponse\022J\n\013" +
- "ModifyTable\022\034.hbase.pb.ModifyTableReques" +
- "t\032\035.hbase.pb.ModifyTableResponse\022J\n\013Crea" +
- "teTable\022\034.hbase.pb.CreateTableRequest\032\035." +
- "hbase.pb.CreateTableResponse\022A\n\010Shutdown" +
- "\022\031.hbase.pb.ShutdownRequest\032\032.hbase.pb.S" +
- "hutdownResponse\022G\n\nStopMaster\022\033.hbase.pb" +
- ".StopMasterRequest\032\034.hbase.pb.StopMaster",
- "Response\022>\n\007Balance\022\030.hbase.pb.BalanceRe" +
- "quest\032\031.hbase.pb.BalanceResponse\022_\n\022SetB" +
- "alancerRunning\022#.hbase.pb.SetBalancerRun" +
- "ningRequest\032$.hbase.pb.SetBalancerRunnin" +
- "gResponse\022\\\n\021IsBalancerEnabled\022\".hbase.p" +
- "b.IsBalancerEnabledRequest\032#.hbase.pb.Is" +
- "BalancerEnabledResponse\022k\n\026SetSplitOrMer" +
- "geEnabled\022\'.hbase.pb.SetSplitOrMergeEnab" +
- "ledRequest\032(.hbase.pb.SetSplitOrMergeEna" +
- "bledResponse\022h\n\025IsSplitOrMergeEnabled\022&.",
- "hbase.pb.IsSplitOrMergeEnabledRequest\032\'." +
- "hbase.pb.IsSplitOrMergeEnabledResponse\022\217" +
- "\001\n\"ReleaseSplitOrMergeLockAndRollback\0223." +
- "hbase.pb.ReleaseSplitOrMergeLockAndRollb" +
- "ackRequest\0324.hbase.pb.ReleaseSplitOrMerg" +
- "eLockAndRollbackResponse\022D\n\tNormalize\022\032." +
- "hbase.pb.NormalizeRequest\032\033.hbase.pb.Nor" +
- "malizeResponse\022e\n\024SetNormalizerRunning\022%" +
- ".hbase.pb.SetNormalizerRunningRequest\032&." +
- "hbase.pb.SetNormalizerRunningResponse\022b\n",
- "\023IsNormalizerEnabled\022$.hbase.pb.IsNormal" +
- "izerEnabledRequest\032%.hbase.pb.IsNormaliz" +
- "erEnabledResponse\022S\n\016RunCatalogScan\022\037.hb" +
- "ase.pb.RunCatalogScanRequest\032 .hbase.pb." +
- "RunCatalogScanResponse\022e\n\024EnableCatalogJ" +
- "anitor\022%.hbase.pb.EnableCatalogJanitorRe" +
- "quest\032&.hbase.pb.EnableCatalogJanitorRes" +
- "ponse\022n\n\027IsCatalogJanitorEnabled\022(.hbase" +
- ".pb.IsCatalogJanitorEnabledRequest\032).hba" +
- "se.pb.IsCatalogJanitorEnabledResponse\022^\n",
- "\021ExecMasterService\022#.hbase.pb.Coprocesso" +
- "rServiceRequest\032$.hbase.pb.CoprocessorSe" +
- "rviceResponse\022A\n\010Snapshot\022\031.hbase.pb.Sna" +
- "pshotRequest\032\032.hbase.pb.SnapshotResponse" +
- "\022h\n\025GetCompletedSnapshots\022&.hbase.pb.Get" +
- "CompletedSnapshotsRequest\032\'.hbase.pb.Get" +
- "CompletedSnapshotsResponse\022S\n\016DeleteSnap" +
- "shot\022\037.hbase.pb.DeleteSnapshotRequest\032 ." +
- "hbase.pb.DeleteSnapshotResponse\022S\n\016IsSna" +
- "pshotDone\022\037.hbase.pb.IsSnapshotDoneReque",
- "st\032 .hbase.pb.IsSnapshotDoneResponse\022V\n\017" +
- "RestoreSnapshot\022 .hbase.pb.RestoreSnapsh" +
- "otRequest\032!.hbase.pb.RestoreSnapshotResp" +
- "onse\022P\n\rExecProcedure\022\036.hbase.pb.ExecPro" +
- "cedureRequest\032\037.hbase.pb.ExecProcedureRe" +
- "sponse\022W\n\024ExecProcedureWithRet\022\036.hbase.p" +
- "b.ExecProcedureRequest\032\037.hbase.pb.ExecPr" +
- "ocedureResponse\022V\n\017IsProcedureDone\022 .hba" +
- "se.pb.IsProcedureDoneRequest\032!.hbase.pb." +
- "IsProcedureDoneResponse\022V\n\017ModifyNamespa",
- "ce\022 .hbase.pb.ModifyNamespaceRequest\032!.h" +
- "base.pb.ModifyNamespaceResponse\022V\n\017Creat" +
- "eNamespace\022 .hbase.pb.CreateNamespaceReq" +
- "uest\032!.hbase.pb.CreateNamespaceResponse\022" +
- "V\n\017DeleteNamespace\022 .hbase.pb.DeleteName" +
- "spaceRequest\032!.hbase.pb.DeleteNamespaceR" +
- "esponse\022k\n\026GetNamespaceDescriptor\022\'.hbas" +
- "e.pb.GetNamespaceDescriptorRequest\032(.hba" +
- "se.pb.GetNamespaceDescriptorResponse\022q\n\030" +
- "ListNamespaceDescriptors\022).hbase.pb.List",
- "NamespaceDescriptorsRequest\032*.hbase.pb.L" +
- "istNamespaceDescriptorsResponse\022\206\001\n\037List" +
- "TableDescriptorsByNamespace\0220.hbase.pb.L" +
- "istTableDescriptorsByNamespaceRequest\0321." +
- "hbase.pb.ListTableDescriptorsByNamespace" +
- "Response\022t\n\031ListTableNamesByNamespace\022*." +
- "hbase.pb.ListTableNamesByNamespaceReques" +
- "t\032+.hbase.pb.ListTableNamesByNamespaceRe" +
- "sponse\022P\n\rGetTableState\022\036.hbase.pb.GetTa" +
- "bleStateRequest\032\037.hbase.pb.GetTableState",
- "Response\022A\n\010SetQuota\022\031.hbase.pb.SetQuota" +
- "Request\032\032.hbase.pb.SetQuotaResponse\022x\n\037g" +
- "etLastMajorCompactionTimestamp\022).hbase.p" +
- "b.MajorCompactionTimestampRequest\032*.hbas" +
- "e.pb.MajorCompactionTimestampResponse\022\212\001" +
- "\n(getLastMajorCompactionTimestampForRegi" +
- "on\0222.hbase.pb.MajorCompactionTimestampFo" +
- "rRegionRequest\032*.hbase.pb.MajorCompactio" +
- "nTimestampResponse\022_\n\022getProcedureResult" +
- "\022#.hbase.pb.GetProcedureResultRequest\032$.",
- "hbase.pb.GetProcedureResultResponse\022h\n\027g" +
- "etSecurityCapabilities\022%.hbase.pb.Securi" +
- "tyCapabilitiesRequest\032&.hbase.pb.Securit" +
- "yCapabilitiesResponse\022S\n\016AbortProcedure\022" +
- "\037.hbase.pb.AbortProcedureRequest\032 .hbase" +
- ".pb.AbortProcedureResponse\022S\n\016ListProced" +
- "ures\022\037.hbase.pb.ListProceduresRequest\032 ." +
- "hbase.pb.ListProceduresResponseBB\n*org.a" +
- "pache.hadoop.hbase.protobuf.generatedB\014M" +
- "asterProtosH\001\210\001\001\240\001\001"
+ "asterResponse\"\034\n\032IsInMaintenanceModeRequ" +
+ "est\"8\n\033IsInMaintenanceModeResponse\022\031\n\021in" +
+ "MaintenanceMode\030\001 \002(\010\"\037\n\016BalanceRequest\022" +
+ "\r\n\005force\030\001 \001(\010\"\'\n\017BalanceResponse\022\024\n\014bal" +
+ "ancer_ran\030\001 \002(\010\"<\n\031SetBalancerRunningReq",
+ "uest\022\n\n\002on\030\001 \002(\010\022\023\n\013synchronous\030\002 \001(\010\"8\n" +
+ "\032SetBalancerRunningResponse\022\032\n\022prev_bala" +
+ "nce_value\030\001 \001(\010\"\032\n\030IsBalancerEnabledRequ" +
+ "est\",\n\031IsBalancerEnabledResponse\022\017\n\007enab" +
+ "led\030\001 \002(\010\"\212\001\n\035SetSplitOrMergeEnabledRequ" +
+ "est\022\017\n\007enabled\030\001 \002(\010\022\023\n\013synchronous\030\002 \001(" +
+ "\010\0220\n\014switch_types\030\003 \003(\0162\032.hbase.pb.Maste" +
+ "rSwitchType\022\021\n\tskip_lock\030\004 \001(\010\"4\n\036SetSpl" +
+ "itOrMergeEnabledResponse\022\022\n\nprev_value\030\001" +
+ " \003(\010\"O\n\034IsSplitOrMergeEnabledRequest\022/\n\013",
+ "switch_type\030\001 \002(\0162\032.hbase.pb.MasterSwitc" +
+ "hType\"0\n\035IsSplitOrMergeEnabledResponse\022\017" +
+ "\n\007enabled\030\001 \002(\010\"+\n)ReleaseSplitOrMergeLo" +
+ "ckAndRollbackRequest\",\n*ReleaseSplitOrMe" +
+ "rgeLockAndRollbackResponse\"\022\n\020NormalizeR" +
+ "equest\"+\n\021NormalizeResponse\022\026\n\016normalize" +
+ "r_ran\030\001 \002(\010\")\n\033SetNormalizerRunningReque" +
+ "st\022\n\n\002on\030\001 \002(\010\"=\n\034SetNormalizerRunningRe" +
+ "sponse\022\035\n\025prev_normalizer_value\030\001 \001(\010\"\034\n" +
+ "\032IsNormalizerEnabledRequest\".\n\033IsNormali",
+ "zerEnabledResponse\022\017\n\007enabled\030\001 \002(\010\"\027\n\025R" +
+ "unCatalogScanRequest\"-\n\026RunCatalogScanRe" +
+ "sponse\022\023\n\013scan_result\030\001 \001(\005\"-\n\033EnableCat" +
+ "alogJanitorRequest\022\016\n\006enable\030\001 \002(\010\"2\n\034En" +
+ "ableCatalogJanitorResponse\022\022\n\nprev_value" +
+ "\030\001 \001(\010\" \n\036IsCatalogJanitorEnabledRequest" +
+ "\"0\n\037IsCatalogJanitorEnabledResponse\022\r\n\005v" +
+ "alue\030\001 \002(\010\"B\n\017SnapshotRequest\022/\n\010snapsho" +
+ "t\030\001 \002(\0132\035.hbase.pb.SnapshotDescription\"," +
+ "\n\020SnapshotResponse\022\030\n\020expected_timeout\030\001",
+ " \002(\003\"\036\n\034GetCompletedSnapshotsRequest\"Q\n\035" +
+ "GetCompletedSnapshotsResponse\0220\n\tsnapsho" +
+ "ts\030\001 \003(\0132\035.hbase.pb.SnapshotDescription\"" +
+ "H\n\025DeleteSnapshotRequest\022/\n\010snapshot\030\001 \002" +
+ "(\0132\035.hbase.pb.SnapshotDescription\"\030\n\026Del" +
+ "eteSnapshotResponse\"s\n\026RestoreSnapshotRe" +
+ "quest\022/\n\010snapshot\030\001 \002(\0132\035.hbase.pb.Snaps" +
+ "hotDescription\022\026\n\013nonce_group\030\002 \001(\004:\0010\022\020" +
+ "\n\005nonce\030\003 \001(\004:\0010\"*\n\027RestoreSnapshotRespo" +
+ "nse\022\017\n\007proc_id\030\001 \002(\004\"H\n\025IsSnapshotDoneRe",
+ "quest\022/\n\010snapshot\030\001 \001(\0132\035.hbase.pb.Snaps" +
+ "hotDescription\"^\n\026IsSnapshotDoneResponse" +
+ "\022\023\n\004done\030\001 \001(\010:\005false\022/\n\010snapshot\030\002 \001(\0132" +
+ "\035.hbase.pb.SnapshotDescription\"O\n\034IsRest" +
+ "oreSnapshotDoneRequest\022/\n\010snapshot\030\001 \001(\013" +
+ "2\035.hbase.pb.SnapshotDescription\"4\n\035IsRes" +
+ "toreSnapshotDoneResponse\022\023\n\004done\030\001 \001(\010:\005" +
+ "false\"F\n\033GetSchemaAlterStatusRequest\022\'\n\n" +
+ "table_name\030\001 \002(\0132\023.hbase.pb.TableName\"T\n" +
+ "\034GetSchemaAlterStatusResponse\022\035\n\025yet_to_",
+ "update_regions\030\001 \001(\r\022\025\n\rtotal_regions\030\002 " +
+ "\001(\r\"\213\001\n\032GetTableDescriptorsRequest\022(\n\013ta" +
+ "ble_names\030\001 \003(\0132\023.hbase.pb.TableName\022\r\n\005" +
+ "regex\030\002 \001(\t\022!\n\022include_sys_tables\030\003 \001(\010:" +
+ "\005false\022\021\n\tnamespace\030\004 \001(\t\"J\n\033GetTableDes" +
+ "criptorsResponse\022+\n\014table_schema\030\001 \003(\0132\025" +
+ ".hbase.pb.TableSchema\"[\n\024GetTableNamesRe" +
+ "quest\022\r\n\005regex\030\001 \001(\t\022!\n\022include_sys_tabl" +
+ "es\030\002 \001(\010:\005false\022\021\n\tnamespace\030\003 \001(\t\"A\n\025Ge" +
+ "tTableNamesResponse\022(\n\013table_names\030\001 \003(\013",
+ "2\023.hbase.pb.TableName\"?\n\024GetTableStateRe" +
+ "quest\022\'\n\ntable_name\030\001 \002(\0132\023.hbase.pb.Tab" +
+ "leName\"B\n\025GetTableStateResponse\022)\n\013table" +
+ "_state\030\001 \002(\0132\024.hbase.pb.TableState\"\031\n\027Ge" +
+ "tClusterStatusRequest\"K\n\030GetClusterStatu" +
+ "sResponse\022/\n\016cluster_status\030\001 \002(\0132\027.hbas" +
+ "e.pb.ClusterStatus\"\030\n\026IsMasterRunningReq" +
+ "uest\"4\n\027IsMasterRunningResponse\022\031\n\021is_ma" +
+ "ster_running\030\001 \002(\010\"I\n\024ExecProcedureReque" +
+ "st\0221\n\tprocedure\030\001 \002(\0132\036.hbase.pb.Procedu",
+ "reDescription\"F\n\025ExecProcedureResponse\022\030" +
+ "\n\020expected_timeout\030\001 \001(\003\022\023\n\013return_data\030" +
+ "\002 \001(\014\"K\n\026IsProcedureDoneRequest\0221\n\tproce" +
+ "dure\030\001 \001(\0132\036.hbase.pb.ProcedureDescripti" +
+ "on\"`\n\027IsProcedureDoneResponse\022\023\n\004done\030\001 " +
+ "\001(\010:\005false\0220\n\010snapshot\030\002 \001(\0132\036.hbase.pb." +
+ "ProcedureDescription\",\n\031GetProcedureResu" +
+ "ltRequest\022\017\n\007proc_id\030\001 \002(\004\"\371\001\n\032GetProced" +
+ "ureResultResponse\0229\n\005state\030\001 \002(\0162*.hbase" +
+ ".pb.GetProcedureResultResponse.State\022\022\n\n",
+ "start_time\030\002 \001(\004\022\023\n\013last_update\030\003 \001(\004\022\016\n" +
+ "\006result\030\004 \001(\014\0224\n\texception\030\005 \001(\0132!.hbase" +
+ ".pb.ForeignExceptionMessage\"1\n\005State\022\r\n\t" +
+ "NOT_FOUND\020\000\022\013\n\007RUNNING\020\001\022\014\n\010FINISHED\020\002\"M" +
+ "\n\025AbortProcedureRequest\022\017\n\007proc_id\030\001 \002(\004" +
+ "\022#\n\025mayInterruptIfRunning\030\002 \001(\010:\004true\"6\n" +
+ "\026AbortProcedureResponse\022\034\n\024is_procedure_" +
+ "aborted\030\001 \002(\010\"\027\n\025ListProceduresRequest\"@" +
+ "\n\026ListProceduresResponse\022&\n\tprocedure\030\001 " +
+ "\003(\0132\023.hbase.pb.Procedure\"\315\001\n\017SetQuotaReq",
+ "uest\022\021\n\tuser_name\030\001 \001(\t\022\022\n\nuser_group\030\002 " +
+ "\001(\t\022\021\n\tnamespace\030\003 \001(\t\022\'\n\ntable_name\030\004 \001" +
+ "(\0132\023.hbase.pb.TableName\022\022\n\nremove_all\030\005 " +
+ "\001(\010\022\026\n\016bypass_globals\030\006 \001(\010\022+\n\010throttle\030" +
+ "\007 \001(\0132\031.hbase.pb.ThrottleRequest\"\022\n\020SetQ" +
+ "uotaResponse\"J\n\037MajorCompactionTimestamp" +
+ "Request\022\'\n\ntable_name\030\001 \002(\0132\023.hbase.pb.T" +
+ "ableName\"U\n(MajorCompactionTimestampForR" +
+ "egionRequest\022)\n\006region\030\001 \002(\0132\031.hbase.pb." +
+ "RegionSpecifier\"@\n MajorCompactionTimest",
+ "ampResponse\022\034\n\024compaction_timestamp\030\001 \002(" +
+ "\003\"\035\n\033SecurityCapabilitiesRequest\"\354\001\n\034Sec" +
+ "urityCapabilitiesResponse\022G\n\014capabilitie" +
+ "s\030\001 \003(\01621.hbase.pb.SecurityCapabilitiesR" +
+ "esponse.Capability\"\202\001\n\nCapability\022\031\n\025SIM" +
+ "PLE_AUTHENTICATION\020\000\022\031\n\025SECURE_AUTHENTIC" +
+ "ATION\020\001\022\021\n\rAUTHORIZATION\020\002\022\026\n\022CELL_AUTHO" +
+ "RIZATION\020\003\022\023\n\017CELL_VISIBILITY\020\004*(\n\020Maste" +
+ "rSwitchType\022\t\n\005SPLIT\020\000\022\t\n\005MERGE\020\0012\345)\n\rMa" +
+ "sterService\022e\n\024GetSchemaAlterStatus\022%.hb",
+ "ase.pb.GetSchemaAlterStatusRequest\032&.hba" +
+ "se.pb.GetSchemaAlterStatusResponse\022b\n\023Ge" +
+ "tTableDescriptors\022$.hbase.pb.GetTableDes" +
+ "criptorsRequest\032%.hbase.pb.GetTableDescr" +
+ "iptorsResponse\022P\n\rGetTableNames\022\036.hbase." +
+ "pb.GetTableNamesRequest\032\037.hbase.pb.GetTa" +
+ "bleNamesResponse\022Y\n\020GetClusterStatus\022!.h" +
+ "base.pb.GetClusterStatusRequest\032\".hbase." +
+ "pb.GetClusterStatusResponse\022V\n\017IsMasterR" +
+ "unning\022 .hbase.pb.IsMasterRunningRequest",
+ "\032!.hbase.pb.IsMasterRunningResponse\022D\n\tA" +
+ "ddColumn\022\032.hbase.pb.AddColumnRequest\032\033.h" +
+ "base.pb.AddColumnResponse\022M\n\014DeleteColum" +
+ "n\022\035.hbase.pb.DeleteColumnRequest\032\036.hbase" +
+ ".pb.DeleteColumnResponse\022M\n\014ModifyColumn" +
+ "\022\035.hbase.pb.ModifyColumnRequest\032\036.hbase." +
+ "pb.ModifyColumnResponse\022G\n\nMoveRegion\022\033." +
+ "hbase.pb.MoveRegionRequest\032\034.hbase.pb.Mo" +
+ "veRegionResponse\022k\n\026DispatchMergingRegio" +
+ "ns\022\'.hbase.pb.DispatchMergingRegionsRequ",
+ "est\032(.hbase.pb.DispatchMergingRegionsRes" +
+ "ponse\022M\n\014AssignRegion\022\035.hbase.pb.AssignR" +
+ "egionRequest\032\036.hbase.pb.AssignRegionResp" +
+ "onse\022S\n\016UnassignRegion\022\037.hbase.pb.Unassi" +
+ "gnRegionRequest\032 .hbase.pb.UnassignRegio" +
+ "nResponse\022P\n\rOfflineRegion\022\036.hbase.pb.Of" +
+ "flineRegionRequest\032\037.hbase.pb.OfflineReg" +
+ "ionResponse\022J\n\013DeleteTable\022\034.hbase.pb.De" +
+ "leteTableRequest\032\035.hbase.pb.DeleteTableR" +
+ "esponse\022P\n\rtruncateTable\022\036.hbase.pb.Trun",
+ "cateTableRequest\032\037.hbase.pb.TruncateTabl" +
+ "eResponse\022J\n\013EnableTable\022\034.hbase.pb.Enab" +
+ "leTableRequest\032\035.hbase.pb.EnableTableRes" +
+ "ponse\022M\n\014DisableTable\022\035.hbase.pb.Disable" +
+ "TableRequest\032\036.hbase.pb.DisableTableResp" +
+ "onse\022J\n\013ModifyTable\022\034.hbase.pb.ModifyTab" +
+ "leRequest\032\035.hbase.pb.ModifyTableResponse" +
+ "\022J\n\013CreateTable\022\034.hbase.pb.CreateTableRe" +
+ "quest\032\035.hbase.pb.CreateTableResponse\022A\n\010" +
+ "Shutdown\022\031.hbase.pb.ShutdownRequest\032\032.hb",
+ "ase.pb.ShutdownResponse\022G\n\nStopMaster\022\033." +
+ "hbase.pb.StopMasterRequest\032\034.hbase.pb.St" +
+ "opMasterResponse\022h\n\031IsMasterInMaintenanc" +
+ "eMode\022$.hbase.pb.IsInMaintenanceModeRequ" +
+ "est\032%.hbase.pb.IsInMaintenanceModeRespon" +
+ "se\022>\n\007Balance\022\030.hbase.pb.BalanceRequest\032" +
+ "\031.hbase.pb.BalanceResponse\022_\n\022SetBalance" +
+ "rRunning\022#.hbase.pb.SetBalancerRunningRe" +
+ "quest\032$.hbase.pb.SetBalancerRunningRespo" +
+ "nse\022\\\n\021IsBalancerEnabled\022\".hbase.pb.IsBa",
+ "lancerEnabledRequest\032#.hbase.pb.IsBalanc" +
+ "erEnabledResponse\022k\n\026SetSplitOrMergeEnab" +
+ "led\022\'.hbase.pb.SetSplitOrMergeEnabledReq" +
+ "uest\032(.hbase.pb.SetSplitOrMergeEnabledRe" +
+ "sponse\022h\n\025IsSplitOrMergeEnabled\022&.hbase." +
+ "pb.IsSplitOrMergeEnabledRequest\032\'.hbase." +
+ "pb.IsSplitOrMergeEnabledResponse\022\217\001\n\"Rel" +
+ "easeSplitOrMergeLockAndRollback\0223.hbase." +
+ "pb.ReleaseSplitOrMergeLockAndRollbackReq" +
+ "uest\0324.hbase.pb.ReleaseSplitOrMergeLockA",
+ "ndRollbackResponse\022D\n\tNormalize\022\032.hbase." +
+ "pb.NormalizeRequest\032\033.hbase.pb.Normalize" +
+ "Response\022e\n\024SetNormalizerRunning\022%.hbase" +
+ ".pb.SetNormalizerRunningRequest\032&.hbase." +
+ "pb.SetNormalizerRunningResponse\022b\n\023IsNor" +
+ "malizerEnabled\022$.hbase.pb.IsNormalizerEn" +
+ "abledRequest\032%.hbase.pb.IsNormalizerEnab" +
+ "ledResponse\022S\n\016RunCatalogScan\022\037.hbase.pb" +
+ ".RunCatalogScanRequest\032 .hbase.pb.RunCat" +
+ "alogScanResponse\022e\n\024EnableCatalogJanitor",
+ "\022%.hbase.pb.EnableCatalogJanitorRequest\032" +
+ "&.hbase.pb.EnableCatalogJanitorResponse\022" +
+ "n\n\027IsCatalogJanitorEnabled\022(.hbase.pb.Is" +
+ "CatalogJanitorEnabledRequest\032).hbase.pb." +
+ "IsCatalogJanitorEnabledResponse\022^\n\021ExecM" +
+ "asterService\022#.hbase.pb.CoprocessorServi" +
+ "ceRequest\032$.hbase.pb.CoprocessorServiceR" +
+ "esponse\022A\n\010Snapshot\022\031.hbase.pb.SnapshotR" +
+ "equest\032\032.hbase.pb.SnapshotResponse\022h\n\025Ge" +
+ "tCompletedSnapshots\022&.hbase.pb.GetComple",
+ "tedSnapshotsRequest\032\'.hbase.pb.GetComple" +
+ "tedSnapshotsResponse\022S\n\016DeleteSnapshot\022\037" +
+ ".hbase.pb.DeleteSnapshotRequest\032 .hbase." +
+ "pb.DeleteSnapshotResponse\022S\n\016IsSnapshotD" +
+ "one\022\037.hbase.pb.IsSnapshotDoneRequest\032 .h" +
+ "base.pb.IsSnapshotDoneResponse\022V\n\017Restor" +
+ "eSnapshot\022 .hbase.pb.RestoreSnapshotRequ" +
+ "est\032!.hbase.pb.RestoreSnapshotResponse\022P" +
+ "\n\rExecProcedure\022\036.hbase.pb.ExecProcedure" +
+ "Request\032\037.hbase.pb.ExecProcedureResponse",
+ "\022W\n\024ExecProcedureWithRet\022\036.hbase.pb.Exec" +
+ "ProcedureRequest\032\037.hbase.pb.ExecProcedur" +
+ "eResponse\022V\n\017IsProcedureDone\022 .hbase.pb." +
+ "IsProcedureDoneRequest\032!.hbase.pb.IsProc" +
+ "edureDoneResponse\022V\n\017ModifyNamespace\022 .h" +
+ "base.pb.ModifyNamespaceRequest\032!.hbase.p" +
+ "b.ModifyNamespaceResponse\022V\n\017CreateNames" +
+ "pace\022 .hbase.pb.CreateNamespaceRequest\032!" +
+ ".hbase.pb.CreateNamespaceResponse\022V\n\017Del" +
+ "eteNamespace\022 .hbase.pb.DeleteNamespaceR",
+ "equest\032!.hbase.pb.DeleteNamespaceRespons" +
+ "e\022k\n\026GetNamespaceDescriptor\022\'.hbase.pb.G" +
+ "etNamespaceDescriptorRequest\032(.hbase.pb." +
+ "GetNamespaceDescriptorResponse\022q\n\030ListNa" +
+ "mespaceDescriptors\022).hbase.pb.ListNamesp" +
+ "aceDescriptorsRequest\032*.hbase.pb.ListNam" +
+ "espaceDescriptorsResponse\022\206\001\n\037ListTableD" +
+ "escriptorsByNamespace\0220.hbase.pb.ListTab" +
+ "leDescriptorsByNamespaceRequest\0321.hbase." +
+ "pb.ListTableDescriptorsByNamespaceRespon",
+ "se\022t\n\031ListTableNamesByNamespace\022*.hbase." +
+ "pb.ListTableNamesByNamespaceRequest\032+.hb" +
+ "ase.pb.ListTableNamesByNamespaceResponse" +
+ "\022P\n\rGetTableState\022\036.hbase.pb.GetTableSta" +
+ "teRequest\032\037.hbase.pb.GetTableStateRespon" +
+ "se\022A\n\010SetQuota\022\031.hbase.pb.SetQuotaReques" +
+ "t\032\032.hbase.pb.SetQuotaResponse\022x\n\037getLast" +
+ "MajorCompactionTimestamp\022).hbase.pb.Majo" +
+ "rCompactionTimestampRequest\032*.hbase.pb.M" +
+ "ajorCompactionTimestampResponse\022\212\001\n(getL",
+ "astMajorCompactionTimestampForRegion\0222.h" +
+ "base.pb.MajorCompactionTimestampForRegio" +
+ "nRequest\032*.hbase.pb.MajorCompactionTimes" +
+ "tampResponse\022_\n\022getProcedureResult\022#.hba" +
+ "se.pb.GetProcedureResultRequest\032$.hbase." +
+ "pb.GetProcedureResultResponse\022h\n\027getSecu" +
+ "rityCapabilities\022%.hbase.pb.SecurityCapa" +
+ "bilitiesRequest\032&.hbase.pb.SecurityCapab" +
+ "ilitiesResponse\022S\n\016AbortProcedure\022\037.hbas" +
+ "e.pb.AbortProcedureRequest\032 .hbase.pb.Ab",
+ "ortProcedureResponse\022S\n\016ListProcedures\022\037" +
+ ".hbase.pb.ListProceduresRequest\032 .hbase." +
+ "pb.ListProceduresResponseBB\n*org.apache." +
+ "hadoop.hbase.protobuf.generatedB\014MasterP" +
+ "rotosH\001\210\001\001\240\001\001"
};
com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
@@ -66163,392 +67039,404 @@ public final class MasterProtos {
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hbase_pb_StopMasterResponse_descriptor,
new java.lang.String[] { });
- internal_static_hbase_pb_BalanceRequest_descriptor =
+ internal_static_hbase_pb_IsInMaintenanceModeRequest_descriptor =
getDescriptor().getMessageTypes().get(46);
+ internal_static_hbase_pb_IsInMaintenanceModeRequest_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_hbase_pb_IsInMaintenanceModeRequest_descriptor,
+ new java.lang.String[] { });
+ internal_static_hbase_pb_IsInMaintenanceModeResponse_descriptor =
+ getDescriptor().getMessageTypes().get(47);
+ internal_static_hbase_pb_IsInMaintenanceModeResponse_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_hbase_pb_IsInMaintenanceModeResponse_descriptor,
+ new java.lang.String[] { "InMaintenanceMode", });
+ internal_static_hbase_pb_BalanceRequest_descriptor =
+ getDescriptor().getMessageTypes().get(48);
internal_static_hbase_pb_BalanceRequest_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hbase_pb_BalanceRequest_descriptor,
new java.lang.String[] { "Force", });
internal_static_hbase_pb_BalanceResponse_descriptor =
- getDescriptor().getMessageTypes().get(47);
+ getDescriptor().getMessageTypes().get(49);
internal_static_hbase_pb_BalanceResponse_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hbase_pb_BalanceResponse_descriptor,
new java.lang.String[] { "BalancerRan", });
internal_static_hbase_pb_SetBalancerRunningRequest_descriptor =
- getDescriptor().getMessageTypes().get(48);
+ getDescriptor().getMessageTypes().get(50);
internal_static_hbase_pb_SetBalancerRunningRequest_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hbase_pb_SetBalancerRunningRequest_descriptor,
new java.lang.String[] { "On", "Synchronous", });
internal_static_hbase_pb_SetBalancerRunningResponse_descriptor =
- getDescriptor().getMessageTypes().get(49);
+ getDescriptor().getMessageTypes().get(51);
internal_static_hbase_pb_SetBalancerRunningResponse_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hbase_pb_SetBalancerRunningResponse_descriptor,
new java.lang.String[] { "PrevBalanceValue", });
internal_static_hbase_pb_IsBalancerEnabledRequest_descriptor =
- getDescriptor().getMessageTypes().get(50);
+ getDescriptor().getMessageTypes().get(52);
internal_static_hbase_pb_IsBalancerEnabledRequest_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hbase_pb_IsBalancerEnabledRequest_descriptor,
new java.lang.String[] { });
internal_static_hbase_pb_IsBalancerEnabledResponse_descriptor =
- getDescriptor().getMessageTypes().get(51);
+ getDescriptor().getMessageTypes().get(53);
internal_static_hbase_pb_IsBalancerEnabledResponse_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hbase_pb_IsBalancerEnabledResponse_descriptor,
new java.lang.String[] { "Enabled", });
internal_static_hbase_pb_SetSplitOrMergeEnabledRequest_descriptor =
- getDescriptor().getMessageTypes().get(52);
+ getDescriptor().getMessageTypes().get(54);
internal_static_hbase_pb_SetSplitOrMergeEnabledRequest_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hbase_pb_SetSplitOrMergeEnabledRequest_descriptor,
new java.lang.String[] { "Enabled", "Synchronous", "SwitchTypes", "SkipLock", });
internal_static_hbase_pb_SetSplitOrMergeEnabledResponse_descriptor =
- getDescriptor().getMessageTypes().get(53);
+ getDescriptor().getMessageTypes().get(55);
internal_static_hbase_pb_SetSplitOrMergeEnabledResponse_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hbase_pb_SetSplitOrMergeEnabledResponse_descriptor,
new java.lang.String[] { "PrevValue", });
internal_static_hbase_pb_IsSplitOrMergeEnabledRequest_descriptor =
- getDescriptor().getMessageTypes().get(54);
+ getDescriptor().getMessageTypes().get(56);
internal_static_hbase_pb_IsSplitOrMergeEnabledRequest_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hbase_pb_IsSplitOrMergeEnabledRequest_descriptor,
new java.lang.String[] { "SwitchType", });
internal_static_hbase_pb_IsSplitOrMergeEnabledResponse_descriptor =
- getDescriptor().getMessageTypes().get(55);
+ getDescriptor().getMessageTypes().get(57);
internal_static_hbase_pb_IsSplitOrMergeEnabledResponse_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hbase_pb_IsSplitOrMergeEnabledResponse_descriptor,
new java.lang.String[] { "Enabled", });
internal_static_hbase_pb_ReleaseSplitOrMergeLockAndRollbackRequest_descriptor =
- getDescriptor().getMessageTypes().get(56);
+ getDescriptor().getMessageTypes().get(58);
internal_static_hbase_pb_ReleaseSplitOrMergeLockAndRollbackRequest_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hbase_pb_ReleaseSplitOrMergeLockAndRollbackRequest_descriptor,
new java.lang.String[] { });
internal_static_hbase_pb_ReleaseSplitOrMergeLockAndRollbackResponse_descriptor =
- getDescriptor().getMessageTypes().get(57);
+ getDescriptor().getMessageTypes().get(59);
internal_static_hbase_pb_ReleaseSplitOrMergeLockAndRollbackResponse_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hbase_pb_ReleaseSplitOrMergeLockAndRollbackResponse_descriptor,
new java.lang.String[] { });
internal_static_hbase_pb_NormalizeRequest_descriptor =
- getDescriptor().getMessageTypes().get(58);
+ getDescriptor().getMessageTypes().get(60);
internal_static_hbase_pb_NormalizeRequest_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hbase_pb_NormalizeRequest_descriptor,
new java.lang.String[] { });
internal_static_hbase_pb_NormalizeResponse_descriptor =
- getDescriptor().getMessageTypes().get(59);
+ getDescriptor().getMessageTypes().get(61);
internal_static_hbase_pb_NormalizeResponse_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hbase_pb_NormalizeResponse_descriptor,
new java.lang.String[] { "NormalizerRan", });
internal_static_hbase_pb_SetNormalizerRunningRequest_descriptor =
- getDescriptor().getMessageTypes().get(60);
+ getDescriptor().getMessageTypes().get(62);
internal_static_hbase_pb_SetNormalizerRunningRequest_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hbase_pb_SetNormalizerRunningRequest_descriptor,
new java.lang.String[] { "On", });
internal_static_hbase_pb_SetNormalizerRunningResponse_descriptor =
- getDescriptor().getMessageTypes().get(61);
+ getDescriptor().getMessageTypes().get(63);
internal_static_hbase_pb_SetNormalizerRunningResponse_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hbase_pb_SetNormalizerRunningResponse_descriptor,
new java.lang.String[] { "PrevNormalizerValue", });
internal_static_hbase_pb_IsNormalizerEnabledRequest_descriptor =
- getDescriptor().getMessageTypes().get(62);
+ getDescriptor().getMessageTypes().get(64);
internal_static_hbase_pb_IsNormalizerEnabledRequest_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hbase_pb_IsNormalizerEnabledRequest_descriptor,
new java.lang.String[] { });
internal_static_hbase_pb_IsNormalizerEnabledResponse_descriptor =
- getDescriptor().getMessageTypes().get(63);
+ getDescriptor().getMessageTypes().get(65);
internal_static_hbase_pb_IsNormalizerEnabledResponse_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hbase_pb_IsNormalizerEnabledResponse_descriptor,
new java.lang.String[] { "Enabled", });
internal_static_hbase_pb_RunCatalogScanRequest_descriptor =
- getDescriptor().getMessageTypes().get(64);
+ getDescriptor().getMessageTypes().get(66);
internal_static_hbase_pb_RunCatalogScanRequest_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hbase_pb_RunCatalogScanRequest_descriptor,
new java.lang.String[] { });
internal_static_hbase_pb_RunCatalogScanResponse_descriptor =
- getDescriptor().getMessageTypes().get(65);
+ getDescriptor().getMessageTypes().get(67);
internal_static_hbase_pb_RunCatalogScanResponse_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hbase_pb_RunCatalogScanResponse_descriptor,
new java.lang.String[] { "ScanResult", });
internal_static_hbase_pb_EnableCatalogJanitorRequest_descriptor =
- getDescriptor().getMessageTypes().get(66);
+ getDescriptor().getMessageTypes().get(68);
internal_static_hbase_pb_EnableCatalogJanitorRequest_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hbase_pb_EnableCatalogJanitorRequest_descriptor,
new java.lang.String[] { "Enable", });
internal_static_hbase_pb_EnableCatalogJanitorResponse_descriptor =
- getDescriptor().getMessageTypes().get(67);
+ getDescriptor().getMessageTypes().get(69);
internal_static_hbase_pb_EnableCatalogJanitorResponse_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hbase_pb_EnableCatalogJanitorResponse_descriptor,
new java.lang.String[] { "PrevValue", });
internal_static_hbase_pb_IsCatalogJanitorEnabledRequest_descriptor =
- getDescriptor().getMessageTypes().get(68);
+ getDescriptor().getMessageTypes().get(70);
internal_static_hbase_pb_IsCatalogJanitorEnabledRequest_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hbase_pb_IsCatalogJanitorEnabledRequest_descriptor,
new java.lang.String[] { });
internal_static_hbase_pb_IsCatalogJanitorEnabledResponse_descriptor =
- getDescriptor().getMessageTypes().get(69);
+ getDescriptor().getMessageTypes().get(71);
internal_static_hbase_pb_IsCatalogJanitorEnabledResponse_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hbase_pb_IsCatalogJanitorEnabledResponse_descriptor,
new java.lang.String[] { "Value", });
internal_static_hbase_pb_SnapshotRequest_descriptor =
- getDescriptor().getMessageTypes().get(70);
+ getDescriptor().getMessageTypes().get(72);
internal_static_hbase_pb_SnapshotRequest_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hbase_pb_SnapshotRequest_descriptor,
new java.lang.String[] { "Snapshot", });
internal_static_hbase_pb_SnapshotResponse_descriptor =
- getDescriptor().getMessageTypes().get(71);
+ getDescriptor().getMessageTypes().get(73);
internal_static_hbase_pb_SnapshotResponse_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hbase_pb_SnapshotResponse_descriptor,
new java.lang.String[] { "ExpectedTimeout", });
internal_static_hbase_pb_GetCompletedSnapshotsRequest_descriptor =
- getDescriptor().getMessageTypes().get(72);
+ getDescriptor().getMessageTypes().get(74);
internal_static_hbase_pb_GetCompletedSnapshotsRequest_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hbase_pb_GetCompletedSnapshotsRequest_descriptor,
new java.lang.String[] { });
internal_static_hbase_pb_GetCompletedSnapshotsResponse_descriptor =
- getDescriptor().getMessageTypes().get(73);
+ getDescriptor().getMessageTypes().get(75);
internal_static_hbase_pb_GetCompletedSnapshotsResponse_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hbase_pb_GetCompletedSnapshotsResponse_descriptor,
new java.lang.String[] { "Snapshots", });
internal_static_hbase_pb_DeleteSnapshotRequest_descriptor =
- getDescriptor().getMessageTypes().get(74);
+ getDescriptor().getMessageTypes().get(76);
internal_static_hbase_pb_DeleteSnapshotRequest_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hbase_pb_DeleteSnapshotRequest_descriptor,
new java.lang.String[] { "Snapshot", });
internal_static_hbase_pb_DeleteSnapshotResponse_descriptor =
- getDescriptor().getMessageTypes().get(75);
+ getDescriptor().getMessageTypes().get(77);
internal_static_hbase_pb_DeleteSnapshotResponse_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hbase_pb_DeleteSnapshotResponse_descriptor,
new java.lang.String[] { });
internal_static_hbase_pb_RestoreSnapshotRequest_descriptor =
- getDescriptor().getMessageTypes().get(76);
+ getDescriptor().getMessageTypes().get(78);
internal_static_hbase_pb_RestoreSnapshotRequest_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hbase_pb_RestoreSnapshotRequest_descriptor,
new java.lang.String[] { "Snapshot", "NonceGroup", "Nonce", });
internal_static_hbase_pb_RestoreSnapshotResponse_descriptor =
- getDescriptor().getMessageTypes().get(77);
+ getDescriptor().getMessageTypes().get(79);
internal_static_hbase_pb_RestoreSnapshotResponse_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hbase_pb_RestoreSnapshotResponse_descriptor,
new java.lang.String[] { "ProcId", });
internal_static_hbase_pb_IsSnapshotDoneRequest_descriptor =
- getDescriptor().getMessageTypes().get(78);
+ getDescriptor().getMessageTypes().get(80);
internal_static_hbase_pb_IsSnapshotDoneRequest_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hbase_pb_IsSnapshotDoneRequest_descriptor,
new java.lang.String[] { "Snapshot", });
internal_static_hbase_pb_IsSnapshotDoneResponse_descriptor =
- getDescriptor().getMessageTypes().get(79);
+ getDescriptor().getMessageTypes().get(81);
internal_static_hbase_pb_IsSnapshotDoneResponse_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hbase_pb_IsSnapshotDoneResponse_descriptor,
new java.lang.String[] { "Done", "Snapshot", });
internal_static_hbase_pb_IsRestoreSnapshotDoneRequest_descriptor =
- getDescriptor().getMessageTypes().get(80);
+ getDescriptor().getMessageTypes().get(82);
internal_static_hbase_pb_IsRestoreSnapshotDoneRequest_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hbase_pb_IsRestoreSnapshotDoneRequest_descriptor,
new java.lang.String[] { "Snapshot", });
internal_static_hbase_pb_IsRestoreSnapshotDoneResponse_descriptor =
- getDescriptor().getMessageTypes().get(81);
+ getDescriptor().getMessageTypes().get(83);
internal_static_hbase_pb_IsRestoreSnapshotDoneResponse_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hbase_pb_IsRestoreSnapshotDoneResponse_descriptor,
new java.lang.String[] { "Done", });
internal_static_hbase_pb_GetSchemaAlterStatusRequest_descriptor =
- getDescriptor().getMessageTypes().get(82);
+ getDescriptor().getMessageTypes().get(84);
internal_static_hbase_pb_GetSchemaAlterStatusRequest_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hbase_pb_GetSchemaAlterStatusRequest_descriptor,
new java.lang.String[] { "TableName", });
internal_static_hbase_pb_GetSchemaAlterStatusResponse_descriptor =
- getDescriptor().getMessageTypes().get(83);
+ getDescriptor().getMessageTypes().get(85);
internal_static_hbase_pb_GetSchemaAlterStatusResponse_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hbase_pb_GetSchemaAlterStatusResponse_descriptor,
new java.lang.String[] { "YetToUpdateRegions", "TotalRegions", });
internal_static_hbase_pb_GetTableDescriptorsRequest_descriptor =
- getDescriptor().getMessageTypes().get(84);
+ getDescriptor().getMessageTypes().get(86);
internal_static_hbase_pb_GetTableDescriptorsRequest_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hbase_pb_GetTableDescriptorsRequest_descriptor,
new java.lang.String[] { "TableNames", "Regex", "IncludeSysTables", "Namespace", });
internal_static_hbase_pb_GetTableDescriptorsResponse_descriptor =
- getDescriptor().getMessageTypes().get(85);
+ getDescriptor().getMessageTypes().get(87);
internal_static_hbase_pb_GetTableDescriptorsResponse_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hbase_pb_GetTableDescriptorsResponse_descriptor,
new java.lang.String[] { "TableSchema", });
internal_static_hbase_pb_GetTableNamesRequest_descriptor =
- getDescriptor().getMessageTypes().get(86);
+ getDescriptor().getMessageTypes().get(88);
internal_static_hbase_pb_GetTableNamesRequest_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hbase_pb_GetTableNamesRequest_descriptor,
new java.lang.String[] { "Regex", "IncludeSysTables", "Namespace", });
internal_static_hbase_pb_GetTableNamesResponse_descriptor =
- getDescriptor().getMessageTypes().get(87);
+ getDescriptor().getMessageTypes().get(89);
internal_static_hbase_pb_GetTableNamesResponse_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hbase_pb_GetTableNamesResponse_descriptor,
new java.lang.String[] { "TableNames", });
internal_static_hbase_pb_GetTableStateRequest_descriptor =
- getDescriptor().getMessageTypes().get(88);
+ getDescriptor().getMessageTypes().get(90);
internal_static_hbase_pb_GetTableStateRequest_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hbase_pb_GetTableStateRequest_descriptor,
new java.lang.String[] { "TableName", });
internal_static_hbase_pb_GetTableStateResponse_descriptor =
- getDescriptor().getMessageTypes().get(89);
+ getDescriptor().getMessageTypes().get(91);
internal_static_hbase_pb_GetTableStateResponse_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hbase_pb_GetTableStateResponse_descriptor,
new java.lang.String[] { "TableState", });
internal_static_hbase_pb_GetClusterStatusRequest_descriptor =
- getDescriptor().getMessageTypes().get(90);
+ getDescriptor().getMessageTypes().get(92);
internal_static_hbase_pb_GetClusterStatusRequest_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hbase_pb_GetClusterStatusRequest_descriptor,
new java.lang.String[] { });
internal_static_hbase_pb_GetClusterStatusResponse_descriptor =
- getDescriptor().getMessageTypes().get(91);
+ getDescriptor().getMessageTypes().get(93);
internal_static_hbase_pb_GetClusterStatusResponse_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hbase_pb_GetClusterStatusResponse_descriptor,
new java.lang.String[] { "ClusterStatus", });
internal_static_hbase_pb_IsMasterRunningRequest_descriptor =
- getDescriptor().getMessageTypes().get(92);
+ getDescriptor().getMessageTypes().get(94);
internal_static_hbase_pb_IsMasterRunningRequest_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hbase_pb_IsMasterRunningRequest_descriptor,
new java.lang.String[] { });
internal_static_hbase_pb_IsMasterRunningResponse_descriptor =
- getDescriptor().getMessageTypes().get(93);
+ getDescriptor().getMessageTypes().get(95);
internal_static_hbase_pb_IsMasterRunningResponse_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hbase_pb_IsMasterRunningResponse_descriptor,
new java.lang.String[] { "IsMasterRunning", });
internal_static_hbase_pb_ExecProcedureRequest_descriptor =
- getDescriptor().getMessageTypes().get(94);
+ getDescriptor().getMessageTypes().get(96);
internal_static_hbase_pb_ExecProcedureRequest_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hbase_pb_ExecProcedureRequest_descriptor,
new java.lang.String[] { "Procedure", });
internal_static_hbase_pb_ExecProcedureResponse_descriptor =
- getDescriptor().getMessageTypes().get(95);
+ getDescriptor().getMessageTypes().get(97);
internal_static_hbase_pb_ExecProcedureResponse_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hbase_pb_ExecProcedureResponse_descriptor,
new java.lang.String[] { "ExpectedTimeout", "ReturnData", });
internal_static_hbase_pb_IsProcedureDoneRequest_descriptor =
- getDescriptor().getMessageTypes().get(96);
+ getDescriptor().getMessageTypes().get(98);
internal_static_hbase_pb_IsProcedureDoneRequest_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hbase_pb_IsProcedureDoneRequest_descriptor,
new java.lang.String[] { "Procedure", });
internal_static_hbase_pb_IsProcedureDoneResponse_descriptor =
- getDescriptor().getMessageTypes().get(97);
+ getDescriptor().getMessageTypes().get(99);
internal_static_hbase_pb_IsProcedureDoneResponse_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hbase_pb_IsProcedureDoneResponse_descriptor,
new java.lang.String[] { "Done", "Snapshot", });
internal_static_hbase_pb_GetProcedureResultRequest_descriptor =
- getDescriptor().getMessageTypes().get(98);
+ getDescriptor().getMessageTypes().get(100);
internal_static_hbase_pb_GetProcedureResultRequest_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hbase_pb_GetProcedureResultRequest_descriptor,
new java.lang.String[] { "ProcId", });
internal_static_hbase_pb_GetProcedureResultResponse_descriptor =
- getDescriptor().getMessageTypes().get(99);
+ getDescriptor().getMessageTypes().get(101);
internal_static_hbase_pb_GetProcedureResultResponse_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hbase_pb_GetProcedureResultResponse_descriptor,
new java.lang.String[] { "State", "StartTime", "LastUpdate", "Result", "Exception", });
internal_static_hbase_pb_AbortProcedureRequest_descriptor =
- getDescriptor().getMessageTypes().get(100);
+ getDescriptor().getMessageTypes().get(102);
internal_static_hbase_pb_AbortProcedureRequest_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hbase_pb_AbortProcedureRequest_descriptor,
new java.lang.String[] { "ProcId", "MayInterruptIfRunning", });
internal_static_hbase_pb_AbortProcedureResponse_descriptor =
- getDescriptor().getMessageTypes().get(101);
+ getDescriptor().getMessageTypes().get(103);
internal_static_hbase_pb_AbortProcedureResponse_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hbase_pb_AbortProcedureResponse_descriptor,
new java.lang.String[] { "IsProcedureAborted", });
internal_static_hbase_pb_ListProceduresRequest_descriptor =
- getDescriptor().getMessageTypes().get(102);
+ getDescriptor().getMessageTypes().get(104);
internal_static_hbase_pb_ListProceduresRequest_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hbase_pb_ListProceduresRequest_descriptor,
new java.lang.String[] { });
internal_static_hbase_pb_ListProceduresResponse_descriptor =
- getDescriptor().getMessageTypes().get(103);
+ getDescriptor().getMessageTypes().get(105);
internal_static_hbase_pb_ListProceduresResponse_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hbase_pb_ListProceduresResponse_descriptor,
new java.lang.String[] { "Procedure", });
internal_static_hbase_pb_SetQuotaRequest_descriptor =
- getDescriptor().getMessageTypes().get(104);
+ getDescriptor().getMessageTypes().get(106);
internal_static_hbase_pb_SetQuotaRequest_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hbase_pb_SetQuotaRequest_descriptor,
new java.lang.String[] { "UserName", "UserGroup", "Namespace", "TableName", "RemoveAll", "BypassGlobals", "Throttle", });
internal_static_hbase_pb_SetQuotaResponse_descriptor =
- getDescriptor().getMessageTypes().get(105);
+ getDescriptor().getMessageTypes().get(107);
internal_static_hbase_pb_SetQuotaResponse_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hbase_pb_SetQuotaResponse_descriptor,
new java.lang.String[] { });
internal_static_hbase_pb_MajorCompactionTimestampRequest_descriptor =
- getDescriptor().getMessageTypes().get(106);
+ getDescriptor().getMessageTypes().get(108);
internal_static_hbase_pb_MajorCompactionTimestampRequest_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hbase_pb_MajorCompactionTimestampRequest_descriptor,
new java.lang.String[] { "TableName", });
internal_static_hbase_pb_MajorCompactionTimestampForRegionRequest_descriptor =
- getDescriptor().getMessageTypes().get(107);
+ getDescriptor().getMessageTypes().get(109);
internal_static_hbase_pb_MajorCompactionTimestampForRegionRequest_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hbase_pb_MajorCompactionTimestampForRegionRequest_descriptor,
new java.lang.String[] { "Region", });
internal_static_hbase_pb_MajorCompactionTimestampResponse_descriptor =
- getDescriptor().getMessageTypes().get(108);
+ getDescriptor().getMessageTypes().get(110);
internal_static_hbase_pb_MajorCompactionTimestampResponse_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hbase_pb_MajorCompactionTimestampResponse_descriptor,
new java.lang.String[] { "CompactionTimestamp", });
internal_static_hbase_pb_SecurityCapabilitiesRequest_descriptor =
- getDescriptor().getMessageTypes().get(109);
+ getDescriptor().getMessageTypes().get(111);
internal_static_hbase_pb_SecurityCapabilitiesRequest_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hbase_pb_SecurityCapabilitiesRequest_descriptor,
new java.lang.String[] { });
internal_static_hbase_pb_SecurityCapabilitiesResponse_descriptor =
- getDescriptor().getMessageTypes().get(110);
+ getDescriptor().getMessageTypes().get(112);
internal_static_hbase_pb_SecurityCapabilitiesResponse_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hbase_pb_SecurityCapabilitiesResponse_descriptor,
diff --git hbase-protocol/src/main/protobuf/Master.proto hbase-protocol/src/main/protobuf/Master.proto
index ad8111e..542a872 100644
--- hbase-protocol/src/main/protobuf/Master.proto
+++ hbase-protocol/src/main/protobuf/Master.proto
@@ -255,6 +255,13 @@ message StopMasterRequest {
message StopMasterResponse {
}
+message IsInMaintenanceModeRequest {
+}
+
+message IsInMaintenanceModeResponse {
+ required bool inMaintenanceMode = 1;
+}
+
message BalanceRequest {
optional bool force = 1;
}
@@ -645,6 +652,12 @@ service MasterService {
returns(StopMasterResponse);
/**
+ * Query whether the Master is in maintenance mode.
+ */
+ rpc IsMasterInMaintenanceMode(IsInMaintenanceModeRequest)
+ returns(IsInMaintenanceModeResponse);
+
+ /**
* Run the balancer. Will run the balancer and if regions to move, it will
* go ahead and do the reassignments. Can NOT run for various reasons.
* Check logs.
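Note (not part of the patch): a minimal client-side sketch of calling the new RPC through the generated blocking stub. Obtaining a MasterService.BlockingInterface (for example via ClusterConnection#getMaster()) is assumed, and the wrapper class name is illustrative only.

import com.google.protobuf.ServiceException;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MasterService;

public final class MaintenanceModeClient {
  // Asks the active master whether it is currently in maintenance mode.
  public static boolean isMasterInMaintenanceMode(MasterService.BlockingInterface master)
      throws ServiceException {
    IsInMaintenanceModeRequest request = IsInMaintenanceModeRequest.newBuilder().build();
    IsInMaintenanceModeResponse response = master.isMasterInMaintenanceMode(null, request);
    return response.getInMaintenanceMode();
  }
}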
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java
index c93b307..476c796 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java
@@ -109,6 +109,7 @@ public class CatalogJanitor extends ScheduledChore {
try {
AssignmentManager am = this.services.getAssignmentManager();
if (this.enabled.get()
+ && !this.services.isInMaintenanceMode()
&& am != null
&& am.isFailoverCleanupDone()
&& am.getRegionStates().getRegionsInTransition().size() == 0) {
@@ -241,6 +242,11 @@ public class CatalogJanitor extends ScheduledChore {
int mergeCleaned = 0;
Map mergedRegions = scanTriple.getSecond();
for (Map.Entry e : mergedRegions.entrySet()) {
+ if (this.services.isInMaintenanceMode()) {
+ // Stop cleaning if the master is in maintenance mode
+ break;
+ }
+
PairOfSameType p = MetaTableAccessor.getMergeRegions(e.getValue());
HRegionInfo regionA = p.getFirst();
HRegionInfo regionB = p.getSecond();
@@ -266,6 +272,11 @@ public class CatalogJanitor extends ScheduledChore {
// regions whose parents are still around
HashSet parentNotCleaned = new HashSet();
for (Map.Entry e : splitParents.entrySet()) {
+ if (this.services.isInMaintenanceMode()) {
+ // Stop cleaning if the master is in maintenance mode
+ break;
+ }
+
if (!parentNotCleaned.contains(e.getKey().getEncodedName()) &&
cleanParent(e.getKey(), e.getValue())) {
splitCleaned++;
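Note (not part of the patch): the two loop guards above, plus the check added to the chore trigger, follow one pattern: a master-side chore consults isInMaintenanceMode() before doing destructive work. A minimal sketch of that pattern, assuming only the MasterServices method added later in this patch; the chore name and period handling are illustrative.

import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.master.MasterServices;

public class MaintenanceAwareChore extends ScheduledChore {
  private final MasterServices services;

  public MaintenanceAwareChore(MasterServices services, int periodMillis) {
    // MasterServices extends Server, a Stoppable, so it can act as the chore stopper,
    // just as CatalogJanitor's own constructor does.
    super("MaintenanceAwareChore", services, periodMillis);
    this.services = services;
  }

  @Override
  protected void chore() {
    if (services.isInMaintenanceMode()) {
      // Skip all cleanup while an operator or hbck holds maintenance mode.
      return;
    }
    // ... normal chore work ...
  }
}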
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index 3803635..ee5d7fb 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -24,7 +24,6 @@ import com.google.common.collect.Maps;
import com.google.protobuf.Descriptors;
import com.google.protobuf.Service;
-
import java.io.IOException;
import java.io.InterruptedIOException;
import java.lang.reflect.Constructor;
@@ -1268,6 +1267,12 @@ public class HMaster extends HRegionServer implements MasterServices {
LOG.debug("Master has not been initialized, don't run balancer.");
return false;
}
+
+ if (isInMaintenanceMode()) {
+ LOG.info("Master is in maintenanceMode mode, don't run balancer.");
+ return false;
+ }
+
// Do this call outside of synchronized block.
int maximumBalanceTime = getBalancerCutoffTime();
synchronized (this.balancer) {
@@ -1371,6 +1376,11 @@ public class HMaster extends HRegionServer implements MasterServices {
return false;
}
+ if (isInMaintenanceMode()) {
+ LOG.info("Master is in maintenance mode, don't run region normalizer.");
+ return false;
+ }
+
if (!this.regionNormalizerTracker.isNormalizerOn()) {
LOG.debug("Region normalization is disabled, don't run region normalizer.");
return false;
@@ -1384,6 +1394,11 @@ public class HMaster extends HRegionServer implements MasterServices {
Collections.shuffle(allEnabledTables);
for (TableName table : allEnabledTables) {
+ if (isInMaintenanceMode()) {
+ LOG.debug("Master is in maintenance mode, stop running region normalizer.");
+ return false;
+ }
+
HTableDescriptor tblDesc = getTableDescriptors().get(table);
if (table.isSystemTable() || (tblDesc != null &&
!tblDesc.isNormalizationEnabled())) {
@@ -2364,6 +2379,21 @@ public class HMaster extends HRegionServer implements MasterServices {
return initialized.isReady();
}
+ /**
+ * Report whether this master is in maintenance mode.
+ *
+ * @return true if master is in maintenance mode
+ */
+ @Override
+ public boolean isInMaintenanceMode() {
+ try {
+ return ZKUtil.nodeHasChildren(zooKeeper, ZooKeeperWatcher.masterMaintZNode);
+ } catch (KeeperException e) {
+ // Ignore the ZooKeeper exception and report that maintenance mode is off
+ return false;
+ }
+ }
+
@VisibleForTesting
public void setInitialized(boolean isInitialized) {
procedureExecutor.getEnvironment().setEventReady(initialized, isInitialized);
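Note (not part of the patch): isInMaintenanceMode() above simply checks whether ZooKeeperWatcher.masterMaintZNode has any children. Under that assumption, a tool could put the master into maintenance mode by holding an ephemeral child znode for the duration of its work. A hedged sketch follows; the child name "my-tool" and the class name are illustrative, and whether hbck's own znode lives under this path is not shown in this patch.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.zookeeper.ZKUtil;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
import org.apache.zookeeper.KeeperException;

public final class MaintenanceSignal {
  public static void main(String[] args) throws IOException, KeeperException {
    Configuration conf = HBaseConfiguration.create();
    ZooKeeperWatcher zkw = new ZooKeeperWatcher(conf, "maintenance-signal", null);
    String child = ZKUtil.joinZNode(ZooKeeperWatcher.masterMaintZNode, "my-tool");
    try {
      // Ephemeral: the maintenance signal disappears automatically if this process dies.
      ZKUtil.createEphemeralNodeAndWatch(zkw, child, null);
      // ... perform maintenance work while balancer, normalizer and catalog janitor stand down ...
    } finally {
      ZKUtil.deleteNode(zkw, child);
      zkw.close();
    }
  }
}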
@@ -2855,7 +2885,7 @@ public class HMaster extends HRegionServer implements MasterServices {
* @return The state of the switch
*/
public boolean isSplitOrMergeEnabled(MasterSwitchType switchType) {
- if (null == splitOrMergeTracker) {
+ if (null == splitOrMergeTracker || isInMaintenanceMode()) {
return false;
}
return splitOrMergeTracker.isSplitOrMergeEnabled(switchType);
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
index 2f65e97..312648d 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
@@ -1271,6 +1271,15 @@ public class MasterRpcServices extends RSRpcServices
}
@Override
+ public IsInMaintenanceModeResponse isMasterInMaintenanceMode(
+ final RpcController controller,
+ final IsInMaintenanceModeRequest request) throws ServiceException {
+ IsInMaintenanceModeResponse.Builder response = IsInMaintenanceModeResponse.newBuilder();
+ response.setInMaintenanceMode(master.isInMaintenanceMode());
+ return response.build();
+ }
+
+ @Override
public UnassignRegionResponse unassignRegion(RpcController controller,
UnassignRegionRequest req) throws ServiceException {
try {
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
index 21f14e8..f4c9603 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
@@ -303,6 +303,11 @@ public interface MasterServices extends Server {
boolean isInitialized();
/**
+ * @return true if master is in maintenance mode
+ */
+ boolean isInMaintenanceMode();
+
+ /**
* Abort a procedure.
* @param procId ID of the procedure
* @param mayInterruptIfRunning if the proc completed at least one step, should it be aborted?
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
index ea91d90..e1a0f85 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
@@ -17,7 +17,6 @@
*/
package org.apache.hadoop.hbase.util;
-import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Joiner;
import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableList;
@@ -33,7 +32,6 @@ import java.io.IOException;
import java.io.InterruptedIOException;
import java.io.PrintWriter;
import java.io.StringWriter;
-import java.net.InetAddress;
import java.net.URI;
import java.util.ArrayList;
import java.util.Arrays;
@@ -56,13 +54,8 @@ import java.util.concurrent.Callable;
import java.util.concurrent.ConcurrentSkipListMap;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
import java.util.concurrent.Future;
-import java.util.concurrent.FutureTask;
import java.util.concurrent.ScheduledThreadPoolExecutor;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.TimeoutException;
-import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import org.apache.commons.io.IOUtils;
@@ -72,12 +65,10 @@ import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
-import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsAction;
-import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hbase.Abortable;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
@@ -104,7 +95,6 @@ import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Get;
-import org.apache.hadoop.hbase.client.MasterSwitchType;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.RegionReplicaUtil;
import org.apache.hadoop.hbase.client.Result;
@@ -134,11 +124,8 @@ import org.apache.hadoop.hbase.util.hbck.TableLockChecker;
import org.apache.hadoop.hbase.wal.WAL;
import org.apache.hadoop.hbase.wal.WALFactory;
import org.apache.hadoop.hbase.wal.WALSplitter;
-import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
import org.apache.hadoop.hbase.zookeeper.ZKUtil;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
-import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
-import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.util.ReflectionUtils;
import org.apache.hadoop.util.Tool;
@@ -200,15 +187,9 @@ public class HBaseFsck extends Configured implements Closeable {
private static final int DEFAULT_OVERLAPS_TO_SIDELINE = 2;
private static final int DEFAULT_MAX_MERGE = 5;
private static final String TO_BE_LOADED = "to_be_loaded";
- private static final String HBCK_LOCK_FILE = "hbase-hbck.lock";
- private static final int DEFAULT_MAX_LOCK_FILE_ATTEMPTS = 5;
- private static final int DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL = 200; // milliseconds
- private static final int DEFAULT_LOCK_FILE_ATTEMPT_MAX_SLEEP_TIME = 5000; // milliseconds
- // We have to set the timeout value > HdfsConstants.LEASE_SOFTLIMIT_PERIOD.
- // In HADOOP-2.6 and later, the Namenode proxy now created with custom RetryPolicy for
- // AlreadyBeingCreatedException which is implies timeout on this operations up to
- // HdfsConstants.LEASE_SOFTLIMIT_PERIOD (60 seconds).
- private static final int DEFAULT_WAIT_FOR_LOCK_TIMEOUT = 80; // seconds
+ private static final int DEFAULT_MAX_CREATE_ZNODE_ATTEMPTS = 5;
+ private static final int DEFAULT_CREATE_ZNODE_ATTEMPT_SLEEP_INTERVAL = 200; // milliseconds
+ private static final int DEFAULT_CREATE_ZNODE_ATTEMPT_MAX_SLEEP_TIME = 5000; // milliseconds
/**********************
* Internal resources
@@ -223,12 +204,6 @@ public class HBaseFsck extends Configured implements Closeable {
private long startMillis = EnvironmentEdgeManager.currentTime();
private HFileCorruptionChecker hfcc;
private int retcode = 0;
- private Path HBCK_LOCK_PATH;
- private FSDataOutputStream hbckOutFd;
- // This lock is to prevent cleanup of balancer resources twice between
- // ShutdownHook and the main code. We cleanup only if the connect() is
- // successful
- private final AtomicBoolean hbckLockCleanup = new AtomicBoolean(false);
/***********
* Options
@@ -236,8 +211,6 @@ public class HBaseFsck extends Configured implements Closeable {
private static boolean details = false; // do we display the full report
private long timelag = DEFAULT_TIME_LAG; // tables whose modtime is older
private static boolean forceExclusive = false; // only this hbck can modify HBase
- private static boolean disableBalancer = false; // disable load balancer to keep regions stable
- private static boolean disableSplitAndMerge = false; // disable split and merge
private boolean fixAssignments = false; // fix assignment errors?
private boolean fixMeta = false; // fix meta errors?
private boolean checkHdfs = true; // load and check fs consistency?
@@ -305,11 +278,11 @@ public class HBaseFsck extends Configured implements Closeable {
new HashMap>();
private Map tableStates =
new HashMap();
- private final RetryCounterFactory lockFileRetryCounterFactory;
-
+ private final RetryCounterFactory createZNodeRetryCounterFactory;
private Map> skippedRegions = new HashMap>();
- ZooKeeperWatcher zkw = null;
+ private ZooKeeperWatcher zkw = null;
+ private boolean hbckZnodeCreated = false;
/**
* Constructor
*
@@ -342,130 +315,63 @@ public class HBaseFsck extends Configured implements Closeable {
super(conf);
errors = getErrorReporter(getConf());
this.executor = exec;
- lockFileRetryCounterFactory = new RetryCounterFactory(
- getConf().getInt("hbase.hbck.lockfile.attempts", DEFAULT_MAX_LOCK_FILE_ATTEMPTS),
+ createZNodeRetryCounterFactory = new RetryCounterFactory(
+ getConf().getInt("hbase.hbck.createznode.attempts", DEFAULT_MAX_CREATE_ZNODE_ATTEMPTS),
getConf().getInt(
- "hbase.hbck.lockfile.attempt.sleep.interval", DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL),
+ "hbase.hbck.createznode.attempt.sleep.interval",
+ DEFAULT_CREATE_ZNODE_ATTEMPT_SLEEP_INTERVAL),
getConf().getInt(
- "hbase.hbck.lockfile.attempt.maxsleeptime", DEFAULT_LOCK_FILE_ATTEMPT_MAX_SLEEP_TIME));
+ "hbase.hbck.createznode.attempt.maxsleeptime",
+ DEFAULT_CREATE_ZNODE_ATTEMPT_MAX_SLEEP_TIME));
zkw = createZooKeeperWatcher();
}
- private class FileLockCallable implements Callable {
- RetryCounter retryCounter;
-
- public FileLockCallable(RetryCounter retryCounter) {
- this.retryCounter = retryCounter;
- }
- @Override
- public FSDataOutputStream call() throws IOException {
+ /**
+ * Creates an ephemeral znode that marks this hbck instance as running. Creation is retried on
+ * failure; persistent ZooKeeper errors surface as an IOException.
+ *
+ * @return true if creating znode succeeds; false otherwise
+ * @throws IOException if IO failure occurs
+ */
+ private boolean checkAndMarkRunningHbck() throws IOException {
+ RetryCounter retryCounter = createZNodeRetryCounterFactory.create();
+ do {
try {
- FileSystem fs = FSUtils.getCurrentFileSystem(getConf());
- FsPermission defaultPerms = FSUtils.getFilePermissions(fs, getConf(),
- HConstants.DATA_FILE_UMASK_KEY);
- Path tmpDir = new Path(FSUtils.getRootDir(getConf()), HConstants.HBASE_TEMP_DIRECTORY);
- fs.mkdirs(tmpDir);
- HBCK_LOCK_PATH = new Path(tmpDir, HBCK_LOCK_FILE);
- final FSDataOutputStream out = createFileWithRetries(fs, HBCK_LOCK_PATH, defaultPerms);
- out.writeBytes(InetAddress.getLocalHost().toString());
- out.flush();
- return out;
- } catch(RemoteException e) {
- if(AlreadyBeingCreatedException.class.getName().equals(e.getClassName())){
- return null;
- } else {
- throw e;
+ hbckZnodeCreated = ZKUtil.createEphemeralNodeAndWatch(zkw, ZooKeeperWatcher.hbckZNode, null);
+ if (hbckZnodeCreated) {
+ break;
}
+ } catch (KeeperException e) {
+ if (retryCounter.getAttemptTimes() >= retryCounter.getMaxAttempts()) {
+ throw new IOException("Can't create znode " + ZooKeeperWatcher.hbckZNode, e);
+ }
+ // fall through and retry
}
- }
- private FSDataOutputStream createFileWithRetries(final FileSystem fs,
- final Path hbckLockFilePath, final FsPermission defaultPerms)
- throws IOException {
+ LOG.warn("Fail to create znode " + ZooKeeperWatcher.hbckZNode + ", try=" +
+ (retryCounter.getAttemptTimes() + 1) + " of " + retryCounter.getMaxAttempts());
- IOException exception = null;
- do {
- try {
- return FSUtils.create(fs, hbckLockFilePath, defaultPerms, false);
- } catch (IOException ioe) {
- LOG.info("Failed to create lock file " + hbckLockFilePath.getName()
- + ", try=" + (retryCounter.getAttemptTimes() + 1) + " of "
- + retryCounter.getMaxAttempts());
- LOG.debug("Failed to create lock file " + hbckLockFilePath.getName(),
- ioe);
- try {
- exception = ioe;
- retryCounter.sleepUntilNextRetry();
- } catch (InterruptedException ie) {
- throw (InterruptedIOException) new InterruptedIOException(
- "Can't create lock file " + hbckLockFilePath.getName())
- .initCause(ie);
- }
- }
- } while (retryCounter.shouldRetry());
-
- throw exception;
- }
+ try {
+ retryCounter.sleepUntilNextRetry();
+ } catch (InterruptedException ie) {
+ throw (InterruptedIOException) new InterruptedIOException(
+ "Can't create znode " + ZooKeeperWatcher.hbckZNode).initCause(ie);
+ }
+ } while (retryCounter.shouldRetry());
+ return hbckZnodeCreated;
}
- /**
- * This method maintains a lock using a file. If the creation fails we return null
- *
- * @return FSDataOutputStream object corresponding to the newly opened lock file
- * @throws IOException if IO failure occurs
- */
- private FSDataOutputStream checkAndMarkRunningHbck() throws IOException {
- RetryCounter retryCounter = lockFileRetryCounterFactory.create();
- FileLockCallable callable = new FileLockCallable(retryCounter);
- ExecutorService executor = Executors.newFixedThreadPool(1);
- FutureTask futureTask = new FutureTask(callable);
- executor.execute(futureTask);
- final int timeoutInSeconds = getConf().getInt(
- "hbase.hbck.lockfile.maxwaittime", DEFAULT_WAIT_FOR_LOCK_TIMEOUT);
- FSDataOutputStream stream = null;
+ private void cleanupHbckZnode() {
try {
- stream = futureTask.get(timeoutInSeconds, TimeUnit.SECONDS);
- } catch (ExecutionException ee) {
- LOG.warn("Encountered exception when opening lock file", ee);
- } catch (InterruptedException ie) {
- LOG.warn("Interrupted when opening lock file", ie);
- Thread.currentThread().interrupt();
- } catch (TimeoutException exception) {
- // took too long to obtain lock
- LOG.warn("Took more than " + timeoutInSeconds + " seconds in obtaining lock");
- futureTask.cancel(true);
- } finally {
- executor.shutdownNow();
- }
- return stream;
- }
-
- private void unlockHbck() {
- if (isExclusive() && hbckLockCleanup.compareAndSet(true, false)) {
- RetryCounter retryCounter = lockFileRetryCounterFactory.create();
- do {
- try {
- IOUtils.closeQuietly(hbckOutFd);
- FSUtils.delete(FSUtils.getCurrentFileSystem(getConf()),
- HBCK_LOCK_PATH, true);
- LOG.info("Finishing hbck");
- return;
- } catch (IOException ioe) {
- LOG.info("Failed to delete " + HBCK_LOCK_PATH + ", try="
- + (retryCounter.getAttemptTimes() + 1) + " of "
- + retryCounter.getMaxAttempts());
- LOG.debug("Failed to delete " + HBCK_LOCK_PATH, ioe);
- try {
- retryCounter.sleepUntilNextRetry();
- } catch (InterruptedException ie) {
- Thread.currentThread().interrupt();
- LOG.warn("Interrupted while deleting lock file" +
- HBCK_LOCK_PATH);
- return;
- }
- }
- } while (retryCounter.shouldRetry());
+ if (zkw != null && hbckZodeCreated) {
+ ZKUtil.deleteNode(zkw, ZooKeeperWatcher.hbckZNode);
+ hbckZodeCreated = false;
+ }
+ } catch (KeeperException e) {
+ // Ignore
+ LOG.warn("Delete HBCK znode " + ZooKeeperWatcher.hbckZNode + " failed ", e);
}
+ LOG.info("Finishing hbck");
}
/**
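Note (not part of the patch): with the lock file gone, mutual exclusion rests entirely on the ephemeral znode created above. For reference, a minimal sketch of how another process could probe for a running hbck instance; the helper class name is illustrative, and ZKUtil.checkExists returns the znode's version or -1 when the node is absent.

import org.apache.hadoop.hbase.zookeeper.ZKUtil;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
import org.apache.zookeeper.KeeperException;

final class HbckZnodeProbe {
  // Returns true if some hbck instance currently holds the hbck znode.
  static boolean isHbckRunning(ZooKeeperWatcher zkw) throws KeeperException {
    return ZKUtil.checkExists(zkw, ZooKeeperWatcher.hbckZNode) != -1;
  }
}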
@@ -473,31 +379,25 @@ public class HBaseFsck extends Configured implements Closeable {
* online state.
*/
public void connect() throws IOException {
-
if (isExclusive()) {
- // Grab the lock
- hbckOutFd = checkAndMarkRunningHbck();
- if (hbckOutFd == null) {
+ boolean success = checkAndMarkRunningHbck();
+ if (!success) {
setRetCode(-1);
LOG.error("Another instance of hbck is fixing HBase, exiting this instance. " +
- "[If you are sure no other instance is running, delete the lock file " +
- HBCK_LOCK_PATH + " and rerun the tool]");
+ "[If you are sure no other instance is running, delete the znode " +
+ ZooKeeperWatcher.hbckZNode + " and rerun the tool]");
throw new IOException("Duplicate hbck - Abort");
}
-
- // Make sure to cleanup the lock
- hbckLockCleanup.set(true);
}
-
// Add a shutdown hook to this thread, in case user tries to
- // kill the hbck with a ctrl-c, we want to cleanup the lock so that
+ // kill the hbck with a ctrl-c, we want to cleanup the znode so that
// it is available for further calls
Runtime.getRuntime().addShutdownHook(new Thread() {
@Override
public void run() {
IOUtils.closeQuietly(HBaseFsck.this);
- unlockHbck();
+ cleanupHbckZnode();
}
});
@@ -684,34 +584,7 @@ public class HBaseFsck extends Configured implements Closeable {
errors.print("Version: " + status.getHBaseVersion());
offlineHdfsIntegrityRepair();
- boolean oldBalancer = false;
- if (shouldDisableBalancer()) {
- oldBalancer = admin.setBalancerRunning(false, true);
- }
- boolean[] oldSplitAndMerge = null;
- if (shouldDisableSplitAndMerge()) {
- admin.releaseSplitOrMergeLockAndRollback();
- oldSplitAndMerge = admin.setSplitOrMergeEnabled(false, false, false,
- MasterSwitchType.SPLIT, MasterSwitchType.MERGE);
- }
-
- try {
- onlineConsistencyRepair();
- }
- finally {
- // Only restore the balancer if it was true when we started repairing and
- // we actually disabled it. Otherwise, we might clobber another run of
- // hbck that has just restored it.
- if (shouldDisableBalancer() && oldBalancer) {
- admin.setBalancerRunning(oldBalancer, false);
- }
-
- if (shouldDisableSplitAndMerge()) {
- if (oldSplitAndMerge != null) {
- admin.releaseSplitOrMergeLockAndRollback();
- }
- }
- }
+ onlineConsistencyRepair();
if (checkRegionBoundaries) {
checkRegionBoundaries();
@@ -723,8 +596,8 @@ public class HBaseFsck extends Configured implements Closeable {
checkAndFixReplication();
- // Remove the hbck lock
- unlockHbck();
+ // Remove the hbck znode
+ cleanupHbckZnode();
// Print table summary
printTableSummary(tablesInfo);
@@ -743,7 +616,7 @@ public class HBaseFsck extends Configured implements Closeable {
@Override
public void close() throws IOException {
try {
- unlockHbck();
+ cleanupHbckZnode();
} catch (Exception io) {
LOG.warn(io);
} finally {
@@ -1789,11 +1662,6 @@ public class HBaseFsck extends Configured implements Closeable {
});
}
- private ServerName getMetaRegionServerName(int replicaId)
- throws IOException, KeeperException {
- return new MetaTableLocator().getMetaRegionLocation(zkw, replicaId);
- }
-
/**
* Contacts each regionserver and fetches metadata about regions.
* @param regionServerList - the list of region servers to connect to
@@ -4179,43 +4047,6 @@ public class HBaseFsck extends Configured implements Closeable {
}
/**
- * Disable the load balancer.
- */
- public static void setDisableBalancer() {
- disableBalancer = true;
- }
-
- /**
- * Disable the split and merge
- */
- public static void setDisableSplitAndMerge() {
- setDisableSplitAndMerge(true);
- }
-
- @VisibleForTesting
- public static void setDisableSplitAndMerge(boolean flag) {
- disableSplitAndMerge = flag;
- }
-
- /**
- * The balancer should be disabled if we are modifying HBase.
- * It can be disabled if you want to prevent region movement from causing
- * false positives.
- */
- public boolean shouldDisableBalancer() {
- return fixAny || disableBalancer;
- }
-
- /**
- * The split and merge should be disabled if we are modifying HBase.
- * It can be disabled if you want to prevent region movement from causing
- * false positives.
- */
- public boolean shouldDisableSplitAndMerge() {
- return fixAny || disableSplitAndMerge;
- }
-
- /**
* Set summary mode.
* Print only summary of the tables and status (OK or INCONSISTENT)
*/
@@ -4572,10 +4403,6 @@ public class HBaseFsck extends Configured implements Closeable {
setDisplayFullReport();
} else if (cmd.equals("-exclusive")) {
setForceExclusive();
- } else if (cmd.equals("-disableBalancer")) {
- setDisableBalancer();
- } else if (cmd.equals("-disableSplitAndMerge")) {
- setDisableSplitAndMerge();
} else if (cmd.equals("-timelag")) {
if (i == args.length - 1) {
errors.reportError(ERROR_CODE.WRONG_USAGE, "HBaseFsck: -timelag needs a value.");
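
For readers following the HBaseFsck.java hunks above: the patch replaces the old HDFS lock file with a ZooKeeper znode that guards against two hbck instances running at once, and the shutdown hook deletes that znode on ctrl-c. The sketch below is illustrative only, not the patch's implementation; the "hbase.hbck.createznode.*" keys come from the test changes further down, while the class, field, and path names are assumptions.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.zookeeper.ZKUtil;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
import org.apache.zookeeper.KeeperException;

// Hypothetical helper showing the create-znode-or-abort pattern with capped
// exponential backoff and an idempotent cleanup suitable for a shutdown hook.
final class HbckZnodeGuard {
  private final ZooKeeperWatcher zkw;
  private final String znodePath;
  private volatile boolean znodeCreated = false;

  HbckZnodeGuard(ZooKeeperWatcher zkw, String znodePath) {
    this.zkw = zkw;
    this.znodePath = znodePath;
  }

  /** Try to create the ephemeral hbck znode; false means another hbck holds it. */
  boolean acquire(Configuration conf) throws KeeperException, InterruptedException {
    int attempts = conf.getInt("hbase.hbck.createznode.attempts", 5);
    long sleep = conf.getInt("hbase.hbck.createznode.attempt.sleep.interval", 200);
    long maxSleep = conf.getInt("hbase.hbck.createznode.attempt.maxsleeptime", 5000);
    for (int i = 0; i < attempts; i++) {
      // Returns true only if this process created the node; false if it already exists.
      if (ZKUtil.createEphemeralNodeAndWatch(zkw, znodePath, new byte[0])) {
        znodeCreated = true;
        return true;
      }
      Thread.sleep(sleep);
      sleep = Math.min(sleep * 2, maxSleep);
    }
    return false;
  }

  /** Delete the znode so later hbck runs are not blocked; safe to call repeatedly. */
  void release() {
    if (!znodeCreated) {
      return;
    }
    try {
      ZKUtil.deleteNodeFailSilent(zkw, znodePath);
      znodeCreated = false;
    } catch (KeeperException e) {
      // Best effort; the duplicate-hbck error message tells operators how to recover.
    }
  }
}

If the real implementation uses an ephemeral node (as this sketch assumes), a hard kill that skips the shutdown hook still releases the guard once the ZooKeeper session expires; the "delete the znode and rerun" advice in the error message above covers any remaining stale-node cases.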
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/SplitOrMergeTracker.java hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/SplitOrMergeTracker.java
index b975c43..01206d4 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/SplitOrMergeTracker.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/SplitOrMergeTracker.java
@@ -231,6 +231,4 @@ public class SplitOrMergeTracker {
return builder.build();
}
}
-
-
}
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java
index 60b62e4..46b0319 100644
--- hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java
@@ -297,6 +297,11 @@ public class MockNoopMasterServices implements MasterServices, Server {
}
@Override
+ public boolean isInMaintenanceMode() {
+ return false;
+ }
+
+ @Override
public long getLastMajorCompactionTimestamp(TableName table) throws IOException {
return 0;
}
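
The MockNoopMasterServices change above simply stubs the new MasterServices#isInMaintenanceMode() to false. As a hedged illustration of how code holding a MasterServices reference might consult that flag before doing anything disruptive (the guard class below is an assumption; only the interface method comes from the patch):

import java.io.IOException;

import org.apache.hadoop.hbase.master.MasterServices;

// Hypothetical guard: reject an operation while the master is in maintenance mode.
final class MaintenanceGuard {
  private MaintenanceGuard() {
  }

  static void checkNotInMaintenance(MasterServices master) throws IOException {
    if (master.isInMaintenanceMode()) {
      throw new IOException("Master is in maintenance mode; rejecting operation");
    }
  }
}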
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckOneRS.java hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckOneRS.java
index 84ef6da..d8b01f0 100644
--- hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckOneRS.java
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckOneRS.java
@@ -38,7 +38,6 @@ import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Get;
-import org.apache.hadoop.hbase.client.MasterSwitchType;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.client.Result;
@@ -63,7 +62,6 @@ import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.hbase.testclassification.MiscTests;
import org.apache.hadoop.hbase.util.hbck.HFileCorruptionChecker;
import org.apache.hadoop.hbase.util.hbck.HbckTestingUtil;
-import org.apache.hadoop.hbase.zookeeper.ZKUtil;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
import org.junit.AfterClass;
import org.junit.Assert;
@@ -72,16 +70,12 @@ import org.junit.BeforeClass;
import org.junit.Ignore;
import org.junit.Test;
import org.junit.experimental.categories.Category;
-import org.mockito.invocation.InvocationOnMock;
-import org.mockito.stubbing.Answer;
-
import java.io.IOException;
import java.util.ArrayList;
import java.util.LinkedList;
import java.util.List;
import java.util.HashMap;
import java.util.Map;
-import java.util.Set;
import java.util.concurrent.Callable;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
@@ -95,8 +89,6 @@ import java.util.concurrent.atomic.AtomicBoolean;
import static org.apache.hadoop.hbase.util.hbck.HbckTestingUtil.*;
import static org.junit.Assert.*;
-import static org.mockito.Mockito.doAnswer;
-import static org.mockito.Mockito.spy;
@Category({MiscTests.class, LargeTests.class})
public class TestHBaseFsckOneRS extends BaseTestHBaseFsck {
@@ -484,10 +476,7 @@ public class TestHBaseFsckOneRS extends BaseTestHBaseFsck {
@Override
public HBaseFsck call(){
Configuration c = new Configuration(conf);
- c.setInt("hbase.hbck.lockfile.attempts", 1);
- // HBASE-13574 found that in HADOOP-2.6 and later, the create file would internally retry.
- // To avoid flakiness of the test, set low max wait time.
- c.setInt("hbase.hbck.lockfile.maxwaittime", 3);
+ c.setInt("hbase.hbck.createznode.attempts", 1);
try{
return doFsck(c, true); // Exclusive hbck only when fixing
} catch(Exception e){
@@ -532,25 +521,20 @@ public class TestHBaseFsckOneRS extends BaseTestHBaseFsck {
// With the ExponentialBackoffPolicyWithLimit (starting with 200 milliseconds sleep time, and
// max sleep time of 5 seconds), we can retry around 15 times within 80 seconds before bail out.
//
- // Note: the reason to use 80 seconds is that in HADOOP-2.6 and later, the create file would
- // retry up to HdfsConstants.LEASE_SOFTLIMIT_PERIOD (60 seconds). See HBASE-13574 for more
- // details.
final int timeoutInSeconds = 80;
final int sleepIntervalInMilliseconds = 200;
- final int maxSleepTimeInMilliseconds = 6000;
+ final int maxSleepTimeInMilliseconds = 5000;
final int maxRetryAttempts = 15;
class RunHbck implements Callable<HBaseFsck> {
-
@Override
public HBaseFsck call() throws Exception {
// Increase retry attempts to make sure the non-active hbck doesn't get starved
Configuration c = new Configuration(conf);
- c.setInt("hbase.hbck.lockfile.maxwaittime", timeoutInSeconds);
- c.setInt("hbase.hbck.lockfile.attempt.sleep.interval", sleepIntervalInMilliseconds);
- c.setInt("hbase.hbck.lockfile.attempt.maxsleeptime", maxSleepTimeInMilliseconds);
- c.setInt("hbase.hbck.lockfile.attempts", maxRetryAttempts);
- return doFsck(c, false);
+ c.setInt("hbase.hbck.createznode.attempt.sleep.interval", sleepIntervalInMilliseconds);
+ c.setInt("hbase.hbck.createznode.attempt.maxsleeptime", maxSleepTimeInMilliseconds);
+ c.setInt("hbase.hbck.createznode.attempts", maxRetryAttempts);
+ return doFsck(c, true); // Exclusive hbck only when fixing
}
}
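
A quick sanity check of the timing comment in that hunk, assuming the backoff simply doubles each attempt and is capped at the configured maximum (the behavior attributed to ExponentialBackoffPolicyWithLimit above): 15 attempts starting at 200 ms and capped at 5 s sleep for roughly 56 s in total, comfortably inside the 80-second budget.

// Illustrative arithmetic only; the constant names mirror the test locals above.
public final class BackoffBudget {
  public static void main(String[] args) {
    long sleep = 200;            // sleepIntervalInMilliseconds
    final long maxSleep = 5000;  // maxSleepTimeInMilliseconds
    long total = 0;
    for (int attempt = 1; attempt <= 15; attempt++) { // maxRetryAttempts
      total += sleep;
      sleep = Math.min(sleep * 2, maxSleep);
    }
    // Prints: total sleep ms = 56200 (200+400+800+1600+3200 plus 10 * 5000)
    System.out.println("total sleep ms = " + total);
  }
}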
@@ -567,7 +551,6 @@ public class TestHBaseFsckOneRS extends BaseTestHBaseFsck {
assertNotNull(h2);
assert(h1.getRetCode() >= 0);
assert(h2.getRetCode() >= 0);
-
}
@Test (timeout = 180000)
@@ -1850,53 +1833,4 @@ public class TestHBaseFsckOneRS extends BaseTestHBaseFsck {
doQuarantineTest(table, hbck, 3, 0, 0, 0, 1);
hbck.close();
}
-
- /**
- * See HBASE-15406
- * */
- @Test
- public void testSplitOrMergeStatWhenHBCKAbort() throws Exception {
- admin.setSplitOrMergeEnabled(true, false, true,
- MasterSwitchType.SPLIT, MasterSwitchType.MERGE);
- boolean oldSplit = admin.isSplitOrMergeEnabled(MasterSwitchType.SPLIT);
- boolean oldMerge = admin.isSplitOrMergeEnabled(MasterSwitchType.MERGE);
-
- assertTrue(oldSplit);
- assertTrue(oldMerge);
-
- ExecutorService exec = new ScheduledThreadPoolExecutor(10);
- HBaseFsck hbck = new HBaseFsck(conf, exec);
- HBaseFsck.setDisplayFullReport(); // i.e. -details
- final HBaseFsck spiedHbck = spy(hbck);
- doAnswer(new Answer() {
- @Override
- public Object answer(InvocationOnMock invocation) throws Throwable {
- // we close splitOrMerge flag in hbck, so in finally hbck will not set splitOrMerge back.
- spiedHbck.setDisableSplitAndMerge(false);
- return null;
- }
- }).when(spiedHbck).onlineConsistencyRepair();
- spiedHbck.setDisableSplitAndMerge();
- spiedHbck.connect();
- spiedHbck.onlineHbck();
- spiedHbck.close();
-
- boolean split = admin.isSplitOrMergeEnabled(MasterSwitchType.SPLIT);
- boolean merge = admin.isSplitOrMergeEnabled(MasterSwitchType.MERGE);
- assertFalse(split);
- assertFalse(merge);
-
- // rerun hbck to repair the switches state
- hbck = new HBaseFsck(conf, exec);
- hbck.setDisableSplitAndMerge();
- hbck.connect();
- hbck.onlineHbck();
- hbck.close();
-
- split = admin.isSplitOrMergeEnabled(MasterSwitchType.SPLIT);
- merge = admin.isSplitOrMergeEnabled(MasterSwitchType.MERGE);
-
- assertTrue(split);
- assertTrue(merge);
- }
}