done);
+ /**
+ * <code>rpc getLastMajorCompactionTimestamp(.MajorCompactionTimestampRequest) returns (.MajorCompactionTimestampResponse);</code>
+ *
+ * <pre>
+ ** Returns the timestamp of the last major compaction
+ * </pre>
+ */
+ public abstract void getLastMajorCompactionTimestamp(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest request,
+ com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse> done);
+
+ /**
+ * <code>rpc getLastMajorCompactionTimestampForRegion(.MajorCompactionTimestampForRegionRequest) returns (.MajorCompactionTimestampResponse);</code>
+ *
+ * <pre>
+ ** Returns the timestamp of the last major compaction
+ * </pre>
+ */
+ public abstract void getLastMajorCompactionTimestampForRegion(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest request,
+ com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse> done);
+
}
public static com.google.protobuf.Service newReflectiveService(
@@ -45319,6 +46907,22 @@ public final class MasterProtos {
impl.setQuota(controller, request, done);
}
+ @java.lang.Override
+ public void getLastMajorCompactionTimestamp(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest request,
+ com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse> done) {
+ impl.getLastMajorCompactionTimestamp(controller, request, done);
+ }
+
+ @java.lang.Override
+ public void getLastMajorCompactionTimestampForRegion(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest request,
+ com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse> done) {
+ impl.getLastMajorCompactionTimestampForRegion(controller, request, done);
+ }
+
};
}
@@ -45431,6 +47035,10 @@ public final class MasterProtos {
return impl.getTableState(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest)request);
case 44:
return impl.setQuota(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest)request);
+ case 45:
+ return impl.getLastMajorCompactionTimestamp(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest)request);
+ case 46:
+ return impl.getLastMajorCompactionTimestampForRegion(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest)request);
default:
throw new java.lang.AssertionError("Can't get here.");
}
@@ -45535,6 +47143,10 @@ public final class MasterProtos {
return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest.getDefaultInstance();
case 44:
return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest.getDefaultInstance();
+ case 45:
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest.getDefaultInstance();
+ case 46:
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest.getDefaultInstance();
default:
throw new java.lang.AssertionError("Can't get here.");
}
@@ -45639,6 +47251,10 @@ public final class MasterProtos {
return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse.getDefaultInstance();
case 44:
return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse.getDefaultInstance();
+ case 45:
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.getDefaultInstance();
+ case 46:
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.getDefaultInstance();
default:
throw new java.lang.AssertionError("Can't get here.");
}
@@ -46213,6 +47829,30 @@ public final class MasterProtos {
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest request,
com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse> done);
+ /**
+ * <code>rpc getLastMajorCompactionTimestamp(.MajorCompactionTimestampRequest) returns (.MajorCompactionTimestampResponse);</code>
+ *
+ * <pre>
+ ** Returns the timestamp of the last major compaction
+ * </pre>
+ */
+ public abstract void getLastMajorCompactionTimestamp(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest request,
+ com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse> done);
+
+ /**
+ * <code>rpc getLastMajorCompactionTimestampForRegion(.MajorCompactionTimestampForRegionRequest) returns (.MajorCompactionTimestampResponse);</code>
+ *
+ * <pre>
+ ** Returns the timestamp of the last major compaction
+ * </pre>
+ */
+ public abstract void getLastMajorCompactionTimestampForRegion(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest request,
+ com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse> done);
+
public static final
com.google.protobuf.Descriptors.ServiceDescriptor
getDescriptor() {
@@ -46460,6 +48100,16 @@ public final class MasterProtos {
com.google.protobuf.RpcUtil.specializeCallback(
done));
return;
+ case 45:
+ this.getLastMajorCompactionTimestamp(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest)request,
+ com.google.protobuf.RpcUtil.specializeCallback(
+ done));
+ return;
+ case 46:
+ this.getLastMajorCompactionTimestampForRegion(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest)request,
+ com.google.protobuf.RpcUtil.specializeCallback(
+ done));
+ return;
default:
throw new java.lang.AssertionError("Can't get here.");
}
@@ -46564,6 +48214,10 @@ public final class MasterProtos {
return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest.getDefaultInstance();
case 44:
return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest.getDefaultInstance();
+ case 45:
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest.getDefaultInstance();
+ case 46:
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest.getDefaultInstance();
default:
throw new java.lang.AssertionError("Can't get here.");
}
@@ -46668,6 +48322,10 @@ public final class MasterProtos {
return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse.getDefaultInstance();
case 44:
return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse.getDefaultInstance();
+ case 45:
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.getDefaultInstance();
+ case 46:
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.getDefaultInstance();
default:
throw new java.lang.AssertionError("Can't get here.");
}
@@ -47363,6 +49021,36 @@ public final class MasterProtos {
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse.class,
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse.getDefaultInstance()));
}
+
+ public void getLastMajorCompactionTimestamp(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest request,
+ com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse> done) {
+ channel.callMethod(
+ getDescriptor().getMethods().get(45),
+ controller,
+ request,
+ org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.getDefaultInstance(),
+ com.google.protobuf.RpcUtil.generalizeCallback(
+ done,
+ org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.class,
+ org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.getDefaultInstance()));
+ }
+
+ public void getLastMajorCompactionTimestampForRegion(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest request,
+ com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse> done) {
+ channel.callMethod(
+ getDescriptor().getMethods().get(46),
+ controller,
+ request,
+ org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.getDefaultInstance(),
+ com.google.protobuf.RpcUtil.generalizeCallback(
+ done,
+ org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.class,
+ org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.getDefaultInstance()));
+ }
}
public static BlockingInterface newBlockingStub(
@@ -47595,6 +49283,16 @@ public final class MasterProtos {
com.google.protobuf.RpcController controller,
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest request)
throws com.google.protobuf.ServiceException;
+
+ public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse getLastMajorCompactionTimestamp(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest request)
+ throws com.google.protobuf.ServiceException;
+
+ public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse getLastMajorCompactionTimestampForRegion(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest request)
+ throws com.google.protobuf.ServiceException;
}
private static final class BlockingStub implements BlockingInterface {
@@ -48143,6 +49841,30 @@ public final class MasterProtos {
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse.getDefaultInstance());
}
+
+ public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse getLastMajorCompactionTimestamp(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest request)
+ throws com.google.protobuf.ServiceException {
+ return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse) channel.callBlockingMethod(
+ getDescriptor().getMethods().get(45),
+ controller,
+ request,
+ org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.getDefaultInstance());
+ }
+
+
+ public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse getLastMajorCompactionTimestampForRegion(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest request)
+ throws com.google.protobuf.ServiceException {
+ return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse) channel.callBlockingMethod(
+ getDescriptor().getMethods().get(46),
+ controller,
+ request,
+ org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.getDefaultInstance());
+ }
+
}
// @@protoc_insertion_point(class_scope:MasterService)
@@ -48578,6 +50300,21 @@ public final class MasterProtos {
private static
com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_SetQuotaResponse_fieldAccessorTable;
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_MajorCompactionTimestampRequest_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_MajorCompactionTimestampRequest_fieldAccessorTable;
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_MajorCompactionTimestampForRegionRequest_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_MajorCompactionTimestampForRegionRequest_fieldAccessorTable;
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_MajorCompactionTimestampResponse_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_MajorCompactionTimestampResponse_fieldAccessorTable;
public static com.google.protobuf.Descriptors.FileDescriptor
getDescriptor() {
@@ -48707,88 +50444,99 @@ public final class MasterProtos {
"\021\n\tnamespace\030\003 \001(\t\022\036\n\ntable_name\030\004 \001(\0132\n" +
".TableName\022\022\n\nremove_all\030\005 \001(\010\022\026\n\016bypass" +
"_globals\030\006 \001(\010\022\"\n\010throttle\030\007 \001(\0132\020.Throt",
- "tleRequest\"\022\n\020SetQuotaResponse2\346\030\n\rMaste" +
- "rService\022S\n\024GetSchemaAlterStatus\022\034.GetSc" +
- "hemaAlterStatusRequest\032\035.GetSchemaAlterS" +
- "tatusResponse\022P\n\023GetTableDescriptors\022\033.G" +
- "etTableDescriptorsRequest\032\034.GetTableDesc" +
- "riptorsResponse\022>\n\rGetTableNames\022\025.GetTa" +
- "bleNamesRequest\032\026.GetTableNamesResponse\022" +
- "G\n\020GetClusterStatus\022\030.GetClusterStatusRe" +
- "quest\032\031.GetClusterStatusResponse\022D\n\017IsMa" +
- "sterRunning\022\027.IsMasterRunningRequest\032\030.I",
- "sMasterRunningResponse\0222\n\tAddColumn\022\021.Ad" +
- "dColumnRequest\032\022.AddColumnResponse\022;\n\014De" +
- "leteColumn\022\024.DeleteColumnRequest\032\025.Delet" +
- "eColumnResponse\022;\n\014ModifyColumn\022\024.Modify" +
- "ColumnRequest\032\025.ModifyColumnResponse\0225\n\n" +
- "MoveRegion\022\022.MoveRegionRequest\032\023.MoveReg" +
- "ionResponse\022Y\n\026DispatchMergingRegions\022\036." +
- "DispatchMergingRegionsRequest\032\037.Dispatch" +
- "MergingRegionsResponse\022;\n\014AssignRegion\022\024" +
- ".AssignRegionRequest\032\025.AssignRegionRespo",
- "nse\022A\n\016UnassignRegion\022\026.UnassignRegionRe" +
- "quest\032\027.UnassignRegionResponse\022>\n\rOfflin" +
- "eRegion\022\025.OfflineRegionRequest\032\026.Offline" +
- "RegionResponse\0228\n\013DeleteTable\022\023.DeleteTa" +
- "bleRequest\032\024.DeleteTableResponse\022>\n\rtrun" +
- "cateTable\022\025.TruncateTableRequest\032\026.Trunc" +
- "ateTableResponse\0228\n\013EnableTable\022\023.Enable" +
- "TableRequest\032\024.EnableTableResponse\022;\n\014Di" +
- "sableTable\022\024.DisableTableRequest\032\025.Disab" +
- "leTableResponse\0228\n\013ModifyTable\022\023.ModifyT",
- "ableRequest\032\024.ModifyTableResponse\0228\n\013Cre" +
- "ateTable\022\023.CreateTableRequest\032\024.CreateTa" +
- "bleResponse\022/\n\010Shutdown\022\020.ShutdownReques" +
- "t\032\021.ShutdownResponse\0225\n\nStopMaster\022\022.Sto" +
- "pMasterRequest\032\023.StopMasterResponse\022,\n\007B" +
- "alance\022\017.BalanceRequest\032\020.BalanceRespons" +
- "e\022M\n\022SetBalancerRunning\022\032.SetBalancerRun" +
- "ningRequest\032\033.SetBalancerRunningResponse" +
- "\022A\n\016RunCatalogScan\022\026.RunCatalogScanReque" +
- "st\032\027.RunCatalogScanResponse\022S\n\024EnableCat",
- "alogJanitor\022\034.EnableCatalogJanitorReques" +
- "t\032\035.EnableCatalogJanitorResponse\022\\\n\027IsCa" +
- "talogJanitorEnabled\022\037.IsCatalogJanitorEn" +
- "abledRequest\032 .IsCatalogJanitorEnabledRe" +
- "sponse\022L\n\021ExecMasterService\022\032.Coprocesso" +
- "rServiceRequest\032\033.CoprocessorServiceResp" +
- "onse\022/\n\010Snapshot\022\020.SnapshotRequest\032\021.Sna" +
- "pshotResponse\022V\n\025GetCompletedSnapshots\022\035" +
- ".GetCompletedSnapshotsRequest\032\036.GetCompl" +
- "etedSnapshotsResponse\022A\n\016DeleteSnapshot\022",
- "\026.DeleteSnapshotRequest\032\027.DeleteSnapshot" +
- "Response\022A\n\016IsSnapshotDone\022\026.IsSnapshotD" +
- "oneRequest\032\027.IsSnapshotDoneResponse\022D\n\017R" +
- "estoreSnapshot\022\027.RestoreSnapshotRequest\032" +
- "\030.RestoreSnapshotResponse\022V\n\025IsRestoreSn" +
- "apshotDone\022\035.IsRestoreSnapshotDoneReques" +
- "t\032\036.IsRestoreSnapshotDoneResponse\022>\n\rExe" +
- "cProcedure\022\025.ExecProcedureRequest\032\026.Exec" +
- "ProcedureResponse\022E\n\024ExecProcedureWithRe" +
- "t\022\025.ExecProcedureRequest\032\026.ExecProcedure",
- "Response\022D\n\017IsProcedureDone\022\027.IsProcedur" +
- "eDoneRequest\032\030.IsProcedureDoneResponse\022D" +
- "\n\017ModifyNamespace\022\027.ModifyNamespaceReque" +
- "st\032\030.ModifyNamespaceResponse\022D\n\017CreateNa" +
- "mespace\022\027.CreateNamespaceRequest\032\030.Creat" +
- "eNamespaceResponse\022D\n\017DeleteNamespace\022\027." +
- "DeleteNamespaceRequest\032\030.DeleteNamespace" +
- "Response\022Y\n\026GetNamespaceDescriptor\022\036.Get" +
- "NamespaceDescriptorRequest\032\037.GetNamespac" +
- "eDescriptorResponse\022_\n\030ListNamespaceDesc",
- "riptors\022 .ListNamespaceDescriptorsReques" +
- "t\032!.ListNamespaceDescriptorsResponse\022t\n\037" +
- "ListTableDescriptorsByNamespace\022\'.ListTa" +
- "bleDescriptorsByNamespaceRequest\032(.ListT" +
- "ableDescriptorsByNamespaceResponse\022b\n\031Li" +
- "stTableNamesByNamespace\022!.ListTableNames" +
- "ByNamespaceRequest\032\".ListTableNamesByNam" +
- "espaceResponse\022>\n\rGetTableState\022\025.GetTab" +
- "leStateRequest\032\026.GetTableStateResponse\022/" +
- "\n\010SetQuota\022\020.SetQuotaRequest\032\021.SetQuotaR",
- "esponseBB\n*org.apache.hadoop.hbase.proto" +
- "buf.generatedB\014MasterProtosH\001\210\001\001\240\001\001"
+ "tleRequest\"\022\n\020SetQuotaResponse\"A\n\037MajorC" +
+ "ompactionTimestampRequest\022\036\n\ntable_name\030" +
+ "\001 \002(\0132\n.TableName\"L\n(MajorCompactionTime" +
+ "stampForRegionRequest\022 \n\006region\030\001 \002(\0132\020." +
+ "RegionSpecifier\"@\n MajorCompactionTimest" +
+ "ampResponse\022\034\n\024compaction_timestamp\030\001 \002(" +
+ "\0032\310\032\n\rMasterService\022S\n\024GetSchemaAlterSta" +
+ "tus\022\034.GetSchemaAlterStatusRequest\032\035.GetS" +
+ "chemaAlterStatusResponse\022P\n\023GetTableDesc" +
+ "riptors\022\033.GetTableDescriptorsRequest\032\034.G",
+ "etTableDescriptorsResponse\022>\n\rGetTableNa" +
+ "mes\022\025.GetTableNamesRequest\032\026.GetTableNam" +
+ "esResponse\022G\n\020GetClusterStatus\022\030.GetClus" +
+ "terStatusRequest\032\031.GetClusterStatusRespo" +
+ "nse\022D\n\017IsMasterRunning\022\027.IsMasterRunning" +
+ "Request\032\030.IsMasterRunningResponse\0222\n\tAdd" +
+ "Column\022\021.AddColumnRequest\032\022.AddColumnRes" +
+ "ponse\022;\n\014DeleteColumn\022\024.DeleteColumnRequ" +
+ "est\032\025.DeleteColumnResponse\022;\n\014ModifyColu" +
+ "mn\022\024.ModifyColumnRequest\032\025.ModifyColumnR",
+ "esponse\0225\n\nMoveRegion\022\022.MoveRegionReques" +
+ "t\032\023.MoveRegionResponse\022Y\n\026DispatchMergin" +
+ "gRegions\022\036.DispatchMergingRegionsRequest" +
+ "\032\037.DispatchMergingRegionsResponse\022;\n\014Ass" +
+ "ignRegion\022\024.AssignRegionRequest\032\025.Assign" +
+ "RegionResponse\022A\n\016UnassignRegion\022\026.Unass" +
+ "ignRegionRequest\032\027.UnassignRegionRespons" +
+ "e\022>\n\rOfflineRegion\022\025.OfflineRegionReques" +
+ "t\032\026.OfflineRegionResponse\0228\n\013DeleteTable" +
+ "\022\023.DeleteTableRequest\032\024.DeleteTableRespo",
+ "nse\022>\n\rtruncateTable\022\025.TruncateTableRequ" +
+ "est\032\026.TruncateTableResponse\0228\n\013EnableTab" +
+ "le\022\023.EnableTableRequest\032\024.EnableTableRes" +
+ "ponse\022;\n\014DisableTable\022\024.DisableTableRequ" +
+ "est\032\025.DisableTableResponse\0228\n\013ModifyTabl" +
+ "e\022\023.ModifyTableRequest\032\024.ModifyTableResp" +
+ "onse\0228\n\013CreateTable\022\023.CreateTableRequest" +
+ "\032\024.CreateTableResponse\022/\n\010Shutdown\022\020.Shu" +
+ "tdownRequest\032\021.ShutdownResponse\0225\n\nStopM" +
+ "aster\022\022.StopMasterRequest\032\023.StopMasterRe",
+ "sponse\022,\n\007Balance\022\017.BalanceRequest\032\020.Bal" +
+ "anceResponse\022M\n\022SetBalancerRunning\022\032.Set" +
+ "BalancerRunningRequest\032\033.SetBalancerRunn" +
+ "ingResponse\022A\n\016RunCatalogScan\022\026.RunCatal" +
+ "ogScanRequest\032\027.RunCatalogScanResponse\022S" +
+ "\n\024EnableCatalogJanitor\022\034.EnableCatalogJa" +
+ "nitorRequest\032\035.EnableCatalogJanitorRespo" +
+ "nse\022\\\n\027IsCatalogJanitorEnabled\022\037.IsCatal" +
+ "ogJanitorEnabledRequest\032 .IsCatalogJanit" +
+ "orEnabledResponse\022L\n\021ExecMasterService\022\032",
+ ".CoprocessorServiceRequest\032\033.Coprocessor" +
+ "ServiceResponse\022/\n\010Snapshot\022\020.SnapshotRe" +
+ "quest\032\021.SnapshotResponse\022V\n\025GetCompleted" +
+ "Snapshots\022\035.GetCompletedSnapshotsRequest" +
+ "\032\036.GetCompletedSnapshotsResponse\022A\n\016Dele" +
+ "teSnapshot\022\026.DeleteSnapshotRequest\032\027.Del" +
+ "eteSnapshotResponse\022A\n\016IsSnapshotDone\022\026." +
+ "IsSnapshotDoneRequest\032\027.IsSnapshotDoneRe" +
+ "sponse\022D\n\017RestoreSnapshot\022\027.RestoreSnaps" +
+ "hotRequest\032\030.RestoreSnapshotResponse\022V\n\025",
+ "IsRestoreSnapshotDone\022\035.IsRestoreSnapsho" +
+ "tDoneRequest\032\036.IsRestoreSnapshotDoneResp" +
+ "onse\022>\n\rExecProcedure\022\025.ExecProcedureReq" +
+ "uest\032\026.ExecProcedureResponse\022E\n\024ExecProc" +
+ "edureWithRet\022\025.ExecProcedureRequest\032\026.Ex" +
+ "ecProcedureResponse\022D\n\017IsProcedureDone\022\027" +
+ ".IsProcedureDoneRequest\032\030.IsProcedureDon" +
+ "eResponse\022D\n\017ModifyNamespace\022\027.ModifyNam" +
+ "espaceRequest\032\030.ModifyNamespaceResponse\022" +
+ "D\n\017CreateNamespace\022\027.CreateNamespaceRequ",
+ "est\032\030.CreateNamespaceResponse\022D\n\017DeleteN" +
+ "amespace\022\027.DeleteNamespaceRequest\032\030.Dele" +
+ "teNamespaceResponse\022Y\n\026GetNamespaceDescr" +
+ "iptor\022\036.GetNamespaceDescriptorRequest\032\037." +
+ "GetNamespaceDescriptorResponse\022_\n\030ListNa" +
+ "mespaceDescriptors\022 .ListNamespaceDescri" +
+ "ptorsRequest\032!.ListNamespaceDescriptorsR" +
+ "esponse\022t\n\037ListTableDescriptorsByNamespa" +
+ "ce\022\'.ListTableDescriptorsByNamespaceRequ" +
+ "est\032(.ListTableDescriptorsByNamespaceRes",
+ "ponse\022b\n\031ListTableNamesByNamespace\022!.Lis" +
+ "tTableNamesByNamespaceRequest\032\".ListTabl" +
+ "eNamesByNamespaceResponse\022>\n\rGetTableSta" +
+ "te\022\025.GetTableStateRequest\032\026.GetTableStat" +
+ "eResponse\022/\n\010SetQuota\022\020.SetQuotaRequest\032" +
+ "\021.SetQuotaResponse\022f\n\037getLastMajorCompac" +
+ "tionTimestamp\022 .MajorCompactionTimestamp" +
+ "Request\032!.MajorCompactionTimestampRespon" +
+ "se\022x\n(getLastMajorCompactionTimestampFor" +
+ "Region\022).MajorCompactionTimestampForRegi",
+ "onRequest\032!.MajorCompactionTimestampResp" +
+ "onseBB\n*org.apache.hadoop.hbase.protobuf" +
+ ".generatedB\014MasterProtosH\001\210\001\001\240\001\001"
};
com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
@@ -49311,6 +51059,24 @@ public final class MasterProtos {
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_SetQuotaResponse_descriptor,
new java.lang.String[] { });
+ internal_static_MajorCompactionTimestampRequest_descriptor =
+ getDescriptor().getMessageTypes().get(86);
+ internal_static_MajorCompactionTimestampRequest_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_MajorCompactionTimestampRequest_descriptor,
+ new java.lang.String[] { "TableName", });
+ internal_static_MajorCompactionTimestampForRegionRequest_descriptor =
+ getDescriptor().getMessageTypes().get(87);
+ internal_static_MajorCompactionTimestampForRegionRequest_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_MajorCompactionTimestampForRegionRequest_descriptor,
+ new java.lang.String[] { "Region", });
+ internal_static_MajorCompactionTimestampResponse_descriptor =
+ getDescriptor().getMessageTypes().get(88);
+ internal_static_MajorCompactionTimestampResponse_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_MajorCompactionTimestampResponse_descriptor,
+ new java.lang.String[] { "CompactionTimestamp", });
return null;
}
};
diff --git a/hbase-protocol/src/main/protobuf/ClusterStatus.proto b/hbase-protocol/src/main/protobuf/ClusterStatus.proto
index 7e78395..2b2d9eb 100644
--- a/hbase-protocol/src/main/protobuf/ClusterStatus.proto
+++ b/hbase-protocol/src/main/protobuf/ClusterStatus.proto
@@ -113,6 +113,8 @@ message RegionLoad {
/** The current data locality for region in the regionserver */
optional float data_locality = 16;
+
+ optional uint64 last_major_compaction_ts = 17 [default = 0];
}
/* Server-level protobufs */
diff --git a/hbase-protocol/src/main/protobuf/Master.proto b/hbase-protocol/src/main/protobuf/Master.proto
index e55dcc0..e7a3a99 100644
--- a/hbase-protocol/src/main/protobuf/Master.proto
+++ b/hbase-protocol/src/main/protobuf/Master.proto
@@ -387,6 +387,18 @@ message SetQuotaRequest {
message SetQuotaResponse {
}
+message MajorCompactionTimestampRequest {
+ required TableName table_name = 1;
+}
+
+message MajorCompactionTimestampForRegionRequest {
+ required RegionSpecifier region = 1;
+}
+
+message MajorCompactionTimestampResponse {
+ required int64 compaction_timestamp = 1;
+}
+
service MasterService {
/** Used by the client to get the number of regions that have received the updated schema */
rpc GetSchemaAlterStatus(GetSchemaAlterStatusRequest)
@@ -601,4 +613,12 @@ service MasterService {
/** Apply the new quota settings */
rpc SetQuota(SetQuotaRequest) returns(SetQuotaResponse);
+
+ /** Returns the timestamp of the last major compaction */
+ rpc getLastMajorCompactionTimestamp(MajorCompactionTimestampRequest)
+ returns(MajorCompactionTimestampResponse);
+
+ /** Returns the timestamp of the last major compaction */
+ rpc getLastMajorCompactionTimestampForRegion(MajorCompactionTimestampForRegionRequest)
+ returns(MajorCompactionTimestampResponse);
}
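
For reference, a minimal client-side sketch of the new RPC declared above. The class name, the BlockingRpcChannel argument, and the null controller are illustrative assumptions rather than part of this patch; real clients would normally go through the Admin API (see the TestAdmin1 changes below).

import com.google.protobuf.BlockingRpcChannel;
import com.google.protobuf.ServiceException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos;

public class MajorCompactionTsClientSketch {
  /**
   * Asks the master for the last major compaction timestamp of a table over the new RPC.
   * The caller supplies an already-negotiated channel to the active master; a return
   * value of 0 means no store file produced by a major compaction exists yet.
   */
  static long lastMajorCompactionTs(BlockingRpcChannel channel, TableName table)
      throws ServiceException {
    MasterProtos.MasterService.BlockingInterface master =
        MasterProtos.MasterService.newBlockingStub(channel);
    MasterProtos.MajorCompactionTimestampRequest request =
        MasterProtos.MajorCompactionTimestampRequest.newBuilder()
            .setTableName(ProtobufUtil.toProtoTableName(table))
            .build();
    return master.getLastMajorCompactionTimestamp(null, request).getCompactionTimestamp();
  }
}
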
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/AbstractHFileWriter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/AbstractHFileWriter.java
index 2bef680..52491e6 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/AbstractHFileWriter.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/AbstractHFileWriter.java
@@ -148,6 +148,9 @@ public abstract class AbstractHFileWriter implements HFile.Writer {
int avgValueLen =
entryCount == 0 ? 0 : (int) (totalValueLength / entryCount);
fileInfo.append(FileInfo.AVG_VALUE_LEN, Bytes.toBytes(avgValueLen), false);
+
+ fileInfo.append(FileInfo.CREATE_TIME_TS, Bytes.toBytes(hFileContext.getFileCreateTime()),
+ false);
}
/**
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java
index 1e97f63..ad62d71 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java
@@ -541,6 +541,7 @@ public class HFile {
static final byte [] LASTKEY = Bytes.toBytes(RESERVED_PREFIX + "LASTKEY");
static final byte [] AVG_KEY_LEN = Bytes.toBytes(RESERVED_PREFIX + "AVG_KEY_LEN");
static final byte [] AVG_VALUE_LEN = Bytes.toBytes(RESERVED_PREFIX + "AVG_VALUE_LEN");
+ static final byte [] CREATE_TIME_TS = Bytes.toBytes(RESERVED_PREFIX + "CREATE_TIME_TS");
static final byte [] COMPARATOR = Bytes.toBytes(RESERVED_PREFIX + "COMPARATOR");
static final byte [] TAGS_COMPRESSED = Bytes.toBytes(RESERVED_PREFIX + "TAGS_COMPRESSED");
public static final byte [] MAX_TAGS_LEN = Bytes.toBytes(RESERVED_PREFIX + "MAX_TAGS_LEN");
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV2.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV2.java
index e466041..26cb6c9 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV2.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV2.java
@@ -157,6 +157,7 @@ public class HFileReaderV2 extends AbstractHFileReader {
// File info
fileInfo = new FileInfo();
fileInfo.read(blockIter.nextBlockWithBlockType(BlockType.FILE_INFO).getByteStream());
+ // Older HFiles, written before CREATE_TIME_TS existed, carry no creation timestamp.
+ byte[] creationTimeBytes = fileInfo.get(FileInfo.CREATE_TIME_TS);
+ this.hfileContext.setFileCreateTime(creationTimeBytes == null ? 0 : Bytes.toLong(creationTimeBytes));
lastKey = fileInfo.get(FileInfo.LASTKEY);
avgKeyLen = Bytes.toInt(fileInfo.get(FileInfo.AVG_KEY_LEN));
avgValueLen = Bytes.toInt(fileInfo.get(FileInfo.AVG_VALUE_LEN));
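
A short sketch of reading the persisted creation timestamp back from a standalone HFile. The path argument is a placeholder; only HFile.createReader and the HFileContext accessor used by HRegion below are assumed here.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.io.hfile.HFile;

public class HFileCreateTimeSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    FileSystem fs = FileSystem.get(conf);
    Path path = new Path(args[0]); // path to an existing HFile
    HFile.Reader reader = HFile.createReader(fs, path, new CacheConfig(conf), conf);
    try {
      // CREATE_TIME_TS is read from the FILE_INFO block when the reader is opened
      // and exposed through the reader's HFileContext (see HFileReaderV2 above).
      System.out.println("created at " + reader.getFileContext().getFileCreateTime());
    } finally {
      reader.close();
    }
  }
}
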
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index 809b311..81fbcb8 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -2285,4 +2285,14 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
}
}
}
+
+ @Override
+ public long getLastMajorCompactionTimestamp(TableName table) throws IOException {
+ return getClusterStatus().getLastMajorCompactionTsForTable(table);
+ }
+
+ @Override
+ public long getLastMajorCompactionTimestampForRegion(byte[] regionName) throws IOException {
+ return getClusterStatus().getLastMajorCompactionTsForRegion(regionName);
+ }
}
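
HMaster delegates to ClusterStatus helpers that aggregate the new last_major_compaction_ts RegionLoad field; the hbase-client side of that change is not included in this excerpt. Below is a hedged sketch of the aggregation those helpers are expected to perform, with getLastMajorCompactionTs() assumed as the client-side accessor for the new field.

import java.util.Map;
import org.apache.hadoop.hbase.ClusterStatus;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.RegionLoad;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;

public class LastMajorCompactionAggregationSketch {
  /**
   * Table-level timestamp = minimum last_major_compaction_ts over the table's regions;
   * 0 propagates when any region has no major-compacted HFile yet.
   */
  static long lastMajorCompactionTsForTable(ClusterStatus status, TableName table) {
    long oldest = Long.MAX_VALUE;
    for (ServerName server : status.getServers()) {
      Map<byte[], RegionLoad> regions = status.getLoad(server).getRegionsLoad();
      for (RegionLoad load : regions.values()) {
        if (table.equals(HRegionInfo.getTable(load.getName()))) {
          // getLastMajorCompactionTs() is assumed here; its addition is not shown in this excerpt.
          oldest = Math.min(oldest, load.getLastMajorCompactionTs());
        }
      }
    }
    return oldest == Long.MAX_VALUE ? 0 : oldest;
  }
}
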
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
index 6930bf3..0e81461 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
@@ -47,14 +47,11 @@ import org.apache.hadoop.hbase.procedure.MasterProcedureManager;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.RequestConverter;
import org.apache.hadoop.hbase.protobuf.ResponseConverter;
-import org.apache.hadoop.hbase.protobuf.generated.ClientProtos;
-import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos;
-import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
+import org.apache.hadoop.hbase.protobuf.generated.*;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddColumnRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddColumnResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AssignRegionRequest;
@@ -111,6 +108,9 @@ import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescript
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceResponse;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MasterService;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyColumnRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyColumnResponse;
@@ -1275,4 +1275,35 @@ public class MasterRpcServices extends RSRpcServices
throw new ServiceException(e);
}
}
+
+ @Override
+ public MajorCompactionTimestampResponse getLastMajorCompactionTimestamp(RpcController controller,
+ MajorCompactionTimestampRequest request) throws ServiceException {
+ MajorCompactionTimestampResponse.Builder response =
+ MajorCompactionTimestampResponse.newBuilder();
+ try {
+ master.checkInitialized();
+ response.setCompactionTimestamp(master.getLastMajorCompactionTimestamp(ProtobufUtil
+ .toTableName(request.getTableName())));
+ } catch (IOException e) {
+ throw new ServiceException(e);
+ }
+ return response.build();
+ }
+
+ @Override
+ public MajorCompactionTimestampResponse getLastMajorCompactionTimestampForRegion(
+ RpcController controller, MajorCompactionTimestampForRegionRequest request)
+ throws ServiceException {
+ MajorCompactionTimestampResponse.Builder response =
+ MajorCompactionTimestampResponse.newBuilder();
+ try {
+ master.checkInitialized();
+ response.setCompactionTimestamp(master.getLastMajorCompactionTimestampForRegion(request
+ .getRegion().getValue().toByteArray()));
+ } catch (IOException e) {
+ throw new ServiceException(e);
+ }
+ return response.build();
+ }
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
index 7733256..63f3119 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
@@ -261,4 +261,20 @@ public interface MasterServices extends Server {
* @throws IOException
*/
public List<TableName> listTableNamesByNamespace(String name) throws IOException;
+
+ /**
+ * @param table the table for which the last major compaction time is requested
+ * @return the timestamp of the last successful major compaction for the passed table,
+ * or 0 if no HFile resulting from a major compaction exists
+ * @throws IOException
+ */
+ public long getLastMajorCompactionTimestamp(TableName table) throws IOException;
+
+ /**
+ * @param regionName the region for which the last major compaction time is requested
+ * @return the timestamp of the last successful major compaction for the passed region,
+ * or 0 if no HFile resulting from a major compaction exists
+ * @throws IOException
+ */
+ public long getLastMajorCompactionTimestampForRegion(byte[] regionName) throws IOException;
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index 8e44b39..26f8943 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -117,6 +117,7 @@ import org.apache.hadoop.hbase.io.HeapSize;
import org.apache.hadoop.hbase.io.TimeRange;
import org.apache.hadoop.hbase.io.hfile.BlockCache;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
+import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hbase.ipc.CallerDisconnectedException;
import org.apache.hadoop.hbase.ipc.RpcCallContext;
import org.apache.hadoop.hbase.ipc.RpcServer;
@@ -1494,6 +1495,28 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver { //
return Collections.min(lastStoreFlushTimeMap.values());
}
+ /**
+ * This can be used to determine the last time all files of this region were major compacted.
+ * @param majorCompactionOnly Only consider HFiles that are the result of a major compaction
+ * @return the timestamp of the oldest HFile for all stores of this region
+ */
+ public long getOldestHfileTs(boolean majorCompactionOnly) throws IOException {
+ long result = Long.MAX_VALUE;
+ for (Store store : getStores().values()) {
+ for (StoreFile file : store.getStorefiles()) {
+ HFile.Reader reader = file.getReader().getHFileReader();
+ if (majorCompactionOnly) {
+ byte[] val = reader.loadFileInfo().get(StoreFile.MAJOR_COMPACTION_KEY);
+ if (val == null || !Bytes.toBoolean(val)) {
+ continue;
+ }
+ }
+ result = Math.min(result, reader.getFileContext().getFileCreateTime());
+ }
+ }
+ return result == Long.MAX_VALUE ? 0 : result;
+ }
+
//////////////////////////////////////////////////////////////////////////////
// HRegion maintenance.
//
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
index 5d66933..bc52eb8 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
@@ -1341,7 +1341,7 @@ public class HRegionServer extends HasThread implements
* @throws IOException
*/
private RegionLoad createRegionLoad(final HRegion r, RegionLoad.Builder regionLoadBldr,
- RegionSpecifier.Builder regionSpecifier) {
+ RegionSpecifier.Builder regionSpecifier) throws IOException {
byte[] name = r.getRegionName();
int stores = 0;
int storefiles = 0;
@@ -1403,8 +1403,8 @@ public class HRegionServer extends HasThread implements
.setTotalCompactingKVs(totalCompactingKVs)
.setCurrentCompactedKVs(currentCompactedKVs)
.setCompleteSequenceId(r.maxFlushedSeqId)
- .setDataLocality(dataLocality);
-
+ .setDataLocality(dataLocality)
+ .setLastMajorCompactionTs(r.getOldestHfileTs(true));
return regionLoadBldr.build();
}
@@ -1412,7 +1412,7 @@ public class HRegionServer extends HasThread implements
* @param encodedRegionName
* @return An instance of RegionLoad.
*/
- public RegionLoad createRegionLoad(final String encodedRegionName) {
+ public RegionLoad createRegionLoad(final String encodedRegionName) throws IOException {
HRegion r = null;
r = this.onlineRegions.get(encodedRegionName);
return r != null ? createRegionLoad(r, null, null) : null;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
index 047d689..942b47f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
@@ -991,6 +991,7 @@ public class HStore implements Store {
.withHBaseCheckSum(true)
.withDataBlockEncoding(family.getDataBlockEncoding())
.withEncryptionContext(cryptoContext)
+ .withCreateTime(EnvironmentEdgeManager.currentTime())
.build();
return hFileContext;
}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin1.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin1.java
index 9b55acd..85fbbc6 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin1.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin1.java
@@ -409,6 +409,58 @@ public class TestAdmin1 {
}
@Test (timeout=300000)
+ public void testCompactionTimestamps() throws Exception {
+ HColumnDescriptor fam1 = new HColumnDescriptor("fam1");
+ TableName tableName = TableName.valueOf("testCompactionTimestampsTable");
+ HTableDescriptor htd = new HTableDescriptor(tableName);
+ htd.addFamily(fam1);
+ this.admin.createTable(htd);
+ HTable table = (HTable)TEST_UTIL.getConnection().getTable(htd.getTableName());
+ long ts = this.admin.getLastMajorCompactionTimestamp(tableName);
+ assertEquals(0, ts);
+ Put p = new Put(Bytes.toBytes("row1"));
+ p.add(Bytes.toBytes("fam1"), Bytes.toBytes("fam1"), Bytes.toBytes("fam1"));
+ table.put(p);
+ ts = this.admin.getLastMajorCompactionTimestamp(tableName);
+ // no files written -> no data
+ assertEquals(0, ts);
+
+ this.admin.flush(tableName);
+ ts = this.admin.getLastMajorCompactionTimestamp(tableName);
+ // still 0, we flushed a file, but no major compaction happened
+ assertEquals(0, ts);
+
+ byte[] regionName =
+ table.getRegionLocator().getAllRegionLocations().get(0).getRegionInfo().getRegionName();
+ long ts1 = this.admin.getLastMajorCompactionTimestampForRegion(regionName);
+ assertEquals(ts, ts1);
+ p = new Put(Bytes.toBytes("row2"));
+ p.add(Bytes.toBytes("fam1"), Bytes.toBytes("fam1"), Bytes.toBytes("fam1"));
+ table.put(p);
+ this.admin.flush(tableName);
+ ts = this.admin.getLastMajorCompactionTimestamp(tableName);
+ // make sure the region API returns the same value, as the old file is still around
+ assertEquals(ts1, ts);
+
+ TEST_UTIL.compact(tableName, true);
+ table.put(p);
+ // forces a wait for the compaction
+ this.admin.flush(tableName);
+ ts = this.admin.getLastMajorCompactionTimestamp(tableName);
+ // after a compaction our earliest timestamp will have progressed forward
+ assertTrue(ts > ts1);
+
+ // region api still the same
+ ts1 = this.admin.getLastMajorCompactionTimestampForRegion(regionName);
+ assertEquals(ts, ts1);
+ table.put(p);
+ this.admin.flush(tableName);
+ ts = this.admin.getLastMajorCompactionTimestamp(tableName);
+ assertEquals(ts, ts1);
+ table.close();
+ }
+
+ @Test (timeout=300000)
public void testHColumnValidName() {
boolean exceptionThrown;
try {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java
index cc501ed..fb7752e 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java
@@ -458,6 +458,18 @@ public class TestCatalogJanitor {
// Auto-generated method stub
return false;
}
+
+ @Override
+ public long getLastMajorCompactionTimestamp(TableName table) throws IOException {
+ // Auto-generated method stub
+ return 0;
+ }
+
+ @Override
+ public long getLastMajorCompactionTimestampForRegion(byte[] regionName) throws IOException {
+ // Auto-generated method stub
+ return 0;
+ }
}
@Test