done);
+ /**
+ * rpc getLastMajorCompactionTimestamp(.MajorCompactionTimestampRequest) returns (.MajorCompactionTimestampResponse);
+ *
+ *
+ ** Returns the timestamp of the last major compaction
+ *
+ */
+ public abstract void getLastMajorCompactionTimestamp(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest request,
+ com.google.protobuf.RpcCallback done);
+
+ /**
+ * rpc getLastMajorCompactionTimestampForRegion(.MajorCompactionTimestampForRegionRequest) returns (.MajorCompactionTimestampResponse);
+ *
+ *
+ ** Returns the timestamp of the last major compaction
+ *
+ */
+ public abstract void getLastMajorCompactionTimestampForRegion(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest request,
+ com.google.protobuf.RpcCallback done);
+
}
public static com.google.protobuf.Service newReflectiveService(
@@ -42374,6 +43962,22 @@ public final class MasterProtos {
impl.listTableNamesByNamespace(controller, request, done);
}
+ @java.lang.Override
+ public void getLastMajorCompactionTimestamp(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest request,
+ com.google.protobuf.RpcCallback done) {
+ impl.getLastMajorCompactionTimestamp(controller, request, done);
+ }
+
+ @java.lang.Override
+ public void getLastMajorCompactionTimestampForRegion(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest request,
+ com.google.protobuf.RpcCallback done) {
+ impl.getLastMajorCompactionTimestampForRegion(controller, request, done);
+ }
+
};
}
@@ -42482,6 +44086,10 @@ public final class MasterProtos {
return impl.listTableDescriptorsByNamespace(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceRequest)request);
case 42:
return impl.listTableNamesByNamespace(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceRequest)request);
+ case 43:
+ return impl.getLastMajorCompactionTimestamp(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest)request);
+ case 44:
+ return impl.getLastMajorCompactionTimestampForRegion(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest)request);
default:
throw new java.lang.AssertionError("Can't get here.");
}
@@ -42582,6 +44190,10 @@ public final class MasterProtos {
return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceRequest.getDefaultInstance();
case 42:
return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceRequest.getDefaultInstance();
+ case 43:
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest.getDefaultInstance();
+ case 44:
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest.getDefaultInstance();
default:
throw new java.lang.AssertionError("Can't get here.");
}
@@ -42682,6 +44294,10 @@ public final class MasterProtos {
return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceResponse.getDefaultInstance();
case 42:
return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceResponse.getDefaultInstance();
+ case 43:
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.getDefaultInstance();
+ case 44:
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.getDefaultInstance();
default:
throw new java.lang.AssertionError("Can't get here.");
}
@@ -43232,6 +44848,30 @@ public final class MasterProtos {
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceRequest request,
com.google.protobuf.RpcCallback done);
+ /**
+ * rpc getLastMajorCompactionTimestamp(.MajorCompactionTimestampRequest) returns (.MajorCompactionTimestampResponse);
+ *
+ *
+ ** Returns the timestamp of the last major compaction
+ *
+ */
+ public abstract void getLastMajorCompactionTimestamp(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest request,
+ com.google.protobuf.RpcCallback done);
+
+ /**
+ * rpc getLastMajorCompactionTimestampForRegion(.MajorCompactionTimestampForRegionRequest) returns (.MajorCompactionTimestampResponse);
+ *
+ *
+ ** Returns the timestamp of the last major compaction
+ *
+ */
+ public abstract void getLastMajorCompactionTimestampForRegion(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest request,
+ com.google.protobuf.RpcCallback done);
+
public static final
com.google.protobuf.Descriptors.ServiceDescriptor
getDescriptor() {
@@ -43469,6 +45109,16 @@ public final class MasterProtos {
com.google.protobuf.RpcUtil.specializeCallback(
done));
return;
+ case 43:
+ this.getLastMajorCompactionTimestamp(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest)request,
+ com.google.protobuf.RpcUtil.specializeCallback(
+ done));
+ return;
+ case 44:
+ this.getLastMajorCompactionTimestampForRegion(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest)request,
+ com.google.protobuf.RpcUtil.specializeCallback(
+ done));
+ return;
default:
throw new java.lang.AssertionError("Can't get here.");
}
@@ -43569,6 +45219,10 @@ public final class MasterProtos {
return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceRequest.getDefaultInstance();
case 42:
return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceRequest.getDefaultInstance();
+ case 43:
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest.getDefaultInstance();
+ case 44:
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest.getDefaultInstance();
default:
throw new java.lang.AssertionError("Can't get here.");
}
@@ -43669,6 +45323,10 @@ public final class MasterProtos {
return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceResponse.getDefaultInstance();
case 42:
return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceResponse.getDefaultInstance();
+ case 43:
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.getDefaultInstance();
+ case 44:
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.getDefaultInstance();
default:
throw new java.lang.AssertionError("Can't get here.");
}
@@ -44334,6 +45992,36 @@ public final class MasterProtos {
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceResponse.class,
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceResponse.getDefaultInstance()));
}
+
+ public void getLastMajorCompactionTimestamp(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest request,
+ com.google.protobuf.RpcCallback done) {
+ channel.callMethod(
+ getDescriptor().getMethods().get(43),
+ controller,
+ request,
+ org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.getDefaultInstance(),
+ com.google.protobuf.RpcUtil.generalizeCallback(
+ done,
+ org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.class,
+ org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.getDefaultInstance()));
+ }
+
+ public void getLastMajorCompactionTimestampForRegion(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest request,
+ com.google.protobuf.RpcCallback done) {
+ channel.callMethod(
+ getDescriptor().getMethods().get(44),
+ controller,
+ request,
+ org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.getDefaultInstance(),
+ com.google.protobuf.RpcUtil.generalizeCallback(
+ done,
+ org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.class,
+ org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.getDefaultInstance()));
+ }
}
public static BlockingInterface newBlockingStub(
@@ -44556,6 +46244,16 @@ public final class MasterProtos {
com.google.protobuf.RpcController controller,
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceRequest request)
throws com.google.protobuf.ServiceException;
+
+ public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse getLastMajorCompactionTimestamp(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest request)
+ throws com.google.protobuf.ServiceException;
+
+ public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse getLastMajorCompactionTimestampForRegion(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest request)
+ throws com.google.protobuf.ServiceException;
}
private static final class BlockingStub implements BlockingInterface {
@@ -45080,6 +46778,30 @@ public final class MasterProtos {
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceResponse.getDefaultInstance());
}
+
+ public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse getLastMajorCompactionTimestamp(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest request)
+ throws com.google.protobuf.ServiceException {
+ return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse) channel.callBlockingMethod(
+ getDescriptor().getMethods().get(43),
+ controller,
+ request,
+ org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.getDefaultInstance());
+ }
+
+
+ public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse getLastMajorCompactionTimestampForRegion(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest request)
+ throws com.google.protobuf.ServiceException {
+ return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse) channel.callBlockingMethod(
+ getDescriptor().getMethods().get(44),
+ controller,
+ request,
+ org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.getDefaultInstance());
+ }
+
}
// @@protoc_insertion_point(class_scope:MasterService)
@@ -45495,6 +47217,21 @@ public final class MasterProtos {
private static
com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_IsProcedureDoneResponse_fieldAccessorTable;
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_MajorCompactionTimestampRequest_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_MajorCompactionTimestampRequest_fieldAccessorTable;
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_MajorCompactionTimestampForRegionRequest_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_MajorCompactionTimestampForRegionRequest_fieldAccessorTable;
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_MajorCompactionTimestampResponse_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_MajorCompactionTimestampResponse_fieldAccessorTable;
public static com.google.protobuf.Descriptors.FileDescriptor
getDescriptor() {
@@ -45616,85 +47353,96 @@ public final class MasterProtos {
"t\022(\n\tprocedure\030\001 \001(\0132\025.ProcedureDescript",
"ion\"W\n\027IsProcedureDoneResponse\022\023\n\004done\030\001" +
" \001(\010:\005false\022\'\n\010snapshot\030\002 \001(\0132\025.Procedur" +
- "eDescription2\365\027\n\rMasterService\022S\n\024GetSch" +
- "emaAlterStatus\022\034.GetSchemaAlterStatusReq" +
- "uest\032\035.GetSchemaAlterStatusResponse\022P\n\023G" +
- "etTableDescriptors\022\033.GetTableDescriptors" +
- "Request\032\034.GetTableDescriptorsResponse\022>\n" +
- "\rGetTableNames\022\025.GetTableNamesRequest\032\026." +
- "GetTableNamesResponse\022G\n\020GetClusterStatu" +
- "s\022\030.GetClusterStatusRequest\032\031.GetCluster",
- "StatusResponse\022D\n\017IsMasterRunning\022\027.IsMa" +
- "sterRunningRequest\032\030.IsMasterRunningResp" +
- "onse\0222\n\tAddColumn\022\021.AddColumnRequest\032\022.A" +
- "ddColumnResponse\022;\n\014DeleteColumn\022\024.Delet" +
- "eColumnRequest\032\025.DeleteColumnResponse\022;\n" +
- "\014ModifyColumn\022\024.ModifyColumnRequest\032\025.Mo" +
- "difyColumnResponse\0225\n\nMoveRegion\022\022.MoveR" +
- "egionRequest\032\023.MoveRegionResponse\022Y\n\026Dis" +
- "patchMergingRegions\022\036.DispatchMergingReg" +
- "ionsRequest\032\037.DispatchMergingRegionsResp",
- "onse\022;\n\014AssignRegion\022\024.AssignRegionReque" +
- "st\032\025.AssignRegionResponse\022A\n\016UnassignReg" +
- "ion\022\026.UnassignRegionRequest\032\027.UnassignRe" +
- "gionResponse\022>\n\rOfflineRegion\022\025.OfflineR" +
- "egionRequest\032\026.OfflineRegionResponse\0228\n\013" +
- "DeleteTable\022\023.DeleteTableRequest\032\024.Delet" +
- "eTableResponse\022>\n\rtruncateTable\022\025.Trunca" +
- "teTableRequest\032\026.TruncateTableResponse\0228" +
- "\n\013EnableTable\022\023.EnableTableRequest\032\024.Ena" +
- "bleTableResponse\022;\n\014DisableTable\022\024.Disab",
- "leTableRequest\032\025.DisableTableResponse\0228\n" +
- "\013ModifyTable\022\023.ModifyTableRequest\032\024.Modi" +
- "fyTableResponse\0228\n\013CreateTable\022\023.CreateT" +
- "ableRequest\032\024.CreateTableResponse\022/\n\010Shu" +
- "tdown\022\020.ShutdownRequest\032\021.ShutdownRespon" +
- "se\0225\n\nStopMaster\022\022.StopMasterRequest\032\023.S" +
- "topMasterResponse\022,\n\007Balance\022\017.BalanceRe" +
- "quest\032\020.BalanceResponse\022M\n\022SetBalancerRu" +
- "nning\022\032.SetBalancerRunningRequest\032\033.SetB" +
- "alancerRunningResponse\022A\n\016RunCatalogScan",
- "\022\026.RunCatalogScanRequest\032\027.RunCatalogSca" +
- "nResponse\022S\n\024EnableCatalogJanitor\022\034.Enab" +
- "leCatalogJanitorRequest\032\035.EnableCatalogJ" +
- "anitorResponse\022\\\n\027IsCatalogJanitorEnable" +
- "d\022\037.IsCatalogJanitorEnabledRequest\032 .IsC" +
- "atalogJanitorEnabledResponse\022L\n\021ExecMast" +
- "erService\022\032.CoprocessorServiceRequest\032\033." +
- "CoprocessorServiceResponse\022/\n\010Snapshot\022\020" +
- ".SnapshotRequest\032\021.SnapshotResponse\022V\n\025G" +
- "etCompletedSnapshots\022\035.GetCompletedSnaps",
- "hotsRequest\032\036.GetCompletedSnapshotsRespo" +
- "nse\022A\n\016DeleteSnapshot\022\026.DeleteSnapshotRe" +
- "quest\032\027.DeleteSnapshotResponse\022A\n\016IsSnap" +
- "shotDone\022\026.IsSnapshotDoneRequest\032\027.IsSna" +
- "pshotDoneResponse\022D\n\017RestoreSnapshot\022\027.R" +
- "estoreSnapshotRequest\032\030.RestoreSnapshotR" +
- "esponse\022V\n\025IsRestoreSnapshotDone\022\035.IsRes" +
- "toreSnapshotDoneRequest\032\036.IsRestoreSnaps" +
- "hotDoneResponse\022>\n\rExecProcedure\022\025.ExecP" +
- "rocedureRequest\032\026.ExecProcedureResponse\022",
- "E\n\024ExecProcedureWithRet\022\025.ExecProcedureR" +
- "equest\032\026.ExecProcedureResponse\022D\n\017IsProc" +
- "edureDone\022\027.IsProcedureDoneRequest\032\030.IsP" +
- "rocedureDoneResponse\022D\n\017ModifyNamespace\022" +
- "\027.ModifyNamespaceRequest\032\030.ModifyNamespa" +
- "ceResponse\022D\n\017CreateNamespace\022\027.CreateNa" +
- "mespaceRequest\032\030.CreateNamespaceResponse" +
- "\022D\n\017DeleteNamespace\022\027.DeleteNamespaceReq" +
- "uest\032\030.DeleteNamespaceResponse\022Y\n\026GetNam" +
- "espaceDescriptor\022\036.GetNamespaceDescripto",
- "rRequest\032\037.GetNamespaceDescriptorRespons" +
- "e\022_\n\030ListNamespaceDescriptors\022 .ListName" +
- "spaceDescriptorsRequest\032!.ListNamespaceD" +
- "escriptorsResponse\022t\n\037ListTableDescripto" +
- "rsByNamespace\022\'.ListTableDescriptorsByNa" +
- "mespaceRequest\032(.ListTableDescriptorsByN" +
- "amespaceResponse\022b\n\031ListTableNamesByName" +
- "space\022!.ListTableNamesByNamespaceRequest" +
- "\032\".ListTableNamesByNamespaceResponseBB\n*" +
- "org.apache.hadoop.hbase.protobuf.generat",
- "edB\014MasterProtosH\001\210\001\001\240\001\001"
+ "eDescription\"A\n\037MajorCompactionTimestamp" +
+ "Request\022\036\n\ntable_name\030\001 \002(\0132\n.TableName\"" +
+ "L\n(MajorCompactionTimestampForRegionRequ" +
+ "est\022 \n\006region\030\001 \002(\0132\020.RegionSpecifier\"@\n" +
+ " MajorCompactionTimestampResponse\022\034\n\024com" +
+ "paction_timestamp\030\001 \002(\0032\327\031\n\rMasterServic" +
+ "e\022S\n\024GetSchemaAlterStatus\022\034.GetSchemaAlt" +
+ "erStatusRequest\032\035.GetSchemaAlterStatusRe",
+ "sponse\022P\n\023GetTableDescriptors\022\033.GetTable" +
+ "DescriptorsRequest\032\034.GetTableDescriptors" +
+ "Response\022>\n\rGetTableNames\022\025.GetTableName" +
+ "sRequest\032\026.GetTableNamesResponse\022G\n\020GetC" +
+ "lusterStatus\022\030.GetClusterStatusRequest\032\031" +
+ ".GetClusterStatusResponse\022D\n\017IsMasterRun" +
+ "ning\022\027.IsMasterRunningRequest\032\030.IsMaster" +
+ "RunningResponse\0222\n\tAddColumn\022\021.AddColumn" +
+ "Request\032\022.AddColumnResponse\022;\n\014DeleteCol" +
+ "umn\022\024.DeleteColumnRequest\032\025.DeleteColumn",
+ "Response\022;\n\014ModifyColumn\022\024.ModifyColumnR" +
+ "equest\032\025.ModifyColumnResponse\0225\n\nMoveReg" +
+ "ion\022\022.MoveRegionRequest\032\023.MoveRegionResp" +
+ "onse\022Y\n\026DispatchMergingRegions\022\036.Dispatc" +
+ "hMergingRegionsRequest\032\037.DispatchMerging" +
+ "RegionsResponse\022;\n\014AssignRegion\022\024.Assign" +
+ "RegionRequest\032\025.AssignRegionResponse\022A\n\016" +
+ "UnassignRegion\022\026.UnassignRegionRequest\032\027" +
+ ".UnassignRegionResponse\022>\n\rOfflineRegion" +
+ "\022\025.OfflineRegionRequest\032\026.OfflineRegionR",
+ "esponse\0228\n\013DeleteTable\022\023.DeleteTableRequ" +
+ "est\032\024.DeleteTableResponse\022>\n\rtruncateTab" +
+ "le\022\025.TruncateTableRequest\032\026.TruncateTabl" +
+ "eResponse\0228\n\013EnableTable\022\023.EnableTableRe" +
+ "quest\032\024.EnableTableResponse\022;\n\014DisableTa" +
+ "ble\022\024.DisableTableRequest\032\025.DisableTable" +
+ "Response\0228\n\013ModifyTable\022\023.ModifyTableReq" +
+ "uest\032\024.ModifyTableResponse\0228\n\013CreateTabl" +
+ "e\022\023.CreateTableRequest\032\024.CreateTableResp" +
+ "onse\022/\n\010Shutdown\022\020.ShutdownRequest\032\021.Shu",
+ "tdownResponse\0225\n\nStopMaster\022\022.StopMaster" +
+ "Request\032\023.StopMasterResponse\022,\n\007Balance\022" +
+ "\017.BalanceRequest\032\020.BalanceResponse\022M\n\022Se" +
+ "tBalancerRunning\022\032.SetBalancerRunningReq" +
+ "uest\032\033.SetBalancerRunningResponse\022A\n\016Run" +
+ "CatalogScan\022\026.RunCatalogScanRequest\032\027.Ru" +
+ "nCatalogScanResponse\022S\n\024EnableCatalogJan" +
+ "itor\022\034.EnableCatalogJanitorRequest\032\035.Ena" +
+ "bleCatalogJanitorResponse\022\\\n\027IsCatalogJa" +
+ "nitorEnabled\022\037.IsCatalogJanitorEnabledRe",
+ "quest\032 .IsCatalogJanitorEnabledResponse\022" +
+ "L\n\021ExecMasterService\022\032.CoprocessorServic" +
+ "eRequest\032\033.CoprocessorServiceResponse\022/\n" +
+ "\010Snapshot\022\020.SnapshotRequest\032\021.SnapshotRe" +
+ "sponse\022V\n\025GetCompletedSnapshots\022\035.GetCom" +
+ "pletedSnapshotsRequest\032\036.GetCompletedSna" +
+ "pshotsResponse\022A\n\016DeleteSnapshot\022\026.Delet" +
+ "eSnapshotRequest\032\027.DeleteSnapshotRespons" +
+ "e\022A\n\016IsSnapshotDone\022\026.IsSnapshotDoneRequ" +
+ "est\032\027.IsSnapshotDoneResponse\022D\n\017RestoreS",
+ "napshot\022\027.RestoreSnapshotRequest\032\030.Resto" +
+ "reSnapshotResponse\022V\n\025IsRestoreSnapshotD" +
+ "one\022\035.IsRestoreSnapshotDoneRequest\032\036.IsR" +
+ "estoreSnapshotDoneResponse\022>\n\rExecProced" +
+ "ure\022\025.ExecProcedureRequest\032\026.ExecProcedu" +
+ "reResponse\022E\n\024ExecProcedureWithRet\022\025.Exe" +
+ "cProcedureRequest\032\026.ExecProcedureRespons" +
+ "e\022D\n\017IsProcedureDone\022\027.IsProcedureDoneRe" +
+ "quest\032\030.IsProcedureDoneResponse\022D\n\017Modif" +
+ "yNamespace\022\027.ModifyNamespaceRequest\032\030.Mo",
+ "difyNamespaceResponse\022D\n\017CreateNamespace" +
+ "\022\027.CreateNamespaceRequest\032\030.CreateNamesp" +
+ "aceResponse\022D\n\017DeleteNamespace\022\027.DeleteN" +
+ "amespaceRequest\032\030.DeleteNamespaceRespons" +
+ "e\022Y\n\026GetNamespaceDescriptor\022\036.GetNamespa" +
+ "ceDescriptorRequest\032\037.GetNamespaceDescri" +
+ "ptorResponse\022_\n\030ListNamespaceDescriptors" +
+ "\022 .ListNamespaceDescriptorsRequest\032!.Lis" +
+ "tNamespaceDescriptorsResponse\022t\n\037ListTab" +
+ "leDescriptorsByNamespace\022\'.ListTableDesc",
+ "riptorsByNamespaceRequest\032(.ListTableDes" +
+ "criptorsByNamespaceResponse\022b\n\031ListTable" +
+ "NamesByNamespace\022!.ListTableNamesByNames" +
+ "paceRequest\032\".ListTableNamesByNamespaceR" +
+ "esponse\022f\n\037getLastMajorCompactionTimesta" +
+ "mp\022 .MajorCompactionTimestampRequest\032!.M" +
+ "ajorCompactionTimestampResponse\022x\n(getLa" +
+ "stMajorCompactionTimestampForRegion\022).Ma" +
+ "jorCompactionTimestampForRegionRequest\032!" +
+ ".MajorCompactionTimestampResponseBB\n*org",
+ ".apache.hadoop.hbase.protobuf.generatedB" +
+ "\014MasterProtosH\001\210\001\001\240\001\001"
};
com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
@@ -46193,6 +47941,24 @@ public final class MasterProtos {
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_IsProcedureDoneResponse_descriptor,
new java.lang.String[] { "Done", "Snapshot", });
+ internal_static_MajorCompactionTimestampRequest_descriptor =
+ getDescriptor().getMessageTypes().get(82);
+ internal_static_MajorCompactionTimestampRequest_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_MajorCompactionTimestampRequest_descriptor,
+ new java.lang.String[] { "TableName", });
+ internal_static_MajorCompactionTimestampForRegionRequest_descriptor =
+ getDescriptor().getMessageTypes().get(83);
+ internal_static_MajorCompactionTimestampForRegionRequest_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_MajorCompactionTimestampForRegionRequest_descriptor,
+ new java.lang.String[] { "Region", });
+ internal_static_MajorCompactionTimestampResponse_descriptor =
+ getDescriptor().getMessageTypes().get(84);
+ internal_static_MajorCompactionTimestampResponse_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_MajorCompactionTimestampResponse_descriptor,
+ new java.lang.String[] { "CompactionTimestamp", });
return null;
}
};
diff --git a/hbase-protocol/src/main/protobuf/ClusterStatus.proto b/hbase-protocol/src/main/protobuf/ClusterStatus.proto
index 7e78395..2b2d9eb 100644
--- a/hbase-protocol/src/main/protobuf/ClusterStatus.proto
+++ b/hbase-protocol/src/main/protobuf/ClusterStatus.proto
@@ -113,6 +113,8 @@ message RegionLoad {
/** The current data locality for region in the regionserver */
optional float data_locality = 16;
+
+ optional uint64 last_major_compaction_ts = 17 [default = 0];
}
/* Server-level protobufs */
diff --git a/hbase-protocol/src/main/protobuf/Master.proto b/hbase-protocol/src/main/protobuf/Master.proto
index 34f68e9..170a326 100644
--- a/hbase-protocol/src/main/protobuf/Master.proto
+++ b/hbase-protocol/src/main/protobuf/Master.proto
@@ -364,6 +364,18 @@ message IsProcedureDoneResponse {
optional ProcedureDescription snapshot = 2;
}
+message MajorCompactionTimestampRequest {
+ required TableName table_name = 1;
+}
+
+message MajorCompactionTimestampForRegionRequest {
+ required RegionSpecifier region = 1;
+}
+
+message MajorCompactionTimestampResponse {
+ required int64 compaction_timestamp = 1;
+}
+
service MasterService {
/** Used by the client to get the number of regions that have received the updated schema */
rpc GetSchemaAlterStatus(GetSchemaAlterStatusRequest)
@@ -571,4 +583,12 @@ service MasterService {
/** returns a list of tables for a given namespace*/
rpc ListTableNamesByNamespace(ListTableNamesByNamespaceRequest)
returns(ListTableNamesByNamespaceResponse);
+
+ /** Returns the timestamp of the last major compaction */
+ rpc getLastMajorCompactionTimestamp(MajorCompactionTimestampRequest)
+ returns(MajorCompactionTimestampResponse);
+
+ /** Returns the timestamp of the last major compaction */
+ rpc getLastMajorCompactionTimestampForRegion(MajorCompactionTimestampForRegionRequest)
+ returns(MajorCompactionTimestampResponse);
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/AbstractHFileWriter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/AbstractHFileWriter.java
index 2bef680..52491e6 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/AbstractHFileWriter.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/AbstractHFileWriter.java
@@ -148,6 +148,9 @@ public abstract class AbstractHFileWriter implements HFile.Writer {
int avgValueLen =
entryCount == 0 ? 0 : (int) (totalValueLength / entryCount);
fileInfo.append(FileInfo.AVG_VALUE_LEN, Bytes.toBytes(avgValueLen), false);
+
+ fileInfo.append(FileInfo.CREATE_TIME_TS, Bytes.toBytes(hFileContext.getFileCreateTime()),
+ false);
}
/**
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java
index f938020..f168420 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java
@@ -540,6 +540,7 @@ public class HFile {
static final byte [] LASTKEY = Bytes.toBytes(RESERVED_PREFIX + "LASTKEY");
static final byte [] AVG_KEY_LEN = Bytes.toBytes(RESERVED_PREFIX + "AVG_KEY_LEN");
static final byte [] AVG_VALUE_LEN = Bytes.toBytes(RESERVED_PREFIX + "AVG_VALUE_LEN");
+ static final byte [] CREATE_TIME_TS = Bytes.toBytes(RESERVED_PREFIX + "CREATE_TIME_TS");
static final byte [] COMPARATOR = Bytes.toBytes(RESERVED_PREFIX + "COMPARATOR");
static final byte [] TAGS_COMPRESSED = Bytes.toBytes(RESERVED_PREFIX + "TAGS_COMPRESSED");
public static final byte [] MAX_TAGS_LEN = Bytes.toBytes(RESERVED_PREFIX + "MAX_TAGS_LEN");
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV2.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV2.java
index e466041..26cb6c9 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV2.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV2.java
@@ -157,6 +157,7 @@ public class HFileReaderV2 extends AbstractHFileReader {
// File info
fileInfo = new FileInfo();
fileInfo.read(blockIter.nextBlockWithBlockType(BlockType.FILE_INFO).getByteStream());
+ this.hfileContext.setFileCreateTime(Bytes.toLong(fileInfo.get(FileInfo.CREATE_TIME_TS)));
lastKey = fileInfo.get(FileInfo.LASTKEY);
avgKeyLen = Bytes.toInt(fileInfo.get(FileInfo.AVG_KEY_LEN));
avgValueLen = Bytes.toInt(fileInfo.get(FileInfo.AVG_VALUE_LEN));
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index 775f018..2617c85 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -2305,4 +2305,14 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
}
}
}
+
+ @Override
+ public long getLastMajorCompactionTimestamp(TableName table) throws IOException {
+ return getClusterStatus().getLastMajorCompactionTsForTable(table);
+ }
+
+ @Override
+ public long getLastMajorCompactionTimestampForRegion(byte[] regionName) throws IOException {
+ return getClusterStatus().getLastMajorCompactionTsForRegion(regionName);
+ }
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
index d8a71ff..81424bf 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
@@ -106,6 +106,9 @@ import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescript
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceResponse;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MasterService;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyColumnRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyColumnResponse;
@@ -1243,4 +1246,35 @@ public class MasterRpcServices extends RSRpcServices
throw new ServiceException(ioe);
}
}
+
+ @Override
+ public MajorCompactionTimestampResponse getLastMajorCompactionTimestamp(RpcController controller,
+ MajorCompactionTimestampRequest request) throws ServiceException {
+ MajorCompactionTimestampResponse.Builder response =
+ MajorCompactionTimestampResponse.newBuilder();
+ try {
+ master.checkInitialized();
+ response.setCompactionTimestamp(master.getLastMajorCompactionTimestamp(ProtobufUtil
+ .toTableName(request.getTableName())));
+ } catch (IOException e) {
+ throw new ServiceException(e);
+ }
+ return response.build();
+ }
+
+ @Override
+ public MajorCompactionTimestampResponse getLastMajorCompactionTimestampForRegion(
+ RpcController controller, MajorCompactionTimestampForRegionRequest request)
+ throws ServiceException {
+ MajorCompactionTimestampResponse.Builder response =
+ MajorCompactionTimestampResponse.newBuilder();
+ try {
+ master.checkInitialized();
+ response.setCompactionTimestamp(master.getLastMajorCompactionTimestampForRegion(request
+ .getRegion().getValue().toByteArray()));
+ } catch (IOException e) {
+ throw new ServiceException(e);
+ }
+ return response.build();
+ }
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
index 627b3c5..458e53c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
@@ -250,4 +250,20 @@ public interface MasterServices extends Server {
* @throws IOException
*/
public List listTableNamesByNamespace(String name) throws IOException;
+
+ /**
+ * @param table the table to query
+ * @return the timestamp of the last successful major compaction for the passed table,
+ * or 0 if no HFile resulting from a major compaction exists
+ * @throws IOException if the timestamp cannot be determined
+ */
+ public long getLastMajorCompactionTimestamp(TableName table) throws IOException;
+
+ /**
+ * @param regionName the name of the region to query
+ * @return the timestamp of the last successful major compaction for the passed region,
+ * or 0 if no HFile resulting from a major compaction exists
+ * @throws IOException if the timestamp cannot be determined
+ */
+ public long getLastMajorCompactionTimestampForRegion(byte[] regionName) throws IOException;
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index 758af86..15270b3 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -118,6 +118,7 @@ import org.apache.hadoop.hbase.io.HeapSize;
import org.apache.hadoop.hbase.io.TimeRange;
import org.apache.hadoop.hbase.io.hfile.BlockCache;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
+import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hbase.ipc.CallerDisconnectedException;
import org.apache.hadoop.hbase.ipc.RpcCallContext;
import org.apache.hadoop.hbase.ipc.RpcServer;
@@ -1492,6 +1493,28 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver { //
return Collections.min(lastStoreFlushTimeMap.values());
}
+ /**
+ * Determines the last time all files of this region were major compacted.
+ * @param majorCompactionOnly Only consider HFiles that are the result of a major compaction
+ * @return the timestamp of the oldest HFile for all stores of this region, or 0 if none qualify
+ */
+ public long getOldestHfileTs(boolean majorCompactionOnly) throws IOException {
+ long result = Long.MAX_VALUE;
+ for (Store store : getStores().values()) {
+ for (StoreFile file : store.getStorefiles()) {
+ HFile.Reader reader = file.getReader().getHFileReader();
+ if (majorCompactionOnly) {
+ byte[] val = reader.loadFileInfo().get(StoreFile.MAJOR_COMPACTION_KEY);
+ if (val == null || !Bytes.toBoolean(val)) {
+ continue; // not produced by a major compaction; skip this file
+ }
+ }
+ result = Math.min(result, reader.getFileContext().getFileCreateTime());
+ }
+ }
+ return result == Long.MAX_VALUE ? 0 : result;
+ }
+
//////////////////////////////////////////////////////////////////////////////
// HRegion maintenance.
//
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
index 484d437..a76c9c6 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
@@ -1328,7 +1328,7 @@ public class HRegionServer extends HasThread implements
* @throws IOException
*/
private RegionLoad createRegionLoad(final HRegion r, RegionLoad.Builder regionLoadBldr,
- RegionSpecifier.Builder regionSpecifier) {
+ RegionSpecifier.Builder regionSpecifier) throws IOException {
byte[] name = r.getRegionName();
int stores = 0;
int storefiles = 0;
@@ -1390,8 +1390,8 @@ public class HRegionServer extends HasThread implements
.setTotalCompactingKVs(totalCompactingKVs)
.setCurrentCompactedKVs(currentCompactedKVs)
.setCompleteSequenceId(r.maxFlushedSeqId)
- .setDataLocality(dataLocality);
-
+ .setDataLocality(dataLocality)
+ .setLastMajorCompactionTs(r.getOldestHfileTs(true));
return regionLoadBldr.build();
}
@@ -1399,7 +1399,7 @@ public class HRegionServer extends HasThread implements
* @param encodedRegionName
* @return An instance of RegionLoad.
*/
- public RegionLoad createRegionLoad(final String encodedRegionName) {
+ public RegionLoad createRegionLoad(final String encodedRegionName) throws IOException {
HRegion r = null;
r = this.onlineRegions.get(encodedRegionName);
return r != null ? createRegionLoad(r, null, null) : null;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
index 6a65038..94cf9d5 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
@@ -991,6 +991,7 @@ public class HStore implements Store {
.withHBaseCheckSum(true)
.withDataBlockEncoding(family.getDataBlockEncoding())
.withEncryptionContext(cryptoContext)
+ .withCreateTime(EnvironmentEdgeManager.currentTime())
.build();
return hFileContext;
}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin1.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin1.java
index e7d8da2..400d929 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin1.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin1.java
@@ -412,6 +412,58 @@ public class TestAdmin1 {
}
@Test (timeout=300000)
+ public void testCompactionTimestamps() throws Exception {
+ HColumnDescriptor fam1 = new HColumnDescriptor("fam1");
+ TableName tableName = TableName.valueOf("testCompactionTimestampsTable");
+ HTableDescriptor htd = new HTableDescriptor(tableName);
+ htd.addFamily(fam1);
+ this.admin.createTable(htd);
+ HTable table = (HTable)TEST_UTIL.getConnection().getTable(htd.getTableName());
+ long ts = this.admin.getLastMajorCompactionTimestamp(tableName);
+ assertEquals(0, ts); // freshly created table: no HFiles, no compaction timestamp
+ Put p = new Put(Bytes.toBytes("row1"));
+ p.add(Bytes.toBytes("fam1"), Bytes.toBytes("fam1"), Bytes.toBytes("fam1"));
+ table.put(p);
+ ts = this.admin.getLastMajorCompactionTimestamp(tableName);
+ // put is only in the memstore; no HFile written yet, so still no data
+ assertEquals(0, ts);
+
+ this.admin.flush(tableName);
+ ts = this.admin.getLastMajorCompactionTimestamp(tableName);
+ // still 0: the flush wrote an HFile, but no major compaction happened
+ assertEquals(0, ts);
+
+ byte[] regionName =
+ table.getRegionLocator().getAllRegionLocations().get(0).getRegionInfo().getRegionName();
+ long ts1 = this.admin.getLastMajorCompactionTimestampForRegion(regionName);
+ assertEquals(ts, ts1); // single-region table: region API must agree with table API
+ p = new Put(Bytes.toBytes("row2"));
+ p.add(Bytes.toBytes("fam1"), Bytes.toBytes("fam1"), Bytes.toBytes("fam1"));
+ table.put(p);
+ this.admin.flush(tableName);
+ ts = this.admin.getLastMajorCompactionTimestamp(tableName);
+ // make sure the region API returns the same value, as the old file is still around
+ assertEquals(ts1, ts);
+
+ TEST_UTIL.compact(tableName, true); // 'true' presumably requests a major compaction — confirm
+ table.put(p);
+ // the flush below forces a wait for the compaction to finish
+ this.admin.flush(tableName);
+ ts = this.admin.getLastMajorCompactionTimestamp(tableName);
+ // after a major compaction our earliest HFile timestamp will have progressed forward
+ assertTrue(ts > ts1);
+
+ // region api still the same
+ ts1 = this.admin.getLastMajorCompactionTimestampForRegion(regionName);
+ assertEquals(ts, ts1);
+ table.put(p);
+ this.admin.flush(tableName);
+ ts = this.admin.getLastMajorCompactionTimestamp(tableName);
+ assertEquals(ts, ts1); // flushes alone must not advance the major compaction timestamp
+ table.close();
+ }
+
+ @Test (timeout=300000)
public void testHColumnValidName() {
boolean exceptionThrown;
try {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java
index 912c600..e4c1dbb 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java
@@ -427,6 +427,18 @@ public class TestCatalogJanitor {
// Auto-generated method stub
return false;
}
+
+ @Override
+ public long getLastMajorCompactionTimestamp(TableName table) throws IOException {
+ // Test stub: this mock does not track compactions, report "none" (0)
+ return 0;
+ }
+
+ @Override
+ public long getLastMajorCompactionTimestampForRegion(byte[] regionName) throws IOException {
+ // Test stub: this mock does not track compactions, report "none" (0)
+ return 0;
+ }
}
@Test