diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterStatus.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterStatus.java
index b93312a..2f88a48 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterStatus.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterStatus.java
@@ -268,6 +268,18 @@ public class ClusterStatus extends VersionedWritable {
return masterCoprocessors;
}
+ public long getOldestHFileTs(TableName table) {
+ long result = Long.MAX_VALUE;
+ for (ServerName server : getServers()) {
+ ServerLoad load = getLoad(server);
+ for (RegionLoad rl : load.getRegionsLoad().values()) {
+ if (table.equals(HRegionInfo.getTable(rl.getName()))) {
+ result = Math.min(result, rl.getOldestHFileTs());
+ }
+ }
+ }
+ return result == Long.MAX_VALUE ? 0 : result;
+ }
public boolean isBalancerOn() {
return balancerOn != null && balancerOn;
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionLoad.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionLoad.java
index 234c5ae..fa7cbb9 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionLoad.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionLoad.java
@@ -169,6 +169,14 @@ public class RegionLoad {
}
return 0.0f;
}
+
+ /**
+ * @return the timestamp of the oldest hfile for any store of this region.
+ */
+ public long getOldestHFileTs() {
+ return regionLoadPB.getOldestHfileTs();
+ }
+
/**
* @see java.lang.Object#toString()
*/
@@ -179,7 +187,9 @@ public class RegionLoad {
sb = Strings.appendKeyValue(sb, "numberOfStorefiles",
this.getStorefiles());
sb = Strings.appendKeyValue(sb, "storefileUncompressedSizeMB",
- this.getStoreUncompressedSizeMB());
+ this.getStoreUncompressedSizeMB());
+ sb = Strings.appendKeyValue(sb, "oldestHFileTimestamp",
+ this.getOldestHFileTs());
sb = Strings.appendKeyValue(sb, "storefileSizeMB",
this.getStorefileSizeMB());
if (this.getStoreUncompressedSizeMB() != 0) {
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileContext.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileContext.java
index ce8b71a..24892e6 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileContext.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileContext.java
@@ -56,6 +56,7 @@ public class HFileContext implements HeapSize, Cloneable {
private DataBlockEncoding encoding = DataBlockEncoding.NONE;
/** Encryption algorithm and key used */
private Encryption.Context cryptoContext = Encryption.Context.NONE;
+ private long fileCreateTime;
//Empty constructor. Go with setters
public HFileContext() {
@@ -76,12 +77,13 @@ public class HFileContext implements HeapSize, Cloneable {
this.blocksize = context.blocksize;
this.encoding = context.encoding;
this.cryptoContext = context.cryptoContext;
+ this.fileCreateTime = context.fileCreateTime;
}
public HFileContext(boolean useHBaseChecksum, boolean includesMvcc, boolean includesTags,
Compression.Algorithm compressAlgo, boolean compressTags, ChecksumType checksumType,
int bytesPerChecksum, int blockSize, DataBlockEncoding encoding,
- Encryption.Context cryptoContext) {
+ Encryption.Context cryptoContext, long fileCreateTime) {
this.usesHBaseChecksum = useHBaseChecksum;
this.includesMvcc = includesMvcc;
this.includesTags = includesTags;
@@ -94,6 +96,7 @@ public class HFileContext implements HeapSize, Cloneable {
this.encoding = encoding;
}
this.cryptoContext = cryptoContext;
+ this.fileCreateTime = fileCreateTime;
}
/**
@@ -141,6 +144,10 @@ public class HFileContext implements HeapSize, Cloneable {
this.includesTags = includesTags;
}
+ public void setFileCreateTime(long fileCreateTime) {
+ this.fileCreateTime = fileCreateTime;
+ }
+
public boolean isCompressTags() {
return compressTags;
}
@@ -161,6 +168,10 @@ public class HFileContext implements HeapSize, Cloneable {
return blocksize;
}
+ public long getFileCreateTime() {
+ return fileCreateTime;
+ }
+
public DataBlockEncoding getDataBlockEncoding() {
return encoding;
}
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileContextBuilder.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileContextBuilder.java
index 5c5d75f..0d1e6ef 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileContextBuilder.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileContextBuilder.java
@@ -52,6 +52,7 @@ public class HFileContextBuilder {
private DataBlockEncoding encoding = DataBlockEncoding.NONE;
/** Crypto context */
private Encryption.Context cryptoContext = Encryption.Context.NONE;
+ private long fileCreateTime = 0;
public HFileContextBuilder withHBaseCheckSum(boolean useHBaseCheckSum) {
this.usesHBaseChecksum = useHBaseCheckSum;
@@ -103,8 +104,14 @@ public class HFileContextBuilder {
return this;
}
+ public HFileContextBuilder withCreateTime(long fileCreateTime) {
+ this.fileCreateTime = fileCreateTime;
+ return this;
+ }
+
public HFileContext build() {
return new HFileContext(usesHBaseChecksum, includesMvcc, includesTags, compression,
- compressTags, checksumType, bytesPerChecksum, blocksize, encoding, cryptoContext);
+ compressTags, checksumType, bytesPerChecksum, blocksize, encoding, cryptoContext,
+ fileCreateTime);
}
}
diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClusterStatusProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClusterStatusProtos.java
index 5bc44ff..e5abb88 100644
--- a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClusterStatusProtos.java
+++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClusterStatusProtos.java
@@ -2171,6 +2171,16 @@ public final class ClusterStatusProtos {
*
*/
float getDataLocality();
+
+ // optional uint64 oldest_hfile_ts = 17 [default = 0];
+ /**
+ * optional uint64 oldest_hfile_ts = 17 [default = 0];
+ */
+ boolean hasOldestHfileTs();
+ /**
+ * optional uint64 oldest_hfile_ts = 17 [default = 0];
+ */
+ long getOldestHfileTs();
}
/**
* Protobuf type {@code RegionLoad}
@@ -2311,6 +2321,11 @@ public final class ClusterStatusProtos {
dataLocality_ = input.readFloat();
break;
}
+ case 136: {
+ bitField0_ |= 0x00010000;
+ oldestHfileTs_ = input.readUInt64();
+ break;
+ }
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
@@ -2753,6 +2768,22 @@ public final class ClusterStatusProtos {
return dataLocality_;
}
+ // optional uint64 oldest_hfile_ts = 17 [default = 0];
+ public static final int OLDEST_HFILE_TS_FIELD_NUMBER = 17;
+ private long oldestHfileTs_;
+ /**
+ * optional uint64 oldest_hfile_ts = 17 [default = 0];
+ */
+ public boolean hasOldestHfileTs() {
+ return ((bitField0_ & 0x00010000) == 0x00010000);
+ }
+ /**
+ * optional uint64 oldest_hfile_ts = 17 [default = 0];
+ */
+ public long getOldestHfileTs() {
+ return oldestHfileTs_;
+ }
+
private void initFields() {
regionSpecifier_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance();
stores_ = 0;
@@ -2770,6 +2801,7 @@ public final class ClusterStatusProtos {
totalStaticBloomSizeKB_ = 0;
completeSequenceId_ = 0L;
dataLocality_ = 0F;
+ oldestHfileTs_ = 0L;
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
@@ -2839,6 +2871,9 @@ public final class ClusterStatusProtos {
if (((bitField0_ & 0x00008000) == 0x00008000)) {
output.writeFloat(16, dataLocality_);
}
+ if (((bitField0_ & 0x00010000) == 0x00010000)) {
+ output.writeUInt64(17, oldestHfileTs_);
+ }
getUnknownFields().writeTo(output);
}
@@ -2912,6 +2947,10 @@ public final class ClusterStatusProtos {
size += com.google.protobuf.CodedOutputStream
.computeFloatSize(16, dataLocality_);
}
+ if (((bitField0_ & 0x00010000) == 0x00010000)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeUInt64Size(17, oldestHfileTs_);
+ }
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
@@ -3014,6 +3053,11 @@ public final class ClusterStatusProtos {
if (hasDataLocality()) {
result = result && (Float.floatToIntBits(getDataLocality()) == Float.floatToIntBits(other.getDataLocality()));
}
+ result = result && (hasOldestHfileTs() == other.hasOldestHfileTs());
+ if (hasOldestHfileTs()) {
+ result = result && (getOldestHfileTs()
+ == other.getOldestHfileTs());
+ }
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
@@ -3092,6 +3136,10 @@ public final class ClusterStatusProtos {
hash = (53 * hash) + Float.floatToIntBits(
getDataLocality());
}
+ if (hasOldestHfileTs()) {
+ hash = (37 * hash) + OLDEST_HFILE_TS_FIELD_NUMBER;
+ hash = (53 * hash) + hashLong(getOldestHfileTs());
+ }
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
@@ -3238,6 +3286,8 @@ public final class ClusterStatusProtos {
bitField0_ = (bitField0_ & ~0x00004000);
dataLocality_ = 0F;
bitField0_ = (bitField0_ & ~0x00008000);
+ oldestHfileTs_ = 0L;
+ bitField0_ = (bitField0_ & ~0x00010000);
return this;
}
@@ -3334,6 +3384,10 @@ public final class ClusterStatusProtos {
to_bitField0_ |= 0x00008000;
}
result.dataLocality_ = dataLocality_;
+ if (((from_bitField0_ & 0x00010000) == 0x00010000)) {
+ to_bitField0_ |= 0x00010000;
+ }
+ result.oldestHfileTs_ = oldestHfileTs_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
@@ -3398,6 +3452,9 @@ public final class ClusterStatusProtos {
if (other.hasDataLocality()) {
setDataLocality(other.getDataLocality());
}
+ if (other.hasOldestHfileTs()) {
+ setOldestHfileTs(other.getOldestHfileTs());
+ }
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
@@ -4337,6 +4394,39 @@ public final class ClusterStatusProtos {
return this;
}
+ // optional uint64 oldest_hfile_ts = 17 [default = 0];
+ private long oldestHfileTs_ ;
+ /**
+ * optional uint64 oldest_hfile_ts = 17 [default = 0];
+ */
+ public boolean hasOldestHfileTs() {
+ return ((bitField0_ & 0x00010000) == 0x00010000);
+ }
+ /**
+ * optional uint64 oldest_hfile_ts = 17 [default = 0];
+ */
+ public long getOldestHfileTs() {
+ return oldestHfileTs_;
+ }
+ /**
+ * optional uint64 oldest_hfile_ts = 17 [default = 0];
+ */
+ public Builder setOldestHfileTs(long value) {
+ bitField0_ |= 0x00010000;
+ oldestHfileTs_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * optional uint64 oldest_hfile_ts = 17 [default = 0];
+ */
+ public Builder clearOldestHfileTs() {
+ bitField0_ = (bitField0_ & ~0x00010000);
+ oldestHfileTs_ = 0L;
+ onChanged();
+ return this;
+ }
+
// @@protoc_insertion_point(builder_scope:RegionLoad)
}
@@ -10472,7 +10562,7 @@ public final class ClusterStatusProtos {
"PLITTING_NEW\020\r\022\017\n\013MERGING_NEW\020\016\"X\n\022Regio",
"nInTransition\022\036\n\004spec\030\001 \002(\0132\020.RegionSpec" +
"ifier\022\"\n\014region_state\030\002 \002(\0132\014.RegionStat" +
- "e\"\347\003\n\nRegionLoad\022*\n\020region_specifier\030\001 \002" +
+ "e\"\203\004\n\nRegionLoad\022*\n\020region_specifier\030\001 \002" +
"(\0132\020.RegionSpecifier\022\016\n\006stores\030\002 \001(\r\022\022\n\n" +
"storefiles\030\003 \001(\r\022\"\n\032store_uncompressed_s" +
"ize_MB\030\004 \001(\r\022\031\n\021storefile_size_MB\030\005 \001(\r\022" +
@@ -10484,26 +10574,27 @@ public final class ClusterStatusProtos {
"\030\014 \001(\r\022\"\n\032total_static_index_size_KB\030\r \001" +
"(\r\022\"\n\032total_static_bloom_size_KB\030\016 \001(\r\022\034" +
"\n\024complete_sequence_id\030\017 \001(\004\022\025\n\rdata_loc" +
- "ality\030\020 \001(\002\"\212\002\n\nServerLoad\022\032\n\022number_of_" +
- "requests\030\001 \001(\r\022 \n\030total_number_of_reques" +
- "ts\030\002 \001(\r\022\024\n\014used_heap_MB\030\003 \001(\r\022\023\n\013max_he" +
- "ap_MB\030\004 \001(\r\022!\n\014region_loads\030\005 \003(\0132\013.Regi" +
- "onLoad\022\"\n\014coprocessors\030\006 \003(\0132\014.Coprocess" +
- "or\022\031\n\021report_start_time\030\007 \001(\004\022\027\n\017report_",
- "end_time\030\010 \001(\004\022\030\n\020info_server_port\030\t \001(\r" +
- "\"O\n\016LiveServerInfo\022\033\n\006server\030\001 \002(\0132\013.Ser" +
- "verName\022 \n\013server_load\030\002 \002(\0132\013.ServerLoa" +
- "d\"\340\002\n\rClusterStatus\022/\n\rhbase_version\030\001 \001" +
- "(\0132\030.HBaseVersionFileContent\022%\n\014live_ser" +
- "vers\030\002 \003(\0132\017.LiveServerInfo\022!\n\014dead_serv" +
- "ers\030\003 \003(\0132\013.ServerName\0222\n\025regions_in_tra" +
- "nsition\030\004 \003(\0132\023.RegionInTransition\022\036\n\ncl" +
- "uster_id\030\005 \001(\0132\n.ClusterId\022)\n\023master_cop" +
- "rocessors\030\006 \003(\0132\014.Coprocessor\022\033\n\006master\030",
- "\007 \001(\0132\013.ServerName\022#\n\016backup_masters\030\010 \003" +
- "(\0132\013.ServerName\022\023\n\013balancer_on\030\t \001(\010BF\n*" +
- "org.apache.hadoop.hbase.protobuf.generat" +
- "edB\023ClusterStatusProtosH\001\240\001\001"
+ "ality\030\020 \001(\002\022\032\n\017oldest_hfile_ts\030\021 \001(\004:\0010\"" +
+ "\212\002\n\nServerLoad\022\032\n\022number_of_requests\030\001 \001" +
+ "(\r\022 \n\030total_number_of_requests\030\002 \001(\r\022\024\n\014" +
+ "used_heap_MB\030\003 \001(\r\022\023\n\013max_heap_MB\030\004 \001(\r\022" +
+ "!\n\014region_loads\030\005 \003(\0132\013.RegionLoad\022\"\n\014co" +
+ "processors\030\006 \003(\0132\014.Coprocessor\022\031\n\021report",
+ "_start_time\030\007 \001(\004\022\027\n\017report_end_time\030\010 \001" +
+ "(\004\022\030\n\020info_server_port\030\t \001(\r\"O\n\016LiveServ" +
+ "erInfo\022\033\n\006server\030\001 \002(\0132\013.ServerName\022 \n\013s" +
+ "erver_load\030\002 \002(\0132\013.ServerLoad\"\340\002\n\rCluste" +
+ "rStatus\022/\n\rhbase_version\030\001 \001(\0132\030.HBaseVe" +
+ "rsionFileContent\022%\n\014live_servers\030\002 \003(\0132\017" +
+ ".LiveServerInfo\022!\n\014dead_servers\030\003 \003(\0132\013." +
+ "ServerName\0222\n\025regions_in_transition\030\004 \003(" +
+ "\0132\023.RegionInTransition\022\036\n\ncluster_id\030\005 \001" +
+ "(\0132\n.ClusterId\022)\n\023master_coprocessors\030\006 ",
+ "\003(\0132\014.Coprocessor\022\033\n\006master\030\007 \001(\0132\013.Serv" +
+ "erName\022#\n\016backup_masters\030\010 \003(\0132\013.ServerN" +
+ "ame\022\023\n\013balancer_on\030\t \001(\010BF\n*org.apache.h" +
+ "adoop.hbase.protobuf.generatedB\023ClusterS" +
+ "tatusProtosH\001\240\001\001"
};
com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
@@ -10527,7 +10618,7 @@ public final class ClusterStatusProtos {
internal_static_RegionLoad_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_RegionLoad_descriptor,
- new java.lang.String[] { "RegionSpecifier", "Stores", "Storefiles", "StoreUncompressedSizeMB", "StorefileSizeMB", "MemstoreSizeMB", "StorefileIndexSizeMB", "ReadRequestsCount", "WriteRequestsCount", "TotalCompactingKVs", "CurrentCompactedKVs", "RootIndexSizeKB", "TotalStaticIndexSizeKB", "TotalStaticBloomSizeKB", "CompleteSequenceId", "DataLocality", });
+ new java.lang.String[] { "RegionSpecifier", "Stores", "Storefiles", "StoreUncompressedSizeMB", "StorefileSizeMB", "MemstoreSizeMB", "StorefileIndexSizeMB", "ReadRequestsCount", "WriteRequestsCount", "TotalCompactingKVs", "CurrentCompactedKVs", "RootIndexSizeKB", "TotalStaticIndexSizeKB", "TotalStaticBloomSizeKB", "CompleteSequenceId", "DataLocality", "OldestHfileTs", });
internal_static_ServerLoad_descriptor =
getDescriptor().getMessageTypes().get(3);
internal_static_ServerLoad_fieldAccessorTable = new
diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/FilterProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/FilterProtos.java
index 3fb466d..af1f33d 100644
--- a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/FilterProtos.java
+++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/FilterProtos.java
@@ -16611,7 +16611,7 @@ public final class FilterProtos {
/**
* repeated .RowRange row_range_list = 1;
*/
- java.util.List extends org.apache.hadoop.hbase.protobuf.generated.FilterProtos.RowRangeOrBuilder>
+ java.util.List extends org.apache.hadoop.hbase.protobuf.generated.FilterProtos.RowRangeOrBuilder>
getRowRangeListOrBuilderList();
/**
* repeated .RowRange row_range_list = 1;
@@ -17270,12 +17270,12 @@ public final class FilterProtos {
/**
* repeated .RowRange row_range_list = 1;
*/
- public java.util.List
+ public java.util.List
getRowRangeListBuilderList() {
return getRowRangeListFieldBuilder().getBuilderList();
}
private com.google.protobuf.RepeatedFieldBuilder<
- org.apache.hadoop.hbase.protobuf.generated.FilterProtos.RowRange, org.apache.hadoop.hbase.protobuf.generated.FilterProtos.RowRange.Builder, org.apache.hadoop.hbase.protobuf.generated.FilterProtos.RowRangeOrBuilder>
+ org.apache.hadoop.hbase.protobuf.generated.FilterProtos.RowRange, org.apache.hadoop.hbase.protobuf.generated.FilterProtos.RowRange.Builder, org.apache.hadoop.hbase.protobuf.generated.FilterProtos.RowRangeOrBuilder>
getRowRangeListFieldBuilder() {
if (rowRangeListBuilder_ == null) {
rowRangeListBuilder_ = new com.google.protobuf.RepeatedFieldBuilder<
diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/VisibilityLabelsProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/VisibilityLabelsProtos.java
index 294772e..70593b0 100644
--- a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/VisibilityLabelsProtos.java
+++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/VisibilityLabelsProtos.java
@@ -5092,7 +5092,7 @@ public final class VisibilityLabelsProtos {
if (ref instanceof java.lang.String) {
return (java.lang.String) ref;
} else {
- com.google.protobuf.ByteString bs =
+ com.google.protobuf.ByteString bs =
(com.google.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
@@ -5108,7 +5108,7 @@ public final class VisibilityLabelsProtos {
getRegexBytes() {
java.lang.Object ref = regex_;
if (ref instanceof java.lang.String) {
- com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
regex_ = b;
@@ -5414,7 +5414,7 @@ public final class VisibilityLabelsProtos {
getRegexBytes() {
java.lang.Object ref = regex_;
if (ref instanceof String) {
- com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
regex_ = b;
diff --git a/hbase-protocol/src/main/protobuf/ClusterStatus.proto b/hbase-protocol/src/main/protobuf/ClusterStatus.proto
index 7e78395..a3872ca 100644
--- a/hbase-protocol/src/main/protobuf/ClusterStatus.proto
+++ b/hbase-protocol/src/main/protobuf/ClusterStatus.proto
@@ -113,6 +113,8 @@ message RegionLoad {
/** The current data locality for region in the regionserver */
optional float data_locality = 16;
+
+ optional uint64 oldest_hfile_ts = 17 [default = 0];
}
/* Server-level protobufs */
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/AbstractHFileWriter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/AbstractHFileWriter.java
index 2bef680..d57e618 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/AbstractHFileWriter.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/AbstractHFileWriter.java
@@ -148,6 +148,8 @@ public abstract class AbstractHFileWriter implements HFile.Writer {
int avgValueLen =
entryCount == 0 ? 0 : (int) (totalValueLength / entryCount);
fileInfo.append(FileInfo.AVG_VALUE_LEN, Bytes.toBytes(avgValueLen), false);
+
+ fileInfo.append(FileInfo.CREATE_TIME_TS, Bytes.toBytes(hFileContext.getFileCreateTime()), false);
}
/**
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java
index 1e97f63..ad62d71 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java
@@ -541,6 +541,7 @@ public class HFile {
static final byte [] LASTKEY = Bytes.toBytes(RESERVED_PREFIX + "LASTKEY");
static final byte [] AVG_KEY_LEN = Bytes.toBytes(RESERVED_PREFIX + "AVG_KEY_LEN");
static final byte [] AVG_VALUE_LEN = Bytes.toBytes(RESERVED_PREFIX + "AVG_VALUE_LEN");
+ static final byte [] CREATE_TIME_TS = Bytes.toBytes(RESERVED_PREFIX + "CREATE_TIME_TS");
static final byte [] COMPARATOR = Bytes.toBytes(RESERVED_PREFIX + "COMPARATOR");
static final byte [] TAGS_COMPRESSED = Bytes.toBytes(RESERVED_PREFIX + "TAGS_COMPRESSED");
public static final byte [] MAX_TAGS_LEN = Bytes.toBytes(RESERVED_PREFIX + "MAX_TAGS_LEN");
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV2.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV2.java
index d58ca10..f33fca4 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV2.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV2.java
@@ -157,6 +157,7 @@ public class HFileReaderV2 extends AbstractHFileReader {
// File info
fileInfo = new FileInfo();
fileInfo.read(blockIter.nextBlockWithBlockType(BlockType.FILE_INFO).getByteStream());
+    this.hfileContext.setFileCreateTime(fileInfo.get(FileInfo.CREATE_TIME_TS) == null ? 0 : Bytes.toLong(fileInfo.get(FileInfo.CREATE_TIME_TS)));
lastKey = fileInfo.get(FileInfo.LASTKEY);
avgKeyLen = Bytes.toInt(fileInfo.get(FileInfo.AVG_KEY_LEN));
avgValueLen = Bytes.toInt(fileInfo.get(FileInfo.AVG_VALUE_LEN));
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index dd1cf8d..a63675a 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -1493,6 +1493,21 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver { //
return Collections.min(lastStoreFlushTimeMap.values());
}
+ /**
+   * This can be used to determine when all the files of this region were last major compacted.
+ *
+ * @return the timestamp of the oldest HFile for all stores of this region
+ */
+ public long getOldestHFileTs() {
+ long result = Long.MAX_VALUE;
+ for (Store store : getStores().values()) {
+ for (StoreFile file : store.getStorefiles()) {
+        if (file.getReader() != null) result = Math.min(result, file.getReader().getHFileReader().getFileContext().getFileCreateTime());
+ }
+ }
+ return result == Long.MAX_VALUE ? 0 : result;
+ }
+
//////////////////////////////////////////////////////////////////////////////
// HRegion maintenance.
//
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
index 4669f8f..54f2744 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
@@ -1344,9 +1344,13 @@ public class HRegionServer extends HasThread implements
int totalStaticBloomSizeKB = 0;
long totalCompactingKVs = 0;
long currentCompactedKVs = 0;
+ long oldestHFileTs = Long.MAX_VALUE;
synchronized (r.stores) {
stores += r.stores.size();
for (Store store : r.stores.values()) {
+ for (StoreFile file : store.getStorefiles()) {
+          if (file.getReader() != null) oldestHFileTs = Math.min(oldestHFileTs, file.getReader().getHFileReader().getFileContext().getFileCreateTime());
+ }
storefiles += store.getStorefilesCount();
storeUncompressedSizeMB += (int) (store.getStoreSizeUncompressed()
/ 1024 / 1024);
@@ -1368,6 +1372,9 @@ public class HRegionServer extends HasThread implements
(int) (store.getTotalStaticBloomSize() / 1024);
}
}
+ if (oldestHFileTs == Long.MAX_VALUE) {
+ oldestHFileTs = 0;
+ }
float dataLocality =
r.getHDFSBlocksDistribution().getBlockLocalityIndex(serverName.getHostname());
if (regionLoadBldr == null) {
@@ -1393,8 +1400,8 @@ public class HRegionServer extends HasThread implements
.setTotalCompactingKVs(totalCompactingKVs)
.setCurrentCompactedKVs(currentCompactedKVs)
.setCompleteSequenceId(r.maxFlushedSeqId)
- .setDataLocality(dataLocality);
-
+ .setDataLocality(dataLocality)
+ .setOldestHfileTs(oldestHFileTs);
return regionLoadBldr.build();
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
index 047d689..942b47f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
@@ -991,6 +991,7 @@ public class HStore implements Store {
.withHBaseCheckSum(true)
.withDataBlockEncoding(family.getDataBlockEncoding())
.withEncryptionContext(cryptoContext)
+ .withCreateTime(EnvironmentEdgeManager.currentTime())
.build();
return hFileContext;
}