diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterStatus.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterStatus.java
index b93312a..bc96740 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterStatus.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterStatus.java
@@ -268,6 +268,18 @@ public class ClusterStatus extends VersionedWritable {
return masterCoprocessors;
}
+ public long getOldestHFileTs(TableName table) {
+ long result = Long.MAX_VALUE;
+ for (ServerName server : getServers()) {
+ ServerLoad load = getLoad(server);
+ for (RegionLoad rl : load.getRegionsLoad().values()) {
+ if (table.equals(HRegionInfo.getTable(rl.getName()))) {
+ result = Math.min(result, rl.getOldestHfileTs());
+ }
+ }
+ }
+ return result == Long.MAX_VALUE ? 0 : result;
+ }
public boolean isBalancerOn() {
return balancerOn != null && balancerOn;
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionLoad.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionLoad.java
index 234c5ae..d56dde2 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionLoad.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionLoad.java
@@ -169,6 +169,14 @@ public class RegionLoad {
}
return 0.0f;
}
+
+ /**
+ * @return the timestamp of the oldest hfile for any store of this region.
+ */
+ public long getOldestHfileTs() {
+ return regionLoadPB.getOldestHfileTs();
+ }
+
/**
* @see java.lang.Object#toString()
*/
@@ -179,7 +187,9 @@ public class RegionLoad {
sb = Strings.appendKeyValue(sb, "numberOfStorefiles",
this.getStorefiles());
sb = Strings.appendKeyValue(sb, "storefileUncompressedSizeMB",
this.getStoreUncompressedSizeMB());
+ sb = Strings.appendKeyValue(sb, "oldestHFileTimestamp",
+ this.getOldestHfileTs());
sb = Strings.appendKeyValue(sb, "storefileSizeMB",
this.getStorefileSizeMB());
if (this.getStoreUncompressedSizeMB() != 0) {
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
index c5d9556..1c4b3d4 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
@@ -958,6 +958,28 @@ public interface Admin extends Abortable, Closeable {
final byte[] regionName) throws IOException;
/**
+ * Get the timestamp of the last major compaction for the passed table
+ * (the timestamp of the oldest HFile of that table)
+ *
+ * @param tableName table to examine
+ * @return the last major compaction timestamp
+ * @throws IOException if a remote or network exception occurs
+ */
+ long getLastMajorCompactionTs(final TableName tableName)
+ throws IOException;
+
+ /**
+ * Get the timestamp of the last major compaction for the passed region
+ * (the timestamp of the oldest HFile of that region)
+ *
+ * @param regionName region to examine
+ * @return the last major compaction timestamp
+ * @throws IOException if a remote or network exception occurs
+ */
+ long getLastMajorCompactionTsForRegion(final byte[] regionName)
+ throws IOException;
+
+ /**
* Take a snapshot for the given table. If the table is enabled, a FLUSH-type snapshot will be
* taken. If the table is disabled, an offline snapshot is taken. Snapshots are considered unique
* based on the name of the snapshot. Attempts to take a snapshot with the same name (even
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
index 5a9ca74..64c7c49 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
@@ -3773,4 +3773,75 @@ public class HBaseAdmin implements Admin {
throw new IOException("Failed to get master info port from MasterAddressTracker", e);
}
}
+
+ @Override
+ public long getLastMajorCompactionTs(TableName tableName) throws IOException {
+ ZooKeeperWatcher zookeeper =
+ new ZooKeeperWatcher(conf, ZK_IDENTIFIER_PREFIX + connection.toString(),
+ new ThrowableAbortable());
+ long ts = Long.MAX_VALUE;
+ try {
+ checkTableExists(tableName);
+ List<Pair<HRegionInfo, ServerName>> pairs;
+ if (TableName.META_TABLE_NAME.equals(tableName)) {
+ pairs = new MetaTableLocator().getMetaRegionsAndLocations(zookeeper);
+ } else {
+ pairs = MetaTableAccessor.getTableRegionsAndLocations(connection, tableName);
+ }
+ for (Pair<HRegionInfo, ServerName> pair: pairs) {
+ if (pair.getFirst().isOffline()) continue;
+ if (pair.getSecond() == null) continue;
+ try {
+ ServerName sn = pair.getSecond();
+ AdminService.BlockingInterface admin = this.connection.getAdmin(sn);
+ GetRegionInfoRequest request = RequestConverter.buildGetRegionInfoRequest(
+ pair.getFirst().getRegionName(), true);
+ GetRegionInfoResponse response = admin.getRegionInfo(null, request);
+ ts = Math.min(ts, response.getOldestHfileTs());
+ } catch (NotServingRegionException e) {
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Trying to get compaction state of " +
+ pair.getFirst() + ": " +
+ StringUtils.stringifyException(e));
+ }
+ } catch (RemoteException e) {
+ if (e.getMessage().indexOf(NotServingRegionException.class.getName()) >= 0) {
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Trying to get compaction state of " + pair.getFirst() + ": "
+ + StringUtils.stringifyException(e));
+ }
+ } else {
+ throw e;
+ }
+ }
+ }
+ } catch (ServiceException se) {
+ throw ProtobufUtil.getRemoteException(se);
+ } finally {
+ zookeeper.close();
+ }
+ return ts == Long.MAX_VALUE ? 0 : ts;
+ }
+
+ @Override
+ public long getLastMajorCompactionTsForRegion(byte[] regionName) throws IOException {
+ try {
+ Pair<HRegionInfo, ServerName> regionServerPair = getRegion(regionName);
+ if (regionServerPair == null) {
+ throw new IllegalArgumentException("Invalid region: " + Bytes.toStringBinary(regionName));
+ }
+ if (regionServerPair.getSecond() == null) {
+ throw new NoServerForRegionException(Bytes.toStringBinary(regionName));
+ }
+ ServerName sn = regionServerPair.getSecond();
+ AdminService.BlockingInterface admin = this.connection.getAdmin(sn);
+ GetRegionInfoRequest request =
+ RequestConverter.buildGetRegionInfoRequest(regionServerPair.getFirst().getRegionName(),
+ true);
+ GetRegionInfoResponse response = admin.getRegionInfo(null, request);
+ return response.getOldestHfileTs();
+ } catch (ServiceException se) {
+ throw ProtobufUtil.getRemoteException(se);
+ }
+ }
}
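For reference, a minimal usage sketch of the two new Admin methods (not part of the patch; it assumes an HBase 1.x-era client setup, and the table name "t1" is illustrative only):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class LastMajorCompactionTsExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Table-level: the minimum HFile creation time across all stores of the table.
      long ts = admin.getLastMajorCompactionTs(TableName.valueOf("t1"));
      // A result of 0 means no timestamp could be determined, e.g. for
      // store files written before this feature existed.
      System.out.println("t1 last major compaction ts: " + ts);
    }
  }
}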
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileContext.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileContext.java
index ce8b71a..24892e6 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileContext.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileContext.java
@@ -56,6 +56,7 @@ public class HFileContext implements HeapSize, Cloneable {
private DataBlockEncoding encoding = DataBlockEncoding.NONE;
/** Encryption algorithm and key used */
private Encryption.Context cryptoContext = Encryption.Context.NONE;
+ private long fileCreateTime;
//Empty constructor. Go with setters
public HFileContext() {
@@ -76,12 +77,13 @@ public class HFileContext implements HeapSize, Cloneable {
this.blocksize = context.blocksize;
this.encoding = context.encoding;
this.cryptoContext = context.cryptoContext;
+ this.fileCreateTime = context.fileCreateTime;
}
public HFileContext(boolean useHBaseChecksum, boolean includesMvcc, boolean includesTags,
Compression.Algorithm compressAlgo, boolean compressTags, ChecksumType checksumType,
int bytesPerChecksum, int blockSize, DataBlockEncoding encoding,
- Encryption.Context cryptoContext) {
+ Encryption.Context cryptoContext, long fileCreateTime) {
this.usesHBaseChecksum = useHBaseChecksum;
this.includesMvcc = includesMvcc;
this.includesTags = includesTags;
@@ -94,6 +96,7 @@ public class HFileContext implements HeapSize, Cloneable {
this.encoding = encoding;
}
this.cryptoContext = cryptoContext;
+ this.fileCreateTime = fileCreateTime;
}
/**
@@ -141,6 +144,10 @@ public class HFileContext implements HeapSize, Cloneable {
this.includesTags = includesTags;
}
+ public void setFileCreateTime(long fileCreateTime) {
+ this.fileCreateTime = fileCreateTime;
+ }
+
public boolean isCompressTags() {
return compressTags;
}
@@ -161,6 +168,10 @@ public class HFileContext implements HeapSize, Cloneable {
return blocksize;
}
+ public long getFileCreateTime() {
+ return fileCreateTime;
+ }
+
public DataBlockEncoding getDataBlockEncoding() {
return encoding;
}
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileContextBuilder.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileContextBuilder.java
index 5c5d75f..0d1e6ef 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileContextBuilder.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileContextBuilder.java
@@ -52,6 +52,7 @@ public class HFileContextBuilder {
private DataBlockEncoding encoding = DataBlockEncoding.NONE;
/** Crypto context */
private Encryption.Context cryptoContext = Encryption.Context.NONE;
+ private long fileCreateTime = 0;
public HFileContextBuilder withHBaseCheckSum(boolean useHBaseCheckSum) {
this.usesHBaseChecksum = useHBaseCheckSum;
@@ -103,8 +104,14 @@ public class HFileContextBuilder {
return this;
}
+ public HFileContextBuilder withCreateTime(long fileCreateTime) {
+ this.fileCreateTime = fileCreateTime;
+ return this;
+ }
+
public HFileContext build() {
return new HFileContext(usesHBaseChecksum, includesMvcc, includesTags, compression,
- compressTags, checksumType, bytesPerChecksum, blocksize, encoding, cryptoContext);
+ compressTags, checksumType, bytesPerChecksum, blocksize, encoding, cryptoContext,
+ fileCreateTime);
}
}
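As a small writer-side illustration (a sketch using only the API added by this patch, not additional patch content), a caller threads the creation time through the builder the same way HStore does further below:

import org.apache.hadoop.hbase.io.hfile.HFileContext;
import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;

public class CreateTimeExample {
  public static void main(String[] args) {
    // Stamp the context with the current time; AbstractHFileWriter later
    // persists this value under the CREATE_TIME_TS file-info key.
    HFileContext ctx = new HFileContextBuilder()
        .withHBaseCheckSum(true)
        .withCreateTime(System.currentTimeMillis())
        .build();
    System.out.println("file create time: " + ctx.getFileCreateTime());
  }
}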
diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/AdminProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/AdminProtos.java
index 3828742..b45145e 100644
--- a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/AdminProtos.java
+++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/AdminProtos.java
@@ -695,6 +695,16 @@ public final class AdminProtos {
* optional bool isRecovering = 3;
*/
boolean getIsRecovering();
+
+ // optional uint64 oldest_hfile_ts = 4 [default = 0];
+ /**
+ * optional uint64 oldest_hfile_ts = 4 [default = 0];
+ */
+ boolean hasOldestHfileTs();
+ /**
+ * optional uint64 oldest_hfile_ts = 4 [default = 0];
+ */
+ long getOldestHfileTs();
}
/**
* Protobuf type {@code GetRegionInfoResponse}
@@ -776,6 +786,11 @@ public final class AdminProtos {
isRecovering_ = input.readBool();
break;
}
+ case 32: {
+ bitField0_ |= 0x00000008;
+ oldestHfileTs_ = input.readUInt64();
+ break;
+ }
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
@@ -970,10 +985,27 @@ public final class AdminProtos {
return isRecovering_;
}
+ // optional uint64 oldest_hfile_ts = 4 [default = 0];
+ public static final int OLDEST_HFILE_TS_FIELD_NUMBER = 4;
+ private long oldestHfileTs_;
+ /**
+ * optional uint64 oldest_hfile_ts = 4 [default = 0];
+ */
+ public boolean hasOldestHfileTs() {
+ return ((bitField0_ & 0x00000008) == 0x00000008);
+ }
+ /**
+ * optional uint64 oldest_hfile_ts = 4 [default = 0];
+ */
+ public long getOldestHfileTs() {
+ return oldestHfileTs_;
+ }
+
private void initFields() {
regionInfo_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance();
compactionState_ = org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoResponse.CompactionState.NONE;
isRecovering_ = false;
+ oldestHfileTs_ = 0L;
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
@@ -1004,6 +1036,9 @@ public final class AdminProtos {
if (((bitField0_ & 0x00000004) == 0x00000004)) {
output.writeBool(3, isRecovering_);
}
+ if (((bitField0_ & 0x00000008) == 0x00000008)) {
+ output.writeUInt64(4, oldestHfileTs_);
+ }
getUnknownFields().writeTo(output);
}
@@ -1025,6 +1060,10 @@ public final class AdminProtos {
size += com.google.protobuf.CodedOutputStream
.computeBoolSize(3, isRecovering_);
}
+ if (((bitField0_ & 0x00000008) == 0x00000008)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeUInt64Size(4, oldestHfileTs_);
+ }
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
@@ -1063,6 +1102,11 @@ public final class AdminProtos {
result = result && (getIsRecovering()
== other.getIsRecovering());
}
+ result = result && (hasOldestHfileTs() == other.hasOldestHfileTs());
+ if (hasOldestHfileTs()) {
+ result = result && (getOldestHfileTs()
+ == other.getOldestHfileTs());
+ }
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
@@ -1088,6 +1132,10 @@ public final class AdminProtos {
hash = (37 * hash) + ISRECOVERING_FIELD_NUMBER;
hash = (53 * hash) + hashBoolean(getIsRecovering());
}
+ if (hasOldestHfileTs()) {
+ hash = (37 * hash) + OLDEST_HFILE_TS_FIELD_NUMBER;
+ hash = (53 * hash) + hashLong(getOldestHfileTs());
+ }
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
@@ -1208,6 +1256,8 @@ public final class AdminProtos {
bitField0_ = (bitField0_ & ~0x00000002);
isRecovering_ = false;
bitField0_ = (bitField0_ & ~0x00000004);
+ oldestHfileTs_ = 0L;
+ bitField0_ = (bitField0_ & ~0x00000008);
return this;
}
@@ -1252,6 +1302,10 @@ public final class AdminProtos {
to_bitField0_ |= 0x00000004;
}
result.isRecovering_ = isRecovering_;
+ if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
+ to_bitField0_ |= 0x00000008;
+ }
+ result.oldestHfileTs_ = oldestHfileTs_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
@@ -1277,6 +1331,9 @@ public final class AdminProtos {
if (other.hasIsRecovering()) {
setIsRecovering(other.getIsRecovering());
}
+ if (other.hasOldestHfileTs()) {
+ setOldestHfileTs(other.getOldestHfileTs());
+ }
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
@@ -1498,6 +1555,39 @@ public final class AdminProtos {
return this;
}
+ // optional uint64 oldest_hfile_ts = 4 [default = 0];
+ private long oldestHfileTs_ ;
+ /**
+ * optional uint64 oldest_hfile_ts = 4 [default = 0];
+ */
+ public boolean hasOldestHfileTs() {
+ return ((bitField0_ & 0x00000008) == 0x00000008);
+ }
+ /**
+ * optional uint64 oldest_hfile_ts = 4 [default = 0];
+ */
+ public long getOldestHfileTs() {
+ return oldestHfileTs_;
+ }
+ /**
+ * optional uint64 oldest_hfile_ts = 4 [default = 0];
+ */
+ public Builder setOldestHfileTs(long value) {
+ bitField0_ |= 0x00000008;
+ oldestHfileTs_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * optional uint64 oldest_hfile_ts = 4 [default = 0];
+ */
+ public Builder clearOldestHfileTs() {
+ bitField0_ = (bitField0_ & ~0x00000008);
+ oldestHfileTs_ = 0L;
+ onChanged();
+ return this;
+ }
+
// @@protoc_insertion_point(builder_scope:GetRegionInfoResponse)
}
@@ -22046,93 +22136,93 @@ public final class AdminProtos {
"\n\013Admin.proto\032\014Client.proto\032\013HBase.proto" +
"\032\tWAL.proto\"R\n\024GetRegionInfoRequest\022 \n\006r" +
"egion\030\001 \002(\0132\020.RegionSpecifier\022\030\n\020compact" +
- "ion_state\030\002 \001(\010\"\331\001\n\025GetRegionInfoRespons" +
+ "ion_state\030\002 \001(\010\"\365\001\n\025GetRegionInfoRespons" +
"e\022 \n\013region_info\030\001 \002(\0132\013.RegionInfo\022@\n\020c" +
"ompaction_state\030\002 \001(\0162&.GetRegionInfoRes" +
"ponse.CompactionState\022\024\n\014isRecovering\030\003 " +
- "\001(\010\"F\n\017CompactionState\022\010\n\004NONE\020\000\022\t\n\005MINO" +
- "R\020\001\022\t\n\005MAJOR\020\002\022\023\n\017MAJOR_AND_MINOR\020\003\"G\n\023G" +
- "etStoreFileRequest\022 \n\006region\030\001 \002(\0132\020.Reg",
- "ionSpecifier\022\016\n\006family\030\002 \003(\014\"*\n\024GetStore" +
- "FileResponse\022\022\n\nstore_file\030\001 \003(\t\"\030\n\026GetO" +
- "nlineRegionRequest\";\n\027GetOnlineRegionRes" +
- "ponse\022 \n\013region_info\030\001 \003(\0132\013.RegionInfo\"" +
- "\374\001\n\021OpenRegionRequest\0224\n\topen_info\030\001 \003(\013" +
- "2!.OpenRegionRequest.RegionOpenInfo\022\027\n\017s" +
- "erverStartCode\030\002 \001(\004\032\227\001\n\016RegionOpenInfo\022" +
- "\033\n\006region\030\001 \002(\0132\013.RegionInfo\022\037\n\027version_" +
- "of_offline_node\030\002 \001(\r\022\"\n\rfavored_nodes\030\003" +
- " \003(\0132\013.ServerName\022#\n\033openForDistributedL",
- "ogReplay\030\004 \001(\010\"\235\001\n\022OpenRegionResponse\022=\n" +
- "\ropening_state\030\001 \003(\0162&.OpenRegionRespons" +
- "e.RegionOpeningState\"H\n\022RegionOpeningSta" +
- "te\022\n\n\006OPENED\020\000\022\022\n\016ALREADY_OPENED\020\001\022\022\n\016FA" +
- "ILED_OPENING\020\002\"\271\001\n\022CloseRegionRequest\022 \n" +
- "\006region\030\001 \002(\0132\020.RegionSpecifier\022\037\n\027versi" +
- "on_of_closing_node\030\002 \001(\r\022\036\n\020transition_i" +
- "n_ZK\030\003 \001(\010:\004true\022\'\n\022destination_server\030\004" +
- " \001(\0132\013.ServerName\022\027\n\017serverStartCode\030\005 \001" +
- "(\004\"%\n\023CloseRegionResponse\022\016\n\006closed\030\001 \002(",
- "\010\"P\n\022FlushRegionRequest\022 \n\006region\030\001 \002(\0132" +
- "\020.RegionSpecifier\022\030\n\020if_older_than_ts\030\002 " +
- "\001(\004\"?\n\023FlushRegionResponse\022\027\n\017last_flush" +
- "_time\030\001 \002(\004\022\017\n\007flushed\030\002 \001(\010\"K\n\022SplitReg" +
- "ionRequest\022 \n\006region\030\001 \002(\0132\020.RegionSpeci" +
- "fier\022\023\n\013split_point\030\002 \001(\014\"\025\n\023SplitRegion" +
- "Response\"W\n\024CompactRegionRequest\022 \n\006regi" +
- "on\030\001 \002(\0132\020.RegionSpecifier\022\r\n\005major\030\002 \001(" +
- "\010\022\016\n\006family\030\003 \001(\014\"\027\n\025CompactRegionRespon" +
- "se\"\262\001\n\031UpdateFavoredNodesRequest\022@\n\013upda",
- "te_info\030\001 \003(\0132+.UpdateFavoredNodesReques" +
- "t.RegionUpdateInfo\032S\n\020RegionUpdateInfo\022\033" +
- "\n\006region\030\001 \002(\0132\013.RegionInfo\022\"\n\rfavored_n" +
- "odes\030\002 \003(\0132\013.ServerName\".\n\032UpdateFavored" +
- "NodesResponse\022\020\n\010response\030\001 \001(\r\"v\n\023Merge" +
- "RegionsRequest\022\"\n\010region_a\030\001 \002(\0132\020.Regio" +
- "nSpecifier\022\"\n\010region_b\030\002 \002(\0132\020.RegionSpe" +
- "cifier\022\027\n\010forcible\030\003 \001(\010:\005false\"\026\n\024Merge" +
- "RegionsResponse\"X\n\010WALEntry\022\024\n\003key\030\001 \002(\013" +
- "2\007.WALKey\022\027\n\017key_value_bytes\030\002 \003(\014\022\035\n\025as",
- "sociated_cell_count\030\003 \001(\005\"4\n\030ReplicateWA" +
- "LEntryRequest\022\030\n\005entry\030\001 \003(\0132\t.WALEntry\"" +
- "\033\n\031ReplicateWALEntryResponse\"\026\n\024RollWALW" +
- "riterRequest\"0\n\025RollWALWriterResponse\022\027\n" +
- "\017region_to_flush\030\001 \003(\014\"#\n\021StopServerRequ" +
- "est\022\016\n\006reason\030\001 \002(\t\"\024\n\022StopServerRespons" +
- "e\"\026\n\024GetServerInfoRequest\"B\n\nServerInfo\022" +
- " \n\013server_name\030\001 \002(\0132\013.ServerName\022\022\n\nweb" +
- "ui_port\030\002 \001(\r\"9\n\025GetServerInfoResponse\022 " +
- "\n\013server_info\030\001 \002(\0132\013.ServerInfo\"\034\n\032Upda",
- "teConfigurationRequest\"\035\n\033UpdateConfigur" +
- "ationResponse2\230\010\n\014AdminService\022>\n\rGetReg" +
- "ionInfo\022\025.GetRegionInfoRequest\032\026.GetRegi" +
- "onInfoResponse\022;\n\014GetStoreFile\022\024.GetStor" +
- "eFileRequest\032\025.GetStoreFileResponse\022D\n\017G" +
- "etOnlineRegion\022\027.GetOnlineRegionRequest\032" +
- "\030.GetOnlineRegionResponse\0225\n\nOpenRegion\022" +
- "\022.OpenRegionRequest\032\023.OpenRegionResponse" +
- "\0228\n\013CloseRegion\022\023.CloseRegionRequest\032\024.C" +
- "loseRegionResponse\0228\n\013FlushRegion\022\023.Flus",
- "hRegionRequest\032\024.FlushRegionResponse\0228\n\013" +
- "SplitRegion\022\023.SplitRegionRequest\032\024.Split" +
- "RegionResponse\022>\n\rCompactRegion\022\025.Compac" +
- "tRegionRequest\032\026.CompactRegionResponse\022;" +
- "\n\014MergeRegions\022\024.MergeRegionsRequest\032\025.M" +
- "ergeRegionsResponse\022J\n\021ReplicateWALEntry" +
- "\022\031.ReplicateWALEntryRequest\032\032.ReplicateW" +
- "ALEntryResponse\022?\n\006Replay\022\031.ReplicateWAL" +
- "EntryRequest\032\032.ReplicateWALEntryResponse" +
- "\022>\n\rRollWALWriter\022\025.RollWALWriterRequest",
- "\032\026.RollWALWriterResponse\022>\n\rGetServerInf" +
- "o\022\025.GetServerInfoRequest\032\026.GetServerInfo" +
- "Response\0225\n\nStopServer\022\022.StopServerReque" +
- "st\032\023.StopServerResponse\022M\n\022UpdateFavored" +
- "Nodes\022\032.UpdateFavoredNodesRequest\032\033.Upda" +
- "teFavoredNodesResponse\022P\n\023UpdateConfigur" +
- "ation\022\033.UpdateConfigurationRequest\032\034.Upd" +
- "ateConfigurationResponseBA\n*org.apache.h" +
- "adoop.hbase.protobuf.generatedB\013AdminPro" +
- "tosH\001\210\001\001\240\001\001"
+ "\001(\010\022\032\n\017oldest_hfile_ts\030\004 \001(\004:\0010\"F\n\017Compa" +
+ "ctionState\022\010\n\004NONE\020\000\022\t\n\005MINOR\020\001\022\t\n\005MAJOR" +
+ "\020\002\022\023\n\017MAJOR_AND_MINOR\020\003\"G\n\023GetStoreFileR",
+ "equest\022 \n\006region\030\001 \002(\0132\020.RegionSpecifier" +
+ "\022\016\n\006family\030\002 \003(\014\"*\n\024GetStoreFileResponse" +
+ "\022\022\n\nstore_file\030\001 \003(\t\"\030\n\026GetOnlineRegionR" +
+ "equest\";\n\027GetOnlineRegionResponse\022 \n\013reg" +
+ "ion_info\030\001 \003(\0132\013.RegionInfo\"\374\001\n\021OpenRegi" +
+ "onRequest\0224\n\topen_info\030\001 \003(\0132!.OpenRegio" +
+ "nRequest.RegionOpenInfo\022\027\n\017serverStartCo" +
+ "de\030\002 \001(\004\032\227\001\n\016RegionOpenInfo\022\033\n\006region\030\001 " +
+ "\002(\0132\013.RegionInfo\022\037\n\027version_of_offline_n" +
+ "ode\030\002 \001(\r\022\"\n\rfavored_nodes\030\003 \003(\0132\013.Serve",
+ "rName\022#\n\033openForDistributedLogReplay\030\004 \001" +
+ "(\010\"\235\001\n\022OpenRegionResponse\022=\n\ropening_sta" +
+ "te\030\001 \003(\0162&.OpenRegionResponse.RegionOpen" +
+ "ingState\"H\n\022RegionOpeningState\022\n\n\006OPENED" +
+ "\020\000\022\022\n\016ALREADY_OPENED\020\001\022\022\n\016FAILED_OPENING" +
+ "\020\002\"\271\001\n\022CloseRegionRequest\022 \n\006region\030\001 \002(" +
+ "\0132\020.RegionSpecifier\022\037\n\027version_of_closin" +
+ "g_node\030\002 \001(\r\022\036\n\020transition_in_ZK\030\003 \001(\010:\004" +
+ "true\022\'\n\022destination_server\030\004 \001(\0132\013.Serve" +
+ "rName\022\027\n\017serverStartCode\030\005 \001(\004\"%\n\023CloseR",
+ "egionResponse\022\016\n\006closed\030\001 \002(\010\"P\n\022FlushRe" +
+ "gionRequest\022 \n\006region\030\001 \002(\0132\020.RegionSpec" +
+ "ifier\022\030\n\020if_older_than_ts\030\002 \001(\004\"?\n\023Flush" +
+ "RegionResponse\022\027\n\017last_flush_time\030\001 \002(\004\022" +
+ "\017\n\007flushed\030\002 \001(\010\"K\n\022SplitRegionRequest\022 " +
+ "\n\006region\030\001 \002(\0132\020.RegionSpecifier\022\023\n\013spli" +
+ "t_point\030\002 \001(\014\"\025\n\023SplitRegionResponse\"W\n\024" +
+ "CompactRegionRequest\022 \n\006region\030\001 \002(\0132\020.R" +
+ "egionSpecifier\022\r\n\005major\030\002 \001(\010\022\016\n\006family\030" +
+ "\003 \001(\014\"\027\n\025CompactRegionResponse\"\262\001\n\031Updat",
+ "eFavoredNodesRequest\022@\n\013update_info\030\001 \003(" +
+ "\0132+.UpdateFavoredNodesRequest.RegionUpda" +
+ "teInfo\032S\n\020RegionUpdateInfo\022\033\n\006region\030\001 \002" +
+ "(\0132\013.RegionInfo\022\"\n\rfavored_nodes\030\002 \003(\0132\013" +
+ ".ServerName\".\n\032UpdateFavoredNodesRespons" +
+ "e\022\020\n\010response\030\001 \001(\r\"v\n\023MergeRegionsReque" +
+ "st\022\"\n\010region_a\030\001 \002(\0132\020.RegionSpecifier\022\"" +
+ "\n\010region_b\030\002 \002(\0132\020.RegionSpecifier\022\027\n\010fo" +
+ "rcible\030\003 \001(\010:\005false\"\026\n\024MergeRegionsRespo" +
+ "nse\"X\n\010WALEntry\022\024\n\003key\030\001 \002(\0132\007.WALKey\022\027\n",
+ "\017key_value_bytes\030\002 \003(\014\022\035\n\025associated_cel" +
+ "l_count\030\003 \001(\005\"4\n\030ReplicateWALEntryReques" +
+ "t\022\030\n\005entry\030\001 \003(\0132\t.WALEntry\"\033\n\031Replicate" +
+ "WALEntryResponse\"\026\n\024RollWALWriterRequest" +
+ "\"0\n\025RollWALWriterResponse\022\027\n\017region_to_f" +
+ "lush\030\001 \003(\014\"#\n\021StopServerRequest\022\016\n\006reaso" +
+ "n\030\001 \002(\t\"\024\n\022StopServerResponse\"\026\n\024GetServ" +
+ "erInfoRequest\"B\n\nServerInfo\022 \n\013server_na" +
+ "me\030\001 \002(\0132\013.ServerName\022\022\n\nwebui_port\030\002 \001(" +
+ "\r\"9\n\025GetServerInfoResponse\022 \n\013server_inf",
+ "o\030\001 \002(\0132\013.ServerInfo\"\034\n\032UpdateConfigurat" +
+ "ionRequest\"\035\n\033UpdateConfigurationRespons" +
+ "e2\230\010\n\014AdminService\022>\n\rGetRegionInfo\022\025.Ge" +
+ "tRegionInfoRequest\032\026.GetRegionInfoRespon" +
+ "se\022;\n\014GetStoreFile\022\024.GetStoreFileRequest" +
+ "\032\025.GetStoreFileResponse\022D\n\017GetOnlineRegi" +
+ "on\022\027.GetOnlineRegionRequest\032\030.GetOnlineR" +
+ "egionResponse\0225\n\nOpenRegion\022\022.OpenRegion" +
+ "Request\032\023.OpenRegionResponse\0228\n\013CloseReg" +
+ "ion\022\023.CloseRegionRequest\032\024.CloseRegionRe",
+ "sponse\0228\n\013FlushRegion\022\023.FlushRegionReque" +
+ "st\032\024.FlushRegionResponse\0228\n\013SplitRegion\022" +
+ "\023.SplitRegionRequest\032\024.SplitRegionRespon" +
+ "se\022>\n\rCompactRegion\022\025.CompactRegionReque" +
+ "st\032\026.CompactRegionResponse\022;\n\014MergeRegio" +
+ "ns\022\024.MergeRegionsRequest\032\025.MergeRegionsR" +
+ "esponse\022J\n\021ReplicateWALEntry\022\031.Replicate" +
+ "WALEntryRequest\032\032.ReplicateWALEntryRespo" +
+ "nse\022?\n\006Replay\022\031.ReplicateWALEntryRequest" +
+ "\032\032.ReplicateWALEntryResponse\022>\n\rRollWALW",
+ "riter\022\025.RollWALWriterRequest\032\026.RollWALWr" +
+ "iterResponse\022>\n\rGetServerInfo\022\025.GetServe" +
+ "rInfoRequest\032\026.GetServerInfoResponse\0225\n\n" +
+ "StopServer\022\022.StopServerRequest\032\023.StopSer" +
+ "verResponse\022M\n\022UpdateFavoredNodes\022\032.Upda" +
+ "teFavoredNodesRequest\032\033.UpdateFavoredNod" +
+ "esResponse\022P\n\023UpdateConfiguration\022\033.Upda" +
+ "teConfigurationRequest\032\034.UpdateConfigura" +
+ "tionResponseBA\n*org.apache.hadoop.hbase." +
+ "protobuf.generatedB\013AdminProtosH\001\210\001\001\240\001\001"
};
com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
@@ -22150,7 +22240,7 @@ public final class AdminProtos {
internal_static_GetRegionInfoResponse_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_GetRegionInfoResponse_descriptor,
- new java.lang.String[] { "RegionInfo", "CompactionState", "IsRecovering", });
+ new java.lang.String[] { "RegionInfo", "CompactionState", "IsRecovering", "OldestHfileTs", });
internal_static_GetStoreFileRequest_descriptor =
getDescriptor().getMessageTypes().get(2);
internal_static_GetStoreFileRequest_fieldAccessorTable = new
diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClusterStatusProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClusterStatusProtos.java
index 5bc44ff..e5abb88 100644
--- a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClusterStatusProtos.java
+++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClusterStatusProtos.java
@@ -2171,6 +2171,16 @@ public final class ClusterStatusProtos {
*
*/
float getDataLocality();
+
+ // optional uint64 oldest_hfile_ts = 17 [default = 0];
+ /**
+ * optional uint64 oldest_hfile_ts = 17 [default = 0];
+ */
+ boolean hasOldestHfileTs();
+ /**
+ * optional uint64 oldest_hfile_ts = 17 [default = 0];
+ */
+ long getOldestHfileTs();
}
/**
* Protobuf type {@code RegionLoad}
@@ -2311,6 +2321,11 @@ public final class ClusterStatusProtos {
dataLocality_ = input.readFloat();
break;
}
+ case 136: {
+ bitField0_ |= 0x00010000;
+ oldestHfileTs_ = input.readUInt64();
+ break;
+ }
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
@@ -2753,6 +2768,22 @@ public final class ClusterStatusProtos {
return dataLocality_;
}
+ // optional uint64 oldest_hfile_ts = 17 [default = 0];
+ public static final int OLDEST_HFILE_TS_FIELD_NUMBER = 17;
+ private long oldestHfileTs_;
+ /**
+ * optional uint64 oldest_hfile_ts = 17 [default = 0];
+ */
+ public boolean hasOldestHfileTs() {
+ return ((bitField0_ & 0x00010000) == 0x00010000);
+ }
+ /**
+ * optional uint64 oldest_hfile_ts = 17 [default = 0];
+ */
+ public long getOldestHfileTs() {
+ return oldestHfileTs_;
+ }
+
private void initFields() {
regionSpecifier_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance();
stores_ = 0;
@@ -2770,6 +2801,7 @@ public final class ClusterStatusProtos {
totalStaticBloomSizeKB_ = 0;
completeSequenceId_ = 0L;
dataLocality_ = 0F;
+ oldestHfileTs_ = 0L;
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
@@ -2839,6 +2871,9 @@ public final class ClusterStatusProtos {
if (((bitField0_ & 0x00008000) == 0x00008000)) {
output.writeFloat(16, dataLocality_);
}
+ if (((bitField0_ & 0x00010000) == 0x00010000)) {
+ output.writeUInt64(17, oldestHfileTs_);
+ }
getUnknownFields().writeTo(output);
}
@@ -2912,6 +2947,10 @@ public final class ClusterStatusProtos {
size += com.google.protobuf.CodedOutputStream
.computeFloatSize(16, dataLocality_);
}
+ if (((bitField0_ & 0x00010000) == 0x00010000)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeUInt64Size(17, oldestHfileTs_);
+ }
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
@@ -3014,6 +3053,11 @@ public final class ClusterStatusProtos {
if (hasDataLocality()) {
result = result && (Float.floatToIntBits(getDataLocality()) == Float.floatToIntBits(other.getDataLocality()));
}
+ result = result && (hasOldestHfileTs() == other.hasOldestHfileTs());
+ if (hasOldestHfileTs()) {
+ result = result && (getOldestHfileTs()
+ == other.getOldestHfileTs());
+ }
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
@@ -3092,6 +3136,10 @@ public final class ClusterStatusProtos {
hash = (53 * hash) + Float.floatToIntBits(
getDataLocality());
}
+ if (hasOldestHfileTs()) {
+ hash = (37 * hash) + OLDEST_HFILE_TS_FIELD_NUMBER;
+ hash = (53 * hash) + hashLong(getOldestHfileTs());
+ }
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
@@ -3238,6 +3286,8 @@ public final class ClusterStatusProtos {
bitField0_ = (bitField0_ & ~0x00004000);
dataLocality_ = 0F;
bitField0_ = (bitField0_ & ~0x00008000);
+ oldestHfileTs_ = 0L;
+ bitField0_ = (bitField0_ & ~0x00010000);
return this;
}
@@ -3334,6 +3384,10 @@ public final class ClusterStatusProtos {
to_bitField0_ |= 0x00008000;
}
result.dataLocality_ = dataLocality_;
+ if (((from_bitField0_ & 0x00010000) == 0x00010000)) {
+ to_bitField0_ |= 0x00010000;
+ }
+ result.oldestHfileTs_ = oldestHfileTs_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
@@ -3398,6 +3452,9 @@ public final class ClusterStatusProtos {
if (other.hasDataLocality()) {
setDataLocality(other.getDataLocality());
}
+ if (other.hasOldestHfileTs()) {
+ setOldestHfileTs(other.getOldestHfileTs());
+ }
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
@@ -4337,6 +4394,39 @@ public final class ClusterStatusProtos {
return this;
}
+ // optional uint64 oldest_hfile_ts = 17 [default = 0];
+ private long oldestHfileTs_ ;
+ /**
+ * optional uint64 oldest_hfile_ts = 17 [default = 0];
+ */
+ public boolean hasOldestHfileTs() {
+ return ((bitField0_ & 0x00010000) == 0x00010000);
+ }
+ /**
+ * optional uint64 oldest_hfile_ts = 17 [default = 0];
+ */
+ public long getOldestHfileTs() {
+ return oldestHfileTs_;
+ }
+ /**
+ * optional uint64 oldest_hfile_ts = 17 [default = 0];
+ */
+ public Builder setOldestHfileTs(long value) {
+ bitField0_ |= 0x00010000;
+ oldestHfileTs_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * optional uint64 oldest_hfile_ts = 17 [default = 0];
+ */
+ public Builder clearOldestHfileTs() {
+ bitField0_ = (bitField0_ & ~0x00010000);
+ oldestHfileTs_ = 0L;
+ onChanged();
+ return this;
+ }
+
// @@protoc_insertion_point(builder_scope:RegionLoad)
}
@@ -10472,7 +10562,7 @@ public final class ClusterStatusProtos {
"PLITTING_NEW\020\r\022\017\n\013MERGING_NEW\020\016\"X\n\022Regio",
"nInTransition\022\036\n\004spec\030\001 \002(\0132\020.RegionSpec" +
"ifier\022\"\n\014region_state\030\002 \002(\0132\014.RegionStat" +
- "e\"\347\003\n\nRegionLoad\022*\n\020region_specifier\030\001 \002" +
+ "e\"\203\004\n\nRegionLoad\022*\n\020region_specifier\030\001 \002" +
"(\0132\020.RegionSpecifier\022\016\n\006stores\030\002 \001(\r\022\022\n\n" +
"storefiles\030\003 \001(\r\022\"\n\032store_uncompressed_s" +
"ize_MB\030\004 \001(\r\022\031\n\021storefile_size_MB\030\005 \001(\r\022" +
@@ -10484,26 +10574,27 @@ public final class ClusterStatusProtos {
"\030\014 \001(\r\022\"\n\032total_static_index_size_KB\030\r \001" +
"(\r\022\"\n\032total_static_bloom_size_KB\030\016 \001(\r\022\034" +
"\n\024complete_sequence_id\030\017 \001(\004\022\025\n\rdata_loc" +
- "ality\030\020 \001(\002\"\212\002\n\nServerLoad\022\032\n\022number_of_" +
- "requests\030\001 \001(\r\022 \n\030total_number_of_reques" +
- "ts\030\002 \001(\r\022\024\n\014used_heap_MB\030\003 \001(\r\022\023\n\013max_he" +
- "ap_MB\030\004 \001(\r\022!\n\014region_loads\030\005 \003(\0132\013.Regi" +
- "onLoad\022\"\n\014coprocessors\030\006 \003(\0132\014.Coprocess" +
- "or\022\031\n\021report_start_time\030\007 \001(\004\022\027\n\017report_",
- "end_time\030\010 \001(\004\022\030\n\020info_server_port\030\t \001(\r" +
- "\"O\n\016LiveServerInfo\022\033\n\006server\030\001 \002(\0132\013.Ser" +
- "verName\022 \n\013server_load\030\002 \002(\0132\013.ServerLoa" +
- "d\"\340\002\n\rClusterStatus\022/\n\rhbase_version\030\001 \001" +
- "(\0132\030.HBaseVersionFileContent\022%\n\014live_ser" +
- "vers\030\002 \003(\0132\017.LiveServerInfo\022!\n\014dead_serv" +
- "ers\030\003 \003(\0132\013.ServerName\0222\n\025regions_in_tra" +
- "nsition\030\004 \003(\0132\023.RegionInTransition\022\036\n\ncl" +
- "uster_id\030\005 \001(\0132\n.ClusterId\022)\n\023master_cop" +
- "rocessors\030\006 \003(\0132\014.Coprocessor\022\033\n\006master\030",
- "\007 \001(\0132\013.ServerName\022#\n\016backup_masters\030\010 \003" +
- "(\0132\013.ServerName\022\023\n\013balancer_on\030\t \001(\010BF\n*" +
- "org.apache.hadoop.hbase.protobuf.generat" +
- "edB\023ClusterStatusProtosH\001\240\001\001"
+ "ality\030\020 \001(\002\022\032\n\017oldest_hfile_ts\030\021 \001(\004:\0010\"" +
+ "\212\002\n\nServerLoad\022\032\n\022number_of_requests\030\001 \001" +
+ "(\r\022 \n\030total_number_of_requests\030\002 \001(\r\022\024\n\014" +
+ "used_heap_MB\030\003 \001(\r\022\023\n\013max_heap_MB\030\004 \001(\r\022" +
+ "!\n\014region_loads\030\005 \003(\0132\013.RegionLoad\022\"\n\014co" +
+ "processors\030\006 \003(\0132\014.Coprocessor\022\031\n\021report",
+ "_start_time\030\007 \001(\004\022\027\n\017report_end_time\030\010 \001" +
+ "(\004\022\030\n\020info_server_port\030\t \001(\r\"O\n\016LiveServ" +
+ "erInfo\022\033\n\006server\030\001 \002(\0132\013.ServerName\022 \n\013s" +
+ "erver_load\030\002 \002(\0132\013.ServerLoad\"\340\002\n\rCluste" +
+ "rStatus\022/\n\rhbase_version\030\001 \001(\0132\030.HBaseVe" +
+ "rsionFileContent\022%\n\014live_servers\030\002 \003(\0132\017" +
+ ".LiveServerInfo\022!\n\014dead_servers\030\003 \003(\0132\013." +
+ "ServerName\0222\n\025regions_in_transition\030\004 \003(" +
+ "\0132\023.RegionInTransition\022\036\n\ncluster_id\030\005 \001" +
+ "(\0132\n.ClusterId\022)\n\023master_coprocessors\030\006 ",
+ "\003(\0132\014.Coprocessor\022\033\n\006master\030\007 \001(\0132\013.Serv" +
+ "erName\022#\n\016backup_masters\030\010 \003(\0132\013.ServerN" +
+ "ame\022\023\n\013balancer_on\030\t \001(\010BF\n*org.apache.h" +
+ "adoop.hbase.protobuf.generatedB\023ClusterS" +
+ "tatusProtosH\001\240\001\001"
};
com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
@@ -10527,7 +10618,7 @@ public final class ClusterStatusProtos {
internal_static_RegionLoad_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_RegionLoad_descriptor,
- new java.lang.String[] { "RegionSpecifier", "Stores", "Storefiles", "StoreUncompressedSizeMB", "StorefileSizeMB", "MemstoreSizeMB", "StorefileIndexSizeMB", "ReadRequestsCount", "WriteRequestsCount", "TotalCompactingKVs", "CurrentCompactedKVs", "RootIndexSizeKB", "TotalStaticIndexSizeKB", "TotalStaticBloomSizeKB", "CompleteSequenceId", "DataLocality", });
+ new java.lang.String[] { "RegionSpecifier", "Stores", "Storefiles", "StoreUncompressedSizeMB", "StorefileSizeMB", "MemstoreSizeMB", "StorefileIndexSizeMB", "ReadRequestsCount", "WriteRequestsCount", "TotalCompactingKVs", "CurrentCompactedKVs", "RootIndexSizeKB", "TotalStaticIndexSizeKB", "TotalStaticBloomSizeKB", "CompleteSequenceId", "DataLocality", "OldestHfileTs", });
internal_static_ServerLoad_descriptor =
getDescriptor().getMessageTypes().get(3);
internal_static_ServerLoad_fieldAccessorTable = new
diff --git a/hbase-protocol/src/main/protobuf/Admin.proto b/hbase-protocol/src/main/protobuf/Admin.proto
index fcc4e1d..6fdcf35 100644
--- a/hbase-protocol/src/main/protobuf/Admin.proto
+++ b/hbase-protocol/src/main/protobuf/Admin.proto
@@ -37,6 +37,7 @@ message GetRegionInfoResponse {
required RegionInfo region_info = 1;
optional CompactionState compaction_state = 2;
optional bool isRecovering = 3;
+ optional uint64 oldest_hfile_ts = 4 [default = 0];
enum CompactionState {
NONE = 0;
diff --git a/hbase-protocol/src/main/protobuf/ClusterStatus.proto b/hbase-protocol/src/main/protobuf/ClusterStatus.proto
index 7e78395..a3872ca 100644
--- a/hbase-protocol/src/main/protobuf/ClusterStatus.proto
+++ b/hbase-protocol/src/main/protobuf/ClusterStatus.proto
@@ -113,6 +113,8 @@ message RegionLoad {
/** The current data locality for region in the regionserver */
optional float data_locality = 16;
+
+ optional uint64 oldest_hfile_ts = 17 [default = 0];
}
/* Server-level protobufs */
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/AbstractHFileWriter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/AbstractHFileWriter.java
index 2bef680..d57e618 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/AbstractHFileWriter.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/AbstractHFileWriter.java
@@ -148,6 +148,8 @@ public abstract class AbstractHFileWriter implements HFile.Writer {
int avgValueLen =
entryCount == 0 ? 0 : (int) (totalValueLength / entryCount);
fileInfo.append(FileInfo.AVG_VALUE_LEN, Bytes.toBytes(avgValueLen), false);
+
+ fileInfo.append(FileInfo.CREATE_TIME_TS, Bytes.toBytes(hFileContext.getFileCreateTime()), false);
}
/**
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java
index 1e97f63..ad62d71 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java
@@ -541,6 +541,7 @@ public class HFile {
static final byte [] LASTKEY = Bytes.toBytes(RESERVED_PREFIX + "LASTKEY");
static final byte [] AVG_KEY_LEN = Bytes.toBytes(RESERVED_PREFIX + "AVG_KEY_LEN");
static final byte [] AVG_VALUE_LEN = Bytes.toBytes(RESERVED_PREFIX + "AVG_VALUE_LEN");
+ static final byte [] CREATE_TIME_TS = Bytes.toBytes(RESERVED_PREFIX + "CREATE_TIME_TS");
static final byte [] COMPARATOR = Bytes.toBytes(RESERVED_PREFIX + "COMPARATOR");
static final byte [] TAGS_COMPRESSED = Bytes.toBytes(RESERVED_PREFIX + "TAGS_COMPRESSED");
public static final byte [] MAX_TAGS_LEN = Bytes.toBytes(RESERVED_PREFIX + "MAX_TAGS_LEN");
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV2.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV2.java
index d58ca10..f33fca4 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV2.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV2.java
@@ -157,6 +157,8 @@ public class HFileReaderV2 extends AbstractHFileReader {
// File info
fileInfo = new FileInfo();
fileInfo.read(blockIter.nextBlockWithBlockType(BlockType.FILE_INFO).getByteStream());
+ byte[] creationTimeBytes = fileInfo.get(FileInfo.CREATE_TIME_TS);
+ this.hfileContext.setFileCreateTime(creationTimeBytes == null ? 0 : Bytes.toLong(creationTimeBytes));
lastKey = fileInfo.get(FileInfo.LASTKEY);
avgKeyLen = Bytes.toInt(fileInfo.get(FileInfo.AVG_KEY_LEN));
avgValueLen = Bytes.toInt(fileInfo.get(FileInfo.AVG_VALUE_LEN));
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index dd1cf8d..5e13e50 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -1493,6 +1493,21 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver { //
return Collections.min(lastStoreFlushTimeMap.values());
}
+ /**
+ * This can be used to determine the last time all files of this region were major compacted.
+ *
+ * @return the timestamp of the oldest HFile for all stores of this region
+ */
+ public long getOldestHfileTs() {
+ long result = Long.MAX_VALUE;
+ for (Store store : getStores().values()) {
+ for (StoreFile file : store.getStorefiles()) {
+ result = Math.min(result, file.getReader().getHFileReader().getFileContext().getFileCreateTime());
+ }
+ }
+ return result == Long.MAX_VALUE ? 0 : result;
+ }
+
//////////////////////////////////////////////////////////////////////////////
// HRegion maintenance.
//
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
index 4669f8f..a53ab3c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
@@ -1344,9 +1344,13 @@ public class HRegionServer extends HasThread implements
int totalStaticBloomSizeKB = 0;
long totalCompactingKVs = 0;
long currentCompactedKVs = 0;
+ long oldestHfileTs = Long.MAX_VALUE;
synchronized (r.stores) {
stores += r.stores.size();
for (Store store : r.stores.values()) {
+ for (StoreFile file : store.getStorefiles()) {
+ oldestHfileTs = Math.min(oldestHfileTs, file.getReader().getHFileReader().getFileContext().getFileCreateTime());
+ }
storefiles += store.getStorefilesCount();
storeUncompressedSizeMB += (int) (store.getStoreSizeUncompressed()
/ 1024 / 1024);
@@ -1368,6 +1372,9 @@ public class HRegionServer extends HasThread implements
(int) (store.getTotalStaticBloomSize() / 1024);
}
}
+ if (oldestHfileTs == Long.MAX_VALUE) {
+ oldestHfileTs = 0;
+ }
float dataLocality =
r.getHDFSBlocksDistribution().getBlockLocalityIndex(serverName.getHostname());
if (regionLoadBldr == null) {
@@ -1393,8 +1400,8 @@ public class HRegionServer extends HasThread implements
.setTotalCompactingKVs(totalCompactingKVs)
.setCurrentCompactedKVs(currentCompactedKVs)
.setCompleteSequenceId(r.maxFlushedSeqId)
- .setDataLocality(dataLocality);
-
+ .setDataLocality(dataLocality)
+ .setOldestHfileTs(oldestHfileTs);
return regionLoadBldr.build();
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
index 047d689..942b47f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
@@ -991,6 +991,7 @@ public class HStore implements Store {
.withHBaseCheckSum(true)
.withDataBlockEncoding(family.getDataBlockEncoding())
.withEncryptionContext(cryptoContext)
+ .withCreateTime(EnvironmentEdgeManager.currentTime())
.build();
return hFileContext;
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
index 814cbc1..d50ef5f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
@@ -1137,6 +1137,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
builder.setRegionInfo(HRegionInfo.convert(info));
if (request.hasCompactionState() && request.getCompactionState()) {
builder.setCompactionState(region.getCompactionState());
+ builder.setOldestHfileTs(region.getOldestHfileTs());
}
builder.setIsRecovering(region.isRecovering());
return builder.build();
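End to end, the pieces connect as follows: HStore stamps each new HFile's context at write time, AbstractHFileWriter persists the stamp under CREATE_TIME_TS, HFileReaderV2 restores it into the HFileContext, HRegionServer reports the per-region minimum in RegionLoad, and the Admin/ClusterStatus APIs expose the table-level minimum. A hedged monitoring sketch built on that chain (HBase 1.x-era APIs assumed; the table name "t1" and the one-week threshold are illustrative):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.ClusterStatus;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class CompactionAgeCheck {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      ClusterStatus status = admin.getClusterStatus();
      long oldest = status.getOldestHFileTs(TableName.valueOf("t1"));
      if (oldest == 0) {
        System.out.println("t1: no HFile timestamps reported yet");
      } else if (System.currentTimeMillis() - oldest > 7L * 24 * 60 * 60 * 1000) {
        System.out.println("t1: not fully major-compacted in over a week");
      }
    }
  }
}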