Index: src/test/java/org/apache/hadoop/hbase/regionserver/TestStore.java =================================================================== --- src/test/java/org/apache/hadoop/hbase/regionserver/TestStore.java (revision 1085248) +++ src/test/java/org/apache/hadoop/hbase/regionserver/TestStore.java (working copy) @@ -132,6 +132,54 @@ store = new Store(basedir, region, hcd, fs, conf); } + public void testLowestModificationTime() throws Exception { + Configuration conf = HBaseConfiguration.create(); + FileSystem fs = FileSystem.get(conf); + // Initialize region + init(getName(), conf); + + int storeFileNum = 4; + for (int i = 1; i <= storeFileNum; i++) { + LOG.info("Adding some data for the store file #"+i); + this.store.add(new KeyValue(row, family, qf1, i, (byte[])null)); + this.store.add(new KeyValue(row, family, qf2, i, (byte[])null)); + this.store.add(new KeyValue(row, family, qf3, i, (byte[])null)); + flush(i); + } + // after flush; check the lowest time stamp + long lowestTimeStampFromStore = + Store.getLowestTimestamp(store.getStorefiles()); + long lowestTimeStampFromFS = + getLowestTimeStampFromFS(fs,store.getStorefiles()); + assertEquals(lowestTimeStampFromStore,lowestTimeStampFromFS); + + // after compact; check the lowest time stamp + store.compact(); + lowestTimeStampFromStore = Store.getLowestTimestamp(store.getStorefiles()); + lowestTimeStampFromFS = getLowestTimeStampFromFS(fs,store.getStorefiles()); + assertEquals(lowestTimeStampFromStore,lowestTimeStampFromFS); + } + + private static long getLowestTimeStampFromFS(FileSystem fs, + final List<StoreFile> candidates) throws IOException { + long minTs = Long.MAX_VALUE; + if (candidates.isEmpty()) { + return minTs; + } + Path[] p = new Path[candidates.size()]; + for (int i = 0; i < candidates.size(); ++i) { + p[i] = candidates.get(i).getPath(); + } + + FileStatus[] stats = fs.listStatus(p); + if (stats == null || stats.length == 0) { + return minTs; + } + for (FileStatus s : stats) { + minTs = Math.min(minTs, 
s.getModificationTime()); + } + return minTs; + } ////////////////////////////////////////////////////////////////////////////// // Get tests Index: src/main/java/org/apache/hadoop/hbase/regionserver/Store.java =================================================================== --- src/main/java/org/apache/hadoop/hbase/regionserver/Store.java (revision 1085248) +++ src/main/java/org/apache/hadoop/hbase/regionserver/Store.java (working copy) @@ -725,24 +725,12 @@ * @param dir * @throws IOException */ - private static long getLowestTimestamp(FileSystem fs, - final List<StoreFile> candidates) throws IOException { + public static long getLowestTimestamp(final List<StoreFile> candidates) + throws IOException { long minTs = Long.MAX_VALUE; - if (candidates.isEmpty()) { - return minTs; + for (StoreFile storeFile : candidates) { + minTs = Math.min(minTs, storeFile.getModificationTimeStamp()); } - Path[] p = new Path[candidates.size()]; - for (int i = 0; i < candidates.size(); ++i) { - p[i] = candidates.get(i).getPath(); - } - - FileStatus[] stats = fs.listStatus(p); - if (stats == null || stats.length == 0) { - return minTs; - } - for (FileStatus s : stats) { - minTs = Math.min(minTs, s.getModificationTime()); - } return minTs; } @@ -781,7 +769,7 @@ return result; } // TODO: Use better method for determining stamp of last major (HBASE-2990) - long lowTimestamp = getLowestTimestamp(fs, filesToCompact); + long lowTimestamp = getLowestTimestamp(filesToCompact); long now = System.currentTimeMillis(); if (lowTimestamp > 0l && lowTimestamp < (now - this.majorCompactionTime)) { // Major compaction time has elapsed. 
Index: src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java =================================================================== --- src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java (revision 1085248) +++ src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java (working copy) @@ -40,6 +40,7 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.KeyValue; @@ -171,6 +172,8 @@ private final Configuration conf; private final BloomType bloomType; + // the last modification time stamp + private long modificationTimeStamp = 0L; /** * Constructor, loads a reader and it's indices, etc. May allocate a @@ -207,6 +210,14 @@ this.bloomType = BloomType.NONE; LOG.info("Ignoring bloom filter check for file (disabled in config)"); } + + // cache the modification time stamp of this store file + FileStatus[] stats = fs.listStatus(p); + if (stats != null && stats.length == 1) { + this.modificationTimeStamp = stats[0].getModificationTime(); + } else { + this.modificationTimeStamp = 0; + } } /** @@ -296,6 +307,10 @@ return this.sequenceid; } + public long getModificationTimeStamp() { + return modificationTimeStamp; + } + /** * Return the highest sequence ID found across all storefiles in * the given list. Store files that were created by a mapreduce