From bee20371466ddc0e1346cb61bf06b9230876f1c9 Mon Sep 17 00:00:00 2001 From: Andrew Purtell Date: Wed, 20 Aug 2014 18:38:47 -0700 Subject: [PATCH] HBASE-11764 Support per cell TTLs --- .../main/java/org/apache/hadoop/hbase/TagType.java | 1 + .../hadoop/hbase/regionserver/DefaultMemStore.java | 4 +- .../regionserver/GetClosestRowBeforeTracker.java | 18 +++-- .../apache/hadoop/hbase/regionserver/HStore.java | 47 ++++++++++- .../hbase/regionserver/ScanQueryMatcher.java | 40 +++++++--- .../hadoop/hbase/regionserver/StoreScanner.java | 12 +-- .../hadoop/hbase/regionserver/TestHRegion.java | 91 ++++++++++++++++++++++ .../hbase/regionserver/TestQueryMatcher.java | 18 +++-- 8 files changed, 195 insertions(+), 36 deletions(-) diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/TagType.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/TagType.java index a21f7de..d50373d 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/TagType.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/TagType.java @@ -28,4 +28,5 @@ public final class TagType { public static final byte VISIBILITY_TAG_TYPE = (byte) 2; public static final byte LOG_REPLAY_TAG_TYPE = (byte) 3; public static final byte VISIBILITY_EXP_SERIALIZATION_FORMAT_TAG_TYPE = (byte)4; + public static final byte TTL_TAG_TYPE = (byte)5; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultMemStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultMemStore.java index d90357b..0dbc8dc 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultMemStore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultMemStore.java @@ -410,7 +410,7 @@ public class DefaultMemStore implements MemStore { KeyValue kv = i.next(); // Did we go beyond the target row? If so break. if (state.isTooFar(kv, firstOnRow)) break; - if (state.isExpired(kv)) { + if (state.isExpired(kv) != 0) { i.remove(); continue; } @@ -632,7 +632,7 @@ public class DefaultMemStore implements MemStore { if (head.isEmpty()) return null; for (Iterator i = head.descendingIterator(); i.hasNext();) { KeyValue found = i.next(); - if (state.isExpired(found)) { + if (state.isExpired(found) != 0) { i.remove(); continue; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/GetClosestRowBeforeTracker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/GetClosestRowBeforeTracker.java index ec676fa..cdb68a0 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/GetClosestRowBeforeTracker.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/GetClosestRowBeforeTracker.java @@ -34,7 +34,7 @@ import org.apache.hadoop.hbase.util.Bytes; /** * State and utility processing {@link HRegion#getClosestRowBefore(byte[], byte[])}. - * Like {@link ScanDeleteTracker} and {@link ScanDeleteTracker} but does not + * Like {@link ScanQueryMatcher} and {@link ScanDeleteTracker} but does not * implement the {@link DeleteTracker} interface since state spans rows (There * is no update nor reset method). */ @@ -42,7 +42,8 @@ import org.apache.hadoop.hbase.util.Bytes; class GetClosestRowBeforeTracker { private final KeyValue targetkey; // Any cell w/ a ts older than this is expired. 
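The new TTL_TAG_TYPE above carries a per-cell TTL as an 8-byte long of milliseconds. A minimal sketch of attaching one at write time, modeled on the testTTLs case added later in this patch; the CellTTLExample class, the putWithCellTTL helper, and the HTableInterface handle are illustrative assumptions, not APIs added by this change:

    import java.io.IOException;
    import org.apache.hadoop.hbase.KeyValue;
    import org.apache.hadoop.hbase.Tag;
    import org.apache.hadoop.hbase.TagType;
    import org.apache.hadoop.hbase.client.HTableInterface;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.util.Bytes;

    public final class CellTTLExample {
      // Illustrative helper: write one cell that expires ttlMillis after its
      // timestamp. The family TTL still applies; a cell TTL can only shorten,
      // never extend, the cell's lifetime.
      public static void putWithCellTTL(HTableInterface table, byte[] row, byte[] family,
          byte[] qualifier, byte[] value, long ttlMillis) throws IOException {
        long ts = System.currentTimeMillis();
        // Cell TTL tag values are milliseconds (family TTLs are seconds in the schema)
        KeyValue kv = new KeyValue(row, family, qualifier, ts, value,
            new Tag[] { new Tag(TagType.TTL_TAG_TYPE, Bytes.toBytes(ttlMillis)) });
        table.put(new Put(row).add(kv));
      }
    }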
- private final long oldestts; + private final long now; + private final long oldestUnexpiredTs; private Cell candidate = null; private final KVComparator kvcomparator; // Flag for whether we're doing getclosest on a metaregion. @@ -75,17 +76,20 @@ class GetClosestRowBeforeTracker { HConstants.DELIMITER) - this.rowoffset; } this.tablenamePlusDelimiterLength = metaregion? l + 1: -1; - this.oldestts = System.currentTimeMillis() - ttl; + this.now = System.currentTimeMillis(); + this.oldestUnexpiredTs = now - ttl; this.kvcomparator = c; this.deletes = new TreeMap>(new CellComparator.RowComparator()); } /** - * @param kv - * @return True if this kv is expired. + * @param cell + * @return 1 if the cell is expired and no other cells will be alive; -1 if + * the cell is expired but we are not sure whether other cells should be skipped; + * 0 otherwise */ - boolean isExpired(final Cell kv) { - return HStore.isExpired(kv, this.oldestts); + int isExpired(final Cell cell) { + return HStore.isExpired(cell, this.oldestUnexpiredTs, this.now); } /* diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java index dc593ac..87e64c2 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java @@ -57,6 +57,8 @@ import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.KeyValueUtil; import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.Tag; +import org.apache.hadoop.hbase.TagType; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.io.compress.Compression; import org.apache.hadoop.hbase.io.crypto.Cipher; @@ -1663,8 +1665,47 @@ public class HStore implements Store { return wantedVersions > maxVersions ? maxVersions: wantedVersions; } - static boolean isExpired(final Cell key, final long oldestTimestamp) { - return key.getTimestamp() < oldestTimestamp; + /** + * @param cell + * @param oldestTimestamp + * @return 1 if the cell is expired and no other cells will be alive; -1 if + * the cell is expired but we are not sure whether other cells should be skipped; + * 0 otherwise + */ + static int isExpired(final Cell cell, final long oldestTimestamp, final long now) { + // Do not create an Iterator or Tag objects unless the cell actually has + // tags + if (cell.getTagsLength() > 0) { + // Look for a TTL tag first. Use it instead of the family setting if + // found. If a cell has multiple TTLs, resolve the conflict by using the + // first tag encountered. 
+ Iterator i = CellUtil.tagsIterator(cell.getTagsArray(), cell.getTagsOffset(), + cell.getTagsLength()); + while (i.hasNext()) { + Tag t = i.next(); + if (TagType.TTL_TAG_TYPE == t.getType()) { + if (t.getTagLength() == Bytes.SIZEOF_LONG) { + // Unlike in schema cell TTLs are stored in milliseconds, no need + // to convert + long ts = cell.getTimestamp(); + long ttl = Bytes.toLong(t.getBuffer(), t.getTagOffset(), t.getTagLength()); + if (ts + ttl < now) { + return -1; + } + // Per cell TTLs cannot extend lifetime beyond family settings, so + // fall through to check that + break; + } else { + LOG.warn("TTL tag for cell " + cell + " has wrong size: have=" + t.getTagLength() + + ", want=" + Bytes.SIZEOF_LONG); + } + } + } + } + if (cell.getTimestamp() < oldestTimestamp) { + return 1; + } + return 0; } @Override @@ -1806,7 +1847,7 @@ public class HStore implements Store { if (this.comparator.compareRows(kv, firstOnRow) < 0) continue; // Did we go beyond the target row? If so break. if (state.isTooFar(kv, firstOnRow)) break; - if (state.isExpired(kv)) { + if (state.isExpired(kv) != 0) { continue; } // If we added something, this row is a contender. break. diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScanQueryMatcher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScanQueryMatcher.java index 165eb6b..5788249 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScanQueryMatcher.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScanQueryMatcher.java @@ -102,7 +102,7 @@ public class ScanQueryMatcher { /** The oldest timestamp we are interested in, based on TTL */ private final long oldestUnexpiredTS; - + private final long now; /** readPoint over which the KVs are unconditionally included */ protected long maxReadPointToTrackVersions; @@ -155,7 +155,7 @@ public class ScanQueryMatcher { */ public ScanQueryMatcher(Scan scan, ScanInfo scanInfo, NavigableSet columns, ScanType scanType, long readPointToUse, long earliestPutTs, long oldestUnexpiredTS, - RegionCoprocessorHost regionCoprocessorHost) throws IOException { + long now, RegionCoprocessorHost regionCoprocessorHost) throws IOException { this.tr = scan.getTimeRange(); this.rowComparator = scanInfo.getComparator(); this.regionCoprocessorHost = regionCoprocessorHost; @@ -166,6 +166,7 @@ public class ScanQueryMatcher { this.filter = scan.getFilter(); this.earliestPutTs = earliestPutTs; this.oldestUnexpiredTS = oldestUnexpiredTS; + this.now = now; this.maxReadPointToTrackVersions = readPointToUse; this.timeToPurgeDeletes = scanInfo.getTimeToPurgeDeletes(); @@ -216,18 +217,18 @@ public class ScanQueryMatcher { * @param scanInfo The store's immutable scan info * @param columns * @param earliestPutTs Earliest put seen in any of the store files. - * @param oldestUnexpiredTS the oldest timestamp we are interested in, - * based on TTL + * @param oldestUnexpiredTS the oldest timestamp we are interested in, based on TTL + * @param now the current server time * @param dropDeletesFromRow The inclusive left bound of the range; can be EMPTY_START_ROW. * @param dropDeletesToRow The exclusive right bound of the range; can be EMPTY_END_ROW. 
* @param regionCoprocessorHost * @throws IOException */ public ScanQueryMatcher(Scan scan, ScanInfo scanInfo, NavigableSet columns, - long readPointToUse, long earliestPutTs, long oldestUnexpiredTS, byte[] dropDeletesFromRow, + long readPointToUse, long earliestPutTs, long oldestUnexpiredTS, long now, byte[] dropDeletesFromRow, byte[] dropDeletesToRow, RegionCoprocessorHost regionCoprocessorHost) throws IOException { this(scan, scanInfo, columns, ScanType.COMPACT_RETAIN_DELETES, readPointToUse, earliestPutTs, - oldestUnexpiredTS, regionCoprocessorHost); + oldestUnexpiredTS, now, regionCoprocessorHost); Preconditions.checkArgument((dropDeletesFromRow != null) && (dropDeletesToRow != null)); this.dropDeletesFromRow = dropDeletesFromRow; this.dropDeletesToRow = dropDeletesToRow; @@ -237,10 +238,10 @@ public class ScanQueryMatcher { * Constructor for tests */ ScanQueryMatcher(Scan scan, ScanInfo scanInfo, - NavigableSet columns, long oldestUnexpiredTS) throws IOException { + NavigableSet columns, long oldestUnexpiredTS, long now) throws IOException { this(scan, scanInfo, columns, ScanType.USER_SCAN, Long.MAX_VALUE, /* max Readpoint to track versions */ - HConstants.LATEST_TIMESTAMP, oldestUnexpiredTS, null); + HConstants.LATEST_TIMESTAMP, oldestUnexpiredTS, now, null); } /** @@ -361,10 +362,17 @@ public class ScanQueryMatcher { // note the following next else if... // delete marker are not subject to other delete markers } else { - // If the cell is expired and we have enough versions, skip - if (columns.hasMinVersions() && HStore.isExpired(cell, oldestUnexpiredTS)) { - return columns.getNextRowOrNextColumn(cell.getQualifierArray(), qualifierOffset, + if (columns.hasMinVersions()) { + int expired = isExpired(cell); + if (expired < 0) { + // If the cell is expired and we have enough versions, skip + return MatchCode.SKIP; + } else if (expired >= 1) { + // If the cell is expired and we have enough versions, and we are + // sure no other cells can be alive, then skip forward + return columns.getNextRowOrNextColumn(cell.getQualifierArray(), qualifierOffset, qualifierLength); + } } // Check deletes if (!this.deletes.isEmpty()) { @@ -452,6 +460,16 @@ public class ScanQueryMatcher { return colChecker; } + /** + * @param cell + * @return 1 if the cell is expired and no other cells will be alive; -1 if + * the cell is expired but we are not sure whether other cells should be skipped; + * 0 otherwise + */ + int isExpired(final Cell cell) { + return HStore.isExpired(cell, this.oldestUnexpiredTS, this.now); + } + /** Handle partial-drop-deletes. As we match keys in order, when we have a range from which * we can drop deletes, we can set retainDeletesInOutput to false for the duration of this * range only, and maintain consistency. 
*/ diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java index 1ef3e91..563ef95 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java @@ -76,6 +76,7 @@ public class StoreScanner extends NonReversedNonLazyKeyValueScanner protected final Scan scan; protected final NavigableSet columns; protected final long oldestUnexpiredTS; + protected final long now; protected final int minVersions; protected final long maxRowSize; @@ -122,7 +123,8 @@ public class StoreScanner extends NonReversedNonLazyKeyValueScanner explicitColumnQuery = numCol > 0; this.scan = scan; this.columns = columns; - oldestUnexpiredTS = EnvironmentEdgeManager.currentTimeMillis() - ttl; + this.now = EnvironmentEdgeManager.currentTimeMillis(); + this.oldestUnexpiredTS = now - ttl; this.minVersions = minVersions; if (store != null && ((HStore)store).getHRegion() != null @@ -172,7 +174,7 @@ public class StoreScanner extends NonReversedNonLazyKeyValueScanner } matcher = new ScanQueryMatcher(scan, scanInfo, columns, ScanType.USER_SCAN, Long.MAX_VALUE, HConstants.LATEST_TIMESTAMP, - oldestUnexpiredTS, store.getCoprocessorHost()); + oldestUnexpiredTS, now, store.getCoprocessorHost()); this.store.addChangedReaderObserver(this); @@ -237,10 +239,10 @@ public class StoreScanner extends NonReversedNonLazyKeyValueScanner ((HStore)store).getHRegion().getReadpoint(IsolationLevel.READ_COMMITTED)); if (dropDeletesFromRow == null) { matcher = new ScanQueryMatcher(scan, scanInfo, null, scanType, smallestReadPoint, - earliestPutTs, oldestUnexpiredTS, store.getCoprocessorHost()); + earliestPutTs, oldestUnexpiredTS, now, store.getCoprocessorHost()); } else { matcher = new ScanQueryMatcher(scan, scanInfo, null, smallestReadPoint, earliestPutTs, - oldestUnexpiredTS, dropDeletesFromRow, dropDeletesToRow, store.getCoprocessorHost()); + oldestUnexpiredTS, now, dropDeletesFromRow, dropDeletesToRow, store.getCoprocessorHost()); } // Filter the list of scanners using Bloom filters, time range, TTL, etc. 
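The scanner-side changes above all funnel through the new three-way HStore.isExpired() contract: StoreScanner captures a single EnvironmentEdgeManager.currentTimeMillis() reading and derives oldestUnexpiredTS = now - ttl from it, so the family-TTL horizon and the per-cell TTL arithmetic use the same clock. A sketch (not code from this patch, and the ttlMatchCode helper is assumed to live in the same regionserver package so it can see the package-private isExpired) of how the matcher consumes the result:

    import org.apache.hadoop.hbase.Cell;
    import org.apache.hadoop.hbase.regionserver.ScanQueryMatcher.MatchCode;

    // Sketch only: maps HStore.isExpired()'s three-way result onto the match codes
    // the patch uses in ScanQueryMatcher.match().
    static MatchCode ttlMatchCode(Cell cell, long oldestUnexpiredTS, long now) {
      int expired = HStore.isExpired(cell, oldestUnexpiredTS, now);
      if (expired >= 1) {
        // Timestamp is older than the family TTL horizon (now - ttl); every
        // remaining version of this column is at least as old, so seek ahead.
        // The real code asks the ColumnTracker, which may answer
        // SEEK_NEXT_COL or SEEK_NEXT_ROW.
        return MatchCode.SEEK_NEXT_COL;
      }
      if (expired < 0) {
        // Dead only via its own TTL tag; other versions of the column may
        // still be live, so skip just this cell.
        return MatchCode.SKIP;
      }
      // Not expired: the matcher falls through to delete and version checks.
      return null;
    }

This is also why GetClosestRowBeforeTracker and DefaultMemStore only test isExpired(kv) != 0: on the closest-row-before path either kind of expiry means the cell can be ignored.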
@@ -280,7 +282,7 @@ public class StoreScanner extends NonReversedNonLazyKeyValueScanner this(null, scan.getCacheBlocks(), scan, columns, scanInfo.getTtl(), scanInfo.getMinVersions(), readPt); this.matcher = new ScanQueryMatcher(scan, scanInfo, columns, scanType, - Long.MAX_VALUE, earliestPutTs, oldestUnexpiredTS, null); + Long.MAX_VALUE, earliestPutTs, oldestUnexpiredTS, now, null); // In unit tests, the store could be null if (this.store != null) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java index 9fa430f..6cebc40 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java @@ -77,6 +77,7 @@ import org.apache.hadoop.hbase.HBaseTestCase; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.Tag; import org.apache.hadoop.hbase.HConstants.OperationStatusCode; import org.apache.hadoop.hbase.HDFSBlocksDistribution; import org.apache.hadoop.hbase.HRegionInfo; @@ -90,6 +91,7 @@ import org.apache.hadoop.hbase.MultithreadedTestUtil.TestThread; import org.apache.hadoop.hbase.NotServingRegionException; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.TagType; import org.apache.hadoop.hbase.Waiter; import org.apache.hadoop.hbase.client.Append; import org.apache.hadoop.hbase.client.Delete; @@ -134,6 +136,7 @@ import org.apache.hadoop.hbase.regionserver.wal.WALEdit; import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.test.MetricsAssertHelper; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.EnvironmentEdgeManagerTestHelper; import org.apache.hadoop.hbase.util.FSUtils; import org.apache.hadoop.hbase.util.IncrementingEnvironmentEdge; @@ -5617,6 +5620,94 @@ public class TestHRegion { assertEquals(0, store.getStoreFileCount()); // no store files } + @Test + public void testTTLs() throws IOException { + IncrementingEnvironmentEdge edge = new IncrementingEnvironmentEdge(); + EnvironmentEdgeManager.injectEdge(edge); + + final byte[] row = Bytes.toBytes("testRow"); + final byte[] col1 = Bytes.toBytes("1"); + final byte[] col2 = Bytes.toBytes("2"); + final byte[] col3 = Bytes.toBytes("3"); + final byte[] col4 = Bytes.toBytes("4"); + + HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("testTTLs")); + HColumnDescriptor hcd = new HColumnDescriptor(fam1); + hcd.setTimeToLive(10); + htd.addFamily(hcd); + + HRegion region = HRegion.createHRegion(new HRegionInfo(htd.getTableName(), + HConstants.EMPTY_BYTE_ARRAY, HConstants.EMPTY_BYTE_ARRAY), + TEST_UTIL.getDataTestDir(), TEST_UTIL.getConfiguration(), htd); + assertNotNull(region); + try { + long now = EnvironmentEdgeManager.currentTimeMillis(); + // Add a cell that will expire in 5 seconds via cell TTL + region.put(new Put(row).add(new KeyValue(row, fam1, col1, now, + HConstants.EMPTY_BYTE_ARRAY, new Tag[] { + // TTL tags specify ts in milliseconds + new Tag(TagType.TTL_TAG_TYPE, Bytes.toBytes(5000L)) } ))); + // Add a cell that will expire after 10 seconds via family setting + region.put(new Put(row).add(fam1, col2, now, HConstants.EMPTY_BYTE_ARRAY)); + // Add a cell that will expire in 15 seconds via cell 
TTL + region.put(new Put(row).add(new KeyValue(row, fam1, col3, now + 10000 - 1, + HConstants.EMPTY_BYTE_ARRAY, new Tag[] { + // TTL tags specify ts in milliseconds + new Tag(TagType.TTL_TAG_TYPE, Bytes.toBytes(5000L)) } ))); + // Add a cell that will expire in 20 seconds via family setting + region.put(new Put(row).add(fam1, col4, now + 10000 - 1, HConstants.EMPTY_BYTE_ARRAY)); + + // Flush so we are sure store scanning gets this right + region.flushcache(); + + // A query at time T+0 should return all cells + Result r = region.get(new Get(row)); + assertNotNull(r.getValue(fam1, col1)); + assertNotNull(r.getValue(fam1, col2)); + assertNotNull(r.getValue(fam1, col3)); + assertNotNull(r.getValue(fam1, col4)); + + // Increment time to T+5 seconds + edge.incrementTime(5 * 1000); + + r = region.get(new Get(row)); + assertNull(r.getValue(fam1, col1)); + assertNotNull(r.getValue(fam1, col2)); + assertNotNull(r.getValue(fam1, col3)); + assertNotNull(r.getValue(fam1, col4)); + + // Increment time to T+10 seconds + edge.incrementTime(5 * 1000); + + r = region.get(new Get(row)); + assertNull(r.getValue(fam1, col1)); + assertNull(r.getValue(fam1, col2)); + assertNotNull(r.getValue(fam1, col3)); + assertNotNull(r.getValue(fam1, col4)); + + // Increment time to T+15 seconds + edge.incrementTime(5 * 1000); + + r = region.get(new Get(row)); + assertNull(r.getValue(fam1, col1)); + assertNull(r.getValue(fam1, col2)); + assertNull(r.getValue(fam1, col3)); + assertNotNull(r.getValue(fam1, col4)); + + // Increment time to T+20 seconds + edge.incrementTime(10 * 1000); + + r = region.get(new Get(row)); + assertNull(r.getValue(fam1, col1)); + assertNull(r.getValue(fam1, col2)); + assertNull(r.getValue(fam1, col3)); + assertNull(r.getValue(fam1, col4)); + + } finally { + HRegion.closeHRegion(region); + } + } + private static HRegion initHRegion(byte[] tableName, String callingMethod, byte[]... 
families) throws IOException { return initHRegion(tableName, callingMethod, HBaseConfiguration.create(), diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestQueryMatcher.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestQueryMatcher.java index 4945ad1..39bc36d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestQueryMatcher.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestQueryMatcher.java @@ -90,11 +90,12 @@ public class TestQueryMatcher extends HBaseTestCase { } - private void _testMatch_ExplicitColumns(Scan scan, List expected) throws IOException { - // 2,4,5 + private void _testMatch_ExplicitColumns(Scan scan, List expected) throws IOException { + long now = EnvironmentEdgeManager.currentTimeMillis(); + // 2,4,5 ScanQueryMatcher qm = new ScanQueryMatcher(scan, new ScanInfo(fam2, - 0, 1, ttl, false, 0, rowComparator), get.getFamilyMap().get(fam2), - EnvironmentEdgeManager.currentTimeMillis() - ttl); + 0, 1, ttl, false, 0, rowComparator), get.getFamilyMap().get(fam2), + now - ttl, now); List memstore = new ArrayList(); memstore.add(new KeyValue(row1, fam2, col1, 1, data)); @@ -174,9 +175,10 @@ public class TestQueryMatcher extends HBaseTestCase { expected.add(ScanQueryMatcher.MatchCode.INCLUDE); expected.add(ScanQueryMatcher.MatchCode.DONE); + long now = EnvironmentEdgeManager.currentTimeMillis(); ScanQueryMatcher qm = new ScanQueryMatcher(scan, new ScanInfo(fam2, 0, 1, ttl, false, 0, rowComparator), null, - EnvironmentEdgeManager.currentTimeMillis() - ttl); + now - ttl, now); List memstore = new ArrayList(); memstore.add(new KeyValue(row1, fam2, col1, 1, data)); @@ -230,7 +232,7 @@ public class TestQueryMatcher extends HBaseTestCase { long now = EnvironmentEdgeManager.currentTimeMillis(); ScanQueryMatcher qm = new ScanQueryMatcher(scan, new ScanInfo(fam2, 0, 1, testTTL, false, 0, rowComparator), get.getFamilyMap().get(fam2), - now - testTTL); + now - testTTL, now); KeyValue [] kvs = new KeyValue[] { new KeyValue(row1, fam2, col1, now-100, data), @@ -284,7 +286,7 @@ public class TestQueryMatcher extends HBaseTestCase { long now = EnvironmentEdgeManager.currentTimeMillis(); ScanQueryMatcher qm = new ScanQueryMatcher(scan, new ScanInfo(fam2, 0, 1, testTTL, false, 0, rowComparator), null, - now - testTTL); + now - testTTL, now); KeyValue [] kvs = new KeyValue[] { new KeyValue(row1, fam2, col1, now-100, data), @@ -342,7 +344,7 @@ public class TestQueryMatcher extends HBaseTestCase { NavigableSet cols = get.getFamilyMap().get(fam2); ScanQueryMatcher qm = new ScanQueryMatcher(scan, scanInfo, cols, Long.MAX_VALUE, - HConstants.OLDEST_TIMESTAMP, HConstants.OLDEST_TIMESTAMP, from, to, null); + HConstants.OLDEST_TIMESTAMP, HConstants.OLDEST_TIMESTAMP, now, from, to, null); List actual = new ArrayList(rows.length); byte[] prevRow = null; -- 1.8.5.2 (Apple Git-48)
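One property worth an explicit check, since HStore.isExpired() falls through from the TTL tag to the family horizon: a cell TTL can shorten a cell's life but never extend it past the family TTL. A hypothetical extra assertion for the testTTLs case above, reusing its region, edge, fam1 and row setup with a new qualifier "5" (not part of this patch):

    // Family TTL is 10 seconds; give the cell a 60 second cell TTL anyway.
    final byte[] col5 = Bytes.toBytes("5");
    long t = EnvironmentEdgeManager.currentTimeMillis();
    region.put(new Put(row).add(new KeyValue(row, fam1, col5, t,
        HConstants.EMPTY_BYTE_ARRAY, new Tag[] {
          new Tag(TagType.TTL_TAG_TYPE, Bytes.toBytes(60000L)) })));
    // Jump past the family TTL; the longer cell TTL does not keep the cell alive.
    edge.incrementTime(15 * 1000);
    Result r = region.get(new Get(row));
    assertNull(r.getValue(fam1, col5));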