diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CombinedBlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CombinedBlockCache.java index 22bffee..666b357 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CombinedBlockCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CombinedBlockCache.java @@ -63,8 +63,8 @@ public class CombinedBlockCache implements ResizableBlockCache, HeapSize { @Override public void cacheBlock(BlockCacheKey cacheKey, Cacheable buf, boolean inMemory, final boolean cacheDataInL1) { - boolean isMetaBlock = buf.getBlockType().getCategory() != BlockCategory.DATA; - if (isMetaBlock || cacheDataInL1) { + boolean metaBlock = buf.getBlockType().getCategory() != BlockCategory.DATA; + if (metaBlock || cacheDataInL1) { lruCache.cacheBlock(cacheKey, buf, inMemory, cacheDataInL1); } else { l2Cache.cacheBlock(cacheKey, buf, inMemory, false); @@ -81,12 +81,9 @@ public class CombinedBlockCache implements ResizableBlockCache, HeapSize { boolean repeat, boolean updateCacheMetrics) { // TODO: is there a hole here, or just awkwardness since in the lruCache getBlock // we end up calling l2Cache.getBlock. - if (lruCache.containsBlock(cacheKey)) { - return lruCache.getBlock(cacheKey, caching, repeat, updateCacheMetrics); - } - Cacheable result = l2Cache.getBlock(cacheKey, caching, repeat, updateCacheMetrics); - - return result; + return lruCache.containsBlock(cacheKey)? 
+ lruCache.getBlock(cacheKey, caching, repeat, updateCacheMetrics): + l2Cache.getBlock(cacheKey, caching, repeat, updateCacheMetrics); } @Override diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScanQueryMatcher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScanQueryMatcher.java index c220b5c..592be70 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScanQueryMatcher.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScanQueryMatcher.java @@ -93,7 +93,7 @@ public class ScanQueryMatcher { /* row is not private for tests */ /** Row the query is on */ Cell curCell; - + /** * Oldest put in any of the involved store files * Used to decide whether it is ok to delete @@ -119,7 +119,7 @@ public class ScanQueryMatcher { * first column. * */ private boolean hasNullColumn = true; - + private RegionCoprocessorHost regionCoprocessorHost= null; // By default, when hbase.hstore.time.to.purge.deletes is 0ms, a delete @@ -140,12 +140,17 @@ public class ScanQueryMatcher { // currently influencing. This is because Puts, that this delete can // influence. may appear out of order. private final long timeToPurgeDeletes; - + private final boolean isUserScan; private final boolean isReversed; /** + * True if we are doing a Get Scan (every Get is a Scan). + */ + private final boolean getScan; + + /** * Construct a QueryMatcher for a scan * @param scan * @param scanInfo The store's immutable scan info @@ -154,8 +159,8 @@ public class ScanQueryMatcher { * @param earliestPutTs Earliest put seen in any of the store files. 
* @param oldestUnexpiredTS the oldest timestamp we are interested in, * based on TTL - * @param regionCoprocessorHost - * @throws IOException + * @param regionCoprocessorHost + * @throws IOException */ public ScanQueryMatcher(Scan scan, ScanInfo scanInfo, NavigableSet columns, ScanType scanType, long readPointToUse, long earliestPutTs, long oldestUnexpiredTS, @@ -166,6 +171,7 @@ public class ScanQueryMatcher { } else { this.tr = timeRange; } + this.getScan = scan.isGetScan(); this.rowComparator = scanInfo.getComparator(); this.regionCoprocessorHost = regionCoprocessorHost; this.deletes = instantiateDeleteTracker(); @@ -234,8 +240,8 @@ public class ScanQueryMatcher { * @param now the current server time * @param dropDeletesFromRow The inclusive left bound of the range; can be EMPTY_START_ROW. * @param dropDeletesToRow The exclusive right bound of the range; can be EMPTY_END_ROW. - * @param regionCoprocessorHost - * @throws IOException + * @param regionCoprocessorHost + * @throws IOException */ public ScanQueryMatcher(Scan scan, ScanInfo scanInfo, NavigableSet columns, long readPointToUse, long earliestPutTs, long oldestUnexpiredTS, long now, @@ -280,7 +286,7 @@ public class ScanQueryMatcher { * caused by a data corruption. */ public MatchCode match(Cell cell) throws IOException { - if (filter != null && filter.filterAllRemaining()) { + if (filter != null && filter.filterAllRemaining()) { return MatchCode.DONE_SCAN; } if (curCell != null) { @@ -324,7 +330,7 @@ public class ScanQueryMatcher { // check if the cell is expired by cell TTL if (HStore.isCellTTLExpired(cell, this.oldestUnexpiredTS, this.now)) { return MatchCode.SKIP; - } + } /* * The delete logic is pretty complicated now. 
@@ -359,10 +365,10 @@ public class ScanQueryMatcher { } // Can't early out now, because DelFam come before any other keys } - + if ((!isUserScan) && timeToPurgeDeletes > 0 - && (EnvironmentEdgeManager.currentTime() - timestamp) + && (EnvironmentEdgeManager.currentTime() - timestamp) <= timeToPurgeDeletes) { return MatchCode.INCLUDE; } else if (retainDeletesInOutput || mvccVersion > maxReadPointToTrackVersions) { @@ -503,22 +509,20 @@ public class ScanQueryMatcher { } } + /** + * @return Returns false if we know there are no more rows to be scanned (We've reached the + * stopRow or we are scanning one row only because this Scan is for a Get, etc.) + */ public boolean moreRowsMayExistAfter(Cell kv) { - if (this.isReversed) { - if (rowComparator.compareRows(kv, stopRow, 0, stopRow.length) <= 0) { - return false; - } else { - return true; - } - } - if (!Bytes.equals(stopRow , HConstants.EMPTY_END_ROW) && - rowComparator.compareRows(kv, stopRow, 0, stopRow.length) >= 0) { - // KV >= STOPROW - // then NO there is nothing left. + // If a 'get' Scan -- we are doing a Get (every Get is a single-row Scan in implementation) -- + // then we are looking at one row only, the one specified in the Get coordinate. + if (this.getScan) { return false; - } else { - return true; } + return this.isReversed? 
+ rowComparator.compareRows(kv, stopRow, 0, stopRow.length) > 0: + Bytes.equals(stopRow, HConstants.EMPTY_END_ROW) || + rowComparator.compareRows(kv, stopRow, 0, stopRow.length) < 0; } /** diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java index 2f0d284..8623839 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java @@ -650,10 +650,13 @@ public class StoreScanner extends NonReversedNonLazyKeyValueScanner } /* - * See if we should actually SEEK or rather just SKIP to the next Cell. - * (see HBASE-13109) + * See if we should actually SEEK or rather just SKIP to the next Cell (see HBASE-13109) */ private ScanQueryMatcher.MatchCode optimize(ScanQueryMatcher.MatchCode qcode, Cell cell) { + // Currently this optimize method is for Scans, not Gets. If this Scan is a Get Scan -- + // all Gets are just Scans of a single row and are distinguished by having a flag set on the + // Scan instance -- then just skip out of this optimize. + if (this.scan.isGetScan()) return qcode; switch(qcode) { case INCLUDE_AND_SEEK_NEXT_COL: case SEEK_NEXT_COL: @@ -666,6 +669,12 @@ public class StoreScanner extends NonReversedNonLazyKeyValueScanner break; } case INCLUDE_AND_SEEK_NEXT_ROW: + // Check before we try optimize. If we have hit the stopRow, time to bale out. + if (!matcher.moreRowsMayExistAfter(cell)) { + qcode = MatchCode.DONE; + break; + } + // Else fall through to the next case. case SEEK_NEXT_ROW: { Cell nextIndexedKey = getNextIndexedKey();