 .../hadoop/hbase/io/HalfStoreFileReader.java       |  12 +-
 .../hbase/mapreduce/LoadIncrementalHFiles.java     |   3 +-
 .../hbase/regionserver/ReversedStoreScanner.java   |   8 +-
 .../hadoop/hbase/regionserver/StoreFile.java       |  17 ++-
 .../hadoop/hbase/regionserver/StoreFileInfo.java   |   6 +-
 .../hadoop/hbase/regionserver/StoreScanner.java    |  87 ++++++++++++--
 .../hbase/client/TestBlockEvictionFromClient.java  | 132 +++++++++++++++++++++
 .../hadoop/hbase/io/TestHalfStoreFileReader.java   |   7 +-
 .../hadoop/hbase/regionserver/TestStoreFile.java   |  10 +-
 .../TestStoreFileScannerWithTagCompression.java    |   3 +-
 10 files changed, 247 insertions(+), 38 deletions(-)

diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HalfStoreFileReader.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HalfStoreFileReader.java
index 0dd7742..1eca959 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HalfStoreFileReader.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HalfStoreFileReader.java
@@ -34,6 +34,7 @@ import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.io.hfile.CacheConfig;
 import org.apache.hadoop.hbase.io.hfile.HFileScanner;
 import org.apache.hadoop.hbase.regionserver.StoreFile;
+import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
 import org.apache.hadoop.hbase.util.Bytes;
 
 /**
@@ -72,10 +73,10 @@ public class HalfStoreFileReader extends StoreFile.Reader {
    * @param conf Configuration
    * @throws IOException
    */
-  public HalfStoreFileReader(final FileSystem fs, final Path p,
+  public HalfStoreFileReader(final FileSystem fs, final Path p, final StoreFileInfo storeFileInfo,
       final CacheConfig cacheConf, final Reference r, final Configuration conf)
       throws IOException {
-    super(fs, p, cacheConf, conf);
+    super(fs, p, storeFileInfo, cacheConf, conf);
     // This is not actual midkey for this half-file; its just border
     // around which we split top and bottom. Have to look in files to find
     // actual last and first keys for bottom and top halves. Half-files don't
@@ -98,10 +99,11 @@ public class HalfStoreFileReader extends StoreFile.Reader {
    * @param conf Configuration
    * @throws IOException
    */
-  public HalfStoreFileReader(final FileSystem fs, final Path p, final FSDataInputStreamWrapper in,
-      long size, final CacheConfig cacheConf, final Reference r, final Configuration conf)
+  public HalfStoreFileReader(final FileSystem fs, final Path p, final StoreFileInfo fileInfo,
+      final FSDataInputStreamWrapper in, long size, final CacheConfig cacheConf, final Reference r,
+      final Configuration conf)
       throws IOException {
-    super(fs, p, in, size, cacheConf, conf);
+    super(fs, p, fileInfo, in, size, cacheConf, conf);
     // This is not actual midkey for this half-file; its just border
     // around which we split top and bottom. Have to look in files to find
     // actual last and first keys for bottom and top halves. Half-files don't
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java
index 44be2d3..17b704d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java
@@ -831,7 +831,8 @@ public class LoadIncrementalHFiles extends Configured implements Tool {
     HalfStoreFileReader halfReader = null;
     StoreFile.Writer halfWriter = null;
     try {
-      halfReader = new HalfStoreFileReader(fs, inFile, cacheConf, reference, conf);
+      StoreFileInfo fileinfo = new StoreFileInfo(conf, fs, inFile);
+      halfReader = new HalfStoreFileReader(fs, inFile, fileinfo, cacheConf, reference, conf);
       Map fileInfo = halfReader.loadFileInfo();
 
       int blocksize = familyDescriptor.getBlocksize();
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReversedStoreScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReversedStoreScanner.java
index 0e1d90f..7cd69e2 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReversedStoreScanner.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReversedStoreScanner.java
@@ -75,11 +75,15 @@ class ReversedStoreScanner extends StoreScanner implements KeyValueScanner {
     // key does not exist, then to the start of the previous matching Row).
     if (CellUtil.matchingRow(seekKey, HConstants.EMPTY_START_ROW)) {
       for (KeyValueScanner scanner : scanners) {
-        scanner.seekToLastRow();
+        if (scanner.peek() == null) {
+          scanner.seekToLastRow();
+        }
       }
     } else {
       for (KeyValueScanner scanner : scanners) {
-        scanner.backwardSeek(seekKey);
+        if (scanner.peek() == null) {
+          scanner.backwardSeek(seekKey);
+        }
       }
     }
   }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java
index 2b9d101..b32b562 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java
@@ -1162,6 +1162,7 @@ public class StoreFile {
     private boolean bulkLoadResult = false;
     private KeyValue.KeyOnlyKeyValue lastBloomKeyOnlyKV = null;
     private boolean skipResetSeqId = true;
+    private StoreFileInfo fileInfo;
     // Counter that is incremented every time a scanner is created on the
     // store file. It is decremented when the scan on the store file is
     // done.
@@ -1169,20 +1170,22 @@ public class StoreFile {
     // Indicates if the file got compacted
     private volatile boolean compactedAway = false;
 
-    public Reader(FileSystem fs, Path path, CacheConfig cacheConf, Configuration conf)
-        throws IOException {
-      reader = HFile.createReader(fs, path, cacheConf, conf);
+    public Reader(FileSystem fs, Path p, StoreFileInfo fileInfo, CacheConfig cacheConf,
+        Configuration conf) throws IOException {
+      reader = HFile.createReader(fs, p, cacheConf, conf);
       bloomFilterType = BloomType.NONE;
+      this.fileInfo = fileInfo;
     }
 
     void markCompactedAway() {
       this.compactedAway = true;
     }
 
-    public Reader(FileSystem fs, Path path, FSDataInputStreamWrapper in, long size,
-        CacheConfig cacheConf, Configuration conf) throws IOException {
+    public Reader(FileSystem fs, Path path, StoreFileInfo fileInfo, FSDataInputStreamWrapper in,
+        long size, CacheConfig cacheConf, Configuration conf) throws IOException {
       reader = HFile.createReader(fs, path, in, size, cacheConf, conf);
       bloomFilterType = BloomType.NONE;
+      this.fileInfo = fileInfo;
     }
 
     public void setReplicaStoreFile(boolean isPrimaryReplicaStoreFile) {
@@ -1192,6 +1195,10 @@
       return reader.isPrimaryReplicaReader();
     }
 
+    public StoreFileInfo getStoreFileInfo() {
+      return this.fileInfo;
+    }
+
     /**
      * ONLY USE DEFAULT CONSTRUCTOR FOR UNIT TESTS
      */
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileInfo.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileInfo.java
index fd7f1c6..fb72e7b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileInfo.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileInfo.java
@@ -259,10 +259,10 @@ public class StoreFileInfo {
     }
     if (reader == null) {
       if (this.reference != null) {
-        reader = new HalfStoreFileReader(fs, this.getPath(), in, length, cacheConf, reference,
-          conf);
+        reader = new HalfStoreFileReader(fs, this.getPath(), this, in, length, cacheConf, reference,
+          conf);
       } else {
-        reader = new StoreFile.Reader(fs, status.getPath(), in, length, cacheConf, conf);
+        reader = new StoreFile.Reader(fs, status.getPath(), this, in, length, cacheConf, conf);
       }
     }
     if (this.coprocessorHost != null) {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java
index 44f07f7..028935e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java
@@ -23,6 +23,7 @@ import java.io.IOException;
 import java.io.InterruptedIOException;
 import java.util.ArrayList;
 import java.util.HashSet;
+import java.util.Iterator;
 import java.util.List;
 import java.util.NavigableSet;
 import java.util.Set;
@@ -88,7 +89,10 @@ public class StoreScanner extends NonReversedNonLazyKeyValueScanner
 
   // Collects all the KVHeap that are eagerly getting closed during the
   // course of a scan
+  // This will not be needed any more?
   protected Set heapsForDelayedClose = new HashSet();
+  protected Set existingStoreFileInfos = new HashSet();
+  protected List currentScanners = new ArrayList();
 
   /**
    * The number of KVs seen by the scanner. Includes explicitly skipped KVs, but not
@@ -194,7 +198,6 @@
     // Pass columns to try to filter out unnecessary StoreFiles.
     List scanners = getScannersNoCompaction();
 
-
     // Seek all scanners to the start of the Row (or if the exact matching row
     // key does not exist, then to the start of the next matching Row).
     // Always check bloom filter to optimize the top row seek for delete
@@ -207,11 +210,51 @@
     // set rowOffset
     this.storeOffset = scan.getRowOffsetPerColumnFamily();
-
+    addCurrentScanners(scanners);
     // Combine all seeked scanners with a heap
     resetKVHeap(scanners, store.getComparator());
   }
 
+  private List updatedStoreFiles(List scanners,
+      CellComparator cellComparator) {
+    List newScanners = new ArrayList(scanners.size());
+    for (KeyValueScanner scanner : scanners) {
+      if (scanner.isFileScanner()) {
+        Iterator iterator = this.currentScanners.iterator();
+        boolean newScanner = true;
+        while (iterator.hasNext()) {
+          KeyValueScanner currentScanner = iterator.next();
+          if (currentScanner.isFileScanner()) {
+            if (((StoreFileScanner) scanner).getReader().getStoreFileInfo()
+                .equals(((StoreFileScanner) currentScanner).getReader().getStoreFileInfo())) {
+              newScanner = false;
+              // Add the current scanner because they would already been seeked
+              newScanners.add(currentScanner);
+              break;
+            }
+          } else {
+            // remove the existing memstoreScanner
+            iterator.remove();
+          }
+        }
+        if (newScanner) {
+          // Add the new scanner on the flushed file
+          newScanners.add(scanner);
+        }
+      } else {
+        // Add the current memstore Scanner
+        newScanners.add(scanner);
+      }
+    }
+    // remove all the scanners and close the ones which are no longer in use
+    // so that those blocks can be returned
+    currentScanners.removeAll(newScanners);
+    for (KeyValueScanner scanner : currentScanners) {
+      scanner.close();
+    }
+    return newScanners;
+  }
+
   /**
    * Used for compactions.
    *
@@ -261,10 +304,9 @@
     // Filter the list of scanners using Bloom filters, time range, TTL, etc.
     scanners = selectScannersFrom(scanners);
-
     // Seek all scanners to the initial key
     seekScanners(scanners, matcher.getStartKey(), false, parallelSeekEnabled);
-
+    addCurrentScanners(scanners);
     // Combine all seeked scanners with a heap
     resetKVHeap(scanners, store.getComparator());
   }
 
@@ -303,6 +345,7 @@
     }
     // Seek all scanners to the initial key
     seekScanners(scanners, matcher.getStartKey(), false, parallelSeekEnabled);
+    addCurrentScanners(scanners);
     resetKVHeap(scanners, scanInfo.getComparator());
   }
 
@@ -323,10 +366,11 @@
    * @param seekKey
    * @param isLazy true if using lazy seek
    * @param isParallelSeek true if using parallel seek
+   * @param updatedStoreFile
    * @throws IOException
    */
-  protected void seekScanners(List scanners,
-      Cell seekKey, boolean isLazy, boolean isParallelSeek)
+  protected void seekScanners(List scanners, Cell seekKey,
+      boolean isLazy, boolean isParallelSeek)
       throws IOException {
     // Seek all scanners to the start of the Row (or if the exact matching row
     // key does not exist, then to the start of the next matching Row).
@@ -334,17 +378,23 @@
     // family marker.
     if (isLazy) {
       for (KeyValueScanner scanner : scanners) {
-        scanner.requestSeek(seekKey, false, true);
+        // Only seek in the new file
+        if (scanner.peek() == null) {
+          scanner.requestSeek(seekKey, false, true);
+        }
       }
     } else {
       if (!isParallelSeek) {
         long totalScannersSoughtBytes = 0;
         for (KeyValueScanner scanner : scanners) {
           if (totalScannersSoughtBytes >= maxRowSize) {
-            throw new RowTooBigException("Max row size allowed: " + maxRowSize
-              + ", but row is bigger than that");
+            throw new RowTooBigException(
+                "Max row size allowed: " + maxRowSize + ", but row is bigger than that");
+          }
+          if (scanner.peek() == null) {
+            // Only seek in the new file
+            scanner.seek(seekKey);
           }
-          scanner.seek(seekKey);
           Cell c = scanner.peek();
           if (c != null) {
             totalScannersSoughtBytes += CellUtil.estimatedSerializedSizeOf(c);
@@ -362,6 +412,12 @@
     heap = new KeyValueHeap(scanners, comparator);
   }
 
+  protected void addCurrentScanners(List scanners) {
+    for (KeyValueScanner scanner : scanners) {
+      this.currentScanners.add(scanner);
+    }
+  }
+
   /**
    * Filters the given list of scanners using Bloom filter, time range, and
    * TTL.
@@ -683,8 +739,6 @@
 
     //DebugPrint.println("SS updateReaders, topKey = " + lastTop);
 
-    // close scanners to old obsolete Store files
-    this.heapsForDelayedClose.add(this.heap);// Don't close now. Delay it till StoreScanner#close
     this.heap = null; // the re-seeks could be slow (access HDFS) free up memory ASAP
   }
 
@@ -717,13 +771,20 @@
     /* When we have the scan object, should we not pass it to getScanners()
      * to get a limited set of scanners? We did so in the constructor and we
      * could have done it now by storing the scan object from the constructor
      */
-    List scanners = getScannersNoCompaction();
+    final boolean isCompaction = false;
+    boolean usePread = get || scanUsePread;
+    List scanners = store.getScanners(cacheBlocks, get, usePread, isCompaction,
+      matcher, scan.getStartRow(), scan.getStopRow(), this.readPt);
+    scanners = updatedStoreFiles(scanners, store.getComparator());
+    scanners = selectScannersFrom(scanners);
     // Seek all scanners to the initial key
     seekScanners(scanners, lastTopKey, false, parallelSeekEnabled);
 
     // Combine all seeked scanners with a heap
     resetKVHeap(scanners, store.getComparator());
+    // Update the currentScanners
+    this.currentScanners = scanners;
 
     // Reset the state of the Query Matcher and set to top row.
     // Only reset and call setRow if the row changes; avoids confusing the
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestBlockEvictionFromClient.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestBlockEvictionFromClient.java
index 6dedee2..ad9dbb9 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestBlockEvictionFromClient.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestBlockEvictionFromClient.java
@@ -804,6 +804,138 @@ public class TestBlockEvictionFromClient {
     testScanWithCompactionInternals("testReverseScanWithCompaction", true);
   }
 
+  @Test
+  public void testBlockEvictionAfterHBASE13082WithCompactionAndFlush()
+      throws IOException, InterruptedException {
+    // do flush and scan in parallel
+    HTable table = null;
+    try {
+      latch = new CountDownLatch(1);
+      compactionLatch = new CountDownLatch(1);
+      TableName tableName =
+          TableName.valueOf("testBlockEvictionAfterHBASE13082WithCompactionAndFlush");
+      // Create a table with block size as 1024
+      table = TEST_UTIL.createTable(tableName, FAMILIES_1, 1, 1024,
+          CustomInnerRegionObserverWrapper.class.getName());
+      // get the block cache and region
+      RegionLocator locator = table.getRegionLocator();
+      String regionName = locator.getAllRegionLocations().get(0).getRegionInfo().getEncodedName();
+      Region region = TEST_UTIL.getRSForFirstRegionInTable(tableName).getFromOnlineRegions(
+          regionName);
+      Store store = region.getStores().iterator().next();
+      CacheConfig cacheConf = store.getCacheConfig();
+      cacheConf.setCacheDataOnWrite(true);
+      cacheConf.setEvictOnClose(true);
+      BlockCache cache = cacheConf.getBlockCache();
+
+      // insert data. 2 Rows are added
+      Put put = new Put(ROW);
+      put.addColumn(FAMILY, QUALIFIER, data);
+      table.put(put);
+      put = new Put(ROW1);
+      put.addColumn(FAMILY, QUALIFIER, data);
+      table.put(put);
+      assertTrue(Bytes.equals(table.get(new Get(ROW)).value(), data));
+      // Should create one Hfile with 2 blocks
+      region.flush(true);
+      // read the data and expect same blocks, one new hit, no misses
+      int refCount = 0;
+      // Check how this miss is happening
+      // insert a second column, read the row, no new blocks, 3 new hits
+      byte[] QUALIFIER2 = Bytes.add(QUALIFIER, QUALIFIER);
+      byte[] data2 = Bytes.add(data, data);
+      put = new Put(ROW);
+      put.addColumn(FAMILY, QUALIFIER2, data2);
+      table.put(put);
+      // flush, one new block
+      System.out.println("Flushing cache");
+      region.flush(true);
+      Iterator iterator = cache.iterator();
+      iterateBlockCache(cache, iterator);
+      // Create three sets of scan
+      ScanThread[] scanThreads = initiateScan(table, false);
+      Thread.sleep(100);
+      iterator = cache.iterator();
+      boolean usedBlocksFound = false;
+      while (iterator.hasNext()) {
+        CachedBlock next = iterator.next();
+        BlockCacheKey cacheKey = new BlockCacheKey(next.getFilename(), next.getOffset());
+        if (cache instanceof BucketCache) {
+          refCount = ((BucketCache) cache).getRefCount(cacheKey);
+        } else if (cache instanceof CombinedBlockCache) {
+          refCount = ((CombinedBlockCache) cache).getRefCount(cacheKey);
+        } else {
+          continue;
+        }
+        if (refCount != 0) {
+          // Blocks will be with count 3
+          assertEquals(NO_OF_THREADS, refCount);
+          usedBlocksFound = true;
+        }
+      }
+      // Make a put and do a flush
+      QUALIFIER2 = Bytes.add(QUALIFIER, QUALIFIER);
+      data2 = Bytes.add(data, data);
+      put = new Put(ROW1);
+      put.addColumn(FAMILY, QUALIFIER2, data2);
+      table.put(put);
+      // flush, one new block
+      System.out.println("Flushing cache");
+      region.flush(true);
+      assertTrue("Blocks with non zero ref count should be found ", usedBlocksFound);
+      usedBlocksFound = false;
+      System.out.println("Compacting");
+      assertEquals(3, store.getStorefilesCount());
+      store.triggerMajorCompaction();
+      region.compact(true);
+      waitForStoreFileCount(store, 1, 10000); // wait 10 seconds max
+      assertEquals(1, store.getStorefilesCount());
+      // Even after compaction is done we will have some blocks that cannot
+      // be evicted this is because the scan is still referencing them
+      iterator = cache.iterator();
+      while (iterator.hasNext()) {
+        CachedBlock next = iterator.next();
+        BlockCacheKey cacheKey = new BlockCacheKey(next.getFilename(), next.getOffset());
+        if (cache instanceof BucketCache) {
+          refCount = ((BucketCache) cache).getRefCount(cacheKey);
+        } else if (cache instanceof CombinedBlockCache) {
+          refCount = ((CombinedBlockCache) cache).getRefCount(cacheKey);
+        } else {
+          continue;
+        }
+        if (refCount != 0) {
+          // Blocks will be with count 3 as they are not yet cleared
+          assertEquals(NO_OF_THREADS, refCount);
+          usedBlocksFound = true;
+        }
+      }
+      assertTrue("Blocks with non zero ref count should be found ", usedBlocksFound);
+      // Should not throw exception
+      compactionLatch.countDown();
+      latch.countDown();
+      for (ScanThread thread : scanThreads) {
+        thread.join();
+      }
+      // by this time all blocks should have been evicted
+      iterator = cache.iterator();
+      // Since a flush and compaction happened after a scan started
+      // we need to ensure that all the original blocks of the compacted file
+      // is also removed.
+      iterateBlockCache(cache, iterator);
+      Result r = table.get(new Get(ROW));
+      assertTrue(Bytes.equals(r.getValue(FAMILY, QUALIFIER), data));
+      assertTrue(Bytes.equals(r.getValue(FAMILY, QUALIFIER2), data2));
+      // The gets would be working on new blocks
+      iterator = cache.iterator();
+      iterateBlockCache(cache, iterator);
+    } finally {
+      if (table != null) {
+        table.close();
+      }
+    }
+
+  }
+
   private void testScanWithCompactionInternals(String tableNameStr, boolean reversed)
       throws IOException, InterruptedException {
     HTable table = null;
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHalfStoreFileReader.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHalfStoreFileReader.java
index 0e5f08e..be00900 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHalfStoreFileReader.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHalfStoreFileReader.java
@@ -41,6 +41,7 @@ import org.apache.hadoop.hbase.io.hfile.HFile;
 import org.apache.hadoop.hbase.io.hfile.HFileContext;
 import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
 import org.apache.hadoop.hbase.io.hfile.HFileScanner;
+import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
 import org.apache.hadoop.hbase.testclassification.IOTests;
 import org.apache.hadoop.hbase.testclassification.SmallTests;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -119,7 +120,8 @@ public class TestHalfStoreFileReader {
 
   private void doTestOfScanAndReseek(Path p, FileSystem fs, Reference bottom, CacheConfig cacheConf)
       throws IOException {
-    final HalfStoreFileReader halfreader = new HalfStoreFileReader(fs, p,
+    StoreFileInfo fileInfo = new StoreFileInfo(TEST_UTIL.getConfiguration(), fs, p);
+    final HalfStoreFileReader halfreader = new HalfStoreFileReader(fs, p, fileInfo,
         cacheConf, bottom, TEST_UTIL.getConfiguration());
     halfreader.loadFileInfo();
     final HFileScanner scanner = halfreader.getScanner(false, false);
@@ -219,7 +221,8 @@ public class TestHalfStoreFileReader {
 
   private Cell doTestOfSeekBefore(Path p, FileSystem fs, Reference bottom, Cell seekBefore,
       CacheConfig cacheConfig) throws IOException {
-    final HalfStoreFileReader halfreader = new HalfStoreFileReader(fs, p,
+    StoreFileInfo fileInfo = new StoreFileInfo(TEST_UTIL.getConfiguration(), fs, p);
+    final HalfStoreFileReader halfreader = new HalfStoreFileReader(fs, p, fileInfo,
         cacheConfig, bottom, TEST_UTIL.getConfiguration());
     halfreader.loadFileInfo();
     final HFileScanner scanner = halfreader.getScanner(false, false);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFile.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFile.java
index e984c5d..628f447 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFile.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFile.java
@@ -497,8 +497,7 @@ public class TestStoreFile extends HBaseTestCase {
       writer.append(kv);
     }
     writer.close();
-
-    StoreFile.Reader reader = new StoreFile.Reader(fs, f, cacheConf, conf);
+    StoreFile.Reader reader = new StoreFile.Reader(fs, f, null, cacheConf, conf);
     reader.loadFileInfo();
     reader.loadBloomfilter();
     StoreFileScanner scanner = reader.getStoreFileScanner(false, false);
@@ -585,8 +584,7 @@ public class TestStoreFile extends HBaseTestCase {
       writer.append(kv);
     }
     writer.close();
-
-    StoreFile.Reader reader = new StoreFile.Reader(fs, f, cacheConf, conf);
+    StoreFile.Reader reader = new StoreFile.Reader(fs, f, null, cacheConf, conf);
     reader.loadFileInfo();
     reader.loadBloomfilter();
 
@@ -632,7 +630,7 @@
     writeStoreFile(writer);
     writer.close();
 
-    StoreFile.Reader reader = new StoreFile.Reader(fs, f, cacheConf, conf);
+    StoreFile.Reader reader = new StoreFile.Reader(fs, f, null, cacheConf, conf);
 
     // Now do reseek with empty KV to position to the beginning of the file
@@ -692,7 +690,7 @@
     }
     writer.close();
 
-    StoreFile.Reader reader = new StoreFile.Reader(fs, f, cacheConf, conf);
+    StoreFile.Reader reader = new StoreFile.Reader(fs, f, null, cacheConf, conf);
     reader.loadFileInfo();
     reader.loadBloomfilter();
     StoreFileScanner scanner = reader.getStoreFileScanner(false, false);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFileScannerWithTagCompression.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFileScannerWithTagCompression.java
index 1bcb7c9..c8bd4e8 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFileScannerWithTagCompression.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFileScannerWithTagCompression.java
@@ -72,7 +72,8 @@
     writeStoreFile(writer);
     writer.close();
 
-    StoreFile.Reader reader = new StoreFile.Reader(fs, f, cacheConf, conf);
+    StoreFileInfo info = new StoreFileInfo(conf, fs, f);
+    StoreFile.Reader reader = new StoreFile.Reader(fs, f, info, cacheConf, conf);
     StoreFileScanner s = reader.getStoreFileScanner(false, false);
     try {
       // Now do reseek with empty KV to position to the beginning of the file