From 2c423e53e0111c5c64a9e447618fe656c2effeb1 Mon Sep 17 00:00:00 2001 From: Apekshit Date: Fri, 15 Apr 2016 16:29:16 -0700 Subject: [PATCH] HBASE-15296 Break out writer and reader from StoreFile. Fully backward compatible.(Apekshit) Change-Id: I643ec7adda788749447888d9ea5f89ac70e53f6b --- .../hbase/coprocessor/BaseRegionObserver.java | 34 +- .../hadoop/hbase/coprocessor/RegionObserver.java | 35 +- .../hadoop/hbase/io/HalfStoreFileReader.java | 4 +- .../hadoop/hbase/mapreduce/HFileOutputFormat2.java | 9 +- .../hbase/mapreduce/LoadIncrementalHFiles.java | 6 +- .../regionserver/AbstractMultiFileWriter.java | 13 +- .../regionserver/DateTieredMultiFileWriter.java | 13 +- .../regionserver/DefaultStoreFileManager.java | 2 +- .../hbase/regionserver/DefaultStoreFlusher.java | 4 +- .../apache/hadoop/hbase/regionserver/HRegion.java | 2 +- .../apache/hadoop/hbase/regionserver/HStore.java | 96 +- .../hbase/regionserver/RegionCoprocessorHost.java | 44 +- .../apache/hadoop/hbase/regionserver/Store.java | 37 +- .../hadoop/hbase/regionserver/StoreFile.java | 1124 ++------------------ .../hadoop/hbase/regionserver/StoreFileInfo.java | 10 +- .../hadoop/hbase/regionserver/StoreFileReader.java | 616 +++++++++++ .../hbase/regionserver/StoreFileScanner.java | 19 +- .../hadoop/hbase/regionserver/StoreFileWriter.java | 467 ++++++++ .../hadoop/hbase/regionserver/StoreFlusher.java | 2 +- .../hadoop/hbase/regionserver/StoreUtils.java | 2 +- .../hbase/regionserver/StripeMultiFileWriter.java | 15 +- .../hbase/regionserver/StripeStoreFileManager.java | 4 +- .../hbase/regionserver/StripeStoreFlusher.java | 5 +- .../compactions/AbstractMultiOutputCompactor.java | 4 +- .../compactions/CompactionRequest.java | 8 +- .../hbase/regionserver/compactions/Compactor.java | 9 +- .../regionserver/compactions/DefaultCompactor.java | 12 +- .../compactions/ExploringCompactionPolicy.java | 6 +- .../compactions/FIFOCompactionPolicy.java | 4 +- .../compactions/RatioBasedCompactionPolicy.java | 2 +- .../compactions/SortedCompactionPolicy.java | 2 +- .../compactions/StripeCompactionPolicy.java | 8 +- .../hbase/util/CompoundBloomFilterWriter.java | 3 +- .../hbase/coprocessor/SimpleRegionObserver.java | 37 +- .../hadoop/hbase/io/hfile/TestCacheOnWrite.java | 4 +- .../apache/hadoop/hbase/io/hfile/TestPrefetch.java | 4 +- .../io/hfile/TestSeekBeforeWithInlineBlocks.java | 6 +- .../hbase/regionserver/CreateRandomStoreFile.java | 2 +- .../hbase/regionserver/DataBlockEncodingTool.java | 2 +- .../regionserver/EncodedSeekPerformanceTest.java | 4 +- .../hadoop/hbase/regionserver/MockStoreFile.java | 13 +- .../regionserver/TestCacheOnWriteInSchema.java | 6 +- .../hadoop/hbase/regionserver/TestCompaction.java | 6 +- .../hbase/regionserver/TestCompactionPolicy.java | 2 +- .../regionserver/TestCompoundBloomFilter.java | 4 +- .../hbase/regionserver/TestFSErrorsExposed.java | 6 +- .../hadoop/hbase/regionserver/TestHRegion.java | 6 +- .../hbase/regionserver/TestMajorCompaction.java | 4 +- .../hbase/regionserver/TestRegionReplicas.java | 2 +- .../hbase/regionserver/TestScanWithBloomError.java | 2 +- .../hadoop/hbase/regionserver/TestStore.java | 11 +- .../hadoop/hbase/regionserver/TestStoreFile.java | 89 +- .../TestStoreFileScannerWithTagCompression.java | 6 +- .../hbase/regionserver/TestStripeStoreEngine.java | 2 +- .../compactions/MockStoreFileGenerator.java | 6 +- .../compactions/PerfTestCompactionPolicies.java | 2 +- .../regionserver/compactions/TestCompactor.java | 18 +- .../compactions/TestDateTieredCompactor.java | 2 +- 
.../compactions/TestStripeCompactionPolicy.java | 19 +- .../compactions/TestStripeCompactor.java | 2 +- .../security/visibility/TestVisibilityLabels.java | 3 +- 61 files changed, 1591 insertions(+), 1300 deletions(-) create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileReader.java create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileWriter.java diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseRegionObserver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseRegionObserver.java index 1bf7449..67cb550 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseRegionObserver.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseRegionObserver.java @@ -55,7 +55,7 @@ import org.apache.hadoop.hbase.regionserver.RegionScanner; import org.apache.hadoop.hbase.regionserver.ScanType; import org.apache.hadoop.hbase.regionserver.Store; import org.apache.hadoop.hbase.regionserver.StoreFile; -import org.apache.hadoop.hbase.regionserver.StoreFile.Reader; +import org.apache.hadoop.hbase.regionserver.StoreFileReader; import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest; import org.apache.hadoop.hbase.regionserver.wal.HLogKey; import org.apache.hadoop.hbase.wal.WALKey; @@ -488,17 +488,41 @@ public class BaseRegionObserver implements RegionObserver { return hasLoaded; } + /** + * @deprecated Since 1.3. Removed in 2.0. Use {@link #preStoreFileReaderOpen(ObserverContext, + * FileSystem, Path, FSDataInputStreamWrapper, long, CacheConfig, Reference, StoreFileReader)}. + */ + @Deprecated + @Override + public StoreFile.Reader preStoreFileReaderOpen(ObserverContext ctx, + final FileSystem fs, final Path p, final FSDataInputStreamWrapper in, long size, + final CacheConfig cacheConf, final Reference r, StoreFile.Reader reader) throws IOException { + return reader; + } + + /** + * @deprecated Since 1.3. Removed in 2.0. Use {@link #postStoreFileReaderOpen(ObserverContext, + * FileSystem, Path, FSDataInputStreamWrapper, long, CacheConfig, Reference, StoreFileReader)}. 
+ */ + @Deprecated + @Override + public StoreFile.Reader postStoreFileReaderOpen(ObserverContext ctx, + final FileSystem fs, final Path p, final FSDataInputStreamWrapper in, long size, + final CacheConfig cacheConf, final Reference r, StoreFile.Reader reader) throws IOException { + return reader; + } + @Override - public Reader preStoreFileReaderOpen(ObserverContext ctx, + public StoreFileReader preStoreFileReaderOpen(ObserverContext ctx, FileSystem fs, Path p, FSDataInputStreamWrapper in, long size, CacheConfig cacheConf, - Reference r, Reader reader) throws IOException { + Reference r, StoreFileReader reader) throws IOException { return reader; } @Override - public Reader postStoreFileReaderOpen(ObserverContext ctx, + public StoreFileReader postStoreFileReaderOpen(ObserverContext ctx, FileSystem fs, Path p, FSDataInputStreamWrapper in, long size, CacheConfig cacheConf, - Reference r, Reader reader) throws IOException { + Reference r, StoreFileReader reader) throws IOException { return reader; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java index 8c5c15a..89c48d9 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java @@ -55,6 +55,7 @@ import org.apache.hadoop.hbase.regionserver.RegionScanner; import org.apache.hadoop.hbase.regionserver.ScanType; import org.apache.hadoop.hbase.regionserver.Store; import org.apache.hadoop.hbase.regionserver.StoreFile; +import org.apache.hadoop.hbase.regionserver.StoreFileReader; import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest; import org.apache.hadoop.hbase.regionserver.wal.HLogKey; import org.apache.hadoop.hbase.wal.WALKey; @@ -1199,43 +1200,57 @@ public interface RegionObserver extends Coprocessor { List> familyPaths, boolean hasLoaded) throws IOException; /** + * @deprecated Since 1.3. Removed in 2.0. Use {@link #preStoreFileReaderOpen(ObserverContext, + * FileSystem, Path, FSDataInputStreamWrapper, long, CacheConfig, Reference, StoreFileReader)}. + */ + @Deprecated + StoreFile.Reader preStoreFileReaderOpen(final ObserverContext ctx, + final FileSystem fs, final Path p, final FSDataInputStreamWrapper in, long size, + final CacheConfig cacheConf, final Reference r, StoreFile.Reader reader) throws IOException; + + /** + * @deprecated Since 1.3. Removed in 2.0. Use {@link #postStoreFileReaderOpen(ObserverContext, + * FileSystem, Path, FSDataInputStreamWrapper, long, CacheConfig, Reference, StoreFileReader)}. + */ + @Deprecated + StoreFile.Reader postStoreFileReaderOpen(final ObserverContext ctx, + final FileSystem fs, final Path p, final FSDataInputStreamWrapper in, long size, + final CacheConfig cacheConf, final Reference r, StoreFile.Reader reader) throws IOException; + + /** * Called before creation of Reader for a store file. * Calling {@link org.apache.hadoop.hbase.coprocessor.ObserverContext#bypass()} has no * effect in this hook. - * + * * @param ctx the environment provided by the region server * @param fs fileystem to read from * @param p path to the file * @param in {@link FSDataInputStreamWrapper} * @param size Full size of the file - * @param cacheConf * @param r original reference file. This will be not null only when reading a split file. 
* @param reader the base reader, if not {@code null}, from previous RegionObserver in the chain * @return a Reader instance to use instead of the base reader if overriding * default behavior, null otherwise - * @throws IOException */ - StoreFile.Reader preStoreFileReaderOpen(final ObserverContext ctx, + StoreFileReader preStoreFileReaderOpen(final ObserverContext ctx, final FileSystem fs, final Path p, final FSDataInputStreamWrapper in, long size, - final CacheConfig cacheConf, final Reference r, StoreFile.Reader reader) throws IOException; + final CacheConfig cacheConf, final Reference r, StoreFileReader reader) throws IOException; /** * Called after the creation of Reader for a store file. - * + * * @param ctx the environment provided by the region server * @param fs fileystem to read from * @param p path to the file * @param in {@link FSDataInputStreamWrapper} * @param size Full size of the file - * @param cacheConf * @param r original reference file. This will be not null only when reading a split file. * @param reader the base reader instance * @return The reader to use - * @throws IOException */ - StoreFile.Reader postStoreFileReaderOpen(final ObserverContext ctx, + StoreFileReader postStoreFileReaderOpen(final ObserverContext ctx, final FileSystem fs, final Path p, final FSDataInputStreamWrapper in, long size, - final CacheConfig cacheConf, final Reference r, StoreFile.Reader reader) throws IOException; + final CacheConfig cacheConf, final Reference r, StoreFileReader reader) throws IOException; /** * Called after a new cell has been created during an increment operation, but before diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HalfStoreFileReader.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HalfStoreFileReader.java index ed2e925..1707df4 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HalfStoreFileReader.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HalfStoreFileReader.java @@ -33,7 +33,7 @@ import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.io.hfile.CacheConfig; import org.apache.hadoop.hbase.io.hfile.HFileScanner; -import org.apache.hadoop.hbase.regionserver.StoreFile; +import org.apache.hadoop.hbase.regionserver.StoreFileReader; import org.apache.hadoop.hbase.util.Bytes; /** @@ -50,7 +50,7 @@ import org.apache.hadoop.hbase.util.Bytes; *

This file is not splitable. Calls to {@link #midkey()} return null. */ @InterfaceAudience.Private -public class HalfStoreFileReader extends StoreFile.Reader { +public class HalfStoreFileReader extends StoreFileReader { private static final Log LOG = LogFactory.getLog(HalfStoreFileReader.class); final boolean top; // This is the key we split around. Its the first possible entry on a row: diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java index 616bb72..eae998b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java @@ -65,6 +65,7 @@ import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder; import org.apache.hadoop.hbase.regionserver.BloomType; import org.apache.hadoop.hbase.regionserver.HStore; import org.apache.hadoop.hbase.regionserver.StoreFile; +import org.apache.hadoop.hbase.regionserver.StoreFileWriter; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.io.NullWritable; import org.apache.hadoop.io.SequenceFile; @@ -311,12 +312,12 @@ public class HFileOutputFormat2 if (null == favoredNodes) { wl.writer = - new StoreFile.WriterBuilder(conf, new CacheConfig(tempConf), fs) + new StoreFileWriter.Builder(conf, new CacheConfig(tempConf), fs) .withOutputDir(familydir).withBloomType(bloomType) .withComparator(KeyValue.COMPARATOR).withFileContext(hFileContext).build(); } else { wl.writer = - new StoreFile.WriterBuilder(conf, new CacheConfig(tempConf), new HFileSystem(fs)) + new StoreFileWriter.Builder(conf, new CacheConfig(tempConf), new HFileSystem(fs)) .withOutputDir(familydir).withBloomType(bloomType) .withComparator(KeyValue.COMPARATOR).withFileContext(hFileContext) .withFavoredNodes(favoredNodes).build(); @@ -326,7 +327,7 @@ public class HFileOutputFormat2 return wl; } - private void close(final StoreFile.Writer w) throws IOException { + private void close(final StoreFileWriter w) throws IOException { if (w != null) { w.appendFileInfo(StoreFile.BULKLOAD_TIME_KEY, Bytes.toBytes(System.currentTimeMillis())); @@ -356,7 +357,7 @@ public class HFileOutputFormat2 */ static class WriterLength { long written = 0; - StoreFile.Writer writer = null; + StoreFileWriter writer = null; } /** diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java index 07059bc..13e99a4 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java @@ -89,8 +89,8 @@ import org.apache.hadoop.hbase.io.hfile.HFileScanner; import org.apache.hadoop.hbase.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.regionserver.BloomType; import org.apache.hadoop.hbase.regionserver.HStore; -import org.apache.hadoop.hbase.regionserver.StoreFile; import org.apache.hadoop.hbase.regionserver.StoreFileInfo; +import org.apache.hadoop.hbase.regionserver.StoreFileWriter; import org.apache.hadoop.hbase.security.UserProvider; import org.apache.hadoop.hbase.security.access.SecureBulkLoadEndpoint; import org.apache.hadoop.hbase.security.token.FsDelegationToken; @@ -931,7 +931,7 @@ public class LoadIncrementalHFiles extends Configured implements Tool { FileSystem fs = inFile.getFileSystem(conf); 
CacheConfig cacheConf = new CacheConfig(conf); HalfStoreFileReader halfReader = null; - StoreFile.Writer halfWriter = null; + StoreFileWriter halfWriter = null; try { halfReader = new HalfStoreFileReader(fs, inFile, cacheConf, reference, conf); Map fileInfo = halfReader.loadFileInfo(); @@ -947,7 +947,7 @@ public class LoadIncrementalHFiles extends Configured implements Tool { .withDataBlockEncoding(familyDescriptor.getDataBlockEncoding()) .withIncludesTags(true) .build(); - halfWriter = new StoreFile.WriterBuilder(conf, cacheConf, + halfWriter = new StoreFileWriter.Builder(conf, cacheConf, fs) .withFilePath(outFile) .withBloomType(bloomFilterType) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/AbstractMultiFileWriter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/AbstractMultiFileWriter.java index 4987c59..a4e0285 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/AbstractMultiFileWriter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/AbstractMultiFileWriter.java @@ -26,7 +26,6 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.classification.InterfaceAudience; -import org.apache.hadoop.hbase.regionserver.StoreFile.Writer; import org.apache.hadoop.hbase.regionserver.compactions.Compactor.CellSink; /** @@ -44,7 +43,7 @@ public abstract class AbstractMultiFileWriter implements CellSink { protected StoreScanner sourceScanner; public interface WriterFactory { - public StoreFile.Writer createWriter() throws IOException; + public StoreFileWriter createWriter() throws IOException; } /** @@ -66,13 +65,13 @@ public abstract class AbstractMultiFileWriter implements CellSink { */ public List commitWriters(long maxSeqId, boolean majorCompaction) throws IOException { preCommitWriters(); - Collection writers = this.writers(); + Collection writers = this.writers(); if (LOG.isDebugEnabled()) { LOG.debug("Commit " + writers.size() + " writers, maxSeqId=" + maxSeqId + ", majorCompaction=" + majorCompaction); } List paths = new ArrayList(); - for (Writer writer : writers) { + for (StoreFileWriter writer : writers) { if (writer == null) { continue; } @@ -89,7 +88,7 @@ public abstract class AbstractMultiFileWriter implements CellSink { */ public List abortWriters() { List paths = new ArrayList(); - for (StoreFile.Writer writer : writers()) { + for (StoreFileWriter writer : writers()) { try { if (writer != null) { paths.add(writer.getPath()); @@ -102,7 +101,7 @@ public abstract class AbstractMultiFileWriter implements CellSink { return paths; } - protected abstract Collection writers(); + protected abstract Collection writers(); /** * Subclasses override this method to be called at the end of a successful sequence of append; all @@ -115,6 +114,6 @@ public abstract class AbstractMultiFileWriter implements CellSink { * Subclasses override this method to be called before we close the give writer. Usually you can * append extra metadata to the writer. 
*/ - protected void preCloseWriter(StoreFile.Writer writer) throws IOException { + protected void preCloseWriter(StoreFileWriter writer) throws IOException { } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DateTieredMultiFileWriter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DateTieredMultiFileWriter.java index f0bd444..2cea92f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DateTieredMultiFileWriter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DateTieredMultiFileWriter.java @@ -26,7 +26,6 @@ import java.util.TreeMap; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.classification.InterfaceAudience; -import org.apache.hadoop.hbase.regionserver.StoreFile.Writer; /** * class for cell sink that separates the provided cells into multiple files for date tiered @@ -35,8 +34,8 @@ import org.apache.hadoop.hbase.regionserver.StoreFile.Writer; @InterfaceAudience.Private public class DateTieredMultiFileWriter extends AbstractMultiFileWriter { - private final NavigableMap lowerBoundary2Writer - = new TreeMap(); + private final NavigableMap lowerBoundary2Writer + = new TreeMap(); private final boolean needEmptyFile; @@ -53,8 +52,8 @@ public class DateTieredMultiFileWriter extends AbstractMultiFileWriter { @Override public void append(Cell cell) throws IOException { - Map.Entry entry = lowerBoundary2Writer.floorEntry(cell.getTimestamp()); - StoreFile.Writer writer = entry.getValue(); + Map.Entry entry = lowerBoundary2Writer.floorEntry(cell.getTimestamp()); + StoreFileWriter writer = entry.getValue(); if (writer == null) { writer = writerFactory.createWriter(); lowerBoundary2Writer.put(entry.getKey(), writer); @@ -63,7 +62,7 @@ public class DateTieredMultiFileWriter extends AbstractMultiFileWriter { } @Override - protected Collection writers() { + protected Collection writers() { return lowerBoundary2Writer.values(); } @@ -72,7 +71,7 @@ public class DateTieredMultiFileWriter extends AbstractMultiFileWriter { if (!needEmptyFile) { return; } - for (StoreFile.Writer writer : lowerBoundary2Writer.values()) { + for (StoreFileWriter writer : lowerBoundary2Writer.values()) { if (writer != null) { return; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultStoreFileManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultStoreFileManager.java index a9b3d0f..9e81285 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultStoreFileManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultStoreFileManager.java @@ -199,7 +199,7 @@ class DefaultStoreFileManager implements StoreFileManager { // 2) Files that are not the latest can't become one due to (1), so the rest are fair game. 
for (int i = 0; i < files.size() - 1; ++i) { StoreFile sf = files.get(i); - long fileTs = sf.getReader().getMaxTimestamp(); + long fileTs = sf.getStoreFileReader().getMaxTimestamp(); if (fileTs < maxTs && !filesCompacting.contains(sf)) { LOG.info("Found an expired store file: " + sf.getPath() + " whose maxTimeStamp is " + fileTs + ", which is below " + maxTs); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultStoreFlusher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultStoreFlusher.java index 935813c..275b6bb 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultStoreFlusher.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultStoreFlusher.java @@ -57,14 +57,14 @@ public class DefaultStoreFlusher extends StoreFlusher { return result; // NULL scanner returned from coprocessor hooks means skip normal processing } - StoreFile.Writer writer; + StoreFileWriter writer; try { // TODO: We can fail in the below block before we complete adding this flush to // list of store files. Add cleanup of anything put on filesystem if we fail. synchronized (flushLock) { status.setStatus("Flushing " + store + ": creating writer"); // Write the map out to the disk - writer = store.createWriterInTmp(cellsCount, store.getFamily().getCompression(), + writer = store.createWriterInTmpDir(cellsCount, store.getFamily().getCompression(), /* isCompaction = */ false, /* includeMVCCReadpoint = */ true, /* includesTags = */ snapshot.isTagsPresent(), diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java index 204c729..5b5fcdc 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java @@ -1670,7 +1670,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi Collection storeFiles = store.getStorefiles(); if (storeFiles == null) continue; for (StoreFile file : storeFiles) { - StoreFile.Reader sfReader = file.getReader(); + StoreFileReader sfReader = file.getStoreFileReader(); if (sfReader == null) continue; HFile.Reader reader = sfReader.getHFileReader(); if (reader == null) continue; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java index db66641..1d78073 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java @@ -531,9 +531,9 @@ public class HStore implements Store { Future future = completionService.take(); StoreFile storeFile = future.get(); if (storeFile != null) { - long length = storeFile.getReader().length(); + long length = storeFile.getStoreFileReader().length(); this.storeSize += length; - this.totalUncompressedBytes += storeFile.getReader().getTotalUncompressedBytes(); + this.totalUncompressedBytes += storeFile.getStoreFileReader().getTotalUncompressedBytes(); if (LOG.isDebugEnabled()) { LOG.debug("loaded " + storeFile.toStringDetailed()); } @@ -649,7 +649,7 @@ public class HStore implements Store { info.setRegionCoprocessorHost(this.region.getCoprocessorHost()); StoreFile storeFile = new StoreFile(this.getFileSystem(), info, this.conf, this.cacheConf, this.family.getBloomFilterType()); - StoreFile.Reader r = 
storeFile.createReader(); + StoreFileReader r = storeFile.createStoreFileReader(); r.setReplicaStoreFile(isPrimaryReplicaStore()); return storeFile; } @@ -797,7 +797,7 @@ public class HStore implements Store { } private void bulkLoadHFile(StoreFile sf) throws IOException { - StoreFile.Reader r = sf.getReader(); + StoreFileReader r = sf.getStoreFileReader(); this.storeSize += r.length(); this.totalUncompressedBytes += r.getTotalUncompressedBytes(); @@ -965,7 +965,7 @@ public class HStore implements Store { status.setStatus("Flushing " + this + ": reopening flushed file"); StoreFile sf = createStoreFileAndReader(dstPath); - StoreFile.Reader r = sf.getReader(); + StoreFileReader r = sf.getStoreFileReader(); this.storeSize += r.length(); this.totalUncompressedBytes += r.getTotalUncompressedBytes(); @@ -977,27 +977,49 @@ public class HStore implements Store { return sf; } + /** + * {@inheritDoc} + */ + @Deprecated @Override public StoreFile.Writer createWriterInTmp(long maxKeyCount, Compression.Algorithm compression, boolean isCompaction, boolean includeMVCCReadpoint, boolean includesTag) throws IOException { - return createWriterInTmp(maxKeyCount, compression, isCompaction, includeMVCCReadpoint, - includesTag, false); + return (StoreFile.Writer) createWriterInTmpDir(maxKeyCount, compression, isCompaction, + includeMVCCReadpoint, includesTag); } - /* - * @param maxKeyCount - * @param compression Compression algorithm to use - * @param isCompaction whether we are creating a new file in a compaction - * @param includesMVCCReadPoint - whether to include MVCC or not - * @param includesTag - includesTag or not - * @return Writer for a new StoreFile in the tmp dir. + /** + * {@inheritDoc} */ @Override + @Deprecated public StoreFile.Writer createWriterInTmp(long maxKeyCount, Compression.Algorithm compression, boolean isCompaction, boolean includeMVCCReadpoint, boolean includesTag, - boolean shouldDropBehind) + boolean shouldDropBehind) throws IOException { + return (StoreFile.Writer) createWriterInTmpDir(maxKeyCount, compression, isCompaction, + includeMVCCReadpoint, includesTag, shouldDropBehind); + } + + /** + * {@inheritDoc} + */ + @Override + public StoreFileWriter createWriterInTmpDir( + long maxKeyCount, Compression.Algorithm compression, boolean isCompaction, + boolean includeMVCCReadpoint, boolean includesTag) throws IOException { + return createWriterInTmpDir(maxKeyCount, compression, isCompaction, includeMVCCReadpoint, + includesTag, false); + } + + /** + * {@inheritDoc} + */ + @Override + public StoreFileWriter createWriterInTmpDir( + long maxKeyCount, Compression.Algorithm compression, boolean isCompaction, + boolean includeMVCCReadpoint, boolean includesTag, boolean shouldDropBehind) throws IOException { final CacheConfig writerCacheConf; if (isCompaction) { @@ -1014,7 +1036,7 @@ public class HStore implements Store { } HFileContext hFileContext = createFileContext(compression, includeMVCCReadpoint, includesTag, cryptoContext); - StoreFile.Writer w = new StoreFile.WriterBuilder(conf, writerCacheConf, + StoreFileWriter w = new StoreFileWriter.Builder(conf, writerCacheConf, this.getFileSystem()) .withFilePath(fs.createTempName()) .withComparator(comparator) @@ -1077,7 +1099,7 @@ public class HStore implements Store { if (LOG.isTraceEnabled()) { long totalSize = 0; for (StoreFile sf : sfs) { - totalSize += sf.getReader().length(); + totalSize += sf.getStoreFileReader().length(); } String traceMessage = "FLUSH time,count,size,store size,store files [" + EnvironmentEdgeManager.currentTime() + 
"," + sfs.size() + "," + totalSize @@ -1277,7 +1299,7 @@ public class HStore implements Store { } for (StoreFile sf : sfs) { - outputBytes += sf.getReader().length(); + outputBytes += sf.getStoreFileReader().length(); } // At this point the store will use new files for all new scanners. @@ -1391,7 +1413,7 @@ public class HStore implements Store { for (StoreFile sf: sfs) { message.append(sf.getPath().getName()); message.append("(size="); - message.append(TraditionalBinaryPrefix.long2String(sf.getReader().length(), "", 1)); + message.append(TraditionalBinaryPrefix.long2String(sf.getStoreFileReader().length(), "", 1)); message.append("), "); } } @@ -1406,7 +1428,7 @@ public class HStore implements Store { int fileCount = storeEngine.getStoreFileManager().getStorefileCount(); long resultSize = 0; for (StoreFile sf : sfs) { - resultSize += sf.getReader().length(); + resultSize += sf.getStoreFileReader().length(); } String traceMessage = "COMPACTION start,end,size out,files in,files out,store size," + "store files [" + compactionStartTime + "," + now + "," + resultSize + "," @@ -1552,7 +1574,7 @@ public class HStore implements Store { public boolean isMajorCompaction() throws IOException { for (StoreFile sf : this.storeEngine.getStoreFileManager().getStorefiles()) { // TODO: what are these reader checks all over the place? - if (sf.getReader() == null) { + if (sf.getStoreFileReader() == null) { LOG.debug("StoreFile " + sf + " has null Reader"); return false; } @@ -1816,7 +1838,7 @@ public class HStore implements Store { this.storeSize = 0L; this.totalUncompressedBytes = 0L; for (StoreFile hsf : this.storeEngine.getStoreFileManager().getStorefiles()) { - StoreFile.Reader r = hsf.getReader(); + StoreFileReader r = hsf.getStoreFileReader(); if (r == null) { LOG.warn("StoreFile " + hsf + " has a null Reader"); continue; @@ -1925,7 +1947,7 @@ public class HStore implements Store { private boolean rowAtOrBeforeFromStoreFile(final StoreFile f, final GetClosestRowBeforeTracker state) throws IOException { - StoreFile.Reader r = f.getReader(); + StoreFileReader r = f.getStoreFileReader(); if (r == null) { LOG.warn("StoreFile " + f + " has a null Reader"); return false; @@ -2115,7 +2137,7 @@ public class HStore implements Store { public long getMaxStoreFileAge() { long earliestTS = Long.MAX_VALUE; for (StoreFile s: this.storeEngine.getStoreFileManager().getStorefiles()) { - StoreFile.Reader r = s.getReader(); + StoreFileReader r = s.getStoreFileReader(); if (r == null) { LOG.warn("StoreFile " + s + " has a null Reader"); continue; @@ -2134,7 +2156,7 @@ public class HStore implements Store { public long getMinStoreFileAge() { long latestTS = 0; for (StoreFile s: this.storeEngine.getStoreFileManager().getStorefiles()) { - StoreFile.Reader r = s.getReader(); + StoreFileReader r = s.getStoreFileReader(); if (r == null) { LOG.warn("StoreFile " + s + " has a null Reader"); continue; @@ -2153,7 +2175,7 @@ public class HStore implements Store { public long getAvgStoreFileAge() { long sum = 0, count = 0; for (StoreFile s: this.storeEngine.getStoreFileManager().getStorefiles()) { - StoreFile.Reader r = s.getReader(); + StoreFileReader r = s.getStoreFileReader(); if (r == null) { LOG.warn("StoreFile " + s + " has a null Reader"); continue; @@ -2203,7 +2225,7 @@ public class HStore implements Store { public long getStorefilesSize() { long size = 0; for (StoreFile s: this.storeEngine.getStoreFileManager().getStorefiles()) { - StoreFile.Reader r = s.getReader(); + StoreFileReader r = s.getStoreFileReader(); if (r == 
null) { LOG.warn("StoreFile " + s + " has a null Reader"); continue; @@ -2217,7 +2239,7 @@ public class HStore implements Store { public long getStorefilesIndexSize() { long size = 0; for (StoreFile s: this.storeEngine.getStoreFileManager().getStorefiles()) { - StoreFile.Reader r = s.getReader(); + StoreFileReader r = s.getStoreFileReader(); if (r == null) { LOG.warn("StoreFile " + s + " has a null Reader"); continue; @@ -2231,7 +2253,7 @@ public class HStore implements Store { public long getTotalStaticIndexSize() { long size = 0; for (StoreFile s : this.storeEngine.getStoreFileManager().getStorefiles()) { - StoreFile.Reader r = s.getReader(); + StoreFileReader r = s.getStoreFileReader(); if (r == null) { continue; } @@ -2244,7 +2266,7 @@ public class HStore implements Store { public long getTotalStaticBloomSize() { long size = 0; for (StoreFile s : this.storeEngine.getStoreFileManager().getStorefiles()) { - StoreFile.Reader r = s.getReader(); + StoreFileReader r = s.getStoreFileReader(); if (r == null) { continue; } @@ -2386,7 +2408,7 @@ public class HStore implements Store { for (Path storeFilePath : tempFiles) { try { StoreFile sf = HStore.this.commitFile(storeFilePath, cacheFlushSeqNum, status); - outputFileSize += sf.getReader().length(); + outputFileSize += sf.getStoreFileReader().length(); storeFiles.add(sf); } catch (IOException ex) { LOG.error("Failed to commit store file " + storeFilePath, ex); @@ -2446,13 +2468,13 @@ public class HStore implements Store { StoreFileInfo storeFileInfo = fs.getStoreFileInfo(getColumnFamilyName(), file); StoreFile storeFile = createStoreFileAndReader(storeFileInfo); storeFiles.add(storeFile); - HStore.this.storeSize += storeFile.getReader().length(); - HStore.this.totalUncompressedBytes += storeFile.getReader().getTotalUncompressedBytes(); + HStore.this.storeSize += storeFile.getStoreFileReader().length(); + HStore.this.totalUncompressedBytes += storeFile.getStoreFileReader().getTotalUncompressedBytes(); if (LOG.isInfoEnabled()) { LOG.info("Region: " + HStore.this.getRegionInfo().getEncodedName() + - " added " + storeFile + ", entries=" + storeFile.getReader().getEntries() + - ", sequenceid=" + + storeFile.getReader().getSequenceID() + - ", filesize=" + StringUtils.humanReadableInt(storeFile.getReader().length())); + " added " + storeFile + ", entries=" + storeFile.getStoreFileReader().getEntries() + + ", sequenceid=" + + storeFile.getStoreFileReader().getSequenceID() + + ", filesize=" + StringUtils.humanReadableInt(storeFile.getStoreFileReader().length())); } } @@ -2645,7 +2667,7 @@ public class HStore implements Store { for (final StoreFile file : compactedfiles) { synchronized (file) { try { - StoreFile.Reader r = file.getReader(); + StoreFileReader r = file.getStoreFileReader(); if (r == null) { if (LOG.isDebugEnabled()) { LOG.debug("The file " + file + " was closed but still not archived."); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java index 814370c..b0f7133 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java @@ -1563,17 +1563,33 @@ public class RegionCoprocessorHost * @param p path to the file * @param in {@link FSDataInputStreamWrapper} * @param size Full size of the file - * @param cacheConf * @param r original reference file. 
This will be not null only when reading a split file. * @return a Reader instance to use instead of the base reader if overriding * default behavior, null otherwise - * @throws IOException + * @deprecated Since 1.3. Removed in 2.0. Use {@link #preStoreFileOpenReader(FileSystem, Path, + * FSDataInputStreamWrapper, long, CacheConfig, Reference)}. */ + @Deprecated public StoreFile.Reader preStoreFileReaderOpen(final FileSystem fs, final Path p, final FSDataInputStreamWrapper in, final long size, final CacheConfig cacheConf, final Reference r) throws IOException { + return (StoreFile.Reader) preStoreFileOpenReader(fs, p, in, size, cacheConf, r); + } + + /** + * @param fs fileystem to read from + * @param p path to the file + * @param in {@link FSDataInputStreamWrapper} + * @param size Full size of the file + * @param r original reference file. This will be not null only when reading a split file. + * @return a Reader instance to use instead of the base reader if overriding + * default behavior, null otherwise + */ + public StoreFileReader preStoreFileOpenReader(final FileSystem fs, final Path p, + final FSDataInputStreamWrapper in, final long size, final CacheConfig cacheConf, + final Reference r) throws IOException { return execOperationWithResult(null, - coprocessors.isEmpty() ? null : new RegionOperationWithResult() { + coprocessors.isEmpty() ? null : new RegionOperationWithResult() { @Override public void call(RegionObserver oserver, ObserverContext ctx) throws IOException { @@ -1587,17 +1603,33 @@ public class RegionCoprocessorHost * @param p path to the file * @param in {@link FSDataInputStreamWrapper} * @param size Full size of the file - * @param cacheConf * @param r original reference file. This will be not null only when reading a split file. * @param reader the base reader instance * @return The reader to use - * @throws IOException + * @deprecated Since 1.3. Removed in 2.0. Use {@link #postStoreFileOpenReader(FileSystem, Path, + * FSDataInputStreamWrapper, long, CacheConfig, Reference, StoreFileReader)}. */ + @Deprecated public StoreFile.Reader postStoreFileReaderOpen(final FileSystem fs, final Path p, final FSDataInputStreamWrapper in, final long size, final CacheConfig cacheConf, final Reference r, final StoreFile.Reader reader) throws IOException { + return (StoreFile.Reader) postStoreFileOpenReader(fs, p, in, size, cacheConf, r, reader); + } + + /** + * @param fs fileystem to read from + * @param p path to the file + * @param in {@link FSDataInputStreamWrapper} + * @param size Full size of the file + * @param r original reference file. This will be not null only when reading a split file. + * @param reader the base reader instance + * @return The reader to use + */ + public StoreFileReader postStoreFileOpenReader(final FileSystem fs, final Path p, + final FSDataInputStreamWrapper in, final long size, final CacheConfig cacheConf, + final Reference r, final StoreFileReader reader) throws IOException { return execOperationWithResult(reader, - coprocessors.isEmpty() ? null : new RegionOperationWithResult() { + coprocessors.isEmpty() ? 
null : new RegionOperationWithResult() { @Override public void call(RegionObserver oserver, ObserverContext ctx) throws IOException { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java index dec27ad..b2e5e6c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java @@ -180,12 +180,14 @@ public interface Store extends HeapSize, StoreConfigInformation, PropagatingConf /** - * @param maxKeyCount * @param compression Compression algorithm to use * @param isCompaction whether we are creating a new file in a compaction * @param includeMVCCReadpoint whether we should out the MVCC readpoint * @return Writer for a new StoreFile in the tmp dir. + * @deprecated Since 1.3. Removed in 2.0. Use {@link #createWriterInTmpDir(long, + * Compression.Algorithm, boolean, boolean, boolean)} instead. */ + @Deprecated StoreFile.Writer createWriterInTmp( long maxKeyCount, Compression.Algorithm compression, @@ -195,13 +197,15 @@ public interface Store extends HeapSize, StoreConfigInformation, PropagatingConf ) throws IOException; /** - * @param maxKeyCount * @param compression Compression algorithm to use * @param isCompaction whether we are creating a new file in a compaction * @param includeMVCCReadpoint whether we should out the MVCC readpoint * @param shouldDropBehind should the writer drop caches behind writes * @return Writer for a new StoreFile in the tmp dir. + * @deprecated Since 1.3. Removed in 2.0. Use {@link #createWriterInTmpDir(long, + * Compression.Algorithm, boolean, boolean, boolean, boolean)} instead. */ + @Deprecated StoreFile.Writer createWriterInTmp( long maxKeyCount, Compression.Algorithm compression, @@ -211,8 +215,35 @@ public interface Store extends HeapSize, StoreConfigInformation, PropagatingConf boolean shouldDropBehind ) throws IOException; + /** + * @param compression Compression algorithm to use + * @param isCompaction whether we are creating a new file in a compaction + * @param includeMVCCReadpoint whether we should out the MVCC readpoint + * @return Writer for a new StoreFile in the tmp dir. + */ + StoreFileWriter createWriterInTmpDir( + long maxKeyCount, + Compression.Algorithm compression, + boolean isCompaction, + boolean includeMVCCReadpoint, + boolean includesTags + ) throws IOException; - + /** + * @param compression Compression algorithm to use + * @param isCompaction whether we are creating a new file in a compaction + * @param includeMVCCReadpoint whether we should out the MVCC readpoint + * @param shouldDropBehind should the writer drop caches behind writes + * @return Writer for a new StoreFile in the tmp dir. 
+ */ + StoreFileWriter createWriterInTmpDir( + long maxKeyCount, + Compression.Algorithm compression, + boolean isCompaction, + boolean includeMVCCReadpoint, + boolean includesTags, + boolean shouldDropBehind + ) throws IOException; // Compaction oriented methods diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java index 845a8d2..b029bfa 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java @@ -18,63 +18,46 @@ */ package org.apache.hadoop.hbase.regionserver; -import java.io.DataInput; +import org.apache.hadoop.hbase.util.Bytes; + +import com.google.common.annotations.VisibleForTesting; +import com.google.common.base.Function; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.Ordering; + import java.io.IOException; import java.net.InetSocketAddress; -import java.nio.ByteBuffer; -import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.Comparator; import java.util.Map; -import java.util.SortedSet; import java.util.UUID; import java.util.concurrent.atomic.AtomicBoolean; -import java.util.concurrent.atomic.AtomicInteger; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hbase.Cell; -import org.apache.hadoop.hbase.CellUtil; -import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HDFSBlocksDistribution; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.KeyValue.KVComparator; -import org.apache.hadoop.hbase.KeyValueUtil; import org.apache.hadoop.hbase.classification.InterfaceAudience; -import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.io.FSDataInputStreamWrapper; -import org.apache.hadoop.hbase.io.TimeRange; import org.apache.hadoop.hbase.io.hfile.BlockType; import org.apache.hadoop.hbase.io.hfile.CacheConfig; -import org.apache.hadoop.hbase.io.hfile.HFile; import org.apache.hadoop.hbase.io.hfile.HFileContext; -import org.apache.hadoop.hbase.io.hfile.HFileScanner; import org.apache.hadoop.hbase.io.hfile.HFileWriterV2; -import org.apache.hadoop.hbase.regionserver.compactions.Compactor; -import org.apache.hadoop.hbase.util.BloomFilter; import org.apache.hadoop.hbase.util.BloomFilterFactory; -import org.apache.hadoop.hbase.util.BloomFilterWriter; -import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.io.WritableUtils; - -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Function; -import com.google.common.base.Preconditions; -import com.google.common.collect.ImmutableList; -import com.google.common.collect.Ordering; /** * A Store data file. Stores usually have one or more of these files. They * are produced by flushing the memstore to disk. To - * create, instantiate a writer using {@link StoreFile.WriterBuilder} + * create, instantiate a writer using {@link StoreFileWriter.Builder} * and append data. Be sure to add any metadata before calling close on the * Writer (Use the appendMetadata convenience methods). On close, a StoreFile * is sitting in the Filesystem. To refer to it, create a StoreFile instance - * passing filesystem and path. To read, call {@link #createReader()}. 
+ * passing filesystem and path. To read, call {@link #createStoreFileReader()}. *

StoreFiles may also reference store files in another Store. * * The reason for this weird pattern where you use a different instance for the @@ -106,7 +89,7 @@ public class StoreFile { Bytes.toBytes("DELETE_FAMILY_COUNT"); /** Last Bloom filter key in FileInfo */ - private static final byte[] LAST_BLOOM_KEY = Bytes.toBytes("LAST_BLOOM_KEY"); + public static final byte[] LAST_BLOOM_KEY = Bytes.toBytes("LAST_BLOOM_KEY"); /** Key for Timerange information in metadata*/ public static final byte[] TIMERANGE_KEY = Bytes.toBytes("TIMERANGE"); @@ -179,7 +162,7 @@ public class StoreFile { private Map metadataMap; // StoreFile.Reader - private volatile Reader reader; + private volatile StoreFileReader reader; /** * Bloom filter type specified in column family configuration. Does not @@ -393,7 +376,7 @@ public class StoreFile { @VisibleForTesting public int getRefCount() { - return this.reader.refCount.get(); + return this.reader.getRefCount().get(); } /** @@ -418,7 +401,7 @@ public class StoreFile { * @throws IOException * @see #closeReader(boolean) */ - private Reader open(boolean canUseDropBehind) throws IOException { + private StoreFileReader open(boolean canUseDropBehind) throws IOException { if (this.reader != null) { throw new IllegalAccessError("Already open"); } @@ -514,15 +497,42 @@ public class StoreFile { return this.reader; } - public Reader createReader() throws IOException { - return createReader(false); + /** + * @deprecated: Since 1.3. Removed in 2.0. Use {@link #createStoreFileReader()}. + */ + @Deprecated + public StoreFile.Reader createReader() throws IOException { + return (StoreFile.Reader) createStoreFileReader(); } /** * @return Reader for StoreFile. creates if necessary - * @throws IOException + * @deprecated: Since 1.3. Removed in 2.0. Use {@link #createStoreFileReader(boolean)}. */ - public Reader createReader(boolean canUseDropBehind) throws IOException { + @Deprecated + public StoreFile.Reader createReader(boolean canUseDropBehind) throws IOException { + return (StoreFile.Reader) createStoreFileReader(canUseDropBehind); + } + + /** + * @return Current reader. Must call createStoreFileReader first else returns null. + * @see #createStoreFileReader() + * @deprecated: Since 1.3. Removed in 2.0. Use {@link #getStoreFileReader()}. + */ + @Deprecated + public StoreFile.Reader getReader() { + return (StoreFile.Reader) this.reader; + } + + + public StoreFileReader createStoreFileReader() throws IOException { + return createStoreFileReader(false); + } + + /** + * @return Reader for StoreFile. creates if necessary + */ + public StoreFileReader createStoreFileReader(boolean canUseDropBehind) throws IOException { if (this.reader == null) { try { this.reader = open(canUseDropBehind); @@ -541,10 +551,10 @@ public class StoreFile { } /** - * @return Current reader. Must call createReader first else returns null. - * @see #createReader() + * @return Current reader. Must call createStoreFileReader first else returns null. 
+ * @see #createStoreFileReader() */ - public Reader getReader() { + public StoreFileReader getStoreFileReader() { return this.reader; } @@ -603,125 +613,6 @@ public class StoreFile { return sb.toString(); } - @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="ICAST_INTEGER_MULTIPLY_CAST_TO_LONG", - justification="Will not overflow") - public static class WriterBuilder { - private final Configuration conf; - private final CacheConfig cacheConf; - private final FileSystem fs; - - private KeyValue.KVComparator comparator = KeyValue.COMPARATOR; - private BloomType bloomType = BloomType.NONE; - private long maxKeyCount = 0; - private Path dir; - private Path filePath; - private InetSocketAddress[] favoredNodes; - private HFileContext fileContext; - - public WriterBuilder(Configuration conf, CacheConfig cacheConf, - FileSystem fs) { - this.conf = conf; - this.cacheConf = cacheConf; - this.fs = fs; - } - - /** - * Use either this method or {@link #withFilePath}, but not both. - * @param dir Path to column family directory. The directory is created if - * does not exist. The file is given a unique name within this - * directory. - * @return this (for chained invocation) - */ - public WriterBuilder withOutputDir(Path dir) { - Preconditions.checkNotNull(dir); - this.dir = dir; - return this; - } - - /** - * Use either this method or {@link #withOutputDir}, but not both. - * @param filePath the StoreFile path to write - * @return this (for chained invocation) - */ - public WriterBuilder withFilePath(Path filePath) { - Preconditions.checkNotNull(filePath); - this.filePath = filePath; - return this; - } - - /** - * @param favoredNodes an array of favored nodes or possibly null - * @return this (for chained invocation) - */ - public WriterBuilder withFavoredNodes(InetSocketAddress[] favoredNodes) { - this.favoredNodes = favoredNodes; - return this; - } - - public WriterBuilder withComparator(KeyValue.KVComparator comparator) { - Preconditions.checkNotNull(comparator); - this.comparator = comparator; - return this; - } - - public WriterBuilder withBloomType(BloomType bloomType) { - Preconditions.checkNotNull(bloomType); - this.bloomType = bloomType; - return this; - } - - /** - * @param maxKeyCount estimated maximum number of keys we expect to add - * @return this (for chained invocation) - */ - public WriterBuilder withMaxKeyCount(long maxKeyCount) { - this.maxKeyCount = maxKeyCount; - return this; - } - - public WriterBuilder withFileContext(HFileContext fileContext) { - this.fileContext = fileContext; - return this; - } - - public WriterBuilder withShouldDropCacheBehind(boolean shouldDropCacheBehind/*NOT USED!!*/) { - // TODO: HAS NO EFFECT!!! FIX!! - return this; - } - /** - * Create a store file writer. Client is responsible for closing file when - * done. If metadata, add BEFORE closing using - * {@link Writer#appendMetadata}. - */ - public Writer build() throws IOException { - if ((dir == null ? 0 : 1) + (filePath == null ? 
0 : 1) != 1) { - throw new IllegalArgumentException("Either specify parent directory " + - "or file path"); - } - - if (dir == null) { - dir = filePath.getParent(); - } - - if (!fs.exists(dir)) { - fs.mkdirs(dir); - } - - if (filePath == null) { - filePath = getUniqueFile(fs, dir); - if (!BloomFilterFactory.isGeneralBloomEnabled(conf)) { - bloomType = BloomType.NONE; - } - } - - if (comparator == null) { - comparator = KeyValue.COMPARATOR; - } - return new Writer(fs, filePath, - conf, cacheConf, comparator, bloomType, maxKeyCount, favoredNodes, fileContext); - } - } - /** * @param fs * @param dir Directory to create file in. @@ -737,11 +628,11 @@ public class StoreFile { } public Long getMinimumTimestamp() { - return getReader().timeRange == null? null: getReader().timeRange.getMin(); + return getStoreFileReader().timeRange == null? null: getStoreFileReader().timeRange.getMin(); } public Long getMaximumTimestamp() { - return getReader().timeRange == null? null: getReader().timeRange.getMax(); + return getStoreFileReader().timeRange == null? null: getStoreFileReader().timeRange.getMax(); } @@ -779,889 +670,6 @@ public class StoreFile { } /** - * A StoreFile writer. Use this to read/write HBase Store Files. It is package - * local because it is an implementation detail of the HBase regionserver. - */ - public static class Writer implements Compactor.CellSink { - private final BloomFilterWriter generalBloomFilterWriter; - private final BloomFilterWriter deleteFamilyBloomFilterWriter; - private final BloomType bloomType; - private byte[] lastBloomKey; - private int lastBloomKeyOffset, lastBloomKeyLen; - private KVComparator kvComparator; - private Cell lastCell = null; - private long earliestPutTs = HConstants.LATEST_TIMESTAMP; - private Cell lastDeleteFamilyCell = null; - private long deleteFamilyCnt = 0; - - TimeRangeTracker timeRangeTracker = new TimeRangeTracker(); - /** - * timeRangeTrackerSet is used to figure if we were passed a filled-out TimeRangeTracker or not. - * When flushing a memstore, we set the TimeRangeTracker that it accumulated during updates to - * memstore in here into this Writer and use this variable to indicate that we do not need to - * recalculate the timeRangeTracker bounds; it was done already as part of add-to-memstore. - * A completed TimeRangeTracker is not set in cases of compactions when it is recalculated. - */ - boolean timeRangeTrackerSet = false; - - protected HFile.Writer writer; - - /** - * Creates an HFile.Writer that also write helpful meta data. - * @param fs file system to write to - * @param path file name to create - * @param conf user configuration - * @param comparator key comparator - * @param bloomType bloom filter setting - * @param maxKeys the expected maximum number of keys to be added. Was used - * for Bloom filter size in {@link HFile} format version 1. 
- * @param favoredNodes - * @param fileContext - The HFile context - * @throws IOException problem writing to FS - */ - private Writer(FileSystem fs, Path path, - final Configuration conf, - CacheConfig cacheConf, - final KVComparator comparator, BloomType bloomType, long maxKeys, - InetSocketAddress[] favoredNodes, HFileContext fileContext) - throws IOException { - writer = HFile.getWriterFactory(conf, cacheConf) - .withPath(fs, path) - .withComparator(comparator) - .withFavoredNodes(favoredNodes) - .withFileContext(fileContext) - .create(); - - this.kvComparator = comparator; - - generalBloomFilterWriter = BloomFilterFactory.createGeneralBloomAtWrite( - conf, cacheConf, bloomType, - (int) Math.min(maxKeys, Integer.MAX_VALUE), writer); - - if (generalBloomFilterWriter != null) { - this.bloomType = bloomType; - if (LOG.isTraceEnabled()) LOG.trace("Bloom filter type for " + path + ": " + - this.bloomType + ", " + generalBloomFilterWriter.getClass().getSimpleName()); - } else { - // Not using Bloom filters. - this.bloomType = BloomType.NONE; - } - - // initialize delete family Bloom filter when there is NO RowCol Bloom - // filter - if (this.bloomType != BloomType.ROWCOL) { - this.deleteFamilyBloomFilterWriter = BloomFilterFactory - .createDeleteBloomAtWrite(conf, cacheConf, - (int) Math.min(maxKeys, Integer.MAX_VALUE), writer); - } else { - deleteFamilyBloomFilterWriter = null; - } - if (deleteFamilyBloomFilterWriter != null) { - if (LOG.isTraceEnabled()) LOG.trace("Delete Family Bloom filter type for " + path + ": " - + deleteFamilyBloomFilterWriter.getClass().getSimpleName()); - } - } - - /** - * Writes meta data. - * Call before {@link #close()} since its written as meta data to this file. - * @param maxSequenceId Maximum sequence id. - * @param majorCompaction True if this file is product of a major compaction - * @throws IOException problem writing to FS - */ - public void appendMetadata(final long maxSequenceId, final boolean majorCompaction) - throws IOException { - writer.appendFileInfo(MAX_SEQ_ID_KEY, Bytes.toBytes(maxSequenceId)); - writer.appendFileInfo(MAJOR_COMPACTION_KEY, - Bytes.toBytes(majorCompaction)); - appendTrackedTimestampsToMetadata(); - } - - /** - * Add TimestampRange and earliest put timestamp to Metadata - */ - public void appendTrackedTimestampsToMetadata() throws IOException { - appendFileInfo(TIMERANGE_KEY,WritableUtils.toByteArray(timeRangeTracker)); - appendFileInfo(EARLIEST_PUT_TS, Bytes.toBytes(earliestPutTs)); - } - - /** - * Set TimeRangeTracker. - * Called when flushing to pass us a pre-calculated TimeRangeTracker, one made during updates - * to memstore so we don't have to make one ourselves as Cells get appended. Call before first - * append. If this method is not called, we will calculate our own range of the Cells that - * comprise this StoreFile (and write them on the end as metadata). It is good to have this stuff - * passed because it is expensive to make. - */ - public void setTimeRangeTracker(final TimeRangeTracker trt) { - this.timeRangeTracker = trt; - timeRangeTrackerSet = true; - } - - /** - * Record the earlest Put timestamp. 
- * - * If the timeRangeTracker is not set, - * update TimeRangeTracker to include the timestamp of this key - * @param cell - */ - public void trackTimestamps(final Cell cell) { - if (KeyValue.Type.Put.getCode() == cell.getTypeByte()) { - earliestPutTs = Math.min(earliestPutTs, cell.getTimestamp()); - } - if (!timeRangeTrackerSet) { - timeRangeTracker.includeTimestamp(cell); - } - } - - private void appendGeneralBloomfilter(final Cell cell) throws IOException { - if (this.generalBloomFilterWriter != null) { - // only add to the bloom filter on a new, unique key - boolean newKey = true; - if (this.lastCell != null) { - switch(bloomType) { - case ROW: - newKey = ! kvComparator.matchingRows(cell, lastCell); - break; - case ROWCOL: - newKey = ! kvComparator.matchingRowColumn(cell, lastCell); - break; - case NONE: - newKey = false; - break; - default: - throw new IOException("Invalid Bloom filter type: " + bloomType + - " (ROW or ROWCOL expected)"); - } - } - if (newKey) { - /* - * http://2.bp.blogspot.com/_Cib_A77V54U/StZMrzaKufI/AAAAAAAAADo/ZhK7bGoJdMQ/s400/KeyValue.png - * Key = RowLen + Row + FamilyLen + Column [Family + Qualifier] + TimeStamp - * - * 2 Types of Filtering: - * 1. Row = Row - * 2. RowCol = Row + Qualifier - */ - byte[] bloomKey; - int bloomKeyOffset, bloomKeyLen; - - switch (bloomType) { - case ROW: - bloomKey = cell.getRowArray(); - bloomKeyOffset = cell.getRowOffset(); - bloomKeyLen = cell.getRowLength(); - break; - case ROWCOL: - // merge(row, qualifier) - // TODO: could save one buffer copy in case of compound Bloom - // filters when this involves creating a KeyValue - bloomKey = generalBloomFilterWriter.createBloomKey(cell.getRowArray(), - cell.getRowOffset(), cell.getRowLength(), cell.getQualifierArray(), - cell.getQualifierOffset(), cell.getQualifierLength()); - bloomKeyOffset = 0; - bloomKeyLen = bloomKey.length; - break; - default: - throw new IOException("Invalid Bloom filter type: " + bloomType + - " (ROW or ROWCOL expected)"); - } - generalBloomFilterWriter.add(bloomKey, bloomKeyOffset, bloomKeyLen); - if (lastBloomKey != null - && generalBloomFilterWriter.getComparator().compareFlatKey(bloomKey, - bloomKeyOffset, bloomKeyLen, lastBloomKey, - lastBloomKeyOffset, lastBloomKeyLen) <= 0) { - throw new IOException("Non-increasing Bloom keys: " - + Bytes.toStringBinary(bloomKey, bloomKeyOffset, bloomKeyLen) - + " after " - + Bytes.toStringBinary(lastBloomKey, lastBloomKeyOffset, - lastBloomKeyLen)); - } - lastBloomKey = bloomKey; - lastBloomKeyOffset = bloomKeyOffset; - lastBloomKeyLen = bloomKeyLen; - this.lastCell = cell; - } - } - } - - private void appendDeleteFamilyBloomFilter(final Cell cell) - throws IOException { - if (!CellUtil.isDeleteFamily(cell) && !CellUtil.isDeleteFamilyVersion(cell)) { - return; - } - - // increase the number of delete family in the store file - deleteFamilyCnt++; - if (null != this.deleteFamilyBloomFilterWriter) { - boolean newKey = true; - if (lastDeleteFamilyCell != null) { - newKey = !kvComparator.matchingRows(cell, lastDeleteFamilyCell); - } - if (newKey) { - this.deleteFamilyBloomFilterWriter.add(cell.getRowArray(), - cell.getRowOffset(), cell.getRowLength()); - this.lastDeleteFamilyCell = cell; - } - } - } - - public void append(final Cell cell) throws IOException { - appendGeneralBloomfilter(cell); - appendDeleteFamilyBloomFilter(cell); - writer.append(cell); - trackTimestamps(cell); - } - - public Path getPath() { - return this.writer.getPath(); - } - - boolean hasGeneralBloom() { - return this.generalBloomFilterWriter != 
null; - } - - /** - * For unit testing only. - * - * @return the Bloom filter used by this writer. - */ - BloomFilterWriter getGeneralBloomWriter() { - return generalBloomFilterWriter; - } - - private boolean closeBloomFilter(BloomFilterWriter bfw) throws IOException { - boolean haveBloom = (bfw != null && bfw.getKeyCount() > 0); - if (haveBloom) { - bfw.compactBloom(); - } - return haveBloom; - } - - private boolean closeGeneralBloomFilter() throws IOException { - boolean hasGeneralBloom = closeBloomFilter(generalBloomFilterWriter); - - // add the general Bloom filter writer and append file info - if (hasGeneralBloom) { - writer.addGeneralBloomFilter(generalBloomFilterWriter); - writer.appendFileInfo(BLOOM_FILTER_TYPE_KEY, - Bytes.toBytes(bloomType.toString())); - if (lastBloomKey != null) { - writer.appendFileInfo(LAST_BLOOM_KEY, Arrays.copyOfRange( - lastBloomKey, lastBloomKeyOffset, lastBloomKeyOffset - + lastBloomKeyLen)); - } - } - return hasGeneralBloom; - } - - private boolean closeDeleteFamilyBloomFilter() throws IOException { - boolean hasDeleteFamilyBloom = closeBloomFilter(deleteFamilyBloomFilterWriter); - - // add the delete family Bloom filter writer - if (hasDeleteFamilyBloom) { - writer.addDeleteFamilyBloomFilter(deleteFamilyBloomFilterWriter); - } - - // append file info about the number of delete family kvs - // even if there is no delete family Bloom. - writer.appendFileInfo(DELETE_FAMILY_COUNT, - Bytes.toBytes(this.deleteFamilyCnt)); - - return hasDeleteFamilyBloom; - } - - public void close() throws IOException { - boolean hasGeneralBloom = this.closeGeneralBloomFilter(); - boolean hasDeleteFamilyBloom = this.closeDeleteFamilyBloomFilter(); - - writer.close(); - - // Log final Bloom filter statistics. This needs to be done after close() - // because compound Bloom filters might be finalized as part of closing. - if (StoreFile.LOG.isTraceEnabled()) { - StoreFile.LOG.trace((hasGeneralBloom ? "" : "NO ") + "General Bloom and " + - (hasDeleteFamilyBloom ? "" : "NO ") + "DeleteFamily" + " was added to HFile " + - getPath()); - } - - } - - public void appendFileInfo(byte[] key, byte[] value) throws IOException { - writer.appendFileInfo(key, value); - } - - /** For use in testing, e.g. {@link org.apache.hadoop.hbase.regionserver.CreateRandomStoreFile} - */ - HFile.Writer getHFileWriter() { - return writer; - } - } - - /** - * Reader for a StoreFile. - */ - public static class Reader { - private static final Log LOG = LogFactory.getLog(Reader.class.getName()); - - protected BloomFilter generalBloomFilter = null; - protected BloomFilter deleteFamilyBloomFilter = null; - protected BloomType bloomFilterType; - private final HFile.Reader reader; - protected TimeRange timeRange; - protected long sequenceID = -1; - private byte[] lastBloomKey; - private long deleteFamilyCnt = -1; - private boolean bulkLoadResult = false; - // Counter that is incremented every time a scanner is created on the - // store file. It is decremented when the scan on the store file is - // done. 
- private AtomicInteger refCount = new AtomicInteger(0); - // Indicates if the file got compacted - private volatile boolean compactedAway = false; - - public Reader(FileSystem fs, Path path, CacheConfig cacheConf, Configuration conf) - throws IOException { - reader = HFile.createReader(fs, path, cacheConf, conf); - bloomFilterType = BloomType.NONE; - } - - void markCompactedAway() { - this.compactedAway = true; - } - - public Reader(FileSystem fs, Path path, FSDataInputStreamWrapper in, long size, - CacheConfig cacheConf, Configuration conf) throws IOException { - reader = HFile.createReader(fs, path, in, size, cacheConf, conf); - bloomFilterType = BloomType.NONE; - } - - public void setReplicaStoreFile(boolean isPrimaryReplicaStoreFile) { - reader.setPrimaryReplicaReader(isPrimaryReplicaStoreFile); - } - public boolean isPrimaryReplicaReader() { - return reader.isPrimaryReplicaReader(); - } - - /** - * ONLY USE DEFAULT CONSTRUCTOR FOR UNIT TESTS - */ - Reader() { - this.reader = null; - } - - public KVComparator getComparator() { - return reader.getComparator(); - } - - /** - * Get a scanner to scan over this StoreFile. Do not use - * this overload if using this scanner for compactions. - * - * @param cacheBlocks should this scanner cache blocks? - * @param pread use pread (for highly concurrent small readers) - * @return a scanner - */ - public StoreFileScanner getStoreFileScanner(boolean cacheBlocks, - boolean pread) { - return getStoreFileScanner(cacheBlocks, pread, false, - // 0 is passed as readpoint because this method is only used by test - // where StoreFile is directly operated upon - 0); - } - - /** - * Get a scanner to scan over this StoreFile. - * - * @param cacheBlocks should this scanner cache blocks? - * @param pread use pread (for highly concurrent small readers) - * @param isCompaction is scanner being used for compaction? - * @return a scanner - */ - public StoreFileScanner getStoreFileScanner(boolean cacheBlocks, - boolean pread, - boolean isCompaction, long readPt) { - // Increment the ref count - refCount.incrementAndGet(); - return new StoreFileScanner(this, - getScanner(cacheBlocks, pread, isCompaction), - !isCompaction, reader.hasMVCCInfo(), readPt); - } - - /** - * Decrement the ref count associated with the reader when ever a scanner associated - * with the reader is closed - */ - void decrementRefCount() { - refCount.decrementAndGet(); - } - - /** - * @return true if the file is still used in reads - */ - public boolean isReferencedInReads() { - return refCount.get() != 0; - } - - /** - * @return true if the file is compacted - */ - public boolean isCompactedAway() { - return this.compactedAway; - } - - /** - * @deprecated Do not write further code which depends on this call. Instead - * use getStoreFileScanner() which uses the StoreFileScanner class/interface - * which is the preferred way to scan a store with higher level concepts. - * - * @param cacheBlocks should we cache the blocks? - * @param pread use pread (for concurrent small readers) - * @return the underlying HFileScanner - */ - @Deprecated - public HFileScanner getScanner(boolean cacheBlocks, boolean pread) { - return getScanner(cacheBlocks, pread, false); - } - - /** - * @deprecated Do not write further code which depends on this call. Instead - * use getStoreFileScanner() which uses the StoreFileScanner class/interface - * which is the preferred way to scan a store with higher level concepts. - * - * @param cacheBlocks - * should we cache the blocks? 
- * @param pread - * use pread (for concurrent small readers) - * @param isCompaction - * is scanner being used for compaction? - * @return the underlying HFileScanner - */ - @Deprecated - public HFileScanner getScanner(boolean cacheBlocks, boolean pread, - boolean isCompaction) { - return reader.getScanner(cacheBlocks, pread, isCompaction); - } - - public void close(boolean evictOnClose) throws IOException { - reader.close(evictOnClose); - } - - /** - * Check if this storeFile may contain keys within the TimeRange that - * have not expired (i.e. not older than oldestUnexpiredTS). - * @param timeRange the timeRange to restrict - * @param oldestUnexpiredTS the oldest timestamp that is not expired, as - * determined by the column family's TTL - * @return false if queried keys definitely don't exist in this StoreFile - */ - boolean passesTimerangeFilter(TimeRange tr, long oldestUnexpiredTS) { - return this.timeRange == null? true: - this.timeRange.includesTimeRange(tr) && this.timeRange.getMax() >= oldestUnexpiredTS; - } - - /** - * Checks whether the given scan passes the Bloom filter (if present). Only - * checks Bloom filters for single-row or single-row-column scans. Bloom - * filter checking for multi-gets is implemented as part of the store - * scanner system (see {@link StoreFileScanner#seekExactly}) and uses - * the lower-level API {@link #passesGeneralBloomFilter(byte[], int, int, byte[], - * int, int)}. - * - * @param scan the scan specification. Used to determine the row, and to - * check whether this is a single-row ("get") scan. - * @param columns the set of columns. Only used for row-column Bloom - * filters. - * @return true if the scan with the given column set passes the Bloom - * filter, or if the Bloom filter is not applicable for the scan. - * False if the Bloom filter is applicable and the scan fails it. - */ - boolean passesBloomFilter(Scan scan, - final SortedSet columns) { - // Multi-column non-get scans will use Bloom filters through the - // lower-level API function that this function calls. - if (!scan.isGetScan()) { - return true; - } - - byte[] row = scan.getStartRow(); - switch (this.bloomFilterType) { - case ROW: - return passesGeneralBloomFilter(row, 0, row.length, null, 0, 0); - - case ROWCOL: - if (columns != null && columns.size() == 1) { - byte[] column = columns.first(); - return passesGeneralBloomFilter(row, 0, row.length, column, 0, - column.length); - } - - // For multi-column queries the Bloom filter is checked from the - // seekExact operation. - return true; - - default: - return true; - } - } - - public boolean passesDeleteFamilyBloomFilter(byte[] row, int rowOffset, - int rowLen) { - // Cache Bloom filter as a local variable in case it is set to null by - // another thread on an IO error. - BloomFilter bloomFilter = this.deleteFamilyBloomFilter; - - // Empty file or there is no delete family at all - if (reader.getTrailer().getEntryCount() == 0 || deleteFamilyCnt == 0) { - return false; - } - - if (bloomFilter == null) { - return true; - } - - try { - if (!bloomFilter.supportsAutoLoading()) { - return true; - } - return bloomFilter.contains(row, rowOffset, rowLen, null); - } catch (IllegalArgumentException e) { - LOG.error("Bad Delete Family bloom filter data -- proceeding without", - e); - setDeleteFamilyBloomFilterFaulty(); - } - - return true; - } - - /** - * A method for checking Bloom filters. Called directly from - * StoreFileScanner in case of a multi-column query. 
- * - * @param row - * @param rowOffset - * @param rowLen - * @param col - * @param colOffset - * @param colLen - * @return True if passes - */ - public boolean passesGeneralBloomFilter(byte[] row, int rowOffset, - int rowLen, byte[] col, int colOffset, int colLen) { - // Cache Bloom filter as a local variable in case it is set to null by - // another thread on an IO error. - BloomFilter bloomFilter = this.generalBloomFilter; - if (bloomFilter == null) { - return true; - } - - byte[] key; - switch (bloomFilterType) { - case ROW: - if (col != null) { - throw new RuntimeException("Row-only Bloom filter called with " + - "column specified"); - } - if (rowOffset != 0 || rowLen != row.length) { - throw new AssertionError("For row-only Bloom filters the row " - + "must occupy the whole array"); - } - key = row; - break; - - case ROWCOL: - key = bloomFilter.createBloomKey(row, rowOffset, rowLen, col, - colOffset, colLen); - - break; - - default: - return true; - } - - // Empty file - if (reader.getTrailer().getEntryCount() == 0) - return false; - - try { - boolean shouldCheckBloom; - ByteBuffer bloom; - if (bloomFilter.supportsAutoLoading()) { - bloom = null; - shouldCheckBloom = true; - } else { - bloom = reader.getMetaBlock(HFile.BLOOM_FILTER_DATA_KEY, - true); - shouldCheckBloom = bloom != null; - } - - if (shouldCheckBloom) { - boolean exists; - - // Whether the primary Bloom key is greater than the last Bloom key - // from the file info. For row-column Bloom filters this is not yet - // a sufficient condition to return false. - boolean keyIsAfterLast = lastBloomKey != null - && bloomFilter.getComparator().compareFlatKey(key, lastBloomKey) > 0; - - if (bloomFilterType == BloomType.ROWCOL) { - // Since a Row Delete is essentially a DeleteFamily applied to all - // columns, a file might be skipped if using row+col Bloom filter. - // In order to ensure this file is included an additional check is - // required looking only for a row bloom. - byte[] rowBloomKey = bloomFilter.createBloomKey(row, rowOffset, rowLen, - null, 0, 0); - - if (keyIsAfterLast - && bloomFilter.getComparator().compareFlatKey(rowBloomKey, - lastBloomKey) > 0) { - exists = false; - } else { - exists = - bloomFilter.contains(key, 0, key.length, bloom) || - bloomFilter.contains(rowBloomKey, 0, rowBloomKey.length, - bloom); - } - } else { - exists = !keyIsAfterLast - && bloomFilter.contains(key, 0, key.length, bloom); - } - - return exists; - } - } catch (IOException e) { - LOG.error("Error reading bloom filter data -- proceeding without", - e); - setGeneralBloomFilterFaulty(); - } catch (IllegalArgumentException e) { - LOG.error("Bad bloom filter data -- proceeding without", e); - setGeneralBloomFilterFaulty(); - } - - return true; - } - - /** - * Checks whether the given scan rowkey range overlaps with the current storefile's - * @param scan the scan specification. Used to determine the rowkey range. - * @return true if there is overlap, false otherwise - */ - public boolean passesKeyRangeFilter(Scan scan) { - if (this.getFirstKey() == null || this.getLastKey() == null) { - // the file is empty - return false; - } - if (Bytes.equals(scan.getStartRow(), HConstants.EMPTY_START_ROW) - && Bytes.equals(scan.getStopRow(), HConstants.EMPTY_END_ROW)) { - return true; - } - KeyValue smallestScanKeyValue = scan.isReversed() ? KeyValueUtil - .createFirstOnRow(scan.getStopRow()) : KeyValueUtil.createFirstOnRow(scan - .getStartRow()); - KeyValue largestScanKeyValue = scan.isReversed() ? 
KeyValueUtil - .createLastOnRow(scan.getStartRow()) : KeyValueUtil.createLastOnRow(scan - .getStopRow()); - boolean nonOverLapping = (getComparator().compareFlatKey( - this.getFirstKey(), largestScanKeyValue.getKey()) > 0 && !Bytes - .equals(scan.isReversed() ? scan.getStartRow() : scan.getStopRow(), - HConstants.EMPTY_END_ROW)) - || getComparator().compareFlatKey(this.getLastKey(), - smallestScanKeyValue.getKey()) < 0; - return !nonOverLapping; - } - - public Map loadFileInfo() throws IOException { - Map fi = reader.loadFileInfo(); - - byte[] b = fi.get(BLOOM_FILTER_TYPE_KEY); - if (b != null) { - bloomFilterType = BloomType.valueOf(Bytes.toString(b)); - } - - lastBloomKey = fi.get(LAST_BLOOM_KEY); - byte[] cnt = fi.get(DELETE_FAMILY_COUNT); - if (cnt != null) { - deleteFamilyCnt = Bytes.toLong(cnt); - } - - return fi; - } - - public void loadBloomfilter() { - this.loadBloomfilter(BlockType.GENERAL_BLOOM_META); - this.loadBloomfilter(BlockType.DELETE_FAMILY_BLOOM_META); - } - - private void loadBloomfilter(BlockType blockType) { - try { - if (blockType == BlockType.GENERAL_BLOOM_META) { - if (this.generalBloomFilter != null) - return; // Bloom has been loaded - - DataInput bloomMeta = reader.getGeneralBloomFilterMetadata(); - if (bloomMeta != null) { - // sanity check for NONE Bloom filter - if (bloomFilterType == BloomType.NONE) { - throw new IOException( - "valid bloom filter type not found in FileInfo"); - } else { - generalBloomFilter = BloomFilterFactory.createFromMeta(bloomMeta, - reader); - if (LOG.isTraceEnabled()) { - LOG.trace("Loaded " + bloomFilterType.toString() + " " - + generalBloomFilter.getClass().getSimpleName() - + " metadata for " + reader.getName()); - } - } - } - } else if (blockType == BlockType.DELETE_FAMILY_BLOOM_META) { - if (this.deleteFamilyBloomFilter != null) - return; // Bloom has been loaded - - DataInput bloomMeta = reader.getDeleteBloomFilterMetadata(); - if (bloomMeta != null) { - deleteFamilyBloomFilter = BloomFilterFactory.createFromMeta( - bloomMeta, reader); - LOG.info("Loaded Delete Family Bloom (" - + deleteFamilyBloomFilter.getClass().getSimpleName() - + ") metadata for " + reader.getName()); - } - } else { - throw new RuntimeException("Block Type: " + blockType.toString() - + "is not supported for Bloom filter"); - } - } catch (IOException e) { - LOG.error("Error reading bloom filter meta for " + blockType - + " -- proceeding without", e); - setBloomFilterFaulty(blockType); - } catch (IllegalArgumentException e) { - LOG.error("Bad bloom filter meta " + blockType - + " -- proceeding without", e); - setBloomFilterFaulty(blockType); - } - } - - private void setBloomFilterFaulty(BlockType blockType) { - if (blockType == BlockType.GENERAL_BLOOM_META) { - setGeneralBloomFilterFaulty(); - } else if (blockType == BlockType.DELETE_FAMILY_BLOOM_META) { - setDeleteFamilyBloomFilterFaulty(); - } - } - - /** - * The number of Bloom filter entries in this store file, or an estimate - * thereof, if the Bloom filter is not loaded. This always returns an upper - * bound of the number of Bloom filter entries. - * - * @return an estimate of the number of Bloom filter entries in this file - */ - public long getFilterEntries() { - return generalBloomFilter != null ? 
generalBloomFilter.getKeyCount() - : reader.getEntries(); - } - - public void setGeneralBloomFilterFaulty() { - generalBloomFilter = null; - } - - public void setDeleteFamilyBloomFilterFaulty() { - this.deleteFamilyBloomFilter = null; - } - - public byte[] getLastKey() { - return reader.getLastKey(); - } - - public byte[] getLastRowKey() { - return reader.getLastRowKey(); - } - - public byte[] midkey() throws IOException { - return reader.midkey(); - } - - public long length() { - return reader.length(); - } - - public long getTotalUncompressedBytes() { - return reader.getTrailer().getTotalUncompressedBytes(); - } - - public long getEntries() { - return reader.getEntries(); - } - - public long getDeleteFamilyCnt() { - return deleteFamilyCnt; - } - - public byte[] getFirstKey() { - return reader.getFirstKey(); - } - - public long indexSize() { - return reader.indexSize(); - } - - public BloomType getBloomFilterType() { - return this.bloomFilterType; - } - - public long getSequenceID() { - return sequenceID; - } - - public void setSequenceID(long sequenceID) { - this.sequenceID = sequenceID; - } - - public void setBulkLoaded(boolean bulkLoadResult) { - this.bulkLoadResult = bulkLoadResult; - } - - public boolean isBulkLoaded() { - return this.bulkLoadResult; - } - - BloomFilter getGeneralBloomFilter() { - return generalBloomFilter; - } - - long getUncompressedDataIndexSize() { - return reader.getTrailer().getUncompressedDataIndexSize(); - } - - public long getTotalBloomSize() { - if (generalBloomFilter == null) - return 0; - return generalBloomFilter.getByteSize(); - } - - public int getHFileVersion() { - return reader.getTrailer().getMajorVersion(); - } - - public int getHFileMinorVersion() { - return reader.getTrailer().getMinorVersion(); - } - - public HFile.Reader getHFileReader() { - return reader; - } - - void disableBloomFilterForTesting() { - generalBloomFilter = null; - this.deleteFamilyBloomFilter = null; - } - - public long getMaxTimestamp() { - return timeRange == null ? Long.MAX_VALUE : timeRange.getMax(); - } - } - - /** * Useful comparators for comparing StoreFiles. */ public abstract static class Comparators { @@ -1704,8 +712,8 @@ public class StoreFile { private static class GetFileSize implements Function { @Override public Long apply(StoreFile sf) { - if (sf.getReader() != null) { - return sf.getReader().length(); + if (sf.getStoreFileReader() != null) { + return sf.getStoreFileReader().length(); } else { // the reader may be null for the compacted files and if the archiving // had failed. @@ -1736,4 +744,34 @@ public class StoreFile { } } } + + /** + * @deprecated Since 1.3, removed in 2.0. This dummy is here only to maintain backward + * compatibility for some coprocessor functions. New uses should use {@link StoreFileWriter}. + */ + @Deprecated + public class Writer extends StoreFileWriter { + Writer(FileSystem fs, Path path, final Configuration conf, CacheConfig cacheConf, + final KeyValue.KVComparator comparator, BloomType bloomType, long maxKeys, + InetSocketAddress[] favoredNodes, HFileContext fileContext) throws IOException { + super(fs, path, conf, cacheConf, comparator, bloomType, maxKeys, favoredNodes, fileContext); + } + } + + /** + * @deprecated Since 1.3, removed in 2.0. This dummy is here only to maintain backward + * compatibility for some coprocessor functions. New uses should use {@link StoreFileReader}. 
+ */ + @Deprecated + public class Reader extends StoreFileReader { + public Reader(FileSystem fs, Path path, CacheConfig cacheConf, Configuration conf) + throws IOException { + super(fs, path, cacheConf, conf); + } + + public Reader(FileSystem fs, Path path, FSDataInputStreamWrapper in, long size, + CacheConfig cacheConf, Configuration conf) throws IOException { + super(fs, path, in, size, cacheConf, conf); + } + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileInfo.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileInfo.java index 08259de..834ca9e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileInfo.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileInfo.java @@ -222,7 +222,7 @@ public class StoreFileInfo { * @param cacheConf The cache configuration and block cache reference. * @return The StoreFile.Reader for the file */ - public StoreFile.Reader open(final FileSystem fs, + public StoreFileReader open(final FileSystem fs, final CacheConfig cacheConf, final boolean canUseDropBehind) throws IOException { FSDataInputStreamWrapper in; FileStatus status; @@ -246,9 +246,9 @@ public class StoreFileInfo { long length = status.getLen(); hdfsBlocksDistribution = computeHDFSBlocksDistribution(fs); - StoreFile.Reader reader = null; + StoreFileReader reader = null; if (this.coprocessorHost != null) { - reader = this.coprocessorHost.preStoreFileReaderOpen(fs, this.getPath(), in, length, + reader = this.coprocessorHost.preStoreFileOpenReader(fs, this.getPath(), in, length, cacheConf, reference); } if (reader == null) { @@ -256,11 +256,11 @@ public class StoreFileInfo { reader = new HalfStoreFileReader(fs, this.getPath(), in, length, cacheConf, reference, conf); } else { - reader = new StoreFile.Reader(fs, status.getPath(), in, length, cacheConf, conf); + reader = new StoreFileReader(fs, status.getPath(), in, length, cacheConf, conf); } } if (this.coprocessorHost != null) { - reader = this.coprocessorHost.postStoreFileReaderOpen(fs, this.getPath(), in, length, + reader = this.coprocessorHost.postStoreFileOpenReader(fs, this.getPath(), in, length, cacheConf, reference, reader); } return reader; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileReader.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileReader.java new file mode 100644 index 0000000..e880a00 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileReader.java @@ -0,0 +1,616 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.regionserver; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.KeyValue; +import org.apache.hadoop.hbase.KeyValue.KVComparator; +import org.apache.hadoop.hbase.KeyValueUtil; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.client.Scan; +import org.apache.hadoop.hbase.io.FSDataInputStreamWrapper; +import org.apache.hadoop.hbase.io.TimeRange; +import org.apache.hadoop.hbase.io.hfile.BlockType; +import org.apache.hadoop.hbase.io.hfile.CacheConfig; +import org.apache.hadoop.hbase.io.hfile.HFile; +import org.apache.hadoop.hbase.io.hfile.HFileScanner; +import org.apache.hadoop.hbase.util.BloomFilter; +import org.apache.hadoop.hbase.util.BloomFilterFactory; +import org.apache.hadoop.hbase.util.Bytes; + +import java.io.DataInput; +import java.io.IOException; +import java.nio.ByteBuffer; +import java.util.Map; +import java.util.SortedSet; +import java.util.concurrent.atomic.AtomicInteger; + +/** + * Reader for a StoreFile. + */ +@InterfaceAudience.Private +public class StoreFileReader { + public AtomicInteger getRefCount() { + return refCount; + } + + private static final Log LOG = LogFactory.getLog(StoreFileReader.class.getName()); + + protected BloomFilter generalBloomFilter = null; + protected BloomFilter deleteFamilyBloomFilter = null; + protected BloomType bloomFilterType; + private final HFile.Reader reader; + protected TimeRange timeRange; + protected long sequenceID = -1; + private byte[] lastBloomKey; + private long deleteFamilyCnt = -1; + private boolean bulkLoadResult = false; + // Counter that is incremented every time a scanner is created on the + // store file. It is decremented when the scan on the store file is + // done. + private AtomicInteger refCount = new AtomicInteger(0); + // Indicates if the file got compacted + private volatile boolean compactedAway = false; + + public StoreFileReader(FileSystem fs, Path path, CacheConfig cacheConf, Configuration conf) + throws IOException { + reader = HFile.createReader(fs, path, cacheConf, conf); + bloomFilterType = BloomType.NONE; + } + + void markCompactedAway() { + this.compactedAway = true; + } + + public StoreFileReader(FileSystem fs, Path path, FSDataInputStreamWrapper in, long size, + CacheConfig cacheConf, Configuration conf) throws IOException { + reader = HFile.createReader(fs, path, in, size, cacheConf, conf); + bloomFilterType = BloomType.NONE; + } + + public void setReplicaStoreFile(boolean isPrimaryReplicaStoreFile) { + reader.setPrimaryReplicaReader(isPrimaryReplicaStoreFile); + } + public boolean isPrimaryReplicaReader() { + return reader.isPrimaryReplicaReader(); + } + + /** + * ONLY USE DEFAULT CONSTRUCTOR FOR UNIT TESTS + */ + StoreFileReader() { + this.reader = null; + } + + public KVComparator getComparator() { + return reader.getComparator(); + } + + /** + * Get a scanner to scan over this StoreFile. Do not use + * this overload if using this scanner for compactions. + * + * @param cacheBlocks should this scanner cache blocks? 
+ * @param pread use pread (for highly concurrent small readers) + * @return a scanner + */ + public StoreFileScanner getStoreFileScanner(boolean cacheBlocks, + boolean pread) { + return getStoreFileScanner(cacheBlocks, pread, false, + // 0 is passed as readpoint because this method is only used by test + // where StoreFile is directly operated upon + 0); + } + + /** + * Get a scanner to scan over this StoreFile. + * + * @param cacheBlocks should this scanner cache blocks? + * @param pread use pread (for highly concurrent small readers) + * @param isCompaction is scanner being used for compaction? + * @return a scanner + */ + public StoreFileScanner getStoreFileScanner(boolean cacheBlocks, + boolean pread, + boolean isCompaction, long readPt) { + // Increment the ref count + refCount.incrementAndGet(); + return new StoreFileScanner(this, + getScanner(cacheBlocks, pread, isCompaction), + !isCompaction, reader.hasMVCCInfo(), readPt); + } + + /** + * Decrement the ref count associated with the reader when ever a scanner associated + * with the reader is closed + */ + void decrementRefCount() { + refCount.decrementAndGet(); + } + + /** + * @return true if the file is still used in reads + */ + public boolean isReferencedInReads() { + return refCount.get() != 0; + } + + /** + * @return true if the file is compacted + */ + public boolean isCompactedAway() { + return this.compactedAway; + } + + /** + * @deprecated Do not write further code which depends on this call. Instead + * use getStoreFileScanner() which uses the StoreFileScanner class/interface + * which is the preferred way to scan a store with higher level concepts. + * + * @param cacheBlocks should we cache the blocks? + * @param pread use pread (for concurrent small readers) + * @return the underlying HFileScanner + */ + @Deprecated + public HFileScanner getScanner(boolean cacheBlocks, boolean pread) { + return getScanner(cacheBlocks, pread, false); + } + + /** + * @deprecated Do not write further code which depends on this call. Instead + * use getStoreFileScanner() which uses the StoreFileScanner class/interface + * which is the preferred way to scan a store with higher level concepts. + * + * @param cacheBlocks + * should we cache the blocks? + * @param pread + * use pread (for concurrent small readers) + * @param isCompaction + * is scanner being used for compaction? + * @return the underlying HFileScanner + */ + @Deprecated + public HFileScanner getScanner(boolean cacheBlocks, boolean pread, + boolean isCompaction) { + return reader.getScanner(cacheBlocks, pread, isCompaction); + } + + public void close(boolean evictOnClose) throws IOException { + reader.close(evictOnClose); + } + + /** + * Check if this storeFile may contain keys within the TimeRange that + * have not expired (i.e. not older than oldestUnexpiredTS). + * @param tr the timeRange to restrict + * @param oldestUnexpiredTS the oldest timestamp that is not expired, as + * determined by the column family's TTL + * @return false if queried keys definitely don't exist in this StoreFile + */ + boolean passesTimerangeFilter(TimeRange tr, long oldestUnexpiredTS) { + return this.timeRange == null? true: + this.timeRange.includesTimeRange(tr) && this.timeRange.getMax() >= oldestUnexpiredTS; + } + + /** + * Checks whether the given scan passes the Bloom filter (if present). Only + * checks Bloom filters for single-row or single-row-column scans. 
Bloom + * filter checking for multi-gets is implemented as part of the store + * scanner system (see {@link StoreFileScanner#seekExactly}) and uses + * the lower-level API {@link #passesGeneralBloomFilter(byte[], int, int, byte[], + * int, int)}. + * + * @param scan the scan specification. Used to determine the row, and to + * check whether this is a single-row ("get") scan. + * @param columns the set of columns. Only used for row-column Bloom + * filters. + * @return true if the scan with the given column set passes the Bloom + * filter, or if the Bloom filter is not applicable for the scan. + * False if the Bloom filter is applicable and the scan fails it. + */ + boolean passesBloomFilter(Scan scan, + final SortedSet columns) { + // Multi-column non-get scans will use Bloom filters through the + // lower-level API function that this function calls. + if (!scan.isGetScan()) { + return true; + } + + byte[] row = scan.getStartRow(); + switch (this.bloomFilterType) { + case ROW: + return passesGeneralBloomFilter(row, 0, row.length, null, 0, 0); + + case ROWCOL: + if (columns != null && columns.size() == 1) { + byte[] column = columns.first(); + return passesGeneralBloomFilter(row, 0, row.length, column, 0, + column.length); + } + + // For multi-column queries the Bloom filter is checked from the + // seekExact operation. + return true; + + default: + return true; + } + } + + public boolean passesDeleteFamilyBloomFilter(byte[] row, int rowOffset, + int rowLen) { + // Cache Bloom filter as a local variable in case it is set to null by + // another thread on an IO error. + BloomFilter bloomFilter = this.deleteFamilyBloomFilter; + + // Empty file or there is no delete family at all + if (reader.getTrailer().getEntryCount() == 0 || deleteFamilyCnt == 0) { + return false; + } + + if (bloomFilter == null) { + return true; + } + + try { + if (!bloomFilter.supportsAutoLoading()) { + return true; + } + return bloomFilter.contains(row, rowOffset, rowLen, null); + } catch (IllegalArgumentException e) { + LOG.error("Bad Delete Family bloom filter data -- proceeding without", + e); + setDeleteFamilyBloomFilterFaulty(); + } + + return true; + } + + /** + * A method for checking Bloom filters. Called directly from + * StoreFileScanner in case of a multi-column query. + * + * @param row + * @param rowOffset + * @param rowLen + * @param col + * @param colOffset + * @param colLen + * @return True if passes + */ + public boolean passesGeneralBloomFilter(byte[] row, int rowOffset, + int rowLen, byte[] col, int colOffset, int colLen) { + // Cache Bloom filter as a local variable in case it is set to null by + // another thread on an IO error. 
+ BloomFilter bloomFilter = this.generalBloomFilter; + if (bloomFilter == null) { + return true; + } + + byte[] key; + switch (bloomFilterType) { + case ROW: + if (col != null) { + throw new RuntimeException("Row-only Bloom filter called with " + + "column specified"); + } + if (rowOffset != 0 || rowLen != row.length) { + throw new AssertionError("For row-only Bloom filters the row " + + "must occupy the whole array"); + } + key = row; + break; + + case ROWCOL: + key = bloomFilter.createBloomKey(row, rowOffset, rowLen, col, + colOffset, colLen); + + break; + + default: + return true; + } + + // Empty file + if (reader.getTrailer().getEntryCount() == 0) + return false; + + try { + boolean shouldCheckBloom; + ByteBuffer bloom; + if (bloomFilter.supportsAutoLoading()) { + bloom = null; + shouldCheckBloom = true; + } else { + bloom = reader.getMetaBlock(HFile.BLOOM_FILTER_DATA_KEY, + true); + shouldCheckBloom = bloom != null; + } + + if (shouldCheckBloom) { + boolean exists; + + // Whether the primary Bloom key is greater than the last Bloom key + // from the file info. For row-column Bloom filters this is not yet + // a sufficient condition to return false. + boolean keyIsAfterLast = lastBloomKey != null + && bloomFilter.getComparator().compareFlatKey(key, lastBloomKey) > 0; + + if (bloomFilterType == BloomType.ROWCOL) { + // Since a Row Delete is essentially a DeleteFamily applied to all + // columns, a file might be skipped if using row+col Bloom filter. + // In order to ensure this file is included an additional check is + // required looking only for a row bloom. + byte[] rowBloomKey = bloomFilter.createBloomKey(row, rowOffset, rowLen, + null, 0, 0); + + if (keyIsAfterLast + && bloomFilter.getComparator().compareFlatKey(rowBloomKey, + lastBloomKey) > 0) { + exists = false; + } else { + exists = + bloomFilter.contains(key, 0, key.length, bloom) || + bloomFilter.contains(rowBloomKey, 0, rowBloomKey.length, + bloom); + } + } else { + exists = !keyIsAfterLast + && bloomFilter.contains(key, 0, key.length, bloom); + } + + return exists; + } + } catch (IOException e) { + LOG.error("Error reading bloom filter data -- proceeding without", + e); + setGeneralBloomFilterFaulty(); + } catch (IllegalArgumentException e) { + LOG.error("Bad bloom filter data -- proceeding without", e); + setGeneralBloomFilterFaulty(); + } + + return true; + } + + /** + * Checks whether the given scan rowkey range overlaps with the current storefile's + * @param scan the scan specification. Used to determine the rowkey range. + * @return true if there is overlap, false otherwise + */ + public boolean passesKeyRangeFilter(Scan scan) { + if (this.getFirstKey() == null || this.getLastKey() == null) { + // the file is empty + return false; + } + if (Bytes.equals(scan.getStartRow(), HConstants.EMPTY_START_ROW) + && Bytes.equals(scan.getStopRow(), HConstants.EMPTY_END_ROW)) { + return true; + } + KeyValue smallestScanKeyValue = scan.isReversed() ? KeyValueUtil + .createFirstOnRow(scan.getStopRow()) : KeyValueUtil.createFirstOnRow(scan + .getStartRow()); + KeyValue largestScanKeyValue = scan.isReversed() ? KeyValueUtil + .createLastOnRow(scan.getStartRow()) : KeyValueUtil.createLastOnRow(scan + .getStopRow()); + boolean nonOverLapping = (getComparator().compareFlatKey( + this.getFirstKey(), largestScanKeyValue.getKey()) > 0 && !Bytes + .equals(scan.isReversed() ? 
scan.getStartRow() : scan.getStopRow(), + HConstants.EMPTY_END_ROW)) + || getComparator().compareFlatKey(this.getLastKey(), + smallestScanKeyValue.getKey()) < 0; + return !nonOverLapping; + } + + public Map loadFileInfo() throws IOException { + Map fi = reader.loadFileInfo(); + + byte[] b = fi.get(StoreFile.BLOOM_FILTER_TYPE_KEY); + if (b != null) { + bloomFilterType = BloomType.valueOf(Bytes.toString(b)); + } + + lastBloomKey = fi.get(StoreFile.LAST_BLOOM_KEY); + byte[] cnt = fi.get(StoreFile.DELETE_FAMILY_COUNT); + if (cnt != null) { + deleteFamilyCnt = Bytes.toLong(cnt); + } + + return fi; + } + + public void loadBloomfilter() { + this.loadBloomfilter(BlockType.GENERAL_BLOOM_META); + this.loadBloomfilter(BlockType.DELETE_FAMILY_BLOOM_META); + } + + public void loadBloomfilter(BlockType blockType) { + try { + if (blockType == BlockType.GENERAL_BLOOM_META) { + if (this.generalBloomFilter != null) + return; // Bloom has been loaded + + DataInput bloomMeta = reader.getGeneralBloomFilterMetadata(); + if (bloomMeta != null) { + // sanity check for NONE Bloom filter + if (bloomFilterType == BloomType.NONE) { + throw new IOException( + "valid bloom filter type not found in FileInfo"); + } else { + generalBloomFilter = BloomFilterFactory.createFromMeta(bloomMeta, + reader); + if (LOG.isTraceEnabled()) { + LOG.trace("Loaded " + bloomFilterType.toString() + " " + + generalBloomFilter.getClass().getSimpleName() + + " metadata for " + reader.getName()); + } + } + } + } else if (blockType == BlockType.DELETE_FAMILY_BLOOM_META) { + if (this.deleteFamilyBloomFilter != null) + return; // Bloom has been loaded + + DataInput bloomMeta = reader.getDeleteBloomFilterMetadata(); + if (bloomMeta != null) { + deleteFamilyBloomFilter = BloomFilterFactory.createFromMeta( + bloomMeta, reader); + LOG.info("Loaded Delete Family Bloom (" + + deleteFamilyBloomFilter.getClass().getSimpleName() + + ") metadata for " + reader.getName()); + } + } else { + throw new RuntimeException("Block Type: " + blockType.toString() + + "is not supported for Bloom filter"); + } + } catch (IOException e) { + LOG.error("Error reading bloom filter meta for " + blockType + + " -- proceeding without", e); + setBloomFilterFaulty(blockType); + } catch (IllegalArgumentException e) { + LOG.error("Bad bloom filter meta " + blockType + + " -- proceeding without", e); + setBloomFilterFaulty(blockType); + } + } + + private void setBloomFilterFaulty(BlockType blockType) { + if (blockType == BlockType.GENERAL_BLOOM_META) { + setGeneralBloomFilterFaulty(); + } else if (blockType == BlockType.DELETE_FAMILY_BLOOM_META) { + setDeleteFamilyBloomFilterFaulty(); + } + } + + /** + * The number of Bloom filter entries in this store file, or an estimate + * thereof, if the Bloom filter is not loaded. This always returns an upper + * bound of the number of Bloom filter entries. + * + * @return an estimate of the number of Bloom filter entries in this file + */ + public long getFilterEntries() { + return generalBloomFilter != null ? 
generalBloomFilter.getKeyCount() + : reader.getEntries(); + } + + public void setGeneralBloomFilterFaulty() { + generalBloomFilter = null; + } + + public void setDeleteFamilyBloomFilterFaulty() { + this.deleteFamilyBloomFilter = null; + } + + public byte[] getLastKey() { + return reader.getLastKey(); + } + + public byte[] getLastRowKey() { + return reader.getLastRowKey(); + } + + public byte[] midkey() throws IOException { + return reader.midkey(); + } + + public long length() { + return reader.length(); + } + + public long getTotalUncompressedBytes() { + return reader.getTrailer().getTotalUncompressedBytes(); + } + + public long getEntries() { + return reader.getEntries(); + } + + public long getDeleteFamilyCnt() { + return deleteFamilyCnt; + } + + public byte[] getFirstKey() { + return reader.getFirstKey(); + } + + public long indexSize() { + return reader.indexSize(); + } + + public BloomType getBloomFilterType() { + return this.bloomFilterType; + } + + public long getSequenceID() { + return sequenceID; + } + + public void setSequenceID(long sequenceID) { + this.sequenceID = sequenceID; + } + + public void setBulkLoaded(boolean bulkLoadResult) { + this.bulkLoadResult = bulkLoadResult; + } + + public boolean isBulkLoaded() { + return this.bulkLoadResult; + } + + BloomFilter getGeneralBloomFilter() { + return generalBloomFilter; + } + + long getUncompressedDataIndexSize() { + return reader.getTrailer().getUncompressedDataIndexSize(); + } + + public long getTotalBloomSize() { + if (generalBloomFilter == null) + return 0; + return generalBloomFilter.getByteSize(); + } + + public int getHFileVersion() { + return reader.getTrailer().getMajorVersion(); + } + + public int getHFileMinorVersion() { + return reader.getTrailer().getMinorVersion(); + } + + public HFile.Reader getHFileReader() { + return reader; + } + + void disableBloomFilterForTesting() { + generalBloomFilter = null; + this.deleteFamilyBloomFilter = null; + } + + public long getMaxTimestamp() { + return timeRange == null ? Long.MAX_VALUE : timeRange.getMax(); + } + +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java index 4055188..9e04af5 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java @@ -37,7 +37,6 @@ import org.apache.hadoop.hbase.KeyValueUtil; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.io.TimeRange; import org.apache.hadoop.hbase.io.hfile.HFileScanner; -import org.apache.hadoop.hbase.regionserver.StoreFile.Reader; /** * KeyValueScanner adaptor over the Reader. It also provides hooks into @@ -48,7 +47,7 @@ public class StoreFileScanner implements KeyValueScanner { private static final Log LOG = LogFactory.getLog(HStore.class); // the reader it comes from: - private final StoreFile.Reader reader; + private final StoreFileReader reader; private final HFileScanner hfs; private Cell cur = null; private boolean closed = false; @@ -72,9 +71,21 @@ public class StoreFileScanner implements KeyValueScanner { /** * Implements a {@link KeyValueScanner} on top of the specified {@link HFileScanner} * @param hfs HFile scanner + * @deprecated: Since 1.3. Removed in 2.0. + * Use {@link #StoreFileScanner(StoreFileReader, HFileScanner, boolean, boolean, long)} ()}. 
*/ + @Deprecated public StoreFileScanner(StoreFile.Reader reader, HFileScanner hfs, boolean useMVCC, boolean hasMVCC, long readPt) { + this((StoreFileReader)reader, hfs, useMVCC, hasMVCC, readPt); + } + + /** + * Implements a {@link KeyValueScanner} on top of the specified {@link HFileScanner} + * @param hfs HFile scanner + */ + public StoreFileScanner(StoreFileReader reader, HFileScanner hfs, boolean useMVCC, + boolean hasMVCC, long readPt) { this.readPt = readPt; this.reader = reader; this.hfs = hfs; @@ -120,7 +131,7 @@ public class StoreFileScanner implements KeyValueScanner { List scanners = new ArrayList( files.size()); for (StoreFile file : files) { - StoreFile.Reader r = file.createReader(canUseDrop); + StoreFileReader r = file.createStoreFileReader(canUseDrop); r.setReplicaStoreFile(isPrimaryReplica); StoreFileScanner scanner = r.getStoreFileScanner(cacheBlocks, usePread, isCompaction, readPt); @@ -390,7 +401,7 @@ public class StoreFileScanner implements KeyValueScanner { return true; } - Reader getReader() { + StoreFileReader getReader() { return reader; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileWriter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileWriter.java new file mode 100644 index 0000000..a9c599a --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileWriter.java @@ -0,0 +1,467 @@ +package org.apache.hadoop.hbase.regionserver; + +import com.google.common.base.Preconditions; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.KeyValue; +import org.apache.hadoop.hbase.KeyValue.KVComparator; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.io.hfile.CacheConfig; +import org.apache.hadoop.hbase.io.hfile.HFile; +import org.apache.hadoop.hbase.io.hfile.HFileContext; +import org.apache.hadoop.hbase.regionserver.compactions.Compactor; +import org.apache.hadoop.hbase.util.BloomFilterFactory; +import org.apache.hadoop.hbase.util.BloomFilterWriter; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.io.WritableUtils; + +import java.io.IOException; +import java.net.InetSocketAddress; +import java.util.Arrays; + +/** + * A StoreFile writer. Use this to read/write HBase Store Files. It is package + * local because it is an implementation detail of the HBase regionserver. + */ +@InterfaceAudience.Private +public class StoreFileWriter implements Compactor.CellSink { + private static final Log LOG = LogFactory.getLog(StoreFileWriter.class.getName()); + + private final BloomFilterWriter generalBloomFilterWriter; + private final BloomFilterWriter deleteFamilyBloomFilterWriter; + private final BloomType bloomType; + private byte[] lastBloomKey; + private int lastBloomKeyOffset, lastBloomKeyLen; + private KVComparator kvComparator; + private Cell lastCell = null; + private long earliestPutTs = HConstants.LATEST_TIMESTAMP; + private Cell lastDeleteFamilyCell = null; + private long deleteFamilyCnt = 0; + + TimeRangeTracker timeRangeTracker = new TimeRangeTracker(); + /** + * timeRangeTrackerSet is used to figure if we were passed a filled-out TimeRangeTracker or not. 
+ * When flushing a memstore, we set the TimeRangeTracker that it accumulated during updates to + * memstore in here into this Writer and use this variable to indicate that we do not need to + * recalculate the timeRangeTracker bounds; it was done already as part of add-to-memstore. + * A completed TimeRangeTracker is not set in cases of compactions when it is recalculated. + */ + boolean timeRangeTrackerSet = false; + + protected HFile.Writer writer; + + /** + * Creates an HFile.Writer that also write helpful meta data. + * @param fs file system to write to + * @param path file name to create + * @param conf user configuration + * @param comparator key comparator + * @param bloomType bloom filter setting + * @param maxKeys the expected maximum number of keys to be added. Was used + * for Bloom filter size in {@link HFile} format version 1. + * @param favoredNodes + * @param fileContext - The HFile context + * @throws IOException problem writing to FS + */ + public StoreFileWriter(FileSystem fs, Path path, + final Configuration conf, + CacheConfig cacheConf, + final KVComparator comparator, BloomType bloomType, long maxKeys, + InetSocketAddress[] favoredNodes, HFileContext fileContext) + throws IOException { + writer = HFile.getWriterFactory(conf, cacheConf) + .withPath(fs, path) + .withComparator(comparator) + .withFavoredNodes(favoredNodes) + .withFileContext(fileContext) + .create(); + + this.kvComparator = comparator; + + generalBloomFilterWriter = BloomFilterFactory.createGeneralBloomAtWrite( + conf, cacheConf, bloomType, + (int) Math.min(maxKeys, Integer.MAX_VALUE), writer); + + if (generalBloomFilterWriter != null) { + this.bloomType = bloomType; + if (LOG.isTraceEnabled()) LOG.trace("Bloom filter type for " + path + ": " + + this.bloomType + ", " + generalBloomFilterWriter.getClass().getSimpleName()); + } else { + // Not using Bloom filters. + this.bloomType = BloomType.NONE; + } + + // initialize delete family Bloom filter when there is NO RowCol Bloom + // filter + if (this.bloomType != BloomType.ROWCOL) { + this.deleteFamilyBloomFilterWriter = BloomFilterFactory + .createDeleteBloomAtWrite(conf, cacheConf, + (int) Math.min(maxKeys, Integer.MAX_VALUE), writer); + } else { + deleteFamilyBloomFilterWriter = null; + } + if (deleteFamilyBloomFilterWriter != null) { + if (LOG.isTraceEnabled()) LOG.trace("Delete Family Bloom filter type for " + path + ": " + + deleteFamilyBloomFilterWriter.getClass().getSimpleName()); + } + } + + /** + * Writes meta data. + * Call before {@link #close()} since its written as meta data to this file. + * @param maxSequenceId Maximum sequence id. + * @param majorCompaction True if this file is product of a major compaction + * @throws IOException problem writing to FS + */ + public void appendMetadata(final long maxSequenceId, final boolean majorCompaction) + throws IOException { + writer.appendFileInfo(StoreFile.MAX_SEQ_ID_KEY, Bytes.toBytes(maxSequenceId)); + writer.appendFileInfo(StoreFile.MAJOR_COMPACTION_KEY, + Bytes.toBytes(majorCompaction)); + appendTrackedTimestampsToMetadata(); + } + + /** + * Add TimestampRange and earliest put timestamp to Metadata + */ + public void appendTrackedTimestampsToMetadata() throws IOException { + appendFileInfo(StoreFile.TIMERANGE_KEY,WritableUtils.toByteArray(timeRangeTracker)); + appendFileInfo(StoreFile.EARLIEST_PUT_TS, Bytes.toBytes(earliestPutTs)); + } + + /** + * Set TimeRangeTracker. 
+ * Called when flushing to pass us a pre-calculated TimeRangeTracker, one made during updates + * to memstore so we don't have to make one ourselves as Cells get appended. Call before first + * append. If this method is not called, we will calculate our own range of the Cells that + * comprise this StoreFile (and write them on the end as metadata). It is good to have this stuff + * passed because it is expensive to make. + */ + public void setTimeRangeTracker(final TimeRangeTracker trt) { + this.timeRangeTracker = trt; + timeRangeTrackerSet = true; + } + + /** + * Record the earlest Put timestamp. + * + * If the timeRangeTracker is not set, + * update TimeRangeTracker to include the timestamp of this key + * @param cell + */ + public void trackTimestamps(final Cell cell) { + if (KeyValue.Type.Put.getCode() == cell.getTypeByte()) { + earliestPutTs = Math.min(earliestPutTs, cell.getTimestamp()); + } + if (!timeRangeTrackerSet) { + timeRangeTracker.includeTimestamp(cell); + } + } + + private void appendGeneralBloomfilter(final Cell cell) throws IOException { + if (this.generalBloomFilterWriter != null) { + // only add to the bloom filter on a new, unique key + boolean newKey = true; + if (this.lastCell != null) { + switch(bloomType) { + case ROW: + newKey = ! kvComparator.matchingRows(cell, lastCell); + break; + case ROWCOL: + newKey = ! kvComparator.matchingRowColumn(cell, lastCell); + break; + case NONE: + newKey = false; + break; + default: + throw new IOException("Invalid Bloom filter type: " + bloomType + + " (ROW or ROWCOL expected)"); + } + } + if (newKey) { + /* + * http://2.bp.blogspot.com/_Cib_A77V54U/StZMrzaKufI/AAAAAAAAADo/ZhK7bGoJdMQ/s400/KeyValue.png + * Key = RowLen + Row + FamilyLen + Column [Family + Qualifier] + TimeStamp + * + * 2 Types of Filtering: + * 1. Row = Row + * 2. 
RowCol = Row + Qualifier + */ + byte[] bloomKey; + int bloomKeyOffset, bloomKeyLen; + + switch (bloomType) { + case ROW: + bloomKey = cell.getRowArray(); + bloomKeyOffset = cell.getRowOffset(); + bloomKeyLen = cell.getRowLength(); + break; + case ROWCOL: + // merge(row, qualifier) + // TODO: could save one buffer copy in case of compound Bloom + // filters when this involves creating a KeyValue + bloomKey = generalBloomFilterWriter.createBloomKey(cell.getRowArray(), + cell.getRowOffset(), cell.getRowLength(), cell.getQualifierArray(), + cell.getQualifierOffset(), cell.getQualifierLength()); + bloomKeyOffset = 0; + bloomKeyLen = bloomKey.length; + break; + default: + throw new IOException("Invalid Bloom filter type: " + bloomType + + " (ROW or ROWCOL expected)"); + } + generalBloomFilterWriter.add(bloomKey, bloomKeyOffset, bloomKeyLen); + if (lastBloomKey != null + && generalBloomFilterWriter.getComparator().compareFlatKey(bloomKey, + bloomKeyOffset, bloomKeyLen, lastBloomKey, + lastBloomKeyOffset, lastBloomKeyLen) <= 0) { + throw new IOException("Non-increasing Bloom keys: " + + Bytes.toStringBinary(bloomKey, bloomKeyOffset, bloomKeyLen) + + " after " + + Bytes.toStringBinary(lastBloomKey, lastBloomKeyOffset, + lastBloomKeyLen)); + } + lastBloomKey = bloomKey; + lastBloomKeyOffset = bloomKeyOffset; + lastBloomKeyLen = bloomKeyLen; + this.lastCell = cell; + } + } + } + + private void appendDeleteFamilyBloomFilter(final Cell cell) + throws IOException { + if (!CellUtil.isDeleteFamily(cell) && !CellUtil.isDeleteFamilyVersion(cell)) { + return; + } + + // increase the number of delete family in the store file + deleteFamilyCnt++; + if (null != this.deleteFamilyBloomFilterWriter) { + boolean newKey = true; + if (lastDeleteFamilyCell != null) { + newKey = !kvComparator.matchingRows(cell, lastDeleteFamilyCell); + } + if (newKey) { + this.deleteFamilyBloomFilterWriter.add(cell.getRowArray(), + cell.getRowOffset(), cell.getRowLength()); + this.lastDeleteFamilyCell = cell; + } + } + } + + public void append(final Cell cell) throws IOException { + appendGeneralBloomfilter(cell); + appendDeleteFamilyBloomFilter(cell); + writer.append(cell); + trackTimestamps(cell); + } + + public Path getPath() { + return this.writer.getPath(); + } + + boolean hasGeneralBloom() { + return this.generalBloomFilterWriter != null; + } + + /** + * For unit testing only. + * + * @return the Bloom filter used by this writer. 
+ */ + BloomFilterWriter getGeneralBloomWriter() { + return generalBloomFilterWriter; + } + + private boolean closeBloomFilter(BloomFilterWriter bfw) throws IOException { + boolean haveBloom = (bfw != null && bfw.getKeyCount() > 0); + if (haveBloom) { + bfw.compactBloom(); + } + return haveBloom; + } + + private boolean closeGeneralBloomFilter() throws IOException { + boolean hasGeneralBloom = closeBloomFilter(generalBloomFilterWriter); + + // add the general Bloom filter writer and append file info + if (hasGeneralBloom) { + writer.addGeneralBloomFilter(generalBloomFilterWriter); + writer.appendFileInfo(StoreFile.BLOOM_FILTER_TYPE_KEY, + Bytes.toBytes(bloomType.toString())); + if (lastBloomKey != null) { + writer.appendFileInfo(StoreFile.LAST_BLOOM_KEY, Arrays.copyOfRange( + lastBloomKey, lastBloomKeyOffset, lastBloomKeyOffset + + lastBloomKeyLen)); + } + } + return hasGeneralBloom; + } + + private boolean closeDeleteFamilyBloomFilter() throws IOException { + boolean hasDeleteFamilyBloom = closeBloomFilter(deleteFamilyBloomFilterWriter); + + // add the delete family Bloom filter writer + if (hasDeleteFamilyBloom) { + writer.addDeleteFamilyBloomFilter(deleteFamilyBloomFilterWriter); + } + + // append file info about the number of delete family kvs + // even if there is no delete family Bloom. + writer.appendFileInfo(StoreFile.DELETE_FAMILY_COUNT, + Bytes.toBytes(this.deleteFamilyCnt)); + + return hasDeleteFamilyBloom; + } + + public void close() throws IOException { + boolean hasGeneralBloom = this.closeGeneralBloomFilter(); + boolean hasDeleteFamilyBloom = this.closeDeleteFamilyBloomFilter(); + + writer.close(); + + // Log final Bloom filter statistics. This needs to be done after close() + // because compound Bloom filters might be finalized as part of closing. + if (LOG.isTraceEnabled()) { + LOG.trace((hasGeneralBloom ? "" : "NO ") + "General Bloom and " + + (hasDeleteFamilyBloom ? "" : "NO ") + "DeleteFamily" + " was added to HFile " + + getPath()); + } + + } + + public void appendFileInfo(byte[] key, byte[] value) throws IOException { + writer.appendFileInfo(key, value); + } + + /** For use in testing. */ + HFile.Writer getHFileWriter() { + return writer; + } + + @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="ICAST_INTEGER_MULTIPLY_CAST_TO_LONG", + justification="Will not overflow") + public static class Builder { + private final Configuration conf; + private final CacheConfig cacheConf; + private final FileSystem fs; + + private KeyValue.KVComparator comparator = KeyValue.COMPARATOR; + private BloomType bloomType = BloomType.NONE; + private long maxKeyCount = 0; + private Path dir; + private Path filePath; + private InetSocketAddress[] favoredNodes; + private HFileContext fileContext; + + public Builder(Configuration conf, CacheConfig cacheConf, + FileSystem fs) { + this.conf = conf; + this.cacheConf = cacheConf; + this.fs = fs; + } + + /** + * Use either this method or {@link #withFilePath}, but not both. + * @param dir Path to column family directory. The directory is created if + * does not exist. The file is given a unique name within this + * directory. + * @return this (for chained invocation) + */ + public Builder withOutputDir(Path dir) { + Preconditions.checkNotNull(dir); + this.dir = dir; + return this; + } + + /** + * Use either this method or {@link #withOutputDir}, but not both. 
+ * @param filePath the StoreFile path to write + * @return this (for chained invocation) + */ + public Builder withFilePath(Path filePath) { + Preconditions.checkNotNull(filePath); + this.filePath = filePath; + return this; + } + + /** + * @param favoredNodes an array of favored nodes or possibly null + * @return this (for chained invocation) + */ + public Builder withFavoredNodes(InetSocketAddress[] favoredNodes) { + this.favoredNodes = favoredNodes; + return this; + } + + public Builder withComparator(KeyValue.KVComparator comparator) { + Preconditions.checkNotNull(comparator); + this.comparator = comparator; + return this; + } + + public Builder withBloomType(BloomType bloomType) { + Preconditions.checkNotNull(bloomType); + this.bloomType = bloomType; + return this; + } + + /** + * @param maxKeyCount estimated maximum number of keys we expect to add + * @return this (for chained invocation) + */ + public Builder withMaxKeyCount(long maxKeyCount) { + this.maxKeyCount = maxKeyCount; + return this; + } + + public Builder withFileContext(HFileContext fileContext) { + this.fileContext = fileContext; + return this; + } + + public Builder withShouldDropCacheBehind(boolean shouldDropCacheBehind/*NOT USED!!*/) { + // TODO: HAS NO EFFECT!!! FIX!! + return this; + } + /** + * Create a store file writer. Client is responsible for closing file when + * done. If metadata, add BEFORE closing using + * {@link StoreFileWriter#appendMetadata}. + */ + public StoreFileWriter build() throws IOException { + if ((dir == null ? 0 : 1) + (filePath == null ? 0 : 1) != 1) { + throw new IllegalArgumentException("Either specify parent directory " + + "or file path"); + } + + if (dir == null) { + dir = filePath.getParent(); + } + + if (!fs.exists(dir)) { + fs.mkdirs(dir); + } + + if (filePath == null) { + filePath = StoreFile.getUniqueFile(fs, dir); + if (!BloomFilterFactory.isGeneralBloomEnabled(conf)) { + bloomType = BloomType.NONE; + } + } + + if (comparator == null) { + comparator = KeyValue.COMPARATOR; + } + return new StoreFileWriter(fs, filePath, + conf, cacheConf, comparator, bloomType, maxKeyCount, favoredNodes, fileContext); + } + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFlusher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFlusher.java index 9b182a2..5ba7d33 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFlusher.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFlusher.java @@ -61,7 +61,7 @@ abstract class StoreFlusher { public abstract List flushSnapshot(MemStoreSnapshot snapshot, long cacheFlushSeqNum, MonitoredTask status, ThroughputController throughputController) throws IOException; - protected void finalizeWriter(StoreFile.Writer writer, long cacheFlushSeqNum, + protected void finalizeWriter(StoreFileWriter writer, long cacheFlushSeqNum, MonitoredTask status) throws IOException { // Write out the log sequence number that corresponds to this output // hfile. Also write current time in metadata as minFlushTime. 
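For reviewers: a minimal usage sketch of the new StoreFileWriter.Builder introduced above, modeled on the test changes later in this patch. It assumes conf, cacheConf, fs and a familyDir path are supplied by the caller; the class name, the writeOneCell helper, and the cell/metadata values are illustrative only and are not part of this change.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.io.hfile.HFileContext;
import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.regionserver.StoreFileWriter;
import org.apache.hadoop.hbase.util.Bytes;

public class StoreFileWriterUsageSketch {
  // Hypothetical helper (not part of the patch): writes a single cell through the new builder.
  static Path writeOneCell(Configuration conf, CacheConfig cacheConf, FileSystem fs,
      Path familyDir) throws IOException {
    HFileContext fileContext = new HFileContextBuilder().withBlockSize(8 * 1024).build();
    // Use either withOutputDir or withFilePath, but not both (see Builder.build() above).
    StoreFileWriter writer = new StoreFileWriter.Builder(conf, cacheConf, fs)
        .withOutputDir(familyDir)
        .withComparator(KeyValue.COMPARATOR)
        .withBloomType(BloomType.ROW)
        .withMaxKeyCount(1000)
        .withFileContext(fileContext)
        .build();
    try {
      // append() feeds the general and delete-family Bloom filters and the timestamp
      // tracker before delegating to the underlying HFile writer.
      writer.append(new KeyValue(Bytes.toBytes("row1"), Bytes.toBytes("f"),
          Bytes.toBytes("q"), System.currentTimeMillis(), Bytes.toBytes("v")));
      // Per the Builder javadoc, metadata must be appended before close().
      writer.appendMetadata(/* maxSequenceId = */ 0, /* majorCompaction = */ false);
    } finally {
      writer.close();
    }
    return writer.getPath();
  }
}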
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreUtils.java index 3d4e990..f6cfa17 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreUtils.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreUtils.java @@ -74,7 +74,7 @@ public class StoreUtils { long maxSize = -1L; StoreFile largestSf = null; for (StoreFile sf : candidates) { - StoreFile.Reader r = sf.getReader(); + StoreFileReader r = sf.getStoreFileReader(); if (r == null) continue; long size = r.length(); if (size > maxSize) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeMultiFileWriter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeMultiFileWriter.java index a0fbdbd..25ae21c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeMultiFileWriter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeMultiFileWriter.java @@ -28,7 +28,6 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.KeyValue.KVComparator; import org.apache.hadoop.hbase.classification.InterfaceAudience; -import org.apache.hadoop.hbase.regionserver.StoreFile.Writer; import org.apache.hadoop.hbase.util.Bytes; /** @@ -41,7 +40,7 @@ public abstract class StripeMultiFileWriter extends AbstractMultiFileWriter { private static final Log LOG = LogFactory.getLog(StripeMultiFileWriter.class); protected final KVComparator comparator; - protected List existingWriters; + protected List existingWriters; protected List boundaries; /** Whether to write stripe metadata */ @@ -56,7 +55,7 @@ public abstract class StripeMultiFileWriter extends AbstractMultiFileWriter { } @Override - protected Collection writers() { + protected Collection writers() { return existingWriters; } @@ -71,7 +70,7 @@ public abstract class StripeMultiFileWriter extends AbstractMultiFileWriter { } @Override - protected void preCloseWriter(Writer writer) throws IOException { + protected void preCloseWriter(StoreFileWriter writer) throws IOException { if (doWriteStripeMetadata) { if (LOG.isDebugEnabled()) { LOG.debug("Write stripe metadata for " + writer.getPath().toString()); @@ -130,7 +129,7 @@ public abstract class StripeMultiFileWriter extends AbstractMultiFileWriter { * separate from all other such pairs. */ public static class BoundaryMultiWriter extends StripeMultiFileWriter { - private StoreFile.Writer currentWriter; + private StoreFileWriter currentWriter; private byte[] currentWriterEndKey; private Cell lastCell; @@ -148,7 +147,7 @@ public abstract class StripeMultiFileWriter extends AbstractMultiFileWriter { byte[] majorRangeFrom, byte[] majorRangeTo) throws IOException { super(comparator); this.boundaries = targetBoundaries; - this.existingWriters = new ArrayList(this.boundaries.size() - 1); + this.existingWriters = new ArrayList(this.boundaries.size() - 1); // "major" range (range for which all files are included) boundaries, if any, // must match some target boundaries, let's find them. 
assert (majorRangeFrom == null) == (majorRangeTo == null); @@ -271,7 +270,7 @@ public abstract class StripeMultiFileWriter extends AbstractMultiFileWriter { private byte[] right; private Cell lastCell; - private StoreFile.Writer currentWriter; + private StoreFileWriter currentWriter; protected byte[] lastRowInCurrentWriter = null; private long cellsInCurrentWriter = 0; private long cellsSeen = 0; @@ -291,7 +290,7 @@ public abstract class StripeMultiFileWriter extends AbstractMultiFileWriter { this.left = left; this.right = right; int preallocate = Math.min(this.targetCount, 64); - this.existingWriters = new ArrayList(preallocate); + this.existingWriters = new ArrayList(preallocate); this.boundaries = new ArrayList(preallocate + 1); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreFileManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreFileManager.java index cd7288d..dd09681 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreFileManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreFileManager.java @@ -369,7 +369,7 @@ public class StripeStoreFileManager private long getStripeFilesSize(int stripeIndex) { long result = 0; for (StoreFile sf : state.stripeFiles.get(stripeIndex)) { - result += sf.getReader().length(); + result += sf.getStoreFileReader().length(); } return result; } @@ -1004,7 +1004,7 @@ public class StripeStoreFileManager for (int i = 1; i < stripe.size(); ++i) { StoreFile sf = stripe.get(i); synchronized (sf) { - long fileTs = sf.getReader().getMaxTimestamp(); + long fileTs = sf.getStoreFileReader().getMaxTimestamp(); if (fileTs < maxTs && !filesCompacting.contains(sf)) { LOG.info("Found an expired store file: " + sf.getPath() + " whose maxTimeStamp is " + fileTs + ", which is below " + maxTs); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreFlusher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreFlusher.java index 0c3432c..789ca30 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreFlusher.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreFlusher.java @@ -33,7 +33,6 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.KeyValue.KVComparator; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.monitoring.MonitoredTask; -import org.apache.hadoop.hbase.regionserver.StoreFile.Writer; import org.apache.hadoop.hbase.regionserver.compactions.StripeCompactionPolicy; import org.apache.hadoop.hbase.regionserver.throttle.ThroughputController; @@ -109,8 +108,8 @@ public class StripeStoreFlusher extends StoreFlusher { final TimeRangeTracker tracker, final long kvCount) { return new StripeMultiFileWriter.WriterFactory() { @Override - public Writer createWriter() throws IOException { - StoreFile.Writer writer = store.createWriterInTmp( + public StoreFileWriter createWriter() throws IOException { + StoreFileWriter writer = store.createWriterInTmpDir( kvCount, store.getFamily().getCompression(), /* isCompaction = */ false, /* includeMVCCReadpoint = */ true, diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/AbstractMultiOutputCompactor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/AbstractMultiOutputCompactor.java index ef39a6c..d6f975a 100644 --- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/AbstractMultiOutputCompactor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/AbstractMultiOutputCompactor.java @@ -29,7 +29,7 @@ import org.apache.hadoop.hbase.regionserver.AbstractMultiFileWriter; import org.apache.hadoop.hbase.regionserver.AbstractMultiFileWriter.WriterFactory; import org.apache.hadoop.hbase.regionserver.InternalScanner; import org.apache.hadoop.hbase.regionserver.Store; -import org.apache.hadoop.hbase.regionserver.StoreFile.Writer; +import org.apache.hadoop.hbase.regionserver.StoreFileWriter; import org.apache.hadoop.hbase.regionserver.StoreScanner; /** @@ -50,7 +50,7 @@ public abstract class AbstractMultiOutputCompactor { new Predicate() { @Override public boolean apply(StoreFile sf) { - return sf.getReader() != null; + return sf.getStoreFileReader() != null; } }), new Function() { @Override public String apply(StoreFile sf) { return StringUtils.humanReadableInt( - (sf.getReader() == null) ? 0 : sf.getReader().length()); + (sf.getStoreFileReader() == null) ? 0 : sf.getStoreFileReader().length()); } })); @@ -237,7 +237,7 @@ public class CompactionRequest implements Comparable { private void recalculateSize() { long sz = 0; for (StoreFile sf : this.filesToCompact) { - Reader r = sf.getReader(); + StoreFileReader r = sf.getStoreFileReader(); sz += r == null ? 0 : r.length(); } this.totalSize = sz; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/Compactor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/Compactor.java index c6fc0c6..03a0243 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/Compactor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/Compactor.java @@ -47,7 +47,8 @@ import org.apache.hadoop.hbase.regionserver.ScanType; import org.apache.hadoop.hbase.regionserver.ScannerContext; import org.apache.hadoop.hbase.regionserver.Store; import org.apache.hadoop.hbase.regionserver.StoreFile; -import org.apache.hadoop.hbase.regionserver.StoreFile.Writer; +import org.apache.hadoop.hbase.regionserver.StoreFileWriter; +import org.apache.hadoop.hbase.regionserver.StoreFileReader; import org.apache.hadoop.hbase.regionserver.StoreFileScanner; import org.apache.hadoop.hbase.regionserver.StoreScanner; import org.apache.hadoop.hbase.regionserver.compactions.Compactor.CellSink; @@ -140,7 +141,7 @@ public abstract class Compactor { } long seqNum = file.getMaxSequenceId(); fd.maxSeqId = Math.max(fd.maxSeqId, seqNum); - StoreFile.Reader r = file.getReader(); + StoreFileReader r = file.getStoreFileReader(); if (r == null) { LOG.warn("Null reader for " + file.getPath()); continue; @@ -246,10 +247,10 @@ public abstract class Compactor { * @return Writer for a new StoreFile in the tmp dir. * @throws IOException if creation failed */ - protected Writer createTmpWriter(FileDetails fd, boolean shouldDropBehind) throws IOException { + protected StoreFileWriter createTmpWriter(FileDetails fd, boolean shouldDropBehind) throws IOException { // When all MVCC readpoints are 0, don't write them. // See HBASE-8166, HBASE-12600, and HBASE-13389. 
- return store.createWriterInTmp(fd.maxKeyCount, this.compactionCompression, + return store.createWriterInTmpDir(fd.maxKeyCount, this.compactionCompression, /* isCompaction = */true, /* includeMVCCReadpoint = */fd.maxMVCCReadpoint > 0, /* includesTags = */fd.maxTagsLength > 0, shouldDropBehind); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/DefaultCompactor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/DefaultCompactor.java index 9759d2b..a9479d8 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/DefaultCompactor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/DefaultCompactor.java @@ -29,7 +29,7 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.regionserver.InternalScanner; import org.apache.hadoop.hbase.regionserver.Store; import org.apache.hadoop.hbase.regionserver.StoreFile; -import org.apache.hadoop.hbase.regionserver.StoreFile.Writer; +import org.apache.hadoop.hbase.regionserver.StoreFileWriter; import org.apache.hadoop.hbase.regionserver.throttle.NoLimitThroughputController; import org.apache.hadoop.hbase.regionserver.throttle.ThroughputController; import org.apache.hadoop.hbase.security.User; @@ -41,17 +41,17 @@ import com.google.common.collect.Lists; * {@link #compact(CompactionRequest, ThroughputController, User)} */ @InterfaceAudience.Private -public class DefaultCompactor extends Compactor { +public class DefaultCompactor extends Compactor { private static final Log LOG = LogFactory.getLog(DefaultCompactor.class); public DefaultCompactor(final Configuration conf, final Store store) { super(conf, store); } - private final CellSinkFactory writerFactory = new CellSinkFactory() { + private final CellSinkFactory writerFactory = new CellSinkFactory() { @Override - public Writer createWriter(InternalScanner scanner, + public StoreFileWriter createWriter(InternalScanner scanner, org.apache.hadoop.hbase.regionserver.compactions.Compactor.FileDetails fd, boolean shouldDropBehind) throws IOException { return createTmpWriter(fd, shouldDropBehind); @@ -84,7 +84,7 @@ public class DefaultCompactor extends Compactor { } @Override - protected List commitWriter(Writer writer, FileDetails fd, + protected List commitWriter(StoreFileWriter writer, FileDetails fd, CompactionRequest request) throws IOException { List newFiles = Lists.newArrayList(writer.getPath()); writer.appendMetadata(fd.maxSeqId, request.isAllFiles()); @@ -93,7 +93,7 @@ public class DefaultCompactor extends Compactor { } @Override - protected void abortWriter(Writer writer) throws IOException { + protected void abortWriter(StoreFileWriter writer) throws IOException { Path leftoverFile = writer.getPath(); try { writer.close(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/ExploringCompactionPolicy.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/ExploringCompactionPolicy.java index f0cb5d2..2a557f8 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/ExploringCompactionPolicy.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/ExploringCompactionPolicy.java @@ -144,13 +144,13 @@ public class ExploringCompactionPolicy extends RatioBasedCompactionPolicy { /** * Find the total size of a list of store files. * @param potentialMatchFiles StoreFile list. 
- * @return Sum of StoreFile.getReader().length(); + * @return Sum of StoreFile.getStoreFileReader().length(); */ private long getTotalStoreSize(final List potentialMatchFiles) { long size = 0; for (StoreFile s:potentialMatchFiles) { - size += s.getReader().length(); + size += s.getStoreFileReader().length(); } return size; } @@ -171,7 +171,7 @@ public class ExploringCompactionPolicy extends RatioBasedCompactionPolicy { long totalFileSize = getTotalStoreSize(files); for (StoreFile file : files) { - long singleFileSize = file.getReader().length(); + long singleFileSize = file.getStoreFileReader().length(); long sumAllOtherFileSizes = totalFileSize - singleFileSize; if (singleFileSize > sumAllOtherFileSizes * currentRatio) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/FIFOCompactionPolicy.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/FIFOCompactionPolicy.java index d339898..25f7a4c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/FIFOCompactionPolicy.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/FIFOCompactionPolicy.java @@ -101,7 +101,7 @@ public class FIFOCompactionPolicy extends ExploringCompactionPolicy { long currentTime = EnvironmentEdgeManager.currentTime(); for(StoreFile sf: files){ // Check MIN_VERSIONS is in HStore removeUnneededFiles - Long maxTs = sf.getReader().getMaxTimestamp(); + Long maxTs = sf.getStoreFileReader().getMaxTimestamp(); long maxTtl = storeConfigInfo.getStoreFileTtl(); if(maxTs == null || maxTtl == Long.MAX_VALUE @@ -120,7 +120,7 @@ public class FIFOCompactionPolicy extends ExploringCompactionPolicy { Collection expiredStores = new ArrayList(); for(StoreFile sf: files){ // Check MIN_VERSIONS is in HStore removeUnneededFiles - Long maxTs = sf.getReader().getMaxTimestamp(); + Long maxTs = sf.getStoreFileReader().getMaxTimestamp(); long maxTtl = storeConfigInfo.getStoreFileTtl(); if(maxTs == null || maxTtl == Long.MAX_VALUE diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/RatioBasedCompactionPolicy.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/RatioBasedCompactionPolicy.java index 7b812cd..8a24bee 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/RatioBasedCompactionPolicy.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/RatioBasedCompactionPolicy.java @@ -162,7 +162,7 @@ public class RatioBasedCompactionPolicy extends SortedCompactionPolicy { long[] sumSize = new long[countOfFiles]; for (int i = countOfFiles - 1; i >= 0; --i) { StoreFile file = candidates.get(i); - fileSizes[i] = file.getReader().length(); + fileSizes[i] = file.getStoreFileReader().length(); // calculate the sum of fileSizes[i,i+maxFilesToCompact-1) for algo int tooFar = i + comConf.getMaxFilesToCompact() - 1; sumSize[i] = fileSizes[i] diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/SortedCompactionPolicy.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/SortedCompactionPolicy.java index 77b0af8..387ccc6 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/SortedCompactionPolicy.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/SortedCompactionPolicy.java @@ -178,7 +178,7 @@ public abstract class SortedCompactionPolicy extends CompactionPolicy 
{ boolean mayUseOffpeak) { int pos = 0; while (pos < candidates.size() && !candidates.get(pos).isReference() - && (candidates.get(pos).getReader().length() > comConf.getMaxCompactSize(mayUseOffpeak))) { + && (candidates.get(pos).getStoreFileReader().length() > comConf.getMaxCompactSize(mayUseOffpeak))) { ++pos; } if (pos > 0) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/StripeCompactionPolicy.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/StripeCompactionPolicy.java index 5d2c907..00b64f2 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/StripeCompactionPolicy.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/StripeCompactionPolicy.java @@ -204,7 +204,7 @@ public class StripeCompactionPolicy extends CompactionPolicy { if (selection.isEmpty()) continue; long size = 0; for (StoreFile sf : selection) { - size += sf.getReader().length(); + size += sf.getStoreFileReader().length(); } if (bqSelection == null || selection.size() > bqSelection.size() || (selection.size() == bqSelection.size() && size < bqTotalSize)) { @@ -315,7 +315,7 @@ public class StripeCompactionPolicy extends CompactionPolicy { ArrayList> stripes = si.getStripes(); OUTER: for (int i = 0; i < stripes.size(); ++i) { for (StoreFile storeFile : stripes.get(i)) { - if (storeFile.getReader().getMaxTimestamp() < timestampCutoff) continue; + if (storeFile.getStoreFileReader().getMaxTimestamp() < timestampCutoff) continue; // Found non-expired file, this stripe has to stay. if (length > bestLength) { bestStart = start; @@ -358,7 +358,7 @@ public class StripeCompactionPolicy extends CompactionPolicy { private static long getTotalKvCount(final Collection candidates) { long totalSize = 0; for (StoreFile storeFile : candidates) { - totalSize += storeFile.getReader().getEntries(); + totalSize += storeFile.getStoreFileReader().getEntries(); } return totalSize; } @@ -366,7 +366,7 @@ public class StripeCompactionPolicy extends CompactionPolicy { public static long getTotalFileSize(final Collection candidates) { long totalSize = 0; for (StoreFile storeFile : candidates) { - totalSize += storeFile.getReader().length(); + totalSize += storeFile.getStoreFileReader().length(); } return totalSize; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/CompoundBloomFilterWriter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/CompoundBloomFilterWriter.java index 25e98a2..58e247f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/CompoundBloomFilterWriter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/CompoundBloomFilterWriter.java @@ -32,6 +32,7 @@ import org.apache.hadoop.hbase.KeyValue.KVComparator; import org.apache.hadoop.hbase.io.hfile.BlockType; import org.apache.hadoop.hbase.io.hfile.HFileBlockIndex; import org.apache.hadoop.hbase.io.hfile.InlineBlockWriter; +import org.apache.hadoop.hbase.regionserver.StoreFileWriter; import org.apache.hadoop.io.Writable; /** @@ -154,7 +155,7 @@ public class CompoundBloomFilterWriter extends CompoundBloomFilterBase * Adds a Bloom filter key. This key must be greater than the previous key, * as defined by the comparator this compound Bloom filter is configured * with. For efficiency, key monotonicity is not checked here. 
See - * {@link org.apache.hadoop.hbase.regionserver.StoreFile.Writer#append( + * {@link StoreFileWriter#append( * org.apache.hadoop.hbase.Cell)} for the details of deduplication. */ @Override diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/SimpleRegionObserver.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/SimpleRegionObserver.java index dbcf2e9..6ba387f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/SimpleRegionObserver.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/SimpleRegionObserver.java @@ -64,7 +64,7 @@ import org.apache.hadoop.hbase.regionserver.RegionScanner; import org.apache.hadoop.hbase.regionserver.ScanType; import org.apache.hadoop.hbase.regionserver.Store; import org.apache.hadoop.hbase.regionserver.StoreFile; -import org.apache.hadoop.hbase.regionserver.StoreFile.Reader; +import org.apache.hadoop.hbase.regionserver.StoreFileReader; import org.apache.hadoop.hbase.regionserver.wal.HLogKey; import org.apache.hadoop.hbase.wal.WALKey; import org.apache.hadoop.hbase.regionserver.wal.WALEdit; @@ -702,18 +702,45 @@ public class SimpleRegionObserver extends BaseRegionObserver { ctPostWALRestoreDeprecated.incrementAndGet(); } + /** + * @deprecated Since 1.3. Removed in 2.0. Use {@link #preStoreFileReaderOpen(ObserverContext, + * FileSystem, Path, FSDataInputStreamWrapper, long, CacheConfig, Reference, StoreFileReader)}. + */ + @Deprecated + @Override + public StoreFile.Reader preStoreFileReaderOpen(ObserverContext ctx, + FileSystem fs, Path p, FSDataInputStreamWrapper in, long size, CacheConfig cacheConf, + Reference r, StoreFile.Reader reader) throws IOException { + ctPreStoreFileReaderOpen.incrementAndGet(); + return null; + } + + /** + * @deprecated Since 1.3. Removed in 2.0. Use {@link #postStoreFileReaderOpen(ObserverContext, + * FileSystem, Path, FSDataInputStreamWrapper, long, CacheConfig, Reference, + * StoreFileReader)}. 
+ */ + @Deprecated + @Override + public StoreFile.Reader postStoreFileReaderOpen(ObserverContext ctx, + FileSystem fs, Path p, FSDataInputStreamWrapper in, long size, CacheConfig cacheConf, + Reference r, StoreFile.Reader reader) throws IOException { + ctPostStoreFileReaderOpen.incrementAndGet(); + return reader; + } + @Override - public Reader preStoreFileReaderOpen(ObserverContext ctx, + public StoreFileReader preStoreFileReaderOpen(ObserverContext ctx, FileSystem fs, Path p, FSDataInputStreamWrapper in, long size, CacheConfig cacheConf, - Reference r, Reader reader) throws IOException { + Reference r, StoreFileReader reader) throws IOException { ctPreStoreFileReaderOpen.incrementAndGet(); return null; } @Override - public Reader postStoreFileReaderOpen(ObserverContext ctx, + public StoreFileReader postStoreFileReaderOpen(ObserverContext ctx, FileSystem fs, Path p, FSDataInputStreamWrapper in, long size, CacheConfig cacheConf, - Reference r, Reader reader) throws IOException { + Reference r, StoreFileReader reader) throws IOException { ctPostStoreFileReaderOpen.incrementAndGet(); return reader; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.java index de3d1f3..b68fa19 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.java @@ -40,6 +40,7 @@ import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.KeyValue; +import org.apache.hadoop.hbase.regionserver.StoreFileWriter; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.Tag; import org.apache.hadoop.hbase.client.Durability; @@ -51,7 +52,6 @@ import org.apache.hadoop.hbase.io.hfile.bucket.BucketCache; import org.apache.hadoop.hbase.regionserver.BloomType; import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.regionserver.Region; -import org.apache.hadoop.hbase.regionserver.StoreFile; import org.apache.hadoop.hbase.util.BloomFilterFactory; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.ChecksumType; @@ -348,7 +348,7 @@ public class TestCacheOnWrite { .withBlockSize(DATA_BLOCK_SIZE) .withDataBlockEncoding(NoOpDataBlockEncoder.INSTANCE.getDataBlockEncoding()) .withIncludesTags(useTags).build(); - StoreFile.Writer sfw = new StoreFile.WriterBuilder(conf, cacheConf, fs) + StoreFileWriter sfw = new StoreFileWriter.Builder(conf, cacheConf, fs) .withOutputDir(storeFileParentDir).withComparator(KeyValue.COMPARATOR) .withFileContext(meta) .withBloomType(BLOOM_TYPE).withMaxKeyCount(NUM_KV).build(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestPrefetch.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestPrefetch.java index 30e49c0..18e74f0 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestPrefetch.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestPrefetch.java @@ -30,9 +30,9 @@ import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.KeyValue; +import org.apache.hadoop.hbase.regionserver.StoreFileWriter; import org.apache.hadoop.hbase.testclassification.SmallTests; 
import org.apache.hadoop.hbase.fs.HFileSystem; -import org.apache.hadoop.hbase.regionserver.StoreFile; import org.junit.Before; import org.junit.Test; @@ -109,7 +109,7 @@ public class TestPrefetch { HFileContext meta = new HFileContextBuilder() .withBlockSize(DATA_BLOCK_SIZE) .build(); - StoreFile.Writer sfw = new StoreFile.WriterBuilder(conf, cacheConf, fs) + StoreFileWriter sfw = new StoreFileWriter.Builder(conf, cacheConf, fs) .withOutputDir(storeFileParentDir) .withComparator(KeyValue.COMPARATOR) .withFileContext(meta) diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestSeekBeforeWithInlineBlocks.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestSeekBeforeWithInlineBlocks.java index ac92f4f..19578a6 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestSeekBeforeWithInlineBlocks.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestSeekBeforeWithInlineBlocks.java @@ -37,7 +37,7 @@ import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.fs.HFileSystem; import org.apache.hadoop.hbase.regionserver.BloomType; -import org.apache.hadoop.hbase.regionserver.StoreFile; +import org.apache.hadoop.hbase.regionserver.StoreFileWriter; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.util.BloomFilterFactory; import org.apache.hadoop.hbase.util.Bytes; @@ -114,8 +114,8 @@ public class TestSeekBeforeWithInlineBlocks { .withBlockSize(DATA_BLOCK_SIZE) .build(); - StoreFile.Writer storeFileWriter = - new StoreFile.WriterBuilder(conf, cacheConf, fs) + StoreFileWriter storeFileWriter = + new StoreFileWriter.Builder(conf, cacheConf, fs) .withFilePath(hfilePath) .withFileContext(meta) .withBloomType(bloomType) diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/CreateRandomStoreFile.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/CreateRandomStoreFile.java index c3f7b82..a311501 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/CreateRandomStoreFile.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/CreateRandomStoreFile.java @@ -185,7 +185,7 @@ public class CreateRandomStoreFile { HFileContext meta = new HFileContextBuilder().withCompression(compr) .withBlockSize(blockSize).build(); - StoreFile.Writer sfw = new StoreFile.WriterBuilder(conf, + StoreFileWriter sfw = new StoreFileWriter.Builder(conf, new CacheConfig(conf), fs) .withOutputDir(outputDir) .withBloomType(bloomType) diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/DataBlockEncodingTool.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/DataBlockEncodingTool.java index 1927334..fadb541 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/DataBlockEncodingTool.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/DataBlockEncodingTool.java @@ -595,7 +595,7 @@ public class DataBlockEncodingTool { StoreFile hsf = new StoreFile(fs, path, conf, cacheConf, BloomType.NONE); - StoreFile.Reader reader = hsf.createReader(); + StoreFileReader reader = hsf.createStoreFileReader(); reader.loadFileInfo(); KeyValueScanner scanner = reader.getStoreFileScanner(true, true); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/EncodedSeekPerformanceTest.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/EncodedSeekPerformanceTest.java index 
6c801bf..2c61be0 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/EncodedSeekPerformanceTest.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/EncodedSeekPerformanceTest.java @@ -62,7 +62,7 @@ public class EncodedSeekPerformanceTest { StoreFile storeFile = new StoreFile(testingUtility.getTestFileSystem(), path, configuration, cacheConf, BloomType.NONE); - StoreFile.Reader reader = storeFile.createReader(); + StoreFileReader reader = storeFile.createStoreFileReader(); StoreFileScanner scanner = reader.getStoreFileScanner(true, false); Cell current; @@ -94,7 +94,7 @@ public class EncodedSeekPerformanceTest { long totalSize = 0; - StoreFile.Reader reader = storeFile.createReader(); + StoreFileReader reader = storeFile.createStoreFileReader(); StoreFileScanner scanner = reader.getStoreFileScanner(true, false); long startReadingTime = System.nanoTime(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/MockStoreFile.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/MockStoreFile.java index 70623e9..8d2098d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/MockStoreFile.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/MockStoreFile.java @@ -27,7 +27,6 @@ import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.KeyValue.KVComparator; import org.apache.hadoop.hbase.HDFSBlocksDistribution; import org.apache.hadoop.hbase.io.hfile.CacheConfig; -import org.apache.hadoop.hbase.regionserver.RSRpcServices; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; @@ -130,12 +129,20 @@ public class MockStoreFile extends StoreFile { return hdfsBlocksDistribution; } - @Override + /** + * @deprecated Since 1.3. Removed in 2.0 Use {@link #getStoreFileReader()}. 
+ */ + @Deprecated public StoreFile.Reader getReader() { + return (StoreFile.Reader) getStoreFileReader(); + } + + @Override + public StoreFileReader getStoreFileReader() { final long len = this.length; final TimeRangeTracker timeRangeTracker = this.timeRangeTracker; final long entries = this.entryCount; - return new StoreFile.Reader() { + return new StoreFileReader() { @Override public long length() { return len; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCacheOnWriteInSchema.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCacheOnWriteInSchema.java index 46a9cb3..5d666b8 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCacheOnWriteInSchema.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCacheOnWriteInSchema.java @@ -207,7 +207,7 @@ public class TestCacheOnWriteInSchema { @Test public void testCacheOnWriteInSchema() throws IOException { // Write some random data into the store - StoreFile.Writer writer = store.createWriterInTmp(Integer.MAX_VALUE, + StoreFileWriter writer = store.createWriterInTmpDir(Integer.MAX_VALUE, HFile.DEFAULT_COMPRESSION_ALGORITHM, false, true, false); writeStoreFile(writer); writer.close(); @@ -220,7 +220,7 @@ public class TestCacheOnWriteInSchema { BlockCache cache = cacheConf.getBlockCache(); StoreFile sf = new StoreFile(fs, path, conf, cacheConf, BloomType.ROWCOL); - HFileReaderV2 reader = (HFileReaderV2) sf.createReader().getHFileReader(); + HFileReaderV2 reader = (HFileReaderV2) sf.createStoreFileReader().getHFileReader(); try { // Open a scanner with (on read) caching disabled HFileScanner scanner = reader.getScanner(false, false); @@ -267,7 +267,7 @@ public class TestCacheOnWriteInSchema { } } - private void writeStoreFile(StoreFile.Writer writer) throws IOException { + private void writeStoreFile(StoreFileWriter writer) throws IOException { final int rowLen = 32; for (int i = 0; i < NUM_KV; ++i) { byte[] k = TestHFileWriterV2.randomOrderedKey(rand, i); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java index 72436e9..f021f7d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java @@ -214,7 +214,7 @@ public class TestCompaction { int count = 0; for (StoreFile f: this.r.stores. 
get(COLUMN_FAMILY_TEXT).getStorefiles()) { - HFileScanner scanner = f.getReader().getScanner(false, false); + HFileScanner scanner = f.getStoreFileReader().getScanner(false, false); if (!scanner.seekTo()) { continue; } @@ -564,9 +564,9 @@ public class TestCompaction { private static StoreFile createFile() throws Exception { StoreFile sf = mock(StoreFile.class); when(sf.getPath()).thenReturn(new Path("file")); - StoreFile.Reader r = mock(StoreFile.Reader.class); + StoreFileReader r = mock(StoreFileReader.class); when(r.length()).thenReturn(10L); - when(sf.getReader()).thenReturn(r); + when(sf.getStoreFileReader()).thenReturn(r); return sf; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionPolicy.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionPolicy.java index 24b3667..1a0ad71 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionPolicy.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionPolicy.java @@ -176,7 +176,7 @@ public class TestCompactionPolicy { long[] getSizes(List sfList) { long[] aNums = new long[sfList.size()]; for (int i = 0; i < sfList.size(); ++i) { - aNums[i] = sfList.get(i).getReader().length(); + aNums[i] = sfList.get(i).getStoreFileReader().length(); } return aNums; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompoundBloomFilter.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompoundBloomFilter.java index 0129fad..6920322 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompoundBloomFilter.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompoundBloomFilter.java @@ -198,7 +198,7 @@ public class TestCompoundBloomFilter { private void readStoreFile(int t, BloomType bt, List kvs, Path sfPath) throws IOException { StoreFile sf = new StoreFile(fs, sfPath, conf, cacheConf, bt); - StoreFile.Reader r = sf.createReader(); + StoreFileReader r = sf.createStoreFileReader(); final boolean pread = true; // does not really matter StoreFileScanner scanner = r.getStoreFileScanner(true, pread); @@ -298,7 +298,7 @@ public class TestCompoundBloomFilter { conf.setBoolean(CacheConfig.CACHE_BLOCKS_ON_WRITE_KEY, true); cacheConf = new CacheConfig(conf); HFileContext meta = new HFileContextBuilder().withBlockSize(BLOCK_SIZES[t]).build(); - StoreFile.Writer w = new StoreFile.WriterBuilder(conf, cacheConf, fs) + StoreFileWriter w = new StoreFileWriter.Builder(conf, cacheConf, fs) .withOutputDir(TEST_UTIL.getDataTestDir()) .withBloomType(bt) .withFileContext(meta) diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestFSErrorsExposed.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestFSErrorsExposed.java index cc509a4..c529296 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestFSErrorsExposed.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestFSErrorsExposed.java @@ -81,7 +81,7 @@ public class TestFSErrorsExposed { FileSystem fs = new HFileSystem(faultyfs); CacheConfig cacheConf = new CacheConfig(util.getConfiguration()); HFileContext meta = new HFileContextBuilder().withBlockSize(2 * 1024).build(); - StoreFile.Writer writer = new StoreFile.WriterBuilder( + StoreFileWriter writer = new StoreFileWriter.Builder( util.getConfiguration(), cacheConf, hfs) .withOutputDir(hfilePath) .withFileContext(meta) @@ -92,7 +92,7 
@@ public class TestFSErrorsExposed { StoreFile sf = new StoreFile(fs, writer.getPath(), util.getConfiguration(), cacheConf, BloomType.NONE); - StoreFile.Reader reader = sf.createReader(); + StoreFileReader reader = sf.createStoreFileReader(); HFileScanner scanner = reader.getScanner(false, true); FaultyInputStream inStream = faultyfs.inStreams.get(0).get(); @@ -131,7 +131,7 @@ public class TestFSErrorsExposed { HFileSystem fs = new HFileSystem(faultyfs); CacheConfig cacheConf = new CacheConfig(util.getConfiguration()); HFileContext meta = new HFileContextBuilder().withBlockSize(2 * 1024).build(); - StoreFile.Writer writer = new StoreFile.WriterBuilder( + StoreFileWriter writer = new StoreFileWriter.Builder( util.getConfiguration(), cacheConf, hfs) .withOutputDir(hfilePath) .withFileContext(meta) diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java index e838617..087fa5b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java @@ -4203,7 +4203,7 @@ public class TestHRegion { HStore store = (HStore) region.getStore(fam1); Collection storeFiles = store.getStorefiles(); for (StoreFile storefile : storeFiles) { - StoreFile.Reader reader = storefile.getReader(); + StoreFileReader reader = storefile.getStoreFileReader(); reader.loadFileInfo(); reader.loadBloomfilter(); assertEquals(num_unique_rows * duplicate_multiplier, reader.getEntries()); @@ -4215,7 +4215,7 @@ public class TestHRegion { // after compaction storeFiles = store.getStorefiles(); for (StoreFile storefile : storeFiles) { - StoreFile.Reader reader = storefile.getReader(); + StoreFileReader reader = storefile.getStoreFileReader(); reader.loadFileInfo(); reader.loadBloomfilter(); assertEquals(num_unique_rows * duplicate_multiplier * num_storefiles, reader.getEntries()); @@ -6044,7 +6044,7 @@ public class TestHRegion { Collection storefiles = store.getStorefiles(); for (StoreFile sf : storefiles) { assertFalse("Tags should not be present " - ,sf.getReader().getHFileReader().getFileContext().isIncludesTags()); + ,sf.getStoreFileReader().getHFileReader().getFileContext().isIncludesTags()); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMajorCompaction.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMajorCompaction.java index 3edf7c7..2874b76 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMajorCompaction.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMajorCompaction.java @@ -351,7 +351,7 @@ public class TestMajorCompaction { int count1 = 0; int count2 = 0; for (StoreFile f: r.getStore(COLUMN_FAMILY_TEXT).getStorefiles()) { - HFileScanner scanner = f.getReader().getScanner(false, false); + HFileScanner scanner = f.getStoreFileReader().getScanner(false, false); scanner.seekTo(); do { byte [] row = scanner.getKeyValue().getRow(); @@ -370,7 +370,7 @@ public class TestMajorCompaction { private int count() throws IOException { int count = 0; for (StoreFile f: r.getStore(COLUMN_FAMILY_TEXT).getStorefiles()) { - HFileScanner scanner = f.getReader().getScanner(false, false); + HFileScanner scanner = f.getStoreFileReader().getScanner(false, false); if (!scanner.seekTo()) { continue; } diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionReplicas.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionReplicas.java index 6693ca5..a29b73f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionReplicas.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionReplicas.java @@ -485,7 +485,7 @@ public class TestRegionReplicas { LOG.debug(getRS().getFileSystem().exists(sf.getPath())); Assert.assertFalse(getRS().getFileSystem().exists(sf.getPath())); - HFileScanner scanner = sf.getReader().getScanner(false, false); + HFileScanner scanner = sf.getStoreFileReader().getScanner(false, false); scanner.seekTo(); do { keys++; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScanWithBloomError.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScanWithBloomError.java index 7682024..c70d6b4 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScanWithBloomError.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScanWithBloomError.java @@ -146,7 +146,7 @@ public class TestScanWithBloomError { } }); - StoreFile.Reader lastStoreFileReader = null; + StoreFileReader lastStoreFileReader = null; for (StoreFileScanner sfScanner : scanners) lastStoreFileReader = sfScanner.getReader(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStore.java index 414c663..553502d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStore.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStore.java @@ -265,7 +265,8 @@ public class TestStore { init(name.getMethodName(), conf, hcd); // Test createWriterInTmp() - StoreFile.Writer writer = store.createWriterInTmp(4, hcd.getCompression(), false, true, false); + StoreFileWriter writer = store.createWriterInTmpDir(4, hcd.getCompression(), false, true, + false); Path path = writer.getPath(); writer.append(new KeyValue(row, family, qf1, Bytes.toBytes(1))); writer.append(new KeyValue(row, family, qf2, Bytes.toBytes(2))); @@ -335,7 +336,7 @@ public class TestStore { assertEquals(storeFileNum - i, sfs.size()); // Ensure only non-expired files remain. 
for (StoreFile sf : sfs) { - assertTrue(sf.getReader().getMaxTimestamp() >= (edge.currentTime() - storeTtl)); + assertTrue(sf.getStoreFileReader().getMaxTimestamp() >= (edge.currentTime() - storeTtl)); } } else { assertEquals(storeFileNum, sfs.size()); @@ -350,7 +351,7 @@ public class TestStore { if (minVersions == 0) { assertEquals(1, sfs.size()); } - long ts = sfs.iterator().next().getReader().getMaxTimestamp(); + long ts = sfs.iterator().next().getStoreFileReader().getMaxTimestamp(); assertTrue(ts < (edge.currentTime() - storeTtl)); for (StoreFile sf : sfs) { @@ -432,7 +433,7 @@ public class TestStore { Configuration c = HBaseConfiguration.create(); FileSystem fs = FileSystem.get(c); HFileContext meta = new HFileContextBuilder().withBlockSize(BLOCKSIZE_SMALL).build(); - StoreFile.Writer w = new StoreFile.WriterBuilder(c, new CacheConfig(c), + StoreFileWriter w = new StoreFileWriter.Builder(c, new CacheConfig(c), fs) .withOutputDir(storedir) .withFileContext(meta) @@ -1007,7 +1008,7 @@ public class TestStore { Configuration c = TEST_UTIL.getConfiguration(); FileSystem fs = FileSystem.get(c); HFileContext fileContext = new HFileContextBuilder().withBlockSize(BLOCKSIZE_SMALL).build(); - StoreFile.Writer w = new StoreFile.WriterBuilder(c, new CacheConfig(c), + StoreFileWriter w = new StoreFileWriter.Builder(c, new CacheConfig(c), fs) .withOutputDir(storedir) .withFileContext(fileContext) diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFile.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFile.java index da4593b..b68477f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFile.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFile.java @@ -66,7 +66,6 @@ import com.google.common.collect.Iterables; import com.google.common.collect.Lists; import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.spy; import static org.mockito.Mockito.when; /** @@ -104,7 +103,7 @@ public class TestStoreFile extends HBaseTestCase { conf, fs, new Path(this.testDir, hri.getTable().getNameAsString()), hri); HFileContext meta = new HFileContextBuilder().withBlockSize(2*1024).build(); - StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cacheConf, this.fs) + StoreFileWriter writer = new StoreFileWriter.Builder(conf, cacheConf, this.fs) .withFilePath(regionFs.createTempName()) .withFileContext(meta) .build(); @@ -116,7 +115,7 @@ public class TestStoreFile extends HBaseTestCase { checkHalfHFile(regionFs, sf); } - private void writeStoreFile(final StoreFile.Writer writer) throws IOException { + private void writeStoreFile(final StoreFileWriter writer) throws IOException { writeStoreFile(writer, Bytes.toBytes(getName()), Bytes.toBytes(getName())); } @@ -129,7 +128,7 @@ public class TestStoreFile extends HBaseTestCase { * @param writer * @throws IOException */ - public static void writeStoreFile(final StoreFile.Writer writer, byte[] fam, byte[] qualifier) + public static void writeStoreFile(final StoreFileWriter writer, byte[] fam, byte[] qualifier) throws IOException { long now = System.currentTimeMillis(); try { @@ -156,7 +155,7 @@ public class TestStoreFile extends HBaseTestCase { HFileContext meta = new HFileContextBuilder().withBlockSize(8 * 1024).build(); // Make a store file and write data to it. 
- StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cacheConf, this.fs) + StoreFileWriter writer = new StoreFileWriter.Builder(conf, cacheConf, this.fs) .withFilePath(regionFs.createTempName()) .withFileContext(meta) .build(); @@ -165,7 +164,7 @@ public class TestStoreFile extends HBaseTestCase { Path hsfPath = regionFs.commitStoreFile(TEST_FAMILY, writer.getPath()); StoreFile hsf = new StoreFile(this.fs, hsfPath, conf, cacheConf, BloomType.NONE); - StoreFile.Reader reader = hsf.createReader(); + StoreFileReader reader = hsf.createStoreFileReader(); // Split on a row, not in middle of row. Midkey returned by reader // may be in middle of row. Create new one with empty column and // timestamp. @@ -183,7 +182,7 @@ public class TestStoreFile extends HBaseTestCase { BloomType.NONE); // Now confirm that I can read from the reference and that it only gets // keys from top half of the file. - HFileScanner s = refHsf.createReader().getScanner(false, false); + HFileScanner s = refHsf.createStoreFileReader().getScanner(false, false); for(boolean first = true; (!s.isSeeked() && s.seekTo()) || s.next();) { ByteBuffer bb = s.getKey(); kv = KeyValue.createKeyValueFromKey(bb); @@ -205,7 +204,7 @@ public class TestStoreFile extends HBaseTestCase { HFileContext meta = new HFileContextBuilder().withBlockSize(8 * 1024).build(); // Make a store file and write data to it. - StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cacheConf, this.fs) + StoreFileWriter writer = new StoreFileWriter.Builder(conf, cacheConf, this.fs) .withFilePath(regionFs.createTempName()) .withFileContext(meta) .build(); @@ -225,7 +224,7 @@ public class TestStoreFile extends HBaseTestCase { // Now confirm that I can read from the link int count = 1; - HFileScanner s = hsf.createReader().getScanner(false, false); + HFileScanner s = hsf.createStoreFileReader().getScanner(false, false); s.seekTo(); while (s.next()) { count++; @@ -234,7 +233,7 @@ public class TestStoreFile extends HBaseTestCase { } public void testEmptyStoreFileRestrictKeyRanges() throws Exception { - StoreFile.Reader reader = mock(StoreFile.Reader.class); + StoreFileReader reader = mock(StoreFileReader.class); Store store = mock(Store.class); HColumnDescriptor hcd = mock(HColumnDescriptor.class); byte[] cf = Bytes.toBytes("ty"); @@ -263,7 +262,7 @@ public class TestStoreFile extends HBaseTestCase { HFileContext meta = new HFileContextBuilder().withBlockSize(8 * 1024).build(); // Make a store file and write data to it. 
//// - StoreFile.Writer writer = new StoreFile.WriterBuilder(testConf, cacheConf, this.fs) + StoreFileWriter writer = new StoreFileWriter.Builder(testConf, cacheConf, this.fs) .withFilePath(regionFs.createTempName()) .withFileContext(meta) .build(); @@ -286,7 +285,7 @@ public class TestStoreFile extends HBaseTestCase { HRegionInfo splitHriA = new HRegionInfo(hri.getTable(), null, SPLITKEY); HRegionInfo splitHriB = new HRegionInfo(hri.getTable(), SPLITKEY, null); StoreFile f = new StoreFile(fs, linkFilePath, testConf, cacheConf, BloomType.NONE); - f.createReader(); + f.createStoreFileReader(); Path pathA = splitStoreFile(cloneRegionFs, splitHriA, TEST_FAMILY, f, SPLITKEY, true); // top Path pathB = splitStoreFile(cloneRegionFs, splitHriB, TEST_FAMILY, f, SPLITKEY, false);// bottom f.closeReader(true); @@ -302,7 +301,7 @@ public class TestStoreFile extends HBaseTestCase { // Now confirm that I can read from the ref to link int count = 1; - HFileScanner s = hsfA.createReader().getScanner(false, false); + HFileScanner s = hsfA.createStoreFileReader().getScanner(false, false); s.seekTo(); while (s.next()) { count++; @@ -314,7 +313,7 @@ public class TestStoreFile extends HBaseTestCase { BloomType.NONE); // Now confirm that I can read from the ref to link - HFileScanner sB = hsfB.createReader().getScanner(false, false); + HFileScanner sB = hsfB.createStoreFileReader().getScanner(false, false); sB.seekTo(); //count++ as seekTo() will advance the scanner @@ -329,7 +328,7 @@ public class TestStoreFile extends HBaseTestCase { private void checkHalfHFile(final HRegionFileSystem regionFs, final StoreFile f) throws IOException { - byte [] midkey = f.createReader().midkey(); + byte [] midkey = f.createStoreFileReader().midkey(); KeyValue midKV = KeyValue.createKeyValueFromKey(midkey); byte [] midRow = midKV.getRow(); // Create top split. @@ -341,10 +340,10 @@ public class TestStoreFile extends HBaseTestCase { midRow, null); Path bottomPath = splitStoreFile(regionFs, bottomHri, TEST_FAMILY, f, midRow, false); // Make readers on top and bottom. - StoreFile.Reader top = new StoreFile( - this.fs, topPath, conf, cacheConf, BloomType.NONE).createReader(); - StoreFile.Reader bottom = new StoreFile( - this.fs, bottomPath, conf, cacheConf, BloomType.NONE).createReader(); + StoreFileReader top = new StoreFile( + this.fs, topPath, conf, cacheConf, BloomType.NONE).createStoreFileReader(); + StoreFileReader bottom = new StoreFile( + this.fs, bottomPath, conf, cacheConf, BloomType.NONE).createStoreFileReader(); ByteBuffer previous = null; LOG.info("Midkey: " + midKV.toString()); ByteBuffer bbMidkeyBytes = ByteBuffer.wrap(midkey); @@ -402,7 +401,7 @@ public class TestStoreFile extends HBaseTestCase { assertNull(bottomPath); - top = new StoreFile(this.fs, topPath, conf, cacheConf, BloomType.NONE).createReader(); + top = new StoreFile(this.fs, topPath, conf, cacheConf, BloomType.NONE).createStoreFileReader(); // Now read from the top. 
first = true; topScanner = top.getScanner(false, false); @@ -437,7 +436,7 @@ public class TestStoreFile extends HBaseTestCase { bottomPath = splitStoreFile(regionFs, bottomHri, TEST_FAMILY, f, badmidkey, false); assertNull(topPath); bottom = new StoreFile(this.fs, bottomPath, conf, cacheConf, - BloomType.NONE).createReader(); + BloomType.NONE).createStoreFileReader(); first = true; bottomScanner = bottom.getScanner(false, false); while ((!bottomScanner.isSeeked() && bottomScanner.seekTo()) || @@ -471,7 +470,7 @@ public class TestStoreFile extends HBaseTestCase { private static final String localFormatter = "%010d"; - private void bloomWriteRead(StoreFile.Writer writer, FileSystem fs) throws Exception { + private void bloomWriteRead(StoreFileWriter writer, FileSystem fs) throws Exception { float err = conf.getFloat(BloomFilterFactory.IO_STOREFILE_BLOOM_ERROR_RATE, 0); Path f = writer.getPath(); long now = System.currentTimeMillis(); @@ -483,7 +482,7 @@ public class TestStoreFile extends HBaseTestCase { } writer.close(); - StoreFile.Reader reader = new StoreFile.Reader(fs, f, cacheConf, conf); + StoreFileReader reader = new StoreFileReader(fs, f, cacheConf, conf); reader.loadFileInfo(); reader.loadBloomfilter(); StoreFileScanner scanner = reader.getStoreFileScanner(false, false); @@ -531,7 +530,7 @@ public class TestStoreFile extends HBaseTestCase { .withChecksumType(CKTYPE) .withBytesPerCheckSum(CKBYTES).build(); // Make a store file and write data to it. - StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cacheConf, this.fs) + StoreFileWriter writer = new StoreFileWriter.Builder(conf, cacheConf, this.fs) .withFilePath(f) .withBloomType(BloomType.ROW) .withMaxKeyCount(2000) @@ -554,7 +553,7 @@ public class TestStoreFile extends HBaseTestCase { .withChecksumType(CKTYPE) .withBytesPerCheckSum(CKBYTES).build(); // Make a store file and write data to it. - StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cacheConf, this.fs) + StoreFileWriter writer = new StoreFileWriter.Builder(conf, cacheConf, this.fs) .withFilePath(f) .withMaxKeyCount(2000) .withFileContext(meta) @@ -570,7 +569,7 @@ public class TestStoreFile extends HBaseTestCase { } writer.close(); - StoreFile.Reader reader = new StoreFile.Reader(fs, f, cacheConf, conf); + StoreFileReader reader = new StoreFileReader(fs, f, cacheConf, conf); reader.loadFileInfo(); reader.loadBloomfilter(); @@ -607,7 +606,7 @@ public class TestStoreFile extends HBaseTestCase { Path f = new Path(ROOT_DIR, getName()); HFileContext meta = new HFileContextBuilder().withBlockSize(8 * 1024).build(); // Make a store file and write data to it. - StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cacheConf, this.fs) + StoreFileWriter writer = new StoreFileWriter.Builder(conf, cacheConf, this.fs) .withFilePath(f) .withFileContext(meta) .build(); @@ -615,7 +614,7 @@ public class TestStoreFile extends HBaseTestCase { writeStoreFile(writer); writer.close(); - StoreFile.Reader reader = new StoreFile.Reader(fs, f, cacheConf, conf); + StoreFileReader reader = new StoreFileReader(fs, f, cacheConf, conf); // Now do reseek with empty KV to position to the beginning of the file @@ -652,7 +651,7 @@ public class TestStoreFile extends HBaseTestCase { .withChecksumType(CKTYPE) .withBytesPerCheckSum(CKBYTES).build(); // Make a store file and write data to it. 
-    StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cacheConf, this.fs)
+    StoreFileWriter writer = new StoreFileWriter.Builder(conf, cacheConf, this.fs)
             .withFilePath(f)
             .withBloomType(bt[x])
             .withMaxKeyCount(expKeys[x])
@@ -674,7 +673,7 @@ public class TestStoreFile extends HBaseTestCase {
       }
       writer.close();
-      StoreFile.Reader reader = new StoreFile.Reader(fs, f, cacheConf, conf);
+      StoreFileReader reader = new StoreFileReader(fs, f, cacheConf, conf);
       reader.loadFileInfo();
       reader.loadBloomfilter();
       StoreFileScanner scanner = reader.getStoreFileScanner(false, false);
@@ -749,11 +748,11 @@ public class TestStoreFile extends HBaseTestCase {
                                        long seqId, String path) {
     StoreFile mock = Mockito.mock(StoreFile.class);
-    StoreFile.Reader reader = Mockito.mock(StoreFile.Reader.class);
+    StoreFileReader reader = Mockito.mock(StoreFileReader.class);
     Mockito.doReturn(size).when(reader).length();
-    Mockito.doReturn(reader).when(mock).getReader();
+    Mockito.doReturn(reader).when(mock).getStoreFileReader();
     Mockito.doReturn(bulkLoad).when(mock).isBulkLoadResult();
     Mockito.doReturn(bulkTimestamp).when(mock).getBulkLoadTimestamp();
     Mockito.doReturn(seqId).when(mock).getMaxSequenceId();
@@ -805,7 +804,7 @@ public class TestStoreFile extends HBaseTestCase {
     Path dir = new Path(storedir, "1234567890");
     HFileContext meta = new HFileContextBuilder().withBlockSize(8 * 1024).build();
     // Make a store file and write data to it.
-    StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cacheConf, this.fs)
+    StoreFileWriter writer = new StoreFileWriter.Builder(conf, cacheConf, this.fs)
             .withOutputDir(dir)
             .withFileContext(meta)
             .build();
@@ -825,7 +824,7 @@ public class TestStoreFile extends HBaseTestCase {
     HColumnDescriptor hcd = mock(HColumnDescriptor.class);
     when(hcd.getName()).thenReturn(family);
     when(store.getFamily()).thenReturn(hcd);
-    StoreFile.Reader reader = hsf.createReader();
+    StoreFileReader reader = hsf.createStoreFileReader();
     StoreFileScanner scanner = reader.getStoreFileScanner(false, false);
     TreeSet columns = new TreeSet(Bytes.BYTES_COMPARATOR);
     columns.add(qualifier);
@@ -875,13 +874,13 @@ public class TestStoreFile extends HBaseTestCase {
     conf.setBoolean(CacheConfig.CACHE_BLOCKS_ON_WRITE_KEY, false);
     CacheConfig cacheConf = new CacheConfig(conf);
     Path pathCowOff = new Path(baseDir, "123456789");
-    StoreFile.Writer writer = writeStoreFile(conf, cacheConf, pathCowOff, 3);
+    StoreFileWriter writer = writeStoreFile(conf, cacheConf, pathCowOff, 3);
     StoreFile hsf = new StoreFile(this.fs, writer.getPath(), conf, cacheConf, BloomType.NONE);
     LOG.debug(hsf.getPath().toString());
     // Read this file, we should see 3 misses
-    StoreFile.Reader reader = hsf.createReader();
+    StoreFileReader reader = hsf.createStoreFileReader();
     reader.loadFileInfo();
     StoreFileScanner scanner = reader.getStoreFileScanner(true, true);
     scanner.seek(KeyValue.LOWESTKEY);
@@ -902,7 +901,7 @@ public class TestStoreFile extends HBaseTestCase {
       BloomType.NONE);
     // Read this file, we should see 3 hits
-    reader = hsf.createReader();
+    reader = hsf.createStoreFileReader();
     scanner = reader.getStoreFileScanner(true, true);
     scanner.seek(KeyValue.LOWESTKEY);
     while (scanner.next() != null);
@@ -916,13 +915,13 @@ public class TestStoreFile extends HBaseTestCase {
     // Let's read back the two files to ensure the blocks exactly match
     hsf = new StoreFile(this.fs, pathCowOff, conf, cacheConf, BloomType.NONE);
-    StoreFile.Reader readerOne = hsf.createReader();
+    StoreFileReader readerOne = hsf.createStoreFileReader();
     readerOne.loadFileInfo();
     StoreFileScanner scannerOne = readerOne.getStoreFileScanner(true, true);
     scannerOne.seek(KeyValue.LOWESTKEY);
     hsf = new StoreFile(this.fs, pathCowOn, conf, cacheConf, BloomType.NONE);
-    StoreFile.Reader readerTwo = hsf.createReader();
+    StoreFileReader readerTwo = hsf.createStoreFileReader();
     readerTwo.loadFileInfo();
     StoreFileScanner scannerTwo = readerTwo.getStoreFileScanner(true, true);
     scannerTwo.seek(KeyValue.LOWESTKEY);
@@ -955,7 +954,7 @@ public class TestStoreFile extends HBaseTestCase {
     cacheConf = new CacheConfig(conf);
     hsf = new StoreFile(this.fs, pathCowOff, conf, cacheConf, BloomType.NONE);
-    reader = hsf.createReader();
+    reader = hsf.createStoreFileReader();
     reader.close(cacheConf.shouldEvictOnClose());
     // We should have 3 new evictions
@@ -969,7 +968,7 @@ public class TestStoreFile extends HBaseTestCase {
     cacheConf = new CacheConfig(conf);
     hsf = new StoreFile(this.fs, pathCowOn, conf, cacheConf, BloomType.NONE);
-    reader = hsf.createReader();
+    reader = hsf.createStoreFileReader();
     reader.close(cacheConf.shouldEvictOnClose());
     // We expect no changes
@@ -990,7 +989,7 @@ public class TestStoreFile extends HBaseTestCase {
     return new Path(new Path(regionDir, family), path.getName());
   }
-  private StoreFile.Writer writeStoreFile(Configuration conf,
+  private StoreFileWriter writeStoreFile(Configuration conf,
      CacheConfig cacheConf, Path path, int numBlocks) throws IOException {
     // Let's put ~5 small KVs in each block, so let's make 5*numBlocks KVs
@@ -1010,7 +1009,7 @@ public class TestStoreFile extends HBaseTestCase {
                         .withBytesPerCheckSum(CKBYTES)
                         .build();
     // Make a store file and write data to it.
-    StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cacheConf, this.fs)
+    StoreFileWriter writer = new StoreFileWriter.Builder(conf, cacheConf, this.fs)
             .withFilePath(path)
             .withMaxKeyCount(2000)
             .withFileContext(meta)
@@ -1046,7 +1045,7 @@ public class TestStoreFile extends HBaseTestCase {
                     .withDataBlockEncoding(dataBlockEncoderAlgo)
                     .build();
     // Make a store file and write data to it.
-    StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cacheConf, this.fs)
+    StoreFileWriter writer = new StoreFileWriter.Builder(conf, cacheConf, this.fs)
            .withFilePath(path)
            .withMaxKeyCount(2000)
            .withFileContext(meta)
@@ -1055,7 +1054,7 @@ public class TestStoreFile extends HBaseTestCase {
     StoreFile storeFile = new StoreFile(fs, writer.getPath(), conf, cacheConf, BloomType.NONE);
-    StoreFile.Reader reader = storeFile.createReader();
+    StoreFileReader reader = storeFile.createStoreFileReader();
     Map fileInfo = reader.loadFileInfo();
     byte[] value = fileInfo.get(HFileDataBlockEncoder.DATA_BLOCK_ENCODING);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFileScannerWithTagCompression.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFileScannerWithTagCompression.java
index 4a6b2e7..758487d 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFileScannerWithTagCompression.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFileScannerWithTagCompression.java
@@ -65,13 +65,13 @@ public class TestStoreFileScannerWithTagCompression {
     HFileContext meta = new HFileContextBuilder().withBlockSize(8 * 1024).withIncludesTags(true)
         .withCompressTags(true).withDataBlockEncoding(DataBlockEncoding.PREFIX).build();
     // Make a store file and write data to it.
-    StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cacheConf, fs).withFilePath(f)
+    StoreFileWriter writer = new StoreFileWriter.Builder(conf, cacheConf, fs).withFilePath(f)
         .withFileContext(meta).build();
     writeStoreFile(writer);
     writer.close();
-    StoreFile.Reader reader = new StoreFile.Reader(fs, f, cacheConf, conf);
+    StoreFileReader reader = new StoreFileReader(fs, f, cacheConf, conf);
     StoreFileScanner s = reader.getStoreFileScanner(false, false);
     try {
       // Now do reseek with empty KV to position to the beginning of the file
@@ -91,7 +91,7 @@ public class TestStoreFileScannerWithTagCompression {
     }
   }
-  private void writeStoreFile(final StoreFile.Writer writer) throws IOException {
+  private void writeStoreFile(final StoreFileWriter writer) throws IOException {
     byte[] fam = Bytes.toBytes("f");
     byte[] qualifier = Bytes.toBytes("q");
     long now = System.currentTimeMillis();
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStripeStoreEngine.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStripeStoreEngine.java
index 3e90097..fb4d55b 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStripeStoreEngine.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStripeStoreEngine.java
@@ -105,7 +105,7 @@ public class TestStripeStoreEngine {
     StoreFile sf = mock(StoreFile.class);
     when(sf.getMetadataValue(any(byte[].class)))
       .thenReturn(StripeStoreFileManager.INVALID_KEY);
-    when(sf.getReader()).thenReturn(mock(StoreFile.Reader.class));
+    when(sf.getStoreFileReader()).thenReturn(mock(StoreFileReader.class));
     when(sf.getPath()).thenReturn(new Path("moo"));
     return sf;
   }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/MockStoreFileGenerator.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/MockStoreFileGenerator.java
index 0760736..6c2849c 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/MockStoreFileGenerator.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/MockStoreFileGenerator.java
@@ -23,10 +23,10 @@ import java.util.List;
 import java.util.Random;
 import com.google.common.base.Objects;
-import com.google.common.io.Files;
 import org.apache.commons.lang.RandomStringUtils;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.regionserver.StoreFile;
+import org.apache.hadoop.hbase.regionserver.StoreFileReader;
 import org.apache.hadoop.util.StringUtils;
 import static org.mockito.Mockito.mock;
@@ -63,7 +63,7 @@ class MockStoreFileGenerator {
   protected StoreFile createMockStoreFile(final long sizeInBytes, final long seqId) {
     StoreFile mockSf = mock(StoreFile.class);
-    StoreFile.Reader reader = mock(StoreFile.Reader.class);
+    StoreFileReader reader = mock(StoreFileReader.class);
     String stringPath = "/hbase/testTable/regionA/" + RandomStringUtils.random(FILENAME_LENGTH, 0, 0, true, true, null, random);
     Path path = new Path(stringPath);
@@ -77,7 +77,7 @@ class MockStoreFileGenerator {
     when(mockSf.excludeFromMinorCompaction()).thenReturn(false);
     when(mockSf.isReference()).thenReturn(false); // TODO come back to
     // this when selection takes this into account
-    when(mockSf.getReader()).thenReturn(reader);
+    when(mockSf.getStoreFileReader()).thenReturn(reader);
     String toString = Objects.toStringHelper("MockStoreFile")
         .add("isReference", false)
         .add("fileSize", StringUtils.humanReadableInt(sizeInBytes))
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/PerfTestCompactionPolicies.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/PerfTestCompactionPolicies.java
index 1e96aa0..08917a1 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/PerfTestCompactionPolicies.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/PerfTestCompactionPolicies.java
@@ -187,7 +187,7 @@ public class PerfTestCompactionPolicies extends MockStoreFileGenerator {
       storeFiles.removeAll(filesToCompact);
       for (StoreFile storeFile : filesToCompact) {
-        newFileSize += storeFile.getReader().length();
+        newFileSize += storeFile.getStoreFileReader().length();
       }
       storeFiles.add(createMockStoreFileBytes(newFileSize));
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestCompactor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestCompactor.java
index 6ec4cd4..756e569 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestCompactor.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestCompactor.java
@@ -46,7 +46,9 @@ import org.apache.hadoop.hbase.regionserver.BloomType;
 import org.apache.hadoop.hbase.regionserver.InternalScanner;
 import org.apache.hadoop.hbase.regionserver.ScannerContext;
 import org.apache.hadoop.hbase.regionserver.StoreFile;
+import org.apache.hadoop.hbase.regionserver.StoreFileReader;
 import org.apache.hadoop.hbase.regionserver.StoreFileScanner;
+import org.apache.hadoop.hbase.regionserver.StoreFileWriter;
 import org.apache.hadoop.hbase.regionserver.StripeMultiFileWriter;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.mockito.invocation.InvocationOnMock;
@@ -58,15 +60,15 @@ public class TestCompactor {
     // "Files" are totally unused, it's Scanner class below that gives compactor fake KVs.
     // But compaction depends on everything under the sun, so stub everything with dummies.
     StoreFile sf = mock(StoreFile.class);
-    StoreFile.Reader r = mock(StoreFile.Reader.class);
+    StoreFileReader r = mock(StoreFileReader.class);
     when(r.length()).thenReturn(1L);
     when(r.getBloomFilterType()).thenReturn(BloomType.NONE);
     when(r.getHFileReader()).thenReturn(mock(HFile.Reader.class));
     when(r.getStoreFileScanner(anyBoolean(), anyBoolean(), anyBoolean(), anyLong()))
         .thenReturn(mock(StoreFileScanner.class));
-    when(sf.getReader()).thenReturn(r);
-    when(sf.createReader()).thenReturn(r);
-    when(sf.createReader(anyBoolean())).thenReturn(r);
+    when(sf.getStoreFileReader()).thenReturn(r);
+    when(sf.createStoreFileReader()).thenReturn(r);
+    when(sf.createStoreFileReader(anyBoolean())).thenReturn(r);
     when(sf.cloneForReader()).thenReturn(sf);
     when(sf.getMaxSequenceId()).thenReturn(maxSequenceId);
     return sf;
@@ -78,7 +80,7 @@ public class TestCompactor {
   // StoreFile.Writer has private ctor and is unwieldy, so this has to be convoluted.
   public static class StoreFileWritersCapture
-      implements Answer<StoreFile.Writer>, StripeMultiFileWriter.WriterFactory {
+      implements Answer<StoreFileWriter>, StripeMultiFileWriter.WriterFactory {
     public static class Writer {
       public ArrayList kvs = new ArrayList();
       public TreeMap data = new TreeMap(Bytes.BYTES_COMPARATOR);
       public boolean hasMetadata;
     }
     private List writers = new ArrayList();
     @Override
-    public StoreFile.Writer createWriter() throws IOException {
+    public StoreFileWriter createWriter() throws IOException {
       final Writer realWriter = new Writer();
       writers.add(realWriter);
-      StoreFile.Writer writer = mock(StoreFile.Writer.class);
+      StoreFileWriter writer = mock(StoreFileWriter.class);
       doAnswer(new Answer() {
         public Object answer(InvocationOnMock invocation) {
           return realWriter.kvs.add((KeyValue) invocation.getArguments()[0]);
@@ -120,7 +122,7 @@ public class TestCompactor {
     }
     @Override
-    public StoreFile.Writer answer(InvocationOnMock invocation) throws Throwable {
+    public StoreFileWriter answer(InvocationOnMock invocation) throws Throwable {
       return createWriter();
     }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestDateTieredCompactor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestDateTieredCompactor.java
index 0c3c8b6..4af9575 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestDateTieredCompactor.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestDateTieredCompactor.java
@@ -100,7 +100,7 @@ public class TestDateTieredCompactor {
     when(store.areWritesEnabled()).thenReturn(true);
     when(store.getFileSystem()).thenReturn(mock(FileSystem.class));
     when(store.getRegionInfo()).thenReturn(new HRegionInfo(TABLE_NAME));
-    when(store.createWriterInTmp(anyLong(), any(Compression.Algorithm.class), anyBoolean(),
+    when(store.createWriterInTmpDir(anyLong(), any(Compression.Algorithm.class), anyBoolean(),
       anyBoolean(), anyBoolean(), anyBoolean())).thenAnswer(writers);
     when(store.getComparator()).thenReturn(new KVComparator());
     long maxSequenceId = StoreFile.getMaxSequenceIdInList(storefiles);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestStripeCompactionPolicy.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestStripeCompactionPolicy.java
index 4d47840..19eefc6 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestStripeCompactionPolicy.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestStripeCompactionPolicy.java
@@ -63,6 +63,7 @@ import org.apache.hadoop.hbase.regionserver.ScannerContext;
 import org.apache.hadoop.hbase.regionserver.Store;
 import org.apache.hadoop.hbase.regionserver.StoreConfigInformation;
 import org.apache.hadoop.hbase.regionserver.StoreFile;
+import org.apache.hadoop.hbase.regionserver.StoreFileReader;
 import org.apache.hadoop.hbase.regionserver.StoreFileScanner;
 import org.apache.hadoop.hbase.regionserver.StripeMultiFileWriter;
 import org.apache.hadoop.hbase.regionserver.StripeStoreConfig;
@@ -339,8 +340,8 @@ public class TestStripeCompactionPolicy {
     EnvironmentEdgeManager.injectEdge(edge);
     try {
       StoreFile expiredFile = createFile(), notExpiredFile = createFile();
-      when(expiredFile.getReader().getMaxTimestamp()).thenReturn(now - defaultTtl - 1);
-      when(notExpiredFile.getReader().getMaxTimestamp()).thenReturn(now - defaultTtl + 1);
+      when(expiredFile.getStoreFileReader().getMaxTimestamp()).thenReturn(now - defaultTtl - 1);
+      when(notExpiredFile.getStoreFileReader().getMaxTimestamp()).thenReturn(now - defaultTtl + 1);
       List expired = Lists.newArrayList(expiredFile, expiredFile);
       List notExpired = Lists.newArrayList(notExpiredFile, notExpiredFile);
       List mixed = Lists.newArrayList(expiredFile, notExpiredFile);
@@ -379,8 +380,8 @@ public class TestStripeCompactionPolicy {
     EnvironmentEdgeManager.injectEdge(edge);
     try {
       StoreFile expiredFile = createFile(), notExpiredFile = createFile();
-      when(expiredFile.getReader().getMaxTimestamp()).thenReturn(now - defaultTtl - 1);
-      when(notExpiredFile.getReader().getMaxTimestamp()).thenReturn(now - defaultTtl + 1);
+      when(expiredFile.getStoreFileReader().getMaxTimestamp()).thenReturn(now - defaultTtl - 1);
+      when(notExpiredFile.getStoreFileReader().getMaxTimestamp()).thenReturn(now - defaultTtl + 1);
       List expired = Lists.newArrayList(expiredFile, expiredFile);
       List notExpired = Lists.newArrayList(notExpiredFile, notExpiredFile);
@@ -733,16 +734,16 @@ public class TestStripeCompactionPolicy {
   private static StoreFile createFile(long size) throws Exception {
     StoreFile sf = mock(StoreFile.class);
     when(sf.getPath()).thenReturn(new Path("moo"));
-    StoreFile.Reader r = mock(StoreFile.Reader.class);
+    StoreFileReader r = mock(StoreFileReader.class);
     when(r.getEntries()).thenReturn(size);
     when(r.length()).thenReturn(size);
     when(r.getBloomFilterType()).thenReturn(BloomType.NONE);
     when(r.getHFileReader()).thenReturn(mock(HFile.Reader.class));
     when(r.getStoreFileScanner(anyBoolean(), anyBoolean(), anyBoolean(), anyLong())).thenReturn(
       mock(StoreFileScanner.class));
-    when(sf.getReader()).thenReturn(r);
-    when(sf.createReader(anyBoolean())).thenReturn(r);
-    when(sf.createReader()).thenReturn(r);
+    when(sf.getStoreFileReader()).thenReturn(r);
+    when(sf.createStoreFileReader(anyBoolean())).thenReturn(r);
+    when(sf.createStoreFileReader()).thenReturn(r);
     when(sf.cloneForReader()).thenReturn(sf);
     return sf;
   }
@@ -765,7 +766,7 @@ public class TestStripeCompactionPolicy {
     when(store.getFamily()).thenReturn(col);
     when(store.getRegionInfo()).thenReturn(info);
     when(
-      store.createWriterInTmp(anyLong(), any(Compression.Algorithm.class), anyBoolean(),
+      store.createWriterInTmpDir(anyLong(), any(Compression.Algorithm.class), anyBoolean(),
        anyBoolean(), anyBoolean(), anyBoolean())).thenAnswer(writers);
     Configuration conf = HBaseConfiguration.create();
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestStripeCompactor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestStripeCompactor.java
index cee118f..a209928 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestStripeCompactor.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestStripeCompactor.java
@@ -201,7 +201,7 @@ public class TestStripeCompactor {
     when(store.areWritesEnabled()).thenReturn(true);
     when(store.getFileSystem()).thenReturn(mock(FileSystem.class));
     when(store.getRegionInfo()).thenReturn(new HRegionInfo(TABLE_NAME));
-    when(store.createWriterInTmp(anyLong(), any(Compression.Algorithm.class), anyBoolean(),
+    when(store.createWriterInTmpDir(anyLong(), any(Compression.Algorithm.class), anyBoolean(),
       anyBoolean(), anyBoolean(), anyBoolean())).thenAnswer(writers);
     when(store.getComparator()).thenReturn(new KVComparator());
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabels.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabels.java
index a15db00..7d11dee 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabels.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabels.java
@@ -39,7 +39,6 @@ import org.apache.hadoop.hbase.CellScanner;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Admin;
@@ -848,7 +847,7 @@ public abstract class TestVisibilityLabels {
       Collection storefiles = store.getStorefiles();
       assertTrue(storefiles.size() > 0);
       for (StoreFile storeFile : storefiles) {
-        assertTrue(storeFile.getReader().getHFileReader().getFileContext().isIncludesTags());
+        assertTrue(storeFile.getStoreFileReader().getHFileReader().getFileContext().isIncludesTags());
       }
     }
-- 
2.3.2 (Apple Git-55)
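
Reviewer note, not part of the patch: the snippet below is a minimal usage sketch of the renamed classes, mirroring the test updates above. It assumes the post-patch tree; the class name StoreFileReadWriteSketch, the /tmp path, and the cell values are invented purely for illustration.

    import java.io.IOException;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.KeyValue;
    import org.apache.hadoop.hbase.io.hfile.CacheConfig;
    import org.apache.hadoop.hbase.io.hfile.HFileContext;
    import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
    import org.apache.hadoop.hbase.regionserver.StoreFileReader;
    import org.apache.hadoop.hbase.regionserver.StoreFileScanner;
    import org.apache.hadoop.hbase.regionserver.StoreFileWriter;
    import org.apache.hadoop.hbase.util.Bytes;

    public class StoreFileReadWriteSketch {
      public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        FileSystem fs = FileSystem.get(conf);
        CacheConfig cacheConf = new CacheConfig(conf);
        Path path = new Path("/tmp/storefile-sketch"); // illustrative path only

        // Write side: StoreFile.WriterBuilder is now StoreFileWriter.Builder.
        HFileContext meta = new HFileContextBuilder().withBlockSize(8 * 1024).build();
        StoreFileWriter writer = new StoreFileWriter.Builder(conf, cacheConf, fs)
            .withFilePath(path)
            .withFileContext(meta)
            .build();
        long now = System.currentTimeMillis();
        writer.append(new KeyValue(Bytes.toBytes("row"), Bytes.toBytes("f"),
            Bytes.toBytes("q"), now, Bytes.toBytes("value")));
        writer.close();

        // Read side: StoreFile.Reader is now the top-level StoreFileReader.
        StoreFileReader reader = new StoreFileReader(fs, path, cacheConf, conf);
        reader.loadFileInfo();
        StoreFileScanner scanner = reader.getStoreFileScanner(false, false);
        scanner.seek(KeyValue.LOWESTKEY);
        while (scanner.next() != null) {
          // consume cells
        }
        scanner.close();
        reader.close(cacheConf.shouldEvictOnClose());
      }
    }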