From bc0d2f1b8c22d95380ecef95f859a8e53ab9a950 Mon Sep 17 00:00:00 2001 From: Apekshit Date: Fri, 19 Feb 2016 19:17:46 -0800 Subject: [PATCH] HBASE-15296 Break out writer and reader from StoreFile. Done using Intellij15 Refactor > Move. (Apekshit) --- .../hbase/coprocessor/BaseRegionObserver.java | 10 +- .../hadoop/hbase/coprocessor/RegionObserver.java | 9 +- .../hadoop/hbase/io/HalfStoreFileReader.java | 4 +- .../hadoop/hbase/mapreduce/HFileOutputFormat2.java | 7 +- .../hbase/mapreduce/LoadIncrementalHFiles.java | 9 +- .../hbase/regionserver/DefaultStoreFlusher.java | 2 +- .../apache/hadoop/hbase/regionserver/HRegion.java | 2 +- .../apache/hadoop/hbase/regionserver/HStore.java | 24 +- .../hbase/regionserver/RegionCoprocessorHost.java | 10 +- .../apache/hadoop/hbase/regionserver/Store.java | 4 +- .../hadoop/hbase/regionserver/StoreFile.java | 1001 +------------------- .../hadoop/hbase/regionserver/StoreFileInfo.java | 6 +- .../hadoop/hbase/regionserver/StoreFileReader.java | 559 +++++++++++ .../hbase/regionserver/StoreFileScanner.java | 9 +- .../hadoop/hbase/regionserver/StoreFileWriter.java | 458 +++++++++ .../hadoop/hbase/regionserver/StoreFlusher.java | 2 +- .../hadoop/hbase/regionserver/StoreUtils.java | 2 +- .../hbase/regionserver/StripeMultiFileWriter.java | 16 +- .../hbase/regionserver/StripeStoreFlusher.java | 6 +- .../compactions/CompactionRequest.java | 4 +- .../hbase/regionserver/compactions/Compactor.java | 3 +- .../regionserver/compactions/DefaultCompactor.java | 3 +- .../regionserver/compactions/StripeCompactor.java | 4 +- .../hbase/util/CompoundBloomFilterWriter.java | 3 +- .../hbase/coprocessor/SimpleRegionObserver.java | 10 +- .../hadoop/hbase/io/hfile/TestCacheOnWrite.java | 4 +- .../apache/hadoop/hbase/io/hfile/TestPrefetch.java | 4 +- .../io/hfile/TestSeekBeforeWithInlineBlocks.java | 6 +- .../hbase/regionserver/CreateRandomStoreFile.java | 2 +- .../hbase/regionserver/DataBlockEncodingTool.java | 2 +- .../regionserver/EncodedSeekPerformanceTest.java | 4 +- .../hadoop/hbase/regionserver/MockStoreFile.java | 4 +- .../regionserver/TestCacheOnWriteInSchema.java | 4 +- .../hadoop/hbase/regionserver/TestCompaction.java | 2 +- .../regionserver/TestCompoundBloomFilter.java | 4 +- .../hbase/regionserver/TestFSErrorsExposed.java | 6 +- .../hadoop/hbase/regionserver/TestHRegion.java | 4 +- .../hbase/regionserver/TestScanWithBloomError.java | 2 +- .../hadoop/hbase/regionserver/TestStore.java | 6 +- .../hadoop/hbase/regionserver/TestStoreFile.java | 61 +- .../TestStoreFileScannerWithTagCompression.java | 6 +- .../hbase/regionserver/TestStripeCompactor.java | 10 +- .../hbase/regionserver/TestStripeStoreEngine.java | 2 +- .../compactions/MockStoreFileGenerator.java | 4 +- .../compactions/TestStripeCompactionPolicy.java | 3 +- 45 files changed, 1168 insertions(+), 1139 deletions(-) create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileReader.java create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileWriter.java diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseRegionObserver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseRegionObserver.java index d1ec1b3..8230c6f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseRegionObserver.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseRegionObserver.java @@ -55,7 +55,7 @@ import org.apache.hadoop.hbase.regionserver.RegionScanner; import 
org.apache.hadoop.hbase.regionserver.ScanType; import org.apache.hadoop.hbase.regionserver.Store; import org.apache.hadoop.hbase.regionserver.StoreFile; -import org.apache.hadoop.hbase.regionserver.StoreFile.Reader; +import org.apache.hadoop.hbase.regionserver.StoreFileReader; import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest; import org.apache.hadoop.hbase.regionserver.wal.HLogKey; import org.apache.hadoop.hbase.wal.WALKey; @@ -489,16 +489,16 @@ public abstract class BaseRegionObserver implements RegionObserver { } @Override - public Reader preStoreFileReaderOpen(ObserverContext ctx, + public StoreFileReader preStoreFileReaderOpen(ObserverContext ctx, FileSystem fs, Path p, FSDataInputStreamWrapper in, long size, CacheConfig cacheConf, - Reference r, Reader reader) throws IOException { + Reference r, StoreFileReader reader) throws IOException { return reader; } @Override - public Reader postStoreFileReaderOpen(ObserverContext ctx, + public StoreFileReader postStoreFileReaderOpen(ObserverContext ctx, FileSystem fs, Path p, FSDataInputStreamWrapper in, long size, CacheConfig cacheConf, - Reference r, Reader reader) throws IOException { + Reference r, StoreFileReader reader) throws IOException { return reader; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java index 8c5c15a..f082f20 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java @@ -55,6 +55,7 @@ import org.apache.hadoop.hbase.regionserver.RegionScanner; import org.apache.hadoop.hbase.regionserver.ScanType; import org.apache.hadoop.hbase.regionserver.Store; import org.apache.hadoop.hbase.regionserver.StoreFile; +import org.apache.hadoop.hbase.regionserver.StoreFileReader; import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest; import org.apache.hadoop.hbase.regionserver.wal.HLogKey; import org.apache.hadoop.hbase.wal.WALKey; @@ -1215,9 +1216,9 @@ public interface RegionObserver extends Coprocessor { * default behavior, null otherwise * @throws IOException */ - StoreFile.Reader preStoreFileReaderOpen(final ObserverContext ctx, + StoreFileReader preStoreFileReaderOpen(final ObserverContext ctx, final FileSystem fs, final Path p, final FSDataInputStreamWrapper in, long size, - final CacheConfig cacheConf, final Reference r, StoreFile.Reader reader) throws IOException; + final CacheConfig cacheConf, final Reference r, StoreFileReader reader) throws IOException; /** * Called after the creation of Reader for a store file. 
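For orientation, a minimal sketch of a coprocessor written against the renamed hooks above; the ObserverContext<RegionCoprocessorEnvironment> type parameter and the ExampleReaderHookObserver class name are assumptions for illustration and are not part of this patch.

import java.io.IOException;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.coprocessor.BaseRegionObserver;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.io.FSDataInputStreamWrapper;
import org.apache.hadoop.hbase.io.Reference;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.regionserver.StoreFileReader;

public class ExampleReaderHookObserver extends BaseRegionObserver {
  @Override
  public StoreFileReader preStoreFileReaderOpen(
      ObserverContext<RegionCoprocessorEnvironment> ctx, FileSystem fs, Path p,
      FSDataInputStreamWrapper in, long size, CacheConfig cacheConf, Reference r,
      StoreFileReader reader) throws IOException {
    // Mirrors BaseRegionObserver: returning the passed-in reader leaves the
    // default reader-creation path unchanged.
    return reader;
  }

  @Override
  public StoreFileReader postStoreFileReaderOpen(
      ObserverContext<RegionCoprocessorEnvironment> ctx, FileSystem fs, Path p,
      FSDataInputStreamWrapper in, long size, CacheConfig cacheConf, Reference r,
      StoreFileReader reader) throws IOException {
    // The StoreFileReader returned here is the one the region server will use.
    return reader;
  }
}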
@@ -1233,9 +1234,9 @@ public interface RegionObserver extends Coprocessor { * @return The reader to use * @throws IOException */ - StoreFile.Reader postStoreFileReaderOpen(final ObserverContext ctx, + StoreFileReader postStoreFileReaderOpen(final ObserverContext ctx, final FileSystem fs, final Path p, final FSDataInputStreamWrapper in, long size, - final CacheConfig cacheConf, final Reference r, StoreFile.Reader reader) throws IOException; + final CacheConfig cacheConf, final Reference r, StoreFileReader reader) throws IOException; /** * Called after a new cell has been created during an increment operation, but before diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HalfStoreFileReader.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HalfStoreFileReader.java index ed2e925..1707df4 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HalfStoreFileReader.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HalfStoreFileReader.java @@ -33,7 +33,7 @@ import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.io.hfile.CacheConfig; import org.apache.hadoop.hbase.io.hfile.HFileScanner; -import org.apache.hadoop.hbase.regionserver.StoreFile; +import org.apache.hadoop.hbase.regionserver.StoreFileReader; import org.apache.hadoop.hbase.util.Bytes; /** @@ -50,7 +50,7 @@ import org.apache.hadoop.hbase.util.Bytes; *

This file is not splitable. Calls to {@link #midkey()} return null. */ @InterfaceAudience.Private -public class HalfStoreFileReader extends StoreFile.Reader { +public class HalfStoreFileReader extends StoreFileReader { private static final Log LOG = LogFactory.getLog(HalfStoreFileReader.class); final boolean top; // This is the key we split around. Its the first possible entry on a row: diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java index 338c6e8..50c3fdc 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java @@ -59,6 +59,7 @@ import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder; import org.apache.hadoop.hbase.regionserver.BloomType; import org.apache.hadoop.hbase.regionserver.HStore; import org.apache.hadoop.hbase.regionserver.StoreFile; +import org.apache.hadoop.hbase.regionserver.StoreFileWriter; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.io.NullWritable; import org.apache.hadoop.io.SequenceFile; @@ -246,7 +247,7 @@ public class HFileOutputFormat2 contextBuilder.withDataBlockEncoding(encoding); HFileContext hFileContext = contextBuilder.build(); - wl.writer = new StoreFile.WriterBuilder(conf, new CacheConfig(tempConf), fs) + wl.writer = new StoreFileWriter.Builder(conf, new CacheConfig(tempConf), fs) .withOutputDir(familydir).withBloomType(bloomType) .withComparator(KeyValue.COMPARATOR) .withFileContext(hFileContext).build(); @@ -255,7 +256,7 @@ public class HFileOutputFormat2 return wl; } - private void close(final StoreFile.Writer w) throws IOException { + private void close(final StoreFileWriter w) throws IOException { if (w != null) { w.appendFileInfo(StoreFile.BULKLOAD_TIME_KEY, Bytes.toBytes(System.currentTimeMillis())); @@ -285,7 +286,7 @@ public class HFileOutputFormat2 */ static class WriterLength { long written = 0; - StoreFile.Writer writer = null; + StoreFileWriter writer = null; } /** diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java index 752a639..95b34c4 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java @@ -38,8 +38,6 @@ import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HTableDescriptor; -import org.apache.hadoop.hbase.KeyValue; -import org.apache.hadoop.hbase.KeyValueUtil; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableNotFoundException; import org.apache.hadoop.hbase.classification.InterfaceAudience; @@ -51,7 +49,6 @@ import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.HBaseAdmin; import org.apache.hadoop.hbase.client.HConnection; import org.apache.hadoop.hbase.client.HTable; -import org.apache.hadoop.hbase.client.NeedUnmanagedConnectionException; import org.apache.hadoop.hbase.client.RegionLocator; import org.apache.hadoop.hbase.client.RegionServerCallable; import org.apache.hadoop.hbase.client.RpcRetryingCallerFactory; @@ -71,8 +68,8 @@ import org.apache.hadoop.hbase.io.hfile.HFileScanner; 
import org.apache.hadoop.hbase.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.regionserver.BloomType; import org.apache.hadoop.hbase.regionserver.HStore; -import org.apache.hadoop.hbase.regionserver.StoreFile; import org.apache.hadoop.hbase.regionserver.StoreFileInfo; +import org.apache.hadoop.hbase.regionserver.StoreFileWriter; import org.apache.hadoop.hbase.security.UserProvider; import org.apache.hadoop.hbase.security.token.FsDelegationToken; import org.apache.hadoop.hbase.util.Bytes; @@ -874,7 +871,7 @@ public class LoadIncrementalHFiles extends Configured implements Tool { FileSystem fs = inFile.getFileSystem(conf); CacheConfig cacheConf = new CacheConfig(conf); HalfStoreFileReader halfReader = null; - StoreFile.Writer halfWriter = null; + StoreFileWriter halfWriter = null; try { halfReader = new HalfStoreFileReader(fs, inFile, cacheConf, reference, conf); Map fileInfo = halfReader.loadFileInfo(); @@ -890,7 +887,7 @@ public class LoadIncrementalHFiles extends Configured implements Tool { .withDataBlockEncoding(familyDescriptor.getDataBlockEncoding()) .withIncludesTags(true) .build(); - halfWriter = new StoreFile.WriterBuilder(conf, cacheConf, + halfWriter = new StoreFileWriter.Builder(conf, cacheConf, fs) .withFilePath(outFile) .withBloomType(bloomFilterType) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultStoreFlusher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultStoreFlusher.java index da89129..10e51c5 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultStoreFlusher.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultStoreFlusher.java @@ -56,7 +56,7 @@ public class DefaultStoreFlusher extends StoreFlusher { return result; // NULL scanner returned from coprocessor hooks means skip normal processing } - StoreFile.Writer writer; + StoreFileWriter writer; try { // TODO: We can fail in the below block before we complete adding this flush to // list of store files. Add cleanup of anything put on filesystem if we fail. 
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java index 0aca49c..29a5f57 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java @@ -1644,7 +1644,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi Collection storeFiles = store.getStorefiles(); if (storeFiles == null) continue; for (StoreFile file : storeFiles) { - StoreFile.Reader sfReader = file.getReader(); + StoreFileReader sfReader = file.getReader(); if (sfReader == null) continue; HFile.Reader reader = sfReader.getHFileReader(); if (reader == null) continue; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java index a19407a..c08e1b6 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java @@ -650,7 +650,7 @@ public class HStore implements Store { info.setRegionCoprocessorHost(this.region.getCoprocessorHost()); StoreFile storeFile = new StoreFile(this.getFileSystem(), info, this.conf, this.cacheConf, this.family.getBloomFilterType()); - StoreFile.Reader r = storeFile.createReader(); + StoreFileReader r = storeFile.createReader(); r.setReplicaStoreFile(isPrimaryReplicaStore()); return storeFile; } @@ -798,7 +798,7 @@ public class HStore implements Store { } private void bulkLoadHFile(StoreFile sf) throws IOException { - StoreFile.Reader r = sf.getReader(); + StoreFileReader r = sf.getReader(); this.storeSize += r.length(); this.totalUncompressedBytes += r.getTotalUncompressedBytes(); @@ -960,7 +960,7 @@ public class HStore implements Store { status.setStatus("Flushing " + this + ": reopening flushed file"); StoreFile sf = createStoreFileAndReader(dstPath); - StoreFile.Reader r = sf.getReader(); + StoreFileReader r = sf.getReader(); this.storeSize += r.length(); this.totalUncompressedBytes += r.getTotalUncompressedBytes(); @@ -973,7 +973,7 @@ public class HStore implements Store { } @Override - public StoreFile.Writer createWriterInTmp(long maxKeyCount, Compression.Algorithm compression, + public StoreFileWriter createWriterInTmp(long maxKeyCount, Compression.Algorithm compression, boolean isCompaction, boolean includeMVCCReadpoint, boolean includesTag) throws IOException { @@ -990,7 +990,7 @@ public class HStore implements Store { * @return Writer for a new StoreFile in the tmp dir. 
*/ @Override - public StoreFile.Writer createWriterInTmp(long maxKeyCount, Compression.Algorithm compression, + public StoreFileWriter createWriterInTmp(long maxKeyCount, Compression.Algorithm compression, boolean isCompaction, boolean includeMVCCReadpoint, boolean includesTag, boolean shouldDropBehind) throws IOException { @@ -1009,7 +1009,7 @@ public class HStore implements Store { } HFileContext hFileContext = createFileContext(compression, includeMVCCReadpoint, includesTag, cryptoContext); - StoreFile.Writer w = new StoreFile.WriterBuilder(conf, writerCacheConf, + StoreFileWriter w = new StoreFileWriter.Builder(conf, writerCacheConf, this.getFileSystem()) .withFilePath(fs.createTempName()) .withComparator(comparator) @@ -1800,7 +1800,7 @@ public class HStore implements Store { this.storeSize = 0L; this.totalUncompressedBytes = 0L; for (StoreFile hsf : this.storeEngine.getStoreFileManager().getStorefiles()) { - StoreFile.Reader r = hsf.getReader(); + StoreFileReader r = hsf.getReader(); if (r == null) { LOG.warn("StoreFile " + hsf + " has a null Reader"); continue; @@ -1909,7 +1909,7 @@ public class HStore implements Store { private boolean rowAtOrBeforeFromStoreFile(final StoreFile f, final GetClosestRowBeforeTracker state) throws IOException { - StoreFile.Reader r = f.getReader(); + StoreFileReader r = f.getReader(); if (r == null) { LOG.warn("StoreFile " + f + " has a null Reader"); return false; @@ -2101,7 +2101,7 @@ public class HStore implements Store { public long getStorefilesSize() { long size = 0; for (StoreFile s: this.storeEngine.getStoreFileManager().getStorefiles()) { - StoreFile.Reader r = s.getReader(); + StoreFileReader r = s.getReader(); if (r == null) { LOG.warn("StoreFile " + s + " has a null Reader"); continue; @@ -2115,7 +2115,7 @@ public class HStore implements Store { public long getStorefilesIndexSize() { long size = 0; for (StoreFile s: this.storeEngine.getStoreFileManager().getStorefiles()) { - StoreFile.Reader r = s.getReader(); + StoreFileReader r = s.getReader(); if (r == null) { LOG.warn("StoreFile " + s + " has a null Reader"); continue; @@ -2129,7 +2129,7 @@ public class HStore implements Store { public long getTotalStaticIndexSize() { long size = 0; for (StoreFile s : this.storeEngine.getStoreFileManager().getStorefiles()) { - StoreFile.Reader r = s.getReader(); + StoreFileReader r = s.getReader(); if (r == null) { continue; } @@ -2142,7 +2142,7 @@ public class HStore implements Store { public long getTotalStaticBloomSize() { long size = 0; for (StoreFile s : this.storeEngine.getStoreFileManager().getStorefiles()) { - StoreFile.Reader r = s.getReader(); + StoreFileReader r = s.getReader(); if (r == null) { continue; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java index 814370c..30bcba3 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java @@ -1569,11 +1569,11 @@ public class RegionCoprocessorHost * default behavior, null otherwise * @throws IOException */ - public StoreFile.Reader preStoreFileReaderOpen(final FileSystem fs, final Path p, + public StoreFileReader preStoreFileReaderOpen(final FileSystem fs, final Path p, final FSDataInputStreamWrapper in, final long size, final CacheConfig cacheConf, final Reference r) throws IOException { return 
execOperationWithResult(null, - coprocessors.isEmpty() ? null : new RegionOperationWithResult() { + coprocessors.isEmpty() ? null : new RegionOperationWithResult() { @Override public void call(RegionObserver oserver, ObserverContext ctx) throws IOException { @@ -1593,11 +1593,11 @@ public class RegionCoprocessorHost * @return The reader to use * @throws IOException */ - public StoreFile.Reader postStoreFileReaderOpen(final FileSystem fs, final Path p, + public StoreFileReader postStoreFileReaderOpen(final FileSystem fs, final Path p, final FSDataInputStreamWrapper in, final long size, final CacheConfig cacheConf, - final Reference r, final StoreFile.Reader reader) throws IOException { + final Reference r, final StoreFileReader reader) throws IOException { return execOperationWithResult(reader, - coprocessors.isEmpty() ? null : new RegionOperationWithResult() { + coprocessors.isEmpty() ? null : new RegionOperationWithResult() { @Override public void call(RegionObserver oserver, ObserverContext ctx) throws IOException { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java index 9f17526..baef7b7 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java @@ -167,7 +167,7 @@ public interface Store extends HeapSize, StoreConfigInformation, PropagatingConf * @param includeMVCCReadpoint whether we should out the MVCC readpoint * @return Writer for a new StoreFile in the tmp dir. */ - StoreFile.Writer createWriterInTmp( + StoreFileWriter createWriterInTmp( long maxKeyCount, Compression.Algorithm compression, boolean isCompaction, @@ -183,7 +183,7 @@ public interface Store extends HeapSize, StoreConfigInformation, PropagatingConf * @param shouldDropBehind should the writer drop caches behind writes * @return Writer for a new StoreFile in the tmp dir. 
*/ - StoreFile.Writer createWriterInTmp( + StoreFileWriter createWriterInTmp( long maxKeyCount, Compression.Algorithm compression, boolean isCompaction, diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java index 46a64f2..bba2213 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java @@ -18,16 +18,11 @@ */ package org.apache.hadoop.hbase.regionserver; -import java.io.DataInput; import java.io.IOException; -import java.net.InetSocketAddress; -import java.nio.ByteBuffer; -import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.Comparator; import java.util.Map; -import java.util.SortedSet; import java.util.UUID; import java.util.concurrent.atomic.AtomicBoolean; @@ -36,40 +31,25 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hbase.Cell; -import org.apache.hadoop.hbase.CellUtil; -import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HDFSBlocksDistribution; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.KeyValue.KVComparator; -import org.apache.hadoop.hbase.KeyValueUtil; import org.apache.hadoop.hbase.classification.InterfaceAudience; -import org.apache.hadoop.hbase.client.Scan; -import org.apache.hadoop.hbase.io.FSDataInputStreamWrapper; -import org.apache.hadoop.hbase.io.TimeRange; import org.apache.hadoop.hbase.io.hfile.BlockType; import org.apache.hadoop.hbase.io.hfile.CacheConfig; -import org.apache.hadoop.hbase.io.hfile.HFile; -import org.apache.hadoop.hbase.io.hfile.HFileContext; -import org.apache.hadoop.hbase.io.hfile.HFileScanner; import org.apache.hadoop.hbase.io.hfile.HFileWriterV2; -import org.apache.hadoop.hbase.regionserver.compactions.Compactor; -import org.apache.hadoop.hbase.util.BloomFilter; import org.apache.hadoop.hbase.util.BloomFilterFactory; -import org.apache.hadoop.hbase.util.BloomFilterWriter; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Writables; -import org.apache.hadoop.io.WritableUtils; import com.google.common.base.Function; -import com.google.common.base.Preconditions; import com.google.common.collect.ImmutableList; import com.google.common.collect.Ordering; /** * A Store data file. Stores usually have one or more of these files. They * are produced by flushing the memstore to disk. To - * create, instantiate a writer using {@link StoreFile.WriterBuilder} + * create, instantiate a writer using {@link StoreFileWriter.Builder} * and append data. Be sure to add any metadata before calling close on the * Writer (Use the appendMetadata convenience methods). On close, a StoreFile * is sitting in the Filesystem. 
To refer to it, create a StoreFile instance @@ -105,7 +85,7 @@ public class StoreFile { Bytes.toBytes("DELETE_FAMILY_COUNT"); /** Last Bloom filter key in FileInfo */ - private static final byte[] LAST_BLOOM_KEY = Bytes.toBytes("LAST_BLOOM_KEY"); + public static final byte[] LAST_BLOOM_KEY = Bytes.toBytes("LAST_BLOOM_KEY"); /** Key for Timerange information in metadata*/ public static final byte[] TIMERANGE_KEY = Bytes.toBytes("TIMERANGE"); @@ -178,7 +158,7 @@ public class StoreFile { private Map metadataMap; // StoreFile.Reader - private volatile Reader reader; + private volatile StoreFileReader reader; /** * Bloom filter type specified in column family configuration. Does not @@ -390,7 +370,7 @@ public class StoreFile { * @throws IOException * @see #closeReader(boolean) */ - private Reader open(boolean canUseDropBehind) throws IOException { + private StoreFileReader open(boolean canUseDropBehind) throws IOException { if (this.reader != null) { throw new IllegalAccessError("Already open"); } @@ -490,7 +470,7 @@ public class StoreFile { return this.reader; } - public Reader createReader() throws IOException { + public StoreFileReader createReader() throws IOException { return createReader(false); } @@ -498,7 +478,7 @@ public class StoreFile { * @return Reader for StoreFile. creates if necessary * @throws IOException */ - public Reader createReader(boolean canUseDropBehind) throws IOException { + public StoreFileReader createReader(boolean canUseDropBehind) throws IOException { if (this.reader == null) { try { this.reader = open(canUseDropBehind); @@ -520,7 +500,7 @@ public class StoreFile { * @return Current reader. Must call createReader first else returns null. * @see #createReader() */ - public Reader getReader() { + public StoreFileReader getReader() { return this.reader; } @@ -570,125 +550,6 @@ public class StoreFile { return sb.toString(); } - @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="ICAST_INTEGER_MULTIPLY_CAST_TO_LONG", - justification="Will not overflow") - public static class WriterBuilder { - private final Configuration conf; - private final CacheConfig cacheConf; - private final FileSystem fs; - - private KeyValue.KVComparator comparator = KeyValue.COMPARATOR; - private BloomType bloomType = BloomType.NONE; - private long maxKeyCount = 0; - private Path dir; - private Path filePath; - private InetSocketAddress[] favoredNodes; - private HFileContext fileContext; - - public WriterBuilder(Configuration conf, CacheConfig cacheConf, - FileSystem fs) { - this.conf = conf; - this.cacheConf = cacheConf; - this.fs = fs; - } - - /** - * Use either this method or {@link #withFilePath}, but not both. - * @param dir Path to column family directory. The directory is created if - * does not exist. The file is given a unique name within this - * directory. - * @return this (for chained invocation) - */ - public WriterBuilder withOutputDir(Path dir) { - Preconditions.checkNotNull(dir); - this.dir = dir; - return this; - } - - /** - * Use either this method or {@link #withOutputDir}, but not both. 
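As a hedged usage sketch (not part of the patch), the relocated builder is driven the same way as the HFileOutputFormat2 and HStore call sites above; the writeFile() helper, its parameters, and the BloomType.ROW choice are illustrative assumptions.

import java.io.IOException;
import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.io.hfile.HFileContext;
import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.regionserver.StoreFileWriter;

public class StoreFileWriterExample {
  static Path writeFile(Configuration conf, FileSystem fs, Path familyDir,
      List<Cell> sortedCells, long maxSeqId) throws IOException {
    HFileContext fileContext = new HFileContextBuilder().build();
    // Use either withOutputDir or withFilePath, but not both.
    StoreFileWriter writer = new StoreFileWriter.Builder(conf, new CacheConfig(conf), fs)
        .withOutputDir(familyDir)
        .withComparator(KeyValue.COMPARATOR)
        .withBloomType(BloomType.ROW)
        .withMaxKeyCount(sortedCells.size())
        .withFileContext(fileContext)
        .build();
    try {
      for (Cell cell : sortedCells) {
        writer.append(cell);
      }
      // Metadata must be added before close(); it is written into the file's FileInfo.
      writer.appendMetadata(maxSeqId, false);
    } finally {
      writer.close();
    }
    return writer.getPath();
  }
}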
- * @param filePath the StoreFile path to write - * @return this (for chained invocation) - */ - public WriterBuilder withFilePath(Path filePath) { - Preconditions.checkNotNull(filePath); - this.filePath = filePath; - return this; - } - - /** - * @param favoredNodes an array of favored nodes or possibly null - * @return this (for chained invocation) - */ - public WriterBuilder withFavoredNodes(InetSocketAddress[] favoredNodes) { - this.favoredNodes = favoredNodes; - return this; - } - - public WriterBuilder withComparator(KeyValue.KVComparator comparator) { - Preconditions.checkNotNull(comparator); - this.comparator = comparator; - return this; - } - - public WriterBuilder withBloomType(BloomType bloomType) { - Preconditions.checkNotNull(bloomType); - this.bloomType = bloomType; - return this; - } - - /** - * @param maxKeyCount estimated maximum number of keys we expect to add - * @return this (for chained invocation) - */ - public WriterBuilder withMaxKeyCount(long maxKeyCount) { - this.maxKeyCount = maxKeyCount; - return this; - } - - public WriterBuilder withFileContext(HFileContext fileContext) { - this.fileContext = fileContext; - return this; - } - - public WriterBuilder withShouldDropCacheBehind(boolean shouldDropCacheBehind/*NOT USED!!*/) { - // TODO: HAS NO EFFECT!!! FIX!! - return this; - } - /** - * Create a store file writer. Client is responsible for closing file when - * done. If metadata, add BEFORE closing using - * {@link Writer#appendMetadata}. - */ - public Writer build() throws IOException { - if ((dir == null ? 0 : 1) + (filePath == null ? 0 : 1) != 1) { - throw new IllegalArgumentException("Either specify parent directory " + - "or file path"); - } - - if (dir == null) { - dir = filePath.getParent(); - } - - if (!fs.exists(dir)) { - fs.mkdirs(dir); - } - - if (filePath == null) { - filePath = getUniqueFile(fs, dir); - if (!BloomFilterFactory.isGeneralBloomEnabled(conf)) { - bloomType = BloomType.NONE; - } - } - - if (comparator == null) { - comparator = KeyValue.COMPARATOR; - } - return new Writer(fs, filePath, - conf, cacheConf, comparator, bloomType, maxKeyCount, favoredNodes, fileContext); - } - } - /** * @param fs * @param dir Directory to create file in. @@ -743,854 +604,6 @@ public class StoreFile { } /** - * A StoreFile writer. Use this to read/write HBase Store Files. It is package - * local because it is an implementation detail of the HBase regionserver. - */ - public static class Writer implements Compactor.CellSink { - private final BloomFilterWriter generalBloomFilterWriter; - private final BloomFilterWriter deleteFamilyBloomFilterWriter; - private final BloomType bloomType; - private byte[] lastBloomKey; - private int lastBloomKeyOffset, lastBloomKeyLen; - private KVComparator kvComparator; - private Cell lastCell = null; - private long earliestPutTs = HConstants.LATEST_TIMESTAMP; - private Cell lastDeleteFamilyCell = null; - private long deleteFamilyCnt = 0; - - TimeRangeTracker timeRangeTracker = new TimeRangeTracker(); - /* isTimeRangeTrackerSet keeps track if the timeRange has already been set - * When flushing a memstore, we set TimeRange and use this variable to - * indicate that it doesn't need to be calculated again while - * appending KeyValues. - * It is not set in cases of compactions when it is recalculated using only - * the appended KeyValues*/ - boolean isTimeRangeTrackerSet = false; - - protected HFile.Writer writer; - - /** - * Creates an HFile.Writer that also write helpful meta data. 
- * @param fs file system to write to - * @param path file name to create - * @param conf user configuration - * @param comparator key comparator - * @param bloomType bloom filter setting - * @param maxKeys the expected maximum number of keys to be added. Was used - * for Bloom filter size in {@link HFile} format version 1. - * @param favoredNodes - * @param fileContext - The HFile context - * @throws IOException problem writing to FS - */ - private Writer(FileSystem fs, Path path, - final Configuration conf, - CacheConfig cacheConf, - final KVComparator comparator, BloomType bloomType, long maxKeys, - InetSocketAddress[] favoredNodes, HFileContext fileContext) - throws IOException { - writer = HFile.getWriterFactory(conf, cacheConf) - .withPath(fs, path) - .withComparator(comparator) - .withFavoredNodes(favoredNodes) - .withFileContext(fileContext) - .create(); - - this.kvComparator = comparator; - - generalBloomFilterWriter = BloomFilterFactory.createGeneralBloomAtWrite( - conf, cacheConf, bloomType, - (int) Math.min(maxKeys, Integer.MAX_VALUE), writer); - - if (generalBloomFilterWriter != null) { - this.bloomType = bloomType; - if (LOG.isTraceEnabled()) LOG.trace("Bloom filter type for " + path + ": " + - this.bloomType + ", " + generalBloomFilterWriter.getClass().getSimpleName()); - } else { - // Not using Bloom filters. - this.bloomType = BloomType.NONE; - } - - // initialize delete family Bloom filter when there is NO RowCol Bloom - // filter - if (this.bloomType != BloomType.ROWCOL) { - this.deleteFamilyBloomFilterWriter = BloomFilterFactory - .createDeleteBloomAtWrite(conf, cacheConf, - (int) Math.min(maxKeys, Integer.MAX_VALUE), writer); - } else { - deleteFamilyBloomFilterWriter = null; - } - if (deleteFamilyBloomFilterWriter != null) { - if (LOG.isTraceEnabled()) LOG.trace("Delete Family Bloom filter type for " + path + ": " - + deleteFamilyBloomFilterWriter.getClass().getSimpleName()); - } - } - - /** - * Writes meta data. - * Call before {@link #close()} since its written as meta data to this file. - * @param maxSequenceId Maximum sequence id. - * @param majorCompaction True if this file is product of a major compaction - * @throws IOException problem writing to FS - */ - public void appendMetadata(final long maxSequenceId, final boolean majorCompaction) - throws IOException { - writer.appendFileInfo(MAX_SEQ_ID_KEY, Bytes.toBytes(maxSequenceId)); - writer.appendFileInfo(MAJOR_COMPACTION_KEY, - Bytes.toBytes(majorCompaction)); - appendTrackedTimestampsToMetadata(); - } - - /** - * Add TimestampRange and earliest put timestamp to Metadata - */ - public void appendTrackedTimestampsToMetadata() throws IOException { - appendFileInfo(TIMERANGE_KEY,WritableUtils.toByteArray(timeRangeTracker)); - appendFileInfo(EARLIEST_PUT_TS, Bytes.toBytes(earliestPutTs)); - } - - /** - * Set TimeRangeTracker - * @param trt - */ - public void setTimeRangeTracker(final TimeRangeTracker trt) { - this.timeRangeTracker = trt; - isTimeRangeTrackerSet = true; - } - - /** - * Record the earlest Put timestamp. 
- * - * If the timeRangeTracker is not set, - * update TimeRangeTracker to include the timestamp of this key - * @param cell - */ - public void trackTimestamps(final Cell cell) { - if (KeyValue.Type.Put.getCode() == cell.getTypeByte()) { - earliestPutTs = Math.min(earliestPutTs, cell.getTimestamp()); - } - if (!isTimeRangeTrackerSet) { - timeRangeTracker.includeTimestamp(cell); - } - } - - private void appendGeneralBloomfilter(final Cell cell) throws IOException { - if (this.generalBloomFilterWriter != null) { - // only add to the bloom filter on a new, unique key - boolean newKey = true; - if (this.lastCell != null) { - switch(bloomType) { - case ROW: - newKey = ! kvComparator.matchingRows(cell, lastCell); - break; - case ROWCOL: - newKey = ! kvComparator.matchingRowColumn(cell, lastCell); - break; - case NONE: - newKey = false; - break; - default: - throw new IOException("Invalid Bloom filter type: " + bloomType + - " (ROW or ROWCOL expected)"); - } - } - if (newKey) { - /* - * http://2.bp.blogspot.com/_Cib_A77V54U/StZMrzaKufI/AAAAAAAAADo/ZhK7bGoJdMQ/s400/KeyValue.png - * Key = RowLen + Row + FamilyLen + Column [Family + Qualifier] + TimeStamp - * - * 2 Types of Filtering: - * 1. Row = Row - * 2. RowCol = Row + Qualifier - */ - byte[] bloomKey; - int bloomKeyOffset, bloomKeyLen; - - switch (bloomType) { - case ROW: - bloomKey = cell.getRowArray(); - bloomKeyOffset = cell.getRowOffset(); - bloomKeyLen = cell.getRowLength(); - break; - case ROWCOL: - // merge(row, qualifier) - // TODO: could save one buffer copy in case of compound Bloom - // filters when this involves creating a KeyValue - bloomKey = generalBloomFilterWriter.createBloomKey(cell.getRowArray(), - cell.getRowOffset(), cell.getRowLength(), cell.getQualifierArray(), - cell.getQualifierOffset(), cell.getQualifierLength()); - bloomKeyOffset = 0; - bloomKeyLen = bloomKey.length; - break; - default: - throw new IOException("Invalid Bloom filter type: " + bloomType + - " (ROW or ROWCOL expected)"); - } - generalBloomFilterWriter.add(bloomKey, bloomKeyOffset, bloomKeyLen); - if (lastBloomKey != null - && generalBloomFilterWriter.getComparator().compareFlatKey(bloomKey, - bloomKeyOffset, bloomKeyLen, lastBloomKey, - lastBloomKeyOffset, lastBloomKeyLen) <= 0) { - throw new IOException("Non-increasing Bloom keys: " - + Bytes.toStringBinary(bloomKey, bloomKeyOffset, bloomKeyLen) - + " after " - + Bytes.toStringBinary(lastBloomKey, lastBloomKeyOffset, - lastBloomKeyLen)); - } - lastBloomKey = bloomKey; - lastBloomKeyOffset = bloomKeyOffset; - lastBloomKeyLen = bloomKeyLen; - this.lastCell = cell; - } - } - } - - private void appendDeleteFamilyBloomFilter(final Cell cell) - throws IOException { - if (!CellUtil.isDeleteFamily(cell) && !CellUtil.isDeleteFamilyVersion(cell)) { - return; - } - - // increase the number of delete family in the store file - deleteFamilyCnt++; - if (null != this.deleteFamilyBloomFilterWriter) { - boolean newKey = true; - if (lastDeleteFamilyCell != null) { - newKey = !kvComparator.matchingRows(cell, lastDeleteFamilyCell); - } - if (newKey) { - this.deleteFamilyBloomFilterWriter.add(cell.getRowArray(), - cell.getRowOffset(), cell.getRowLength()); - this.lastDeleteFamilyCell = cell; - } - } - } - - public void append(final Cell cell) throws IOException { - appendGeneralBloomfilter(cell); - appendDeleteFamilyBloomFilter(cell); - writer.append(cell); - trackTimestamps(cell); - } - - public Path getPath() { - return this.writer.getPath(); - } - - boolean hasGeneralBloom() { - return this.generalBloomFilterWriter != 
null; - } - - /** - * For unit testing only. - * - * @return the Bloom filter used by this writer. - */ - BloomFilterWriter getGeneralBloomWriter() { - return generalBloomFilterWriter; - } - - private boolean closeBloomFilter(BloomFilterWriter bfw) throws IOException { - boolean haveBloom = (bfw != null && bfw.getKeyCount() > 0); - if (haveBloom) { - bfw.compactBloom(); - } - return haveBloom; - } - - private boolean closeGeneralBloomFilter() throws IOException { - boolean hasGeneralBloom = closeBloomFilter(generalBloomFilterWriter); - - // add the general Bloom filter writer and append file info - if (hasGeneralBloom) { - writer.addGeneralBloomFilter(generalBloomFilterWriter); - writer.appendFileInfo(BLOOM_FILTER_TYPE_KEY, - Bytes.toBytes(bloomType.toString())); - if (lastBloomKey != null) { - writer.appendFileInfo(LAST_BLOOM_KEY, Arrays.copyOfRange( - lastBloomKey, lastBloomKeyOffset, lastBloomKeyOffset - + lastBloomKeyLen)); - } - } - return hasGeneralBloom; - } - - private boolean closeDeleteFamilyBloomFilter() throws IOException { - boolean hasDeleteFamilyBloom = closeBloomFilter(deleteFamilyBloomFilterWriter); - - // add the delete family Bloom filter writer - if (hasDeleteFamilyBloom) { - writer.addDeleteFamilyBloomFilter(deleteFamilyBloomFilterWriter); - } - - // append file info about the number of delete family kvs - // even if there is no delete family Bloom. - writer.appendFileInfo(DELETE_FAMILY_COUNT, - Bytes.toBytes(this.deleteFamilyCnt)); - - return hasDeleteFamilyBloom; - } - - public void close() throws IOException { - boolean hasGeneralBloom = this.closeGeneralBloomFilter(); - boolean hasDeleteFamilyBloom = this.closeDeleteFamilyBloomFilter(); - - writer.close(); - - // Log final Bloom filter statistics. This needs to be done after close() - // because compound Bloom filters might be finalized as part of closing. - if (StoreFile.LOG.isTraceEnabled()) { - StoreFile.LOG.trace((hasGeneralBloom ? "" : "NO ") + "General Bloom and " + - (hasDeleteFamilyBloom ? "" : "NO ") + "DeleteFamily" + " was added to HFile " + - getPath()); - } - - } - - public void appendFileInfo(byte[] key, byte[] value) throws IOException { - writer.appendFileInfo(key, value); - } - - /** For use in testing, e.g. {@link org.apache.hadoop.hbase.regionserver.CreateRandomStoreFile} - */ - HFile.Writer getHFileWriter() { - return writer; - } - } - - /** - * Reader for a StoreFile. 
- */ - public static class Reader { - private static final Log LOG = LogFactory.getLog(Reader.class.getName()); - - protected BloomFilter generalBloomFilter = null; - protected BloomFilter deleteFamilyBloomFilter = null; - protected BloomType bloomFilterType; - private final HFile.Reader reader; - protected TimeRangeTracker timeRangeTracker = null; - protected long sequenceID = -1; - private byte[] lastBloomKey; - private long deleteFamilyCnt = -1; - private boolean bulkLoadResult = false; - - public Reader(FileSystem fs, Path path, CacheConfig cacheConf, Configuration conf) - throws IOException { - reader = HFile.createReader(fs, path, cacheConf, conf); - bloomFilterType = BloomType.NONE; - } - - public Reader(FileSystem fs, Path path, FSDataInputStreamWrapper in, long size, - CacheConfig cacheConf, Configuration conf) throws IOException { - reader = HFile.createReader(fs, path, in, size, cacheConf, conf); - bloomFilterType = BloomType.NONE; - } - - public void setReplicaStoreFile(boolean isPrimaryReplicaStoreFile) { - reader.setPrimaryReplicaReader(isPrimaryReplicaStoreFile); - } - public boolean isPrimaryReplicaReader() { - return reader.isPrimaryReplicaReader(); - } - - /** - * ONLY USE DEFAULT CONSTRUCTOR FOR UNIT TESTS - */ - Reader() { - this.reader = null; - } - - public KVComparator getComparator() { - return reader.getComparator(); - } - - /** - * Get a scanner to scan over this StoreFile. Do not use - * this overload if using this scanner for compactions. - * - * @param cacheBlocks should this scanner cache blocks? - * @param pread use pread (for highly concurrent small readers) - * @return a scanner - */ - public StoreFileScanner getStoreFileScanner(boolean cacheBlocks, - boolean pread) { - return getStoreFileScanner(cacheBlocks, pread, false, - // 0 is passed as readpoint because this method is only used by test - // where StoreFile is directly operated upon - 0); - } - - /** - * Get a scanner to scan over this StoreFile. - * - * @param cacheBlocks should this scanner cache blocks? - * @param pread use pread (for highly concurrent small readers) - * @param isCompaction is scanner being used for compaction? - * @return a scanner - */ - public StoreFileScanner getStoreFileScanner(boolean cacheBlocks, - boolean pread, - boolean isCompaction, long readPt) { - return new StoreFileScanner(this, - getScanner(cacheBlocks, pread, isCompaction), - !isCompaction, reader.hasMVCCInfo(), readPt); - } - - /** - * Warning: Do not write further code which depends on this call. Instead - * use getStoreFileScanner() which uses the StoreFileScanner class/interface - * which is the preferred way to scan a store with higher level concepts. - * - * @param cacheBlocks should we cache the blocks? - * @param pread use pread (for concurrent small readers) - * @return the underlying HFileScanner - */ - @Deprecated - public HFileScanner getScanner(boolean cacheBlocks, boolean pread) { - return getScanner(cacheBlocks, pread, false); - } - - /** - * Warning: Do not write further code which depends on this call. Instead - * use getStoreFileScanner() which uses the StoreFileScanner class/interface - * which is the preferred way to scan a store with higher level concepts. - * - * @param cacheBlocks - * should we cache the blocks? - * @param pread - * use pread (for concurrent small readers) - * @param isCompaction - * is scanner being used for compaction? 
- * @return the underlying HFileScanner - */ - @Deprecated - public HFileScanner getScanner(boolean cacheBlocks, boolean pread, - boolean isCompaction) { - return reader.getScanner(cacheBlocks, pread, isCompaction); - } - - public void close(boolean evictOnClose) throws IOException { - reader.close(evictOnClose); - } - - /** - * Check if this storeFile may contain keys within the TimeRange that - * have not expired (i.e. not older than oldestUnexpiredTS). - * @param timeRange the timeRange to restrict - * @param oldestUnexpiredTS the oldest timestamp that is not expired, as - * determined by the column family's TTL - * @return false if queried keys definitely don't exist in this StoreFile - */ - boolean passesTimerangeFilter(TimeRange timeRange, long oldestUnexpiredTS) { - if (timeRangeTracker == null) { - return true; - } else { - return timeRangeTracker.includesTimeRange(timeRange) && - timeRangeTracker.getMaximumTimestamp() >= oldestUnexpiredTS; - } - } - - /** - * Checks whether the given scan passes the Bloom filter (if present). Only - * checks Bloom filters for single-row or single-row-column scans. Bloom - * filter checking for multi-gets is implemented as part of the store - * scanner system (see {@link StoreFileScanner#seekExactly}) and uses - * the lower-level API {@link #passesGeneralBloomFilter(byte[], int, int, byte[], - * int, int)}. - * - * @param scan the scan specification. Used to determine the row, and to - * check whether this is a single-row ("get") scan. - * @param columns the set of columns. Only used for row-column Bloom - * filters. - * @return true if the scan with the given column set passes the Bloom - * filter, or if the Bloom filter is not applicable for the scan. - * False if the Bloom filter is applicable and the scan fails it. - */ - boolean passesBloomFilter(Scan scan, - final SortedSet columns) { - // Multi-column non-get scans will use Bloom filters through the - // lower-level API function that this function calls. - if (!scan.isGetScan()) { - return true; - } - - byte[] row = scan.getStartRow(); - switch (this.bloomFilterType) { - case ROW: - return passesGeneralBloomFilter(row, 0, row.length, null, 0, 0); - - case ROWCOL: - if (columns != null && columns.size() == 1) { - byte[] column = columns.first(); - return passesGeneralBloomFilter(row, 0, row.length, column, 0, - column.length); - } - - // For multi-column queries the Bloom filter is checked from the - // seekExact operation. - return true; - - default: - return true; - } - } - - public boolean passesDeleteFamilyBloomFilter(byte[] row, int rowOffset, - int rowLen) { - // Cache Bloom filter as a local variable in case it is set to null by - // another thread on an IO error. - BloomFilter bloomFilter = this.deleteFamilyBloomFilter; - - // Empty file or there is no delete family at all - if (reader.getTrailer().getEntryCount() == 0 || deleteFamilyCnt == 0) { - return false; - } - - if (bloomFilter == null) { - return true; - } - - try { - if (!bloomFilter.supportsAutoLoading()) { - return true; - } - return bloomFilter.contains(row, rowOffset, rowLen, null); - } catch (IllegalArgumentException e) { - LOG.error("Bad Delete Family bloom filter data -- proceeding without", - e); - setDeleteFamilyBloomFilterFaulty(); - } - - return true; - } - - /** - * A method for checking Bloom filters. Called directly from - * StoreFileScanner in case of a multi-column query. 
- * - * @param row - * @param rowOffset - * @param rowLen - * @param col - * @param colOffset - * @param colLen - * @return True if passes - */ - public boolean passesGeneralBloomFilter(byte[] row, int rowOffset, - int rowLen, byte[] col, int colOffset, int colLen) { - // Cache Bloom filter as a local variable in case it is set to null by - // another thread on an IO error. - BloomFilter bloomFilter = this.generalBloomFilter; - if (bloomFilter == null) { - return true; - } - - byte[] key; - switch (bloomFilterType) { - case ROW: - if (col != null) { - throw new RuntimeException("Row-only Bloom filter called with " + - "column specified"); - } - if (rowOffset != 0 || rowLen != row.length) { - throw new AssertionError("For row-only Bloom filters the row " - + "must occupy the whole array"); - } - key = row; - break; - - case ROWCOL: - key = bloomFilter.createBloomKey(row, rowOffset, rowLen, col, - colOffset, colLen); - - break; - - default: - return true; - } - - // Empty file - if (reader.getTrailer().getEntryCount() == 0) - return false; - - try { - boolean shouldCheckBloom; - ByteBuffer bloom; - if (bloomFilter.supportsAutoLoading()) { - bloom = null; - shouldCheckBloom = true; - } else { - bloom = reader.getMetaBlock(HFile.BLOOM_FILTER_DATA_KEY, - true); - shouldCheckBloom = bloom != null; - } - - if (shouldCheckBloom) { - boolean exists; - - // Whether the primary Bloom key is greater than the last Bloom key - // from the file info. For row-column Bloom filters this is not yet - // a sufficient condition to return false. - boolean keyIsAfterLast = lastBloomKey != null - && bloomFilter.getComparator().compareFlatKey(key, lastBloomKey) > 0; - - if (bloomFilterType == BloomType.ROWCOL) { - // Since a Row Delete is essentially a DeleteFamily applied to all - // columns, a file might be skipped if using row+col Bloom filter. - // In order to ensure this file is included an additional check is - // required looking only for a row bloom. - byte[] rowBloomKey = bloomFilter.createBloomKey(row, rowOffset, rowLen, - null, 0, 0); - - if (keyIsAfterLast - && bloomFilter.getComparator().compareFlatKey(rowBloomKey, - lastBloomKey) > 0) { - exists = false; - } else { - exists = - bloomFilter.contains(key, 0, key.length, bloom) || - bloomFilter.contains(rowBloomKey, 0, rowBloomKey.length, - bloom); - } - } else { - exists = !keyIsAfterLast - && bloomFilter.contains(key, 0, key.length, bloom); - } - - return exists; - } - } catch (IOException e) { - LOG.error("Error reading bloom filter data -- proceeding without", - e); - setGeneralBloomFilterFaulty(); - } catch (IllegalArgumentException e) { - LOG.error("Bad bloom filter data -- proceeding without", e); - setGeneralBloomFilterFaulty(); - } - - return true; - } - - /** - * Checks whether the given scan rowkey range overlaps with the current storefile's - * @param scan the scan specification. Used to determine the rowkey range. - * @return true if there is overlap, false otherwise - */ - public boolean passesKeyRangeFilter(Scan scan) { - if (this.getFirstKey() == null || this.getLastKey() == null) { - // the file is empty - return false; - } - if (Bytes.equals(scan.getStartRow(), HConstants.EMPTY_START_ROW) - && Bytes.equals(scan.getStopRow(), HConstants.EMPTY_END_ROW)) { - return true; - } - KeyValue smallestScanKeyValue = scan.isReversed() ? KeyValueUtil - .createFirstOnRow(scan.getStopRow()) : KeyValueUtil.createFirstOnRow(scan - .getStartRow()); - KeyValue largestScanKeyValue = scan.isReversed() ? 
KeyValueUtil - .createLastOnRow(scan.getStartRow()) : KeyValueUtil.createLastOnRow(scan - .getStopRow()); - boolean nonOverLapping = (getComparator().compareFlatKey( - this.getFirstKey(), largestScanKeyValue.getKey()) > 0 && !Bytes - .equals(scan.isReversed() ? scan.getStartRow() : scan.getStopRow(), - HConstants.EMPTY_END_ROW)) - || getComparator().compareFlatKey(this.getLastKey(), - smallestScanKeyValue.getKey()) < 0; - return !nonOverLapping; - } - - public Map loadFileInfo() throws IOException { - Map fi = reader.loadFileInfo(); - - byte[] b = fi.get(BLOOM_FILTER_TYPE_KEY); - if (b != null) { - bloomFilterType = BloomType.valueOf(Bytes.toString(b)); - } - - lastBloomKey = fi.get(LAST_BLOOM_KEY); - byte[] cnt = fi.get(DELETE_FAMILY_COUNT); - if (cnt != null) { - deleteFamilyCnt = Bytes.toLong(cnt); - } - - return fi; - } - - public void loadBloomfilter() { - this.loadBloomfilter(BlockType.GENERAL_BLOOM_META); - this.loadBloomfilter(BlockType.DELETE_FAMILY_BLOOM_META); - } - - private void loadBloomfilter(BlockType blockType) { - try { - if (blockType == BlockType.GENERAL_BLOOM_META) { - if (this.generalBloomFilter != null) - return; // Bloom has been loaded - - DataInput bloomMeta = reader.getGeneralBloomFilterMetadata(); - if (bloomMeta != null) { - // sanity check for NONE Bloom filter - if (bloomFilterType == BloomType.NONE) { - throw new IOException( - "valid bloom filter type not found in FileInfo"); - } else { - generalBloomFilter = BloomFilterFactory.createFromMeta(bloomMeta, - reader); - if (LOG.isTraceEnabled()) { - LOG.trace("Loaded " + bloomFilterType.toString() + " " - + generalBloomFilter.getClass().getSimpleName() - + " metadata for " + reader.getName()); - } - } - } - } else if (blockType == BlockType.DELETE_FAMILY_BLOOM_META) { - if (this.deleteFamilyBloomFilter != null) - return; // Bloom has been loaded - - DataInput bloomMeta = reader.getDeleteBloomFilterMetadata(); - if (bloomMeta != null) { - deleteFamilyBloomFilter = BloomFilterFactory.createFromMeta( - bloomMeta, reader); - LOG.info("Loaded Delete Family Bloom (" - + deleteFamilyBloomFilter.getClass().getSimpleName() - + ") metadata for " + reader.getName()); - } - } else { - throw new RuntimeException("Block Type: " + blockType.toString() - + "is not supported for Bloom filter"); - } - } catch (IOException e) { - LOG.error("Error reading bloom filter meta for " + blockType - + " -- proceeding without", e); - setBloomFilterFaulty(blockType); - } catch (IllegalArgumentException e) { - LOG.error("Bad bloom filter meta " + blockType - + " -- proceeding without", e); - setBloomFilterFaulty(blockType); - } - } - - private void setBloomFilterFaulty(BlockType blockType) { - if (blockType == BlockType.GENERAL_BLOOM_META) { - setGeneralBloomFilterFaulty(); - } else if (blockType == BlockType.DELETE_FAMILY_BLOOM_META) { - setDeleteFamilyBloomFilterFaulty(); - } - } - - /** - * The number of Bloom filter entries in this store file, or an estimate - * thereof, if the Bloom filter is not loaded. This always returns an upper - * bound of the number of Bloom filter entries. - * - * @return an estimate of the number of Bloom filter entries in this file - */ - public long getFilterEntries() { - return generalBloomFilter != null ? 
generalBloomFilter.getKeyCount() - : reader.getEntries(); - } - - public void setGeneralBloomFilterFaulty() { - generalBloomFilter = null; - } - - public void setDeleteFamilyBloomFilterFaulty() { - this.deleteFamilyBloomFilter = null; - } - - public byte[] getLastKey() { - return reader.getLastKey(); - } - - public byte[] getLastRowKey() { - return reader.getLastRowKey(); - } - - public byte[] midkey() throws IOException { - return reader.midkey(); - } - - public long length() { - return reader.length(); - } - - public long getTotalUncompressedBytes() { - return reader.getTrailer().getTotalUncompressedBytes(); - } - - public long getEntries() { - return reader.getEntries(); - } - - public long getDeleteFamilyCnt() { - return deleteFamilyCnt; - } - - public byte[] getFirstKey() { - return reader.getFirstKey(); - } - - public long indexSize() { - return reader.indexSize(); - } - - public BloomType getBloomFilterType() { - return this.bloomFilterType; - } - - public long getSequenceID() { - return sequenceID; - } - - public void setSequenceID(long sequenceID) { - this.sequenceID = sequenceID; - } - - public void setBulkLoaded(boolean bulkLoadResult) { - this.bulkLoadResult = bulkLoadResult; - } - - public boolean isBulkLoaded() { - return this.bulkLoadResult; - } - - BloomFilter getGeneralBloomFilter() { - return generalBloomFilter; - } - - long getUncompressedDataIndexSize() { - return reader.getTrailer().getUncompressedDataIndexSize(); - } - - public long getTotalBloomSize() { - if (generalBloomFilter == null) - return 0; - return generalBloomFilter.getByteSize(); - } - - public int getHFileVersion() { - return reader.getTrailer().getMajorVersion(); - } - - public int getHFileMinorVersion() { - return reader.getTrailer().getMinorVersion(); - } - - public HFile.Reader getHFileReader() { - return reader; - } - - void disableBloomFilterForTesting() { - generalBloomFilter = null; - this.deleteFamilyBloomFilter = null; - } - - public long getMaxTimestamp() { - return timeRangeTracker == null ? Long.MAX_VALUE : timeRangeTracker.getMaximumTimestamp(); - } - } - - /** * Useful comparators for comparing StoreFiles. */ public abstract static class Comparators { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileInfo.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileInfo.java index 54f200f..5e4cfd2 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileInfo.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileInfo.java @@ -217,7 +217,7 @@ public class StoreFileInfo { * @param cacheConf The cache configuration and block cache reference. 
* @return The StoreFile.Reader for the file */ - public StoreFile.Reader open(final FileSystem fs, + public StoreFileReader open(final FileSystem fs, final CacheConfig cacheConf, final boolean canUseDropBehind) throws IOException { FSDataInputStreamWrapper in; FileStatus status; @@ -241,7 +241,7 @@ public class StoreFileInfo { long length = status.getLen(); hdfsBlocksDistribution = computeHDFSBlocksDistribution(fs); - StoreFile.Reader reader = null; + StoreFileReader reader = null; if (this.coprocessorHost != null) { reader = this.coprocessorHost.preStoreFileReaderOpen(fs, this.getPath(), in, length, cacheConf, reference); @@ -251,7 +251,7 @@ public class StoreFileInfo { reader = new HalfStoreFileReader(fs, this.getPath(), in, length, cacheConf, reference, conf); } else { - reader = new StoreFile.Reader(fs, status.getPath(), in, length, cacheConf, conf); + reader = new StoreFileReader(fs, status.getPath(), in, length, cacheConf, conf); } } if (this.coprocessorHost != null) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileReader.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileReader.java new file mode 100644 index 0000000..6bbc7b3 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileReader.java @@ -0,0 +1,559 @@ +package org.apache.hadoop.hbase.regionserver; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.KeyValue; +import org.apache.hadoop.hbase.KeyValueUtil; +import org.apache.hadoop.hbase.client.Scan; +import org.apache.hadoop.hbase.io.FSDataInputStreamWrapper; +import org.apache.hadoop.hbase.io.TimeRange; +import org.apache.hadoop.hbase.io.hfile.BlockType; +import org.apache.hadoop.hbase.io.hfile.CacheConfig; +import org.apache.hadoop.hbase.io.hfile.HFile; +import org.apache.hadoop.hbase.io.hfile.HFileScanner; +import org.apache.hadoop.hbase.util.BloomFilter; +import org.apache.hadoop.hbase.util.BloomFilterFactory; +import org.apache.hadoop.hbase.util.Bytes; + +import java.io.DataInput; +import java.io.IOException; +import java.nio.ByteBuffer; +import java.util.Map; +import java.util.SortedSet; + +/** + * Reader for a StoreFile. 
+ */ +public class StoreFileReader { + private static final Log LOG = LogFactory.getLog(StoreFileReader.class.getName()); + + protected BloomFilter generalBloomFilter = null; + protected BloomFilter deleteFamilyBloomFilter = null; + protected BloomType bloomFilterType; + private final HFile.Reader reader; + protected TimeRangeTracker timeRangeTracker = null; + protected long sequenceID = -1; + private byte[] lastBloomKey; + private long deleteFamilyCnt = -1; + private boolean bulkLoadResult = false; + + public StoreFileReader(FileSystem fs, Path path, CacheConfig cacheConf, Configuration conf) + throws IOException { + reader = HFile.createReader(fs, path, cacheConf, conf); + bloomFilterType = BloomType.NONE; + } + + public StoreFileReader(FileSystem fs, Path path, FSDataInputStreamWrapper in, long size, + CacheConfig cacheConf, Configuration conf) throws IOException { + reader = HFile.createReader(fs, path, in, size, cacheConf, conf); + bloomFilterType = BloomType.NONE; + } + + public void setReplicaStoreFile(boolean isPrimaryReplicaStoreFile) { + reader.setPrimaryReplicaReader(isPrimaryReplicaStoreFile); + } + public boolean isPrimaryReplicaReader() { + return reader.isPrimaryReplicaReader(); + } + + /** + * ONLY USE DEFAULT CONSTRUCTOR FOR UNIT TESTS + */ + StoreFileReader() { + this.reader = null; + } + + public KeyValue.KVComparator getComparator() { + return reader.getComparator(); + } + + /** + * Get a scanner to scan over this StoreFile. Do not use + * this overload if using this scanner for compactions. + * + * @param cacheBlocks should this scanner cache blocks? + * @param pread use pread (for highly concurrent small readers) + * @return a scanner + */ + public StoreFileScanner getStoreFileScanner(boolean cacheBlocks, + boolean pread) { + return getStoreFileScanner(cacheBlocks, pread, false, + // 0 is passed as readpoint because this method is only used by test + // where StoreFile is directly operated upon + 0); + } + + /** + * Get a scanner to scan over this StoreFile. + * + * @param cacheBlocks should this scanner cache blocks? + * @param pread use pread (for highly concurrent small readers) + * @param isCompaction is scanner being used for compaction? + * @return a scanner + */ + public StoreFileScanner getStoreFileScanner(boolean cacheBlocks, + boolean pread, + boolean isCompaction, long readPt) { + return new StoreFileScanner(this, + getScanner(cacheBlocks, pread, isCompaction), + !isCompaction, reader.hasMVCCInfo(), readPt); + } + + /** + * Warning: Do not write further code which depends on this call. Instead + * use getStoreFileScanner() which uses the StoreFileScanner class/interface + * which is the preferred way to scan a store with higher level concepts. + * + * @param cacheBlocks should we cache the blocks? + * @param pread use pread (for concurrent small readers) + * @return the underlying HFileScanner + */ + @Deprecated + public HFileScanner getScanner(boolean cacheBlocks, boolean pread) { + return getScanner(cacheBlocks, pread, false); + } + + /** + * Warning: Do not write further code which depends on this call. Instead + * use getStoreFileScanner() which uses the StoreFileScanner class/interface + * which is the preferred way to scan a store with higher level concepts. + * + * @param cacheBlocks + * should we cache the blocks? + * @param pread + * use pread (for concurrent small readers) + * @param isCompaction + * is scanner being used for compaction? 
+ * @return the underlying HFileScanner + */ + @Deprecated + public HFileScanner getScanner(boolean cacheBlocks, boolean pread, + boolean isCompaction) { + return reader.getScanner(cacheBlocks, pread, isCompaction); + } + + public void close(boolean evictOnClose) throws IOException { + reader.close(evictOnClose); + } + + /** + * Check if this storeFile may contain keys within the TimeRange that + * have not expired (i.e. not older than oldestUnexpiredTS). + * @param timeRange the timeRange to restrict + * @param oldestUnexpiredTS the oldest timestamp that is not expired, as + * determined by the column family's TTL + * @return false if queried keys definitely don't exist in this StoreFile + */ + boolean passesTimerangeFilter(TimeRange timeRange, long oldestUnexpiredTS) { + if (timeRangeTracker == null) { + return true; + } else { + return timeRangeTracker.includesTimeRange(timeRange) && + timeRangeTracker.getMaximumTimestamp() >= oldestUnexpiredTS; + } + } + + /** + * Checks whether the given scan passes the Bloom filter (if present). Only + * checks Bloom filters for single-row or single-row-column scans. Bloom + * filter checking for multi-gets is implemented as part of the store + * scanner system (see {@link StoreFileScanner#seekExactly}) and uses + * the lower-level API {@link #passesGeneralBloomFilter(byte[], int, int, byte[], + * int, int)}. + * + * @param scan the scan specification. Used to determine the row, and to + * check whether this is a single-row ("get") scan. + * @param columns the set of columns. Only used for row-column Bloom + * filters. + * @return true if the scan with the given column set passes the Bloom + * filter, or if the Bloom filter is not applicable for the scan. + * False if the Bloom filter is applicable and the scan fails it. + */ + boolean passesBloomFilter(Scan scan, + final SortedSet columns) { + // Multi-column non-get scans will use Bloom filters through the + // lower-level API function that this function calls. + if (!scan.isGetScan()) { + return true; + } + + byte[] row = scan.getStartRow(); + switch (this.bloomFilterType) { + case ROW: + return passesGeneralBloomFilter(row, 0, row.length, null, 0, 0); + + case ROWCOL: + if (columns != null && columns.size() == 1) { + byte[] column = columns.first(); + return passesGeneralBloomFilter(row, 0, row.length, column, 0, + column.length); + } + + // For multi-column queries the Bloom filter is checked from the + // seekExact operation. + return true; + + default: + return true; + } + } + + public boolean passesDeleteFamilyBloomFilter(byte[] row, int rowOffset, + int rowLen) { + // Cache Bloom filter as a local variable in case it is set to null by + // another thread on an IO error. + BloomFilter bloomFilter = this.deleteFamilyBloomFilter; + + // Empty file or there is no delete family at all + if (reader.getTrailer().getEntryCount() == 0 || deleteFamilyCnt == 0) { + return false; + } + + if (bloomFilter == null) { + return true; + } + + try { + if (!bloomFilter.supportsAutoLoading()) { + return true; + } + return bloomFilter.contains(row, rowOffset, rowLen, null); + } catch (IllegalArgumentException e) { + LOG.error("Bad Delete Family bloom filter data -- proceeding without", + e); + setDeleteFamilyBloomFilterFaulty(); + } + + return true; + } + + /** + * A method for checking Bloom filters. Called directly from + * StoreFileScanner in case of a multi-column query. 
+ * + * @param row + * @param rowOffset + * @param rowLen + * @param col + * @param colOffset + * @param colLen + * @return True if passes + */ + public boolean passesGeneralBloomFilter(byte[] row, int rowOffset, + int rowLen, byte[] col, int colOffset, int colLen) { + // Cache Bloom filter as a local variable in case it is set to null by + // another thread on an IO error. + BloomFilter bloomFilter = this.generalBloomFilter; + if (bloomFilter == null) { + return true; + } + + byte[] key; + switch (bloomFilterType) { + case ROW: + if (col != null) { + throw new RuntimeException("Row-only Bloom filter called with " + + "column specified"); + } + if (rowOffset != 0 || rowLen != row.length) { + throw new AssertionError("For row-only Bloom filters the row " + + "must occupy the whole array"); + } + key = row; + break; + + case ROWCOL: + key = bloomFilter.createBloomKey(row, rowOffset, rowLen, col, + colOffset, colLen); + + break; + + default: + return true; + } + + // Empty file + if (reader.getTrailer().getEntryCount() == 0) + return false; + + try { + boolean shouldCheckBloom; + ByteBuffer bloom; + if (bloomFilter.supportsAutoLoading()) { + bloom = null; + shouldCheckBloom = true; + } else { + bloom = reader.getMetaBlock(HFile.BLOOM_FILTER_DATA_KEY, + true); + shouldCheckBloom = bloom != null; + } + + if (shouldCheckBloom) { + boolean exists; + + // Whether the primary Bloom key is greater than the last Bloom key + // from the file info. For row-column Bloom filters this is not yet + // a sufficient condition to return false. + boolean keyIsAfterLast = lastBloomKey != null + && bloomFilter.getComparator().compareFlatKey(key, lastBloomKey) > 0; + + if (bloomFilterType == BloomType.ROWCOL) { + // Since a Row Delete is essentially a DeleteFamily applied to all + // columns, a file might be skipped if using row+col Bloom filter. + // In order to ensure this file is included an additional check is + // required looking only for a row bloom. + byte[] rowBloomKey = bloomFilter.createBloomKey(row, rowOffset, rowLen, + null, 0, 0); + + if (keyIsAfterLast + && bloomFilter.getComparator().compareFlatKey(rowBloomKey, + lastBloomKey) > 0) { + exists = false; + } else { + exists = + bloomFilter.contains(key, 0, key.length, bloom) || + bloomFilter.contains(rowBloomKey, 0, rowBloomKey.length, + bloom); + } + } else { + exists = !keyIsAfterLast + && bloomFilter.contains(key, 0, key.length, bloom); + } + + return exists; + } + } catch (IOException e) { + LOG.error("Error reading bloom filter data -- proceeding without", + e); + setGeneralBloomFilterFaulty(); + } catch (IllegalArgumentException e) { + LOG.error("Bad bloom filter data -- proceeding without", e); + setGeneralBloomFilterFaulty(); + } + + return true; + } + + /** + * Checks whether the given scan rowkey range overlaps with the current storefile's + * @param scan the scan specification. Used to determine the rowkey range. + * @return true if there is overlap, false otherwise + */ + public boolean passesKeyRangeFilter(Scan scan) { + if (this.getFirstKey() == null || this.getLastKey() == null) { + // the file is empty + return false; + } + if (Bytes.equals(scan.getStartRow(), HConstants.EMPTY_START_ROW) + && Bytes.equals(scan.getStopRow(), HConstants.EMPTY_END_ROW)) { + return true; + } + KeyValue smallestScanKeyValue = scan.isReversed() ? KeyValueUtil + .createFirstOnRow(scan.getStopRow()) : KeyValueUtil.createFirstOnRow(scan + .getStartRow()); + KeyValue largestScanKeyValue = scan.isReversed() ? 
KeyValueUtil + .createLastOnRow(scan.getStartRow()) : KeyValueUtil.createLastOnRow(scan + .getStopRow()); + boolean nonOverLapping = (getComparator().compareFlatKey( + this.getFirstKey(), largestScanKeyValue.getKey()) > 0 && !Bytes + .equals(scan.isReversed() ? scan.getStartRow() : scan.getStopRow(), + HConstants.EMPTY_END_ROW)) + || getComparator().compareFlatKey(this.getLastKey(), + smallestScanKeyValue.getKey()) < 0; + return !nonOverLapping; + } + + public Map loadFileInfo() throws IOException { + Map fi = reader.loadFileInfo(); + + byte[] b = fi.get(StoreFile.BLOOM_FILTER_TYPE_KEY); + if (b != null) { + bloomFilterType = BloomType.valueOf(Bytes.toString(b)); + } + + lastBloomKey = fi.get(StoreFile.LAST_BLOOM_KEY); + byte[] cnt = fi.get(StoreFile.DELETE_FAMILY_COUNT); + if (cnt != null) { + deleteFamilyCnt = Bytes.toLong(cnt); + } + + return fi; + } + + public void loadBloomfilter() { + this.loadBloomfilter(BlockType.GENERAL_BLOOM_META); + this.loadBloomfilter(BlockType.DELETE_FAMILY_BLOOM_META); + } + + public void loadBloomfilter(BlockType blockType) { + try { + if (blockType == BlockType.GENERAL_BLOOM_META) { + if (this.generalBloomFilter != null) + return; // Bloom has been loaded + + DataInput bloomMeta = reader.getGeneralBloomFilterMetadata(); + if (bloomMeta != null) { + // sanity check for NONE Bloom filter + if (bloomFilterType == BloomType.NONE) { + throw new IOException( + "valid bloom filter type not found in FileInfo"); + } else { + generalBloomFilter = BloomFilterFactory.createFromMeta(bloomMeta, + reader); + if (LOG.isTraceEnabled()) { + LOG.trace("Loaded " + bloomFilterType.toString() + " " + + generalBloomFilter.getClass().getSimpleName() + + " metadata for " + reader.getName()); + } + } + } + } else if (blockType == BlockType.DELETE_FAMILY_BLOOM_META) { + if (this.deleteFamilyBloomFilter != null) + return; // Bloom has been loaded + + DataInput bloomMeta = reader.getDeleteBloomFilterMetadata(); + if (bloomMeta != null) { + deleteFamilyBloomFilter = BloomFilterFactory.createFromMeta( + bloomMeta, reader); + LOG.info("Loaded Delete Family Bloom (" + + deleteFamilyBloomFilter.getClass().getSimpleName() + + ") metadata for " + reader.getName()); + } + } else { + throw new RuntimeException("Block Type: " + blockType.toString() + + "is not supported for Bloom filter"); + } + } catch (IOException e) { + LOG.error("Error reading bloom filter meta for " + blockType + + " -- proceeding without", e); + setBloomFilterFaulty(blockType); + } catch (IllegalArgumentException e) { + LOG.error("Bad bloom filter meta " + blockType + + " -- proceeding without", e); + setBloomFilterFaulty(blockType); + } + } + + private void setBloomFilterFaulty(BlockType blockType) { + if (blockType == BlockType.GENERAL_BLOOM_META) { + setGeneralBloomFilterFaulty(); + } else if (blockType == BlockType.DELETE_FAMILY_BLOOM_META) { + setDeleteFamilyBloomFilterFaulty(); + } + } + + /** + * The number of Bloom filter entries in this store file, or an estimate + * thereof, if the Bloom filter is not loaded. This always returns an upper + * bound of the number of Bloom filter entries. + * + * @return an estimate of the number of Bloom filter entries in this file + */ + public long getFilterEntries() { + return generalBloomFilter != null ? 
generalBloomFilter.getKeyCount() + : reader.getEntries(); + } + + public void setGeneralBloomFilterFaulty() { + generalBloomFilter = null; + } + + public void setDeleteFamilyBloomFilterFaulty() { + this.deleteFamilyBloomFilter = null; + } + + public byte[] getLastKey() { + return reader.getLastKey(); + } + + public byte[] getLastRowKey() { + return reader.getLastRowKey(); + } + + public byte[] midkey() throws IOException { + return reader.midkey(); + } + + public long length() { + return reader.length(); + } + + public long getTotalUncompressedBytes() { + return reader.getTrailer().getTotalUncompressedBytes(); + } + + public long getEntries() { + return reader.getEntries(); + } + + public long getDeleteFamilyCnt() { + return deleteFamilyCnt; + } + + public byte[] getFirstKey() { + return reader.getFirstKey(); + } + + public long indexSize() { + return reader.indexSize(); + } + + public BloomType getBloomFilterType() { + return this.bloomFilterType; + } + + public long getSequenceID() { + return sequenceID; + } + + public void setSequenceID(long sequenceID) { + this.sequenceID = sequenceID; + } + + public void setBulkLoaded(boolean bulkLoadResult) { + this.bulkLoadResult = bulkLoadResult; + } + + public boolean isBulkLoaded() { + return this.bulkLoadResult; + } + + BloomFilter getGeneralBloomFilter() { + return generalBloomFilter; + } + + long getUncompressedDataIndexSize() { + return reader.getTrailer().getUncompressedDataIndexSize(); + } + + public long getTotalBloomSize() { + if (generalBloomFilter == null) + return 0; + return generalBloomFilter.getByteSize(); + } + + public int getHFileVersion() { + return reader.getTrailer().getMajorVersion(); + } + + public int getHFileMinorVersion() { + return reader.getTrailer().getMinorVersion(); + } + + public HFile.Reader getHFileReader() { + return reader; + } + + void disableBloomFilterForTesting() { + generalBloomFilter = null; + this.deleteFamilyBloomFilter = null; + } + + public long getMaxTimestamp() { + return timeRangeTracker == null ? Long.MAX_VALUE : timeRangeTracker.getMaximumTimestamp(); + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java index 0785db1..7fc558e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java @@ -37,7 +37,6 @@ import org.apache.hadoop.hbase.KeyValueUtil; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.io.TimeRange; import org.apache.hadoop.hbase.io.hfile.HFileScanner; -import org.apache.hadoop.hbase.regionserver.StoreFile.Reader; /** * KeyValueScanner adaptor over the Reader. 
It also provides hooks into @@ -48,7 +47,7 @@ public class StoreFileScanner implements KeyValueScanner { private static final Log LOG = LogFactory.getLog(HStore.class); // the reader it comes from: - private final StoreFile.Reader reader; + private final StoreFileReader reader; private final HFileScanner hfs; private Cell cur = null; @@ -72,7 +71,7 @@ public class StoreFileScanner implements KeyValueScanner { * Implements a {@link KeyValueScanner} on top of the specified {@link HFileScanner} * @param hfs HFile scanner */ - public StoreFileScanner(StoreFile.Reader reader, HFileScanner hfs, boolean useMVCC, + public StoreFileScanner(StoreFileReader reader, HFileScanner hfs, boolean useMVCC, boolean hasMVCC, long readPt) { this.readPt = readPt; this.reader = reader; @@ -119,7 +118,7 @@ public class StoreFileScanner implements KeyValueScanner { List scanners = new ArrayList( files.size()); for (StoreFile file : files) { - StoreFile.Reader r = file.createReader(canUseDrop); + StoreFileReader r = file.createReader(canUseDrop); r.setReplicaStoreFile(isPrimaryReplica); StoreFileScanner scanner = r.getStoreFileScanner(cacheBlocks, usePread, isCompaction, readPt); @@ -386,7 +385,7 @@ public class StoreFileScanner implements KeyValueScanner { return true; } - Reader getReader() { + StoreFileReader getReader() { return reader; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileWriter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileWriter.java new file mode 100644 index 0000000..20ceb3d --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileWriter.java @@ -0,0 +1,458 @@ +package org.apache.hadoop.hbase.regionserver; + +import com.google.common.base.Preconditions; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.KeyValue; +import org.apache.hadoop.hbase.io.hfile.CacheConfig; +import org.apache.hadoop.hbase.io.hfile.HFile; +import org.apache.hadoop.hbase.io.hfile.HFileContext; +import org.apache.hadoop.hbase.regionserver.compactions.Compactor; +import org.apache.hadoop.hbase.util.BloomFilterFactory; +import org.apache.hadoop.hbase.util.BloomFilterWriter; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.io.WritableUtils; + +import java.io.IOException; +import java.net.InetSocketAddress; +import java.util.Arrays; + +/** + * A StoreFile writer. Use this to read/write HBase Store Files. It is package + * local because it is an implementation detail of the HBase regionserver. 
+ */ +public class StoreFileWriter implements Compactor.CellSink { + private static final Log LOG = LogFactory.getLog(StoreFileWriter.class.getName()); + + private final BloomFilterWriter generalBloomFilterWriter; + private final BloomFilterWriter deleteFamilyBloomFilterWriter; + private final BloomType bloomType; + private byte[] lastBloomKey; + private int lastBloomKeyOffset, lastBloomKeyLen; + private KeyValue.KVComparator kvComparator; + private Cell lastCell = null; + private long earliestPutTs = HConstants.LATEST_TIMESTAMP; + private Cell lastDeleteFamilyCell = null; + private long deleteFamilyCnt = 0; + + TimeRangeTracker timeRangeTracker = new TimeRangeTracker(); + /* isTimeRangeTrackerSet keeps track if the timeRange has already been set + * When flushing a memstore, we set TimeRange and use this variable to + * indicate that it doesn't need to be calculated again while + * appending KeyValues. + * It is not set in cases of compactions when it is recalculated using only + * the appended KeyValues*/ + boolean isTimeRangeTrackerSet = false; + + protected HFile.Writer writer; + + /** + * Creates an HFile.Writer that also write helpful meta data. + * @param fs file system to write to + * @param path file name to create + * @param conf user configuration + * @param comparator key comparator + * @param bloomType bloom filter setting + * @param maxKeys the expected maximum number of keys to be added. Was used + * for Bloom filter size in {@link HFile} format version 1. + * @param favoredNodes + * @param fileContext - The HFile context + * @throws IOException problem writing to FS + */ + StoreFileWriter(FileSystem fs, Path path, final Configuration conf, CacheConfig cacheConf, + final KeyValue.KVComparator comparator, BloomType bloomType, long maxKeys, + InetSocketAddress[] favoredNodes, HFileContext fileContext) + throws IOException { + writer = HFile.getWriterFactory(conf, cacheConf) + .withPath(fs, path) + .withComparator(comparator) + .withFavoredNodes(favoredNodes) + .withFileContext(fileContext) + .create(); + + this.kvComparator = comparator; + + generalBloomFilterWriter = BloomFilterFactory.createGeneralBloomAtWrite( + conf, cacheConf, bloomType, + (int) Math.min(maxKeys, Integer.MAX_VALUE), writer); + + if (generalBloomFilterWriter != null) { + this.bloomType = bloomType; + if (StoreFileWriter.LOG.isTraceEnabled()) StoreFileWriter.LOG.trace("Bloom filter type for " + path + ": " + + this.bloomType + ", " + generalBloomFilterWriter.getClass().getSimpleName()); + } else { + // Not using Bloom filters. + this.bloomType = BloomType.NONE; + } + + // initialize delete family Bloom filter when there is NO RowCol Bloom + // filter + if (this.bloomType != BloomType.ROWCOL) { + this.deleteFamilyBloomFilterWriter = BloomFilterFactory + .createDeleteBloomAtWrite(conf, cacheConf, + (int) Math.min(maxKeys, Integer.MAX_VALUE), writer); + } else { + deleteFamilyBloomFilterWriter = null; + } + if (deleteFamilyBloomFilterWriter != null) { + if (StoreFileWriter.LOG.isTraceEnabled()) StoreFileWriter.LOG.trace("Delete Family Bloom filter type for " + path + ": " + + deleteFamilyBloomFilterWriter.getClass().getSimpleName()); + } + } + + /** + * Writes meta data. + * Call before {@link #close()} since its written as meta data to this file. + * @param maxSequenceId Maximum sequence id. 
+ * @param majorCompaction True if this file is product of a major compaction + * @throws IOException problem writing to FS + */ + public void appendMetadata(final long maxSequenceId, final boolean majorCompaction) + throws IOException { + writer.appendFileInfo(StoreFile.MAX_SEQ_ID_KEY, Bytes.toBytes(maxSequenceId)); + writer.appendFileInfo(StoreFile.MAJOR_COMPACTION_KEY, + Bytes.toBytes(majorCompaction)); + appendTrackedTimestampsToMetadata(); + } + + /** + * Add TimestampRange and earliest put timestamp to Metadata + */ + public void appendTrackedTimestampsToMetadata() throws IOException { + appendFileInfo(StoreFile.TIMERANGE_KEY, WritableUtils.toByteArray(timeRangeTracker)); + appendFileInfo(StoreFile.EARLIEST_PUT_TS, Bytes.toBytes(earliestPutTs)); + } + + /** + * Set TimeRangeTracker + * @param trt + */ + public void setTimeRangeTracker(final TimeRangeTracker trt) { + this.timeRangeTracker = trt; + isTimeRangeTrackerSet = true; + } + + /** + * Record the earlest Put timestamp. + * + * If the timeRangeTracker is not set, + * update TimeRangeTracker to include the timestamp of this key + * @param cell + */ + public void trackTimestamps(final Cell cell) { + if (KeyValue.Type.Put.getCode() == cell.getTypeByte()) { + earliestPutTs = Math.min(earliestPutTs, cell.getTimestamp()); + } + if (!isTimeRangeTrackerSet) { + timeRangeTracker.includeTimestamp(cell); + } + } + + private void appendGeneralBloomfilter(final Cell cell) throws IOException { + if (this.generalBloomFilterWriter != null) { + // only add to the bloom filter on a new, unique key + boolean newKey = true; + if (this.lastCell != null) { + switch(bloomType) { + case ROW: + newKey = ! kvComparator.matchingRows(cell, lastCell); + break; + case ROWCOL: + newKey = ! kvComparator.matchingRowColumn(cell, lastCell); + break; + case NONE: + newKey = false; + break; + default: + throw new IOException("Invalid Bloom filter type: " + bloomType + + " (ROW or ROWCOL expected)"); + } + } + if (newKey) { + /* + * http://2.bp.blogspot.com/_Cib_A77V54U/StZMrzaKufI/AAAAAAAAADo/ZhK7bGoJdMQ/s400/KeyValue.png + * Key = RowLen + Row + FamilyLen + Column [Family + Qualifier] + TimeStamp + * + * 2 Types of Filtering: + * 1. Row = Row + * 2. 
RowCol = Row + Qualifier + */ + byte[] bloomKey; + int bloomKeyOffset, bloomKeyLen; + + switch (bloomType) { + case ROW: + bloomKey = cell.getRowArray(); + bloomKeyOffset = cell.getRowOffset(); + bloomKeyLen = cell.getRowLength(); + break; + case ROWCOL: + // merge(row, qualifier) + // TODO: could save one buffer copy in case of compound Bloom + // filters when this involves creating a KeyValue + bloomKey = generalBloomFilterWriter.createBloomKey(cell.getRowArray(), + cell.getRowOffset(), cell.getRowLength(), cell.getQualifierArray(), + cell.getQualifierOffset(), cell.getQualifierLength()); + bloomKeyOffset = 0; + bloomKeyLen = bloomKey.length; + break; + default: + throw new IOException("Invalid Bloom filter type: " + bloomType + + " (ROW or ROWCOL expected)"); + } + generalBloomFilterWriter.add(bloomKey, bloomKeyOffset, bloomKeyLen); + if (lastBloomKey != null + && generalBloomFilterWriter.getComparator().compareFlatKey(bloomKey, + bloomKeyOffset, bloomKeyLen, lastBloomKey, + lastBloomKeyOffset, lastBloomKeyLen) <= 0) { + throw new IOException("Non-increasing Bloom keys: " + + Bytes.toStringBinary(bloomKey, bloomKeyOffset, bloomKeyLen) + + " after " + + Bytes.toStringBinary(lastBloomKey, lastBloomKeyOffset, + lastBloomKeyLen)); + } + lastBloomKey = bloomKey; + lastBloomKeyOffset = bloomKeyOffset; + lastBloomKeyLen = bloomKeyLen; + this.lastCell = cell; + } + } + } + + private void appendDeleteFamilyBloomFilter(final Cell cell) + throws IOException { + if (!CellUtil.isDeleteFamily(cell) && !CellUtil.isDeleteFamilyVersion(cell)) { + return; + } + + // increase the number of delete family in the store file + deleteFamilyCnt++; + if (null != this.deleteFamilyBloomFilterWriter) { + boolean newKey = true; + if (lastDeleteFamilyCell != null) { + newKey = !kvComparator.matchingRows(cell, lastDeleteFamilyCell); + } + if (newKey) { + this.deleteFamilyBloomFilterWriter.add(cell.getRowArray(), + cell.getRowOffset(), cell.getRowLength()); + this.lastDeleteFamilyCell = cell; + } + } + } + + public void append(final Cell cell) throws IOException { + appendGeneralBloomfilter(cell); + appendDeleteFamilyBloomFilter(cell); + writer.append(cell); + trackTimestamps(cell); + } + + public Path getPath() { + return this.writer.getPath(); + } + + boolean hasGeneralBloom() { + return this.generalBloomFilterWriter != null; + } + + /** + * For unit testing only. + * + * @return the Bloom filter used by this writer. 
+ */ + BloomFilterWriter getGeneralBloomWriter() { + return generalBloomFilterWriter; + } + + private boolean closeBloomFilter(BloomFilterWriter bfw) throws IOException { + boolean haveBloom = (bfw != null && bfw.getKeyCount() > 0); + if (haveBloom) { + bfw.compactBloom(); + } + return haveBloom; + } + + private boolean closeGeneralBloomFilter() throws IOException { + boolean hasGeneralBloom = closeBloomFilter(generalBloomFilterWriter); + + // add the general Bloom filter writer and append file info + if (hasGeneralBloom) { + writer.addGeneralBloomFilter(generalBloomFilterWriter); + writer.appendFileInfo(StoreFile.BLOOM_FILTER_TYPE_KEY, + Bytes.toBytes(bloomType.toString())); + if (lastBloomKey != null) { + writer.appendFileInfo(StoreFile.LAST_BLOOM_KEY, Arrays.copyOfRange( + lastBloomKey, lastBloomKeyOffset, lastBloomKeyOffset + + lastBloomKeyLen)); + } + } + return hasGeneralBloom; + } + + private boolean closeDeleteFamilyBloomFilter() throws IOException { + boolean hasDeleteFamilyBloom = closeBloomFilter(deleteFamilyBloomFilterWriter); + + // add the delete family Bloom filter writer + if (hasDeleteFamilyBloom) { + writer.addDeleteFamilyBloomFilter(deleteFamilyBloomFilterWriter); + } + + // append file info about the number of delete family kvs + // even if there is no delete family Bloom. + writer.appendFileInfo(StoreFile.DELETE_FAMILY_COUNT, + Bytes.toBytes(this.deleteFamilyCnt)); + + return hasDeleteFamilyBloom; + } + + public void close() throws IOException { + boolean hasGeneralBloom = this.closeGeneralBloomFilter(); + boolean hasDeleteFamilyBloom = this.closeDeleteFamilyBloomFilter(); + + writer.close(); + + // Log final Bloom filter statistics. This needs to be done after close() + // because compound Bloom filters might be finalized as part of closing. + if (StoreFileWriter.LOG.isTraceEnabled()) { + StoreFileWriter.LOG.trace((hasGeneralBloom ? "" : "NO ") + "General Bloom and " + + (hasDeleteFamilyBloom ? "" : "NO ") + "DeleteFamily" + " was added to HFile " + + getPath()); + } + + } + + public void appendFileInfo(byte[] key, byte[] value) throws IOException { + writer.appendFileInfo(key, value); + } + + /** For use in testing, e.g. {@link org.apache.hadoop.hbase.regionserver.CreateRandomStoreFile} + */ + HFile.Writer getHFileWriter() { + return writer; + } + + @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="ICAST_INTEGER_MULTIPLY_CAST_TO_LONG", + justification="Will not overflow") + public static class Builder { + private final Configuration conf; + private final CacheConfig cacheConf; + private final FileSystem fs; + + private KeyValue.KVComparator comparator = KeyValue.COMPARATOR; + private BloomType bloomType = BloomType.NONE; + private long maxKeyCount = 0; + private Path dir; + private Path filePath; + private InetSocketAddress[] favoredNodes; + private HFileContext fileContext; + + public Builder(Configuration conf, CacheConfig cacheConf, + FileSystem fs) { + this.conf = conf; + this.cacheConf = cacheConf; + this.fs = fs; + } + + /** + * Use either this method or {@link #withFilePath}, but not both. + * @param dir Path to column family directory. The directory is created if + * does not exist. The file is given a unique name within this + * directory. + * @return this (for chained invocation) + */ + public Builder withOutputDir(Path dir) { + Preconditions.checkNotNull(dir); + this.dir = dir; + return this; + } + + /** + * Use either this method or {@link #withOutputDir}, but not both. 
+ * @param filePath the StoreFile path to write + * @return this (for chained invocation) + */ + public Builder withFilePath(Path filePath) { + Preconditions.checkNotNull(filePath); + this.filePath = filePath; + return this; + } + + /** + * @param favoredNodes an array of favored nodes or possibly null + * @return this (for chained invocation) + */ + public Builder withFavoredNodes(InetSocketAddress[] favoredNodes) { + this.favoredNodes = favoredNodes; + return this; + } + + public Builder withComparator(KeyValue.KVComparator comparator) { + Preconditions.checkNotNull(comparator); + this.comparator = comparator; + return this; + } + + public Builder withBloomType(BloomType bloomType) { + Preconditions.checkNotNull(bloomType); + this.bloomType = bloomType; + return this; + } + + /** + * @param maxKeyCount estimated maximum number of keys we expect to add + * @return this (for chained invocation) + */ + public Builder withMaxKeyCount(long maxKeyCount) { + this.maxKeyCount = maxKeyCount; + return this; + } + + public Builder withFileContext(HFileContext fileContext) { + this.fileContext = fileContext; + return this; + } + + public Builder withShouldDropCacheBehind(boolean shouldDropCacheBehind/*NOT USED!!*/) { + // TODO: HAS NO EFFECT!!! FIX!! + return this; + } + /** + * Create a store file writer. Client is responsible for closing file when + * done. If metadata, add BEFORE closing using + * {@link StoreFileWriter#appendMetadata}. + */ + public StoreFileWriter build() throws IOException { + if ((dir == null ? 0 : 1) + (filePath == null ? 0 : 1) != 1) { + throw new IllegalArgumentException("Either specify parent directory " + + "or file path"); + } + + if (dir == null) { + dir = filePath.getParent(); + } + + if (!fs.exists(dir)) { + fs.mkdirs(dir); + } + + if (filePath == null) { + filePath = StoreFile.getUniqueFile(fs, dir); + if (!BloomFilterFactory.isGeneralBloomEnabled(conf)) { + bloomType = BloomType.NONE; + } + } + + if (comparator == null) { + comparator = KeyValue.COMPARATOR; + } + return new StoreFileWriter(fs, filePath, + conf, cacheConf, comparator, bloomType, maxKeyCount, favoredNodes, fileContext); + } + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFlusher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFlusher.java index bcc0a90..7c2dff1 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFlusher.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFlusher.java @@ -56,7 +56,7 @@ abstract class StoreFlusher { public abstract List flushSnapshot(MemStoreSnapshot snapshot, long cacheFlushSeqNum, MonitoredTask status) throws IOException; - protected void finalizeWriter(StoreFile.Writer writer, long cacheFlushSeqNum, + protected void finalizeWriter(StoreFileWriter writer, long cacheFlushSeqNum, MonitoredTask status) throws IOException { // Write out the log sequence number that corresponds to this output // hfile. Also write current time in metadata as minFlushTime. 
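As a quick orientation to the renamed classes, here is a minimal round-trip sketch of the intended call pattern after this refactor, using only APIs that appear elsewhere in this patch (StoreFileWriter.Builder, StoreFileWriter append/appendMetadata/close, StoreFileReader and its scanner). The class and variable names below (StoreFileRoundTripSketch, outputDir, the row/family/qualifier values) are illustrative assumptions, not part of the patch, and the FileSystem/Configuration are expected to be supplied by the caller.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.io.hfile.HFileContext;
import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.regionserver.StoreFileReader;
import org.apache.hadoop.hbase.regionserver.StoreFileScanner;
import org.apache.hadoop.hbase.regionserver.StoreFileWriter;
import org.apache.hadoop.hbase.util.Bytes;

public class StoreFileRoundTripSketch {
  static void roundTrip(Configuration conf, FileSystem fs, Path outputDir) throws IOException {
    CacheConfig cacheConf = new CacheConfig(conf);
    HFileContext meta = new HFileContextBuilder().withBlockSize(8 * 1024).build();

    // Write a small store file. Exactly one of withOutputDir()/withFilePath() may be set.
    StoreFileWriter writer = new StoreFileWriter.Builder(conf, cacheConf, fs)
        .withOutputDir(outputDir)
        .withComparator(KeyValue.COMPARATOR)
        .withBloomType(BloomType.ROW)
        .withMaxKeyCount(1000)
        .withFileContext(meta)
        .build();
    Path path = writer.getPath();
    try {
      writer.append(new KeyValue(Bytes.toBytes("r1"), Bytes.toBytes("f"),
          Bytes.toBytes("q"), Bytes.toBytes(1L)));
      // Metadata has to be appended before close(), as the appendMetadata() javadoc notes.
      writer.appendMetadata(1 /* maxSequenceId */, false /* majorCompaction */);
    } finally {
      writer.close();
    }

    // Re-open the same file through the extracted reader class.
    StoreFileReader reader = new StoreFileReader(fs, path, cacheConf, conf);
    reader.loadFileInfo();     // reads BLOOM_FILTER_TYPE_KEY, LAST_BLOOM_KEY, DELETE_FAMILY_COUNT
    reader.loadBloomfilter();  // loads general and delete-family Bloom metadata, if present
    long entries = reader.getEntries();
    // The two-argument overload is the test-only form (readpoint 0); see its javadoc above.
    StoreFileScanner scanner = reader.getStoreFileScanner(true /* cacheBlocks */, true /* pread */);
    scanner.close();
    reader.close(true /* evictOnClose */);
  }
}

Note that Builder withShouldDropCacheBehind is still a no-op in this patch (see the TODO in the method body), so drop-behind behavior has to be handled by the caller for now.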
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreUtils.java index 3d4e990..196c889 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreUtils.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreUtils.java @@ -74,7 +74,7 @@ public class StoreUtils { long maxSize = -1L; StoreFile largestSf = null; for (StoreFile sf : candidates) { - StoreFile.Reader r = sf.getReader(); + StoreFileReader r = sf.getReader(); if (r == null) continue; long size = r.length(); if (size > maxSize) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeMultiFileWriter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeMultiFileWriter.java index 5b4c4db..d310c19 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeMultiFileWriter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeMultiFileWriter.java @@ -42,7 +42,7 @@ public abstract class StripeMultiFileWriter implements Compactor.CellSink { protected WriterFactory writerFactory; protected KVComparator comparator; - protected List existingWriters; + protected List existingWriters; protected List boundaries; /** Source scanner that is tracking KV count; may be null if source is not StoreScanner */ protected StoreScanner sourceScanner; @@ -51,7 +51,7 @@ public abstract class StripeMultiFileWriter implements Compactor.CellSink { private boolean doWriteStripeMetadata = true; public interface WriterFactory { - public StoreFile.Writer createWriter() throws IOException; + public StoreFileWriter createWriter() throws IOException; } /** @@ -79,7 +79,7 @@ public abstract class StripeMultiFileWriter implements Compactor.CellSink { + "riting out metadata for " + this.existingWriters.size() + " writers"); List paths = new ArrayList(); for (int i = 0; i < this.existingWriters.size(); ++i) { - StoreFile.Writer writer = this.existingWriters.get(i); + StoreFileWriter writer = this.existingWriters.get(i); if (writer == null) continue; // writer was skipped due to 0 KVs if (doWriteStripeMetadata) { writer.appendFileInfo(StripeStoreFileManager.STRIPE_START_KEY, this.boundaries.get(i)); @@ -96,7 +96,7 @@ public abstract class StripeMultiFileWriter implements Compactor.CellSink { public List abortWriters() { assert this.existingWriters != null; List paths = new ArrayList(); - for (StoreFile.Writer writer : this.existingWriters) { + for (StoreFileWriter writer : this.existingWriters) { try { paths.add(writer.getPath()); writer.close(); @@ -156,7 +156,7 @@ public abstract class StripeMultiFileWriter implements Compactor.CellSink { * will end up in one file, and separate from all other such pairs. */ public static class BoundaryMultiWriter extends StripeMultiFileWriter { - private StoreFile.Writer currentWriter; + private StoreFileWriter currentWriter; private byte[] currentWriterEndKey; private Cell lastCell; @@ -175,7 +175,7 @@ public abstract class StripeMultiFileWriter implements Compactor.CellSink { byte[] majorRangeFrom, byte[] majorRangeTo) throws IOException { super(); this.boundaries = targetBoundaries; - this.existingWriters = new ArrayList(this.boundaries.size() - 1); + this.existingWriters = new ArrayList(this.boundaries.size() - 1); // "major" range (range for which all files are included) boundaries, if any, // must match some target boundaries, let's find them. 
assert (majorRangeFrom == null) == (majorRangeTo == null); @@ -296,7 +296,7 @@ public abstract class StripeMultiFileWriter implements Compactor.CellSink { private byte[] right; private Cell lastCell; - private StoreFile.Writer currentWriter; + private StoreFileWriter currentWriter; protected byte[] lastRowInCurrentWriter = null; private long cellsInCurrentWriter = 0; private long cellsSeen = 0; @@ -315,7 +315,7 @@ public abstract class StripeMultiFileWriter implements Compactor.CellSink { this.left = left; this.right = right; int preallocate = Math.min(this.targetCount, 64); - this.existingWriters = new ArrayList(preallocate); + this.existingWriters = new ArrayList(preallocate); this.boundaries = new ArrayList(preallocate + 1); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreFlusher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreFlusher.java index 37e7402..89c9a7d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreFlusher.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreFlusher.java @@ -30,8 +30,6 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.monitoring.MonitoredTask; -import org.apache.hadoop.hbase.regionserver.StoreFile.Writer; -import org.apache.hadoop.hbase.regionserver.StripeMultiFileWriter; import org.apache.hadoop.hbase.regionserver.compactions.StripeCompactionPolicy; import com.google.common.annotations.VisibleForTesting; @@ -107,8 +105,8 @@ public class StripeStoreFlusher extends StoreFlusher { final TimeRangeTracker tracker, final long kvCount) { return new StripeMultiFileWriter.WriterFactory() { @Override - public Writer createWriter() throws IOException { - StoreFile.Writer writer = store.createWriterInTmp( + public StoreFileWriter createWriter() throws IOException { + StoreFileWriter writer = store.createWriterInTmp( kvCount, store.getFamily().getCompression(), /* isCompaction = */ false, /* includeMVCCReadpoint = */ true, diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionRequest.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionRequest.java index 5d44224..e8a5d16 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionRequest.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionRequest.java @@ -27,7 +27,7 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.classification.InterfaceStability; import org.apache.hadoop.hbase.regionserver.Store; import org.apache.hadoop.hbase.regionserver.StoreFile; -import org.apache.hadoop.hbase.regionserver.StoreFile.Reader; +import org.apache.hadoop.hbase.regionserver.StoreFileReader; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.util.StringUtils; @@ -230,7 +230,7 @@ public class CompactionRequest implements Comparable { private void recalculateSize() { long sz = 0; for (StoreFile sf : this.filesToCompact) { - Reader r = sf.getReader(); + StoreFileReader r = sf.getReader(); sz += r == null ? 
0 : r.length(); } this.totalSize = sz; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/Compactor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/Compactor.java index 3a2fa7d..a7d5a34 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/Compactor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/Compactor.java @@ -44,6 +44,7 @@ import org.apache.hadoop.hbase.regionserver.ScanType; import org.apache.hadoop.hbase.regionserver.ScannerContext; import org.apache.hadoop.hbase.regionserver.Store; import org.apache.hadoop.hbase.regionserver.StoreFile; +import org.apache.hadoop.hbase.regionserver.StoreFileReader; import org.apache.hadoop.hbase.regionserver.StoreFileScanner; import org.apache.hadoop.hbase.regionserver.StoreScanner; import org.apache.hadoop.hbase.security.User; @@ -126,7 +127,7 @@ public abstract class Compactor { } long seqNum = file.getMaxSequenceId(); fd.maxSeqId = Math.max(fd.maxSeqId, seqNum); - StoreFile.Reader r = file.getReader(); + StoreFileReader r = file.getReader(); if (r == null) { LOG.warn("Null reader for " + file.getPath()); continue; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/DefaultCompactor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/DefaultCompactor.java index 311540b..7c35678 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/DefaultCompactor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/DefaultCompactor.java @@ -34,6 +34,7 @@ import org.apache.hadoop.hbase.regionserver.ScanType; import org.apache.hadoop.hbase.regionserver.Store; import org.apache.hadoop.hbase.regionserver.StoreFile; import org.apache.hadoop.hbase.regionserver.StoreFileScanner; +import org.apache.hadoop.hbase.regionserver.StoreFileWriter; import org.apache.hadoop.hbase.security.User; /** @@ -76,7 +77,7 @@ public class DefaultCompactor extends Compactor { store.throttleCompaction(request.getSize())); } - StoreFile.Writer writer = null; + StoreFileWriter writer = null; List newFiles = new ArrayList(); boolean cleanSeqId = false; IOException e = null; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/StripeCompactor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/StripeCompactor.java index ac88253..f7061f3 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/StripeCompactor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/StripeCompactor.java @@ -36,7 +36,7 @@ import org.apache.hadoop.hbase.regionserver.StoreFile; import org.apache.hadoop.hbase.regionserver.StoreFileScanner; import org.apache.hadoop.hbase.regionserver.StoreScanner; import org.apache.hadoop.hbase.regionserver.StripeMultiFileWriter; -import org.apache.hadoop.hbase.regionserver.StoreFile.Writer; +import org.apache.hadoop.hbase.regionserver.StoreFileWriter; import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.util.Bytes; @@ -138,7 +138,7 @@ public class StripeCompactor extends Compactor { final Compression.Algorithm compression = store.getFamily().getCompactionCompression(); StripeMultiFileWriter.WriterFactory factory = new StripeMultiFileWriter.WriterFactory() { @Override - public Writer createWriter() throws IOException { + public StoreFileWriter 
createWriter() throws IOException { return store.createWriterInTmp( fd.maxKeyCount, compression, true, needMvcc, fd.maxTagsLength > 0, store.throttleCompaction(request.getSize())); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/CompoundBloomFilterWriter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/CompoundBloomFilterWriter.java index 25e98a2..58e247f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/CompoundBloomFilterWriter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/CompoundBloomFilterWriter.java @@ -32,6 +32,7 @@ import org.apache.hadoop.hbase.KeyValue.KVComparator; import org.apache.hadoop.hbase.io.hfile.BlockType; import org.apache.hadoop.hbase.io.hfile.HFileBlockIndex; import org.apache.hadoop.hbase.io.hfile.InlineBlockWriter; +import org.apache.hadoop.hbase.regionserver.StoreFileWriter; import org.apache.hadoop.io.Writable; /** @@ -154,7 +155,7 @@ public class CompoundBloomFilterWriter extends CompoundBloomFilterBase * Adds a Bloom filter key. This key must be greater than the previous key, * as defined by the comparator this compound Bloom filter is configured * with. For efficiency, key monotonicity is not checked here. See - * {@link org.apache.hadoop.hbase.regionserver.StoreFile.Writer#append( + * {@link StoreFileWriter#append( * org.apache.hadoop.hbase.Cell)} for the details of deduplication. */ @Override diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/SimpleRegionObserver.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/SimpleRegionObserver.java index dbcf2e9..3ad2af8 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/SimpleRegionObserver.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/SimpleRegionObserver.java @@ -64,7 +64,7 @@ import org.apache.hadoop.hbase.regionserver.RegionScanner; import org.apache.hadoop.hbase.regionserver.ScanType; import org.apache.hadoop.hbase.regionserver.Store; import org.apache.hadoop.hbase.regionserver.StoreFile; -import org.apache.hadoop.hbase.regionserver.StoreFile.Reader; +import org.apache.hadoop.hbase.regionserver.StoreFileReader; import org.apache.hadoop.hbase.regionserver.wal.HLogKey; import org.apache.hadoop.hbase.wal.WALKey; import org.apache.hadoop.hbase.regionserver.wal.WALEdit; @@ -703,17 +703,17 @@ public class SimpleRegionObserver extends BaseRegionObserver { } @Override - public Reader preStoreFileReaderOpen(ObserverContext ctx, + public StoreFileReader preStoreFileReaderOpen(ObserverContext ctx, FileSystem fs, Path p, FSDataInputStreamWrapper in, long size, CacheConfig cacheConf, - Reference r, Reader reader) throws IOException { + Reference r, StoreFileReader reader) throws IOException { ctPreStoreFileReaderOpen.incrementAndGet(); return null; } @Override - public Reader postStoreFileReaderOpen(ObserverContext ctx, + public StoreFileReader postStoreFileReaderOpen(ObserverContext ctx, FileSystem fs, Path p, FSDataInputStreamWrapper in, long size, CacheConfig cacheConf, - Reference r, Reader reader) throws IOException { + Reference r, StoreFileReader reader) throws IOException { ctPostStoreFileReaderOpen.incrementAndGet(); return reader; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.java index 91a00e4..239d8c6 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.java +++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.java @@ -40,6 +40,7 @@ import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.KeyValue; +import org.apache.hadoop.hbase.regionserver.StoreFileWriter; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.Tag; import org.apache.hadoop.hbase.client.Durability; @@ -51,7 +52,6 @@ import org.apache.hadoop.hbase.io.hfile.bucket.BucketCache; import org.apache.hadoop.hbase.regionserver.BloomType; import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.regionserver.Region; -import org.apache.hadoop.hbase.regionserver.StoreFile; import org.apache.hadoop.hbase.util.BloomFilterFactory; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.ChecksumType; @@ -354,7 +354,7 @@ public class TestCacheOnWrite { .withBlockSize(DATA_BLOCK_SIZE) .withDataBlockEncoding(NoOpDataBlockEncoder.INSTANCE.getDataBlockEncoding()) .withIncludesTags(useTags).build(); - StoreFile.Writer sfw = new StoreFile.WriterBuilder(conf, cacheConf, fs) + StoreFileWriter sfw = new StoreFileWriter.Builder(conf, cacheConf, fs) .withOutputDir(storeFileParentDir).withComparator(KeyValue.COMPARATOR) .withFileContext(meta) .withBloomType(BLOOM_TYPE).withMaxKeyCount(NUM_KV).build(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestPrefetch.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestPrefetch.java index 1bcc4a5..5e2fbf1 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestPrefetch.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestPrefetch.java @@ -27,9 +27,9 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.KeyValue; +import org.apache.hadoop.hbase.regionserver.StoreFileWriter; import org.apache.hadoop.hbase.testclassification.SmallTests; import org.apache.hadoop.hbase.fs.HFileSystem; -import org.apache.hadoop.hbase.regionserver.StoreFile; import org.junit.Before; import org.junit.Test; @@ -103,7 +103,7 @@ public class TestPrefetch { HFileContext meta = new HFileContextBuilder() .withBlockSize(DATA_BLOCK_SIZE) .build(); - StoreFile.Writer sfw = new StoreFile.WriterBuilder(conf, cacheConf, fs) + StoreFileWriter sfw = new StoreFileWriter.Builder(conf, cacheConf, fs) .withOutputDir(storeFileParentDir) .withComparator(KeyValue.COMPARATOR) .withFileContext(meta) diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestSeekBeforeWithInlineBlocks.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestSeekBeforeWithInlineBlocks.java index ac92f4f..19578a6 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestSeekBeforeWithInlineBlocks.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestSeekBeforeWithInlineBlocks.java @@ -37,7 +37,7 @@ import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.fs.HFileSystem; import org.apache.hadoop.hbase.regionserver.BloomType; -import org.apache.hadoop.hbase.regionserver.StoreFile; +import org.apache.hadoop.hbase.regionserver.StoreFileWriter; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.util.BloomFilterFactory; import 
org.apache.hadoop.hbase.util.Bytes; @@ -114,8 +114,8 @@ public class TestSeekBeforeWithInlineBlocks { .withBlockSize(DATA_BLOCK_SIZE) .build(); - StoreFile.Writer storeFileWriter = - new StoreFile.WriterBuilder(conf, cacheConf, fs) + StoreFileWriter storeFileWriter = + new StoreFileWriter.Builder(conf, cacheConf, fs) .withFilePath(hfilePath) .withFileContext(meta) .withBloomType(bloomType) diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/CreateRandomStoreFile.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/CreateRandomStoreFile.java index c3f7b82..a311501 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/CreateRandomStoreFile.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/CreateRandomStoreFile.java @@ -185,7 +185,7 @@ public class CreateRandomStoreFile { HFileContext meta = new HFileContextBuilder().withCompression(compr) .withBlockSize(blockSize).build(); - StoreFile.Writer sfw = new StoreFile.WriterBuilder(conf, + StoreFileWriter sfw = new StoreFileWriter.Builder(conf, new CacheConfig(conf), fs) .withOutputDir(outputDir) .withBloomType(bloomType) diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/DataBlockEncodingTool.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/DataBlockEncodingTool.java index 1927334..0ecdda6 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/DataBlockEncodingTool.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/DataBlockEncodingTool.java @@ -595,7 +595,7 @@ public class DataBlockEncodingTool { StoreFile hsf = new StoreFile(fs, path, conf, cacheConf, BloomType.NONE); - StoreFile.Reader reader = hsf.createReader(); + StoreFileReader reader = hsf.createReader(); reader.loadFileInfo(); KeyValueScanner scanner = reader.getStoreFileScanner(true, true); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/EncodedSeekPerformanceTest.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/EncodedSeekPerformanceTest.java index 6c801bf..45444cc 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/EncodedSeekPerformanceTest.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/EncodedSeekPerformanceTest.java @@ -62,7 +62,7 @@ public class EncodedSeekPerformanceTest { StoreFile storeFile = new StoreFile(testingUtility.getTestFileSystem(), path, configuration, cacheConf, BloomType.NONE); - StoreFile.Reader reader = storeFile.createReader(); + StoreFileReader reader = storeFile.createReader(); StoreFileScanner scanner = reader.getStoreFileScanner(true, false); Cell current; @@ -94,7 +94,7 @@ public class EncodedSeekPerformanceTest { long totalSize = 0; - StoreFile.Reader reader = storeFile.createReader(); + StoreFileReader reader = storeFile.createReader(); StoreFileScanner scanner = reader.getStoreFileScanner(true, false); long startReadingTime = System.nanoTime(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/MockStoreFile.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/MockStoreFile.java index 3a12674..5eee03b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/MockStoreFile.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/MockStoreFile.java @@ -96,11 +96,11 @@ public class MockStoreFile extends StoreFile { } @Override - public StoreFile.Reader getReader() { + public StoreFileReader 
getReader() { final long len = this.length; final TimeRangeTracker timeRange = this.timeRangeTracker; final long entries = this.entryCount; - return new StoreFile.Reader() { + return new StoreFileReader() { @Override public long length() { return len; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCacheOnWriteInSchema.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCacheOnWriteInSchema.java index 9a227ab..fd0301a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCacheOnWriteInSchema.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCacheOnWriteInSchema.java @@ -207,7 +207,7 @@ public class TestCacheOnWriteInSchema { @Test public void testCacheOnWriteInSchema() throws IOException { // Write some random data into the store - StoreFile.Writer writer = store.createWriterInTmp(Integer.MAX_VALUE, + StoreFileWriter writer = store.createWriterInTmp(Integer.MAX_VALUE, HFile.DEFAULT_COMPRESSION_ALGORITHM, false, true, false); writeStoreFile(writer); writer.close(); @@ -273,7 +273,7 @@ public class TestCacheOnWriteInSchema { } } - private void writeStoreFile(StoreFile.Writer writer) throws IOException { + private void writeStoreFile(StoreFileWriter writer) throws IOException { final int rowLen = 32; for (int i = 0; i < NUM_KV; ++i) { byte[] k = TestHFileWriterV2.randomOrderedKey(rand, i); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java index 6e52d07..b82fb9e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java @@ -573,7 +573,7 @@ public class TestCompaction { private static StoreFile createFile() throws Exception { StoreFile sf = mock(StoreFile.class); when(sf.getPath()).thenReturn(new Path("file")); - StoreFile.Reader r = mock(StoreFile.Reader.class); + StoreFileReader r = mock(StoreFileReader.class); when(r.length()).thenReturn(10L); when(sf.getReader()).thenReturn(r); return sf; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompoundBloomFilter.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompoundBloomFilter.java index 0129fad..ae94a68 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompoundBloomFilter.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompoundBloomFilter.java @@ -198,7 +198,7 @@ public class TestCompoundBloomFilter { private void readStoreFile(int t, BloomType bt, List kvs, Path sfPath) throws IOException { StoreFile sf = new StoreFile(fs, sfPath, conf, cacheConf, bt); - StoreFile.Reader r = sf.createReader(); + StoreFileReader r = sf.createReader(); final boolean pread = true; // does not really matter StoreFileScanner scanner = r.getStoreFileScanner(true, pread); @@ -298,7 +298,7 @@ public class TestCompoundBloomFilter { conf.setBoolean(CacheConfig.CACHE_BLOCKS_ON_WRITE_KEY, true); cacheConf = new CacheConfig(conf); HFileContext meta = new HFileContextBuilder().withBlockSize(BLOCK_SIZES[t]).build(); - StoreFile.Writer w = new StoreFile.WriterBuilder(conf, cacheConf, fs) + StoreFileWriter w = new StoreFileWriter.Builder(conf, cacheConf, fs) .withOutputDir(TEST_UTIL.getDataTestDir()) .withBloomType(bt) .withFileContext(meta) diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestFSErrorsExposed.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestFSErrorsExposed.java index cc509a4..de56ad4 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestFSErrorsExposed.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestFSErrorsExposed.java @@ -81,7 +81,7 @@ public class TestFSErrorsExposed { FileSystem fs = new HFileSystem(faultyfs); CacheConfig cacheConf = new CacheConfig(util.getConfiguration()); HFileContext meta = new HFileContextBuilder().withBlockSize(2 * 1024).build(); - StoreFile.Writer writer = new StoreFile.WriterBuilder( + StoreFileWriter writer = new StoreFileWriter.Builder( util.getConfiguration(), cacheConf, hfs) .withOutputDir(hfilePath) .withFileContext(meta) @@ -92,7 +92,7 @@ public class TestFSErrorsExposed { StoreFile sf = new StoreFile(fs, writer.getPath(), util.getConfiguration(), cacheConf, BloomType.NONE); - StoreFile.Reader reader = sf.createReader(); + StoreFileReader reader = sf.createReader(); HFileScanner scanner = reader.getScanner(false, true); FaultyInputStream inStream = faultyfs.inStreams.get(0).get(); @@ -131,7 +131,7 @@ public class TestFSErrorsExposed { HFileSystem fs = new HFileSystem(faultyfs); CacheConfig cacheConf = new CacheConfig(util.getConfiguration()); HFileContext meta = new HFileContextBuilder().withBlockSize(2 * 1024).build(); - StoreFile.Writer writer = new StoreFile.WriterBuilder( + StoreFileWriter writer = new StoreFileWriter.Builder( util.getConfiguration(), cacheConf, hfs) .withOutputDir(hfilePath) .withFileContext(meta) diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java index 4d31374..9b7995b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java @@ -4204,7 +4204,7 @@ public class TestHRegion { HStore store = (HStore) region.getStore(fam1); Collection storeFiles = store.getStorefiles(); for (StoreFile storefile : storeFiles) { - StoreFile.Reader reader = storefile.getReader(); + StoreFileReader reader = storefile.getReader(); reader.loadFileInfo(); reader.loadBloomfilter(); assertEquals(num_unique_rows * duplicate_multiplier, reader.getEntries()); @@ -4216,7 +4216,7 @@ public class TestHRegion { // after compaction storeFiles = store.getStorefiles(); for (StoreFile storefile : storeFiles) { - StoreFile.Reader reader = storefile.getReader(); + StoreFileReader reader = storefile.getReader(); reader.loadFileInfo(); reader.loadBloomfilter(); assertEquals(num_unique_rows * duplicate_multiplier * num_storefiles, reader.getEntries()); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScanWithBloomError.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScanWithBloomError.java index 7682024..c70d6b4 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScanWithBloomError.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScanWithBloomError.java @@ -146,7 +146,7 @@ public class TestScanWithBloomError { } }); - StoreFile.Reader lastStoreFileReader = null; + StoreFileReader lastStoreFileReader = null; for (StoreFileScanner sfScanner : scanners) lastStoreFileReader = sfScanner.getReader(); diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStore.java index 081a7ba..b9b2b00 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStore.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStore.java @@ -265,7 +265,7 @@ public class TestStore { init(name.getMethodName(), conf, hcd); // Test createWriterInTmp() - StoreFile.Writer writer = store.createWriterInTmp(4, hcd.getCompression(), false, true, false); + StoreFileWriter writer = store.createWriterInTmp(4, hcd.getCompression(), false, true, false); Path path = writer.getPath(); writer.append(new KeyValue(row, family, qf1, Bytes.toBytes(1))); writer.append(new KeyValue(row, family, qf2, Bytes.toBytes(2))); @@ -432,7 +432,7 @@ public class TestStore { Configuration c = HBaseConfiguration.create(); FileSystem fs = FileSystem.get(c); HFileContext meta = new HFileContextBuilder().withBlockSize(BLOCKSIZE_SMALL).build(); - StoreFile.Writer w = new StoreFile.WriterBuilder(c, new CacheConfig(c), + StoreFileWriter w = new StoreFileWriter.Builder(c, new CacheConfig(c), fs) .withOutputDir(storedir) .withFileContext(meta) @@ -1007,7 +1007,7 @@ public class TestStore { Configuration c = TEST_UTIL.getConfiguration(); FileSystem fs = FileSystem.get(c); HFileContext fileContext = new HFileContextBuilder().withBlockSize(BLOCKSIZE_SMALL).build(); - StoreFile.Writer w = new StoreFile.WriterBuilder(c, new CacheConfig(c), + StoreFileWriter w = new StoreFileWriter.Builder(c, new CacheConfig(c), fs) .withOutputDir(storedir) .withFileContext(fileContext) diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFile.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFile.java index da4593b..7fcfa98 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFile.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFile.java @@ -66,7 +66,6 @@ import com.google.common.collect.Iterables; import com.google.common.collect.Lists; import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.spy; import static org.mockito.Mockito.when; /** @@ -104,7 +103,7 @@ public class TestStoreFile extends HBaseTestCase { conf, fs, new Path(this.testDir, hri.getTable().getNameAsString()), hri); HFileContext meta = new HFileContextBuilder().withBlockSize(2*1024).build(); - StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cacheConf, this.fs) + StoreFileWriter writer = new StoreFileWriter.Builder(conf, cacheConf, this.fs) .withFilePath(regionFs.createTempName()) .withFileContext(meta) .build(); @@ -116,7 +115,7 @@ public class TestStoreFile extends HBaseTestCase { checkHalfHFile(regionFs, sf); } - private void writeStoreFile(final StoreFile.Writer writer) throws IOException { + private void writeStoreFile(final StoreFileWriter writer) throws IOException { writeStoreFile(writer, Bytes.toBytes(getName()), Bytes.toBytes(getName())); } @@ -129,7 +128,7 @@ public class TestStoreFile extends HBaseTestCase { * @param writer * @throws IOException */ - public static void writeStoreFile(final StoreFile.Writer writer, byte[] fam, byte[] qualifier) + public static void writeStoreFile(final StoreFileWriter writer, byte[] fam, byte[] qualifier) throws IOException { long now = System.currentTimeMillis(); try { @@ -156,7 +155,7 @@ public class TestStoreFile extends HBaseTestCase { HFileContext meta = new 
HFileContextBuilder().withBlockSize(8 * 1024).build(); // Make a store file and write data to it. - StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cacheConf, this.fs) + StoreFileWriter writer = new StoreFileWriter.Builder(conf, cacheConf, this.fs) .withFilePath(regionFs.createTempName()) .withFileContext(meta) .build(); @@ -165,7 +164,7 @@ public class TestStoreFile extends HBaseTestCase { Path hsfPath = regionFs.commitStoreFile(TEST_FAMILY, writer.getPath()); StoreFile hsf = new StoreFile(this.fs, hsfPath, conf, cacheConf, BloomType.NONE); - StoreFile.Reader reader = hsf.createReader(); + StoreFileReader reader = hsf.createReader(); // Split on a row, not in middle of row. Midkey returned by reader // may be in middle of row. Create new one with empty column and // timestamp. @@ -205,7 +204,7 @@ public class TestStoreFile extends HBaseTestCase { HFileContext meta = new HFileContextBuilder().withBlockSize(8 * 1024).build(); // Make a store file and write data to it. - StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cacheConf, this.fs) + StoreFileWriter writer = new StoreFileWriter.Builder(conf, cacheConf, this.fs) .withFilePath(regionFs.createTempName()) .withFileContext(meta) .build(); @@ -234,7 +233,7 @@ public class TestStoreFile extends HBaseTestCase { } public void testEmptyStoreFileRestrictKeyRanges() throws Exception { - StoreFile.Reader reader = mock(StoreFile.Reader.class); + StoreFileReader reader = mock(StoreFileReader.class); Store store = mock(Store.class); HColumnDescriptor hcd = mock(HColumnDescriptor.class); byte[] cf = Bytes.toBytes("ty"); @@ -263,7 +262,7 @@ public class TestStoreFile extends HBaseTestCase { HFileContext meta = new HFileContextBuilder().withBlockSize(8 * 1024).build(); // Make a store file and write data to it. //// - StoreFile.Writer writer = new StoreFile.WriterBuilder(testConf, cacheConf, this.fs) + StoreFileWriter writer = new StoreFileWriter.Builder(testConf, cacheConf, this.fs) .withFilePath(regionFs.createTempName()) .withFileContext(meta) .build(); @@ -341,9 +340,9 @@ public class TestStoreFile extends HBaseTestCase { midRow, null); Path bottomPath = splitStoreFile(regionFs, bottomHri, TEST_FAMILY, f, midRow, false); // Make readers on top and bottom. 
- StoreFile.Reader top = new StoreFile( + StoreFileReader top = new StoreFile( this.fs, topPath, conf, cacheConf, BloomType.NONE).createReader(); - StoreFile.Reader bottom = new StoreFile( + StoreFileReader bottom = new StoreFile( this.fs, bottomPath, conf, cacheConf, BloomType.NONE).createReader(); ByteBuffer previous = null; LOG.info("Midkey: " + midKV.toString()); @@ -471,7 +470,7 @@ public class TestStoreFile extends HBaseTestCase { private static final String localFormatter = "%010d"; - private void bloomWriteRead(StoreFile.Writer writer, FileSystem fs) throws Exception { + private void bloomWriteRead(StoreFileWriter writer, FileSystem fs) throws Exception { float err = conf.getFloat(BloomFilterFactory.IO_STOREFILE_BLOOM_ERROR_RATE, 0); Path f = writer.getPath(); long now = System.currentTimeMillis(); @@ -483,7 +482,7 @@ public class TestStoreFile extends HBaseTestCase { } writer.close(); - StoreFile.Reader reader = new StoreFile.Reader(fs, f, cacheConf, conf); + StoreFileReader reader = new StoreFileReader(fs, f, cacheConf, conf); reader.loadFileInfo(); reader.loadBloomfilter(); StoreFileScanner scanner = reader.getStoreFileScanner(false, false); @@ -531,7 +530,7 @@ public class TestStoreFile extends HBaseTestCase { .withChecksumType(CKTYPE) .withBytesPerCheckSum(CKBYTES).build(); // Make a store file and write data to it. - StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cacheConf, this.fs) + StoreFileWriter writer = new StoreFileWriter.Builder(conf, cacheConf, this.fs) .withFilePath(f) .withBloomType(BloomType.ROW) .withMaxKeyCount(2000) @@ -554,7 +553,7 @@ public class TestStoreFile extends HBaseTestCase { .withChecksumType(CKTYPE) .withBytesPerCheckSum(CKBYTES).build(); // Make a store file and write data to it. - StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cacheConf, this.fs) + StoreFileWriter writer = new StoreFileWriter.Builder(conf, cacheConf, this.fs) .withFilePath(f) .withMaxKeyCount(2000) .withFileContext(meta) @@ -570,7 +569,7 @@ public class TestStoreFile extends HBaseTestCase { } writer.close(); - StoreFile.Reader reader = new StoreFile.Reader(fs, f, cacheConf, conf); + StoreFileReader reader = new StoreFileReader(fs, f, cacheConf, conf); reader.loadFileInfo(); reader.loadBloomfilter(); @@ -607,7 +606,7 @@ public class TestStoreFile extends HBaseTestCase { Path f = new Path(ROOT_DIR, getName()); HFileContext meta = new HFileContextBuilder().withBlockSize(8 * 1024).build(); // Make a store file and write data to it. - StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cacheConf, this.fs) + StoreFileWriter writer = new StoreFileWriter.Builder(conf, cacheConf, this.fs) .withFilePath(f) .withFileContext(meta) .build(); @@ -615,7 +614,7 @@ public class TestStoreFile extends HBaseTestCase { writeStoreFile(writer); writer.close(); - StoreFile.Reader reader = new StoreFile.Reader(fs, f, cacheConf, conf); + StoreFileReader reader = new StoreFileReader(fs, f, cacheConf, conf); // Now do reseek with empty KV to position to the beginning of the file @@ -652,7 +651,7 @@ public class TestStoreFile extends HBaseTestCase { .withChecksumType(CKTYPE) .withBytesPerCheckSum(CKBYTES).build(); // Make a store file and write data to it. 
- StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cacheConf, this.fs) + StoreFileWriter writer = new StoreFileWriter.Builder(conf, cacheConf, this.fs) .withFilePath(f) .withBloomType(bt[x]) .withMaxKeyCount(expKeys[x]) @@ -674,7 +673,7 @@ public class TestStoreFile extends HBaseTestCase { } writer.close(); - StoreFile.Reader reader = new StoreFile.Reader(fs, f, cacheConf, conf); + StoreFileReader reader = new StoreFileReader(fs, f, cacheConf, conf); reader.loadFileInfo(); reader.loadBloomfilter(); StoreFileScanner scanner = reader.getStoreFileScanner(false, false); @@ -749,7 +748,7 @@ public class TestStoreFile extends HBaseTestCase { long seqId, String path) { StoreFile mock = Mockito.mock(StoreFile.class); - StoreFile.Reader reader = Mockito.mock(StoreFile.Reader.class); + StoreFileReader reader = Mockito.mock(StoreFileReader.class); Mockito.doReturn(size).when(reader).length(); @@ -805,7 +804,7 @@ public class TestStoreFile extends HBaseTestCase { Path dir = new Path(storedir, "1234567890"); HFileContext meta = new HFileContextBuilder().withBlockSize(8 * 1024).build(); // Make a store file and write data to it. - StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cacheConf, this.fs) + StoreFileWriter writer = new StoreFileWriter.Builder(conf, cacheConf, this.fs) .withOutputDir(dir) .withFileContext(meta) .build(); @@ -825,7 +824,7 @@ public class TestStoreFile extends HBaseTestCase { HColumnDescriptor hcd = mock(HColumnDescriptor.class); when(hcd.getName()).thenReturn(family); when(store.getFamily()).thenReturn(hcd); - StoreFile.Reader reader = hsf.createReader(); + StoreFileReader reader = hsf.createReader(); StoreFileScanner scanner = reader.getStoreFileScanner(false, false); TreeSet columns = new TreeSet(Bytes.BYTES_COMPARATOR); columns.add(qualifier); @@ -875,13 +874,13 @@ public class TestStoreFile extends HBaseTestCase { conf.setBoolean(CacheConfig.CACHE_BLOCKS_ON_WRITE_KEY, false); CacheConfig cacheConf = new CacheConfig(conf); Path pathCowOff = new Path(baseDir, "123456789"); - StoreFile.Writer writer = writeStoreFile(conf, cacheConf, pathCowOff, 3); + StoreFileWriter writer = writeStoreFile(conf, cacheConf, pathCowOff, 3); StoreFile hsf = new StoreFile(this.fs, writer.getPath(), conf, cacheConf, BloomType.NONE); LOG.debug(hsf.getPath().toString()); // Read this file, we should see 3 misses - StoreFile.Reader reader = hsf.createReader(); + StoreFileReader reader = hsf.createReader(); reader.loadFileInfo(); StoreFileScanner scanner = reader.getStoreFileScanner(true, true); scanner.seek(KeyValue.LOWESTKEY); @@ -916,13 +915,13 @@ public class TestStoreFile extends HBaseTestCase { // Let's read back the two files to ensure the blocks exactly match hsf = new StoreFile(this.fs, pathCowOff, conf, cacheConf, BloomType.NONE); - StoreFile.Reader readerOne = hsf.createReader(); + StoreFileReader readerOne = hsf.createReader(); readerOne.loadFileInfo(); StoreFileScanner scannerOne = readerOne.getStoreFileScanner(true, true); scannerOne.seek(KeyValue.LOWESTKEY); hsf = new StoreFile(this.fs, pathCowOn, conf, cacheConf, BloomType.NONE); - StoreFile.Reader readerTwo = hsf.createReader(); + StoreFileReader readerTwo = hsf.createReader(); readerTwo.loadFileInfo(); StoreFileScanner scannerTwo = readerTwo.getStoreFileScanner(true, true); scannerTwo.seek(KeyValue.LOWESTKEY); @@ -990,7 +989,7 @@ public class TestStoreFile extends HBaseTestCase { return new Path(new Path(regionDir, family), path.getName()); } - private StoreFile.Writer writeStoreFile(Configuration conf, + private 
StoreFileWriter writeStoreFile(Configuration conf, CacheConfig cacheConf, Path path, int numBlocks) throws IOException { // Let's put ~5 small KVs in each block, so let's make 5*numBlocks KVs @@ -1010,7 +1009,7 @@ public class TestStoreFile extends HBaseTestCase { .withBytesPerCheckSum(CKBYTES) .build(); // Make a store file and write data to it. - StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cacheConf, this.fs) + StoreFileWriter writer = new StoreFileWriter.Builder(conf, cacheConf, this.fs) .withFilePath(path) .withMaxKeyCount(2000) .withFileContext(meta) @@ -1046,7 +1045,7 @@ public class TestStoreFile extends HBaseTestCase { .withDataBlockEncoding(dataBlockEncoderAlgo) .build(); // Make a store file and write data to it. - StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cacheConf, this.fs) + StoreFileWriter writer = new StoreFileWriter.Builder(conf, cacheConf, this.fs) .withFilePath(path) .withMaxKeyCount(2000) .withFileContext(meta) @@ -1055,7 +1054,7 @@ public class TestStoreFile extends HBaseTestCase { StoreFile storeFile = new StoreFile(fs, writer.getPath(), conf, cacheConf, BloomType.NONE); - StoreFile.Reader reader = storeFile.createReader(); + StoreFileReader reader = storeFile.createReader(); Map fileInfo = reader.loadFileInfo(); byte[] value = fileInfo.get(HFileDataBlockEncoder.DATA_BLOCK_ENCODING); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFileScannerWithTagCompression.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFileScannerWithTagCompression.java index 4a6b2e7..758487d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFileScannerWithTagCompression.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFileScannerWithTagCompression.java @@ -65,13 +65,13 @@ public class TestStoreFileScannerWithTagCompression { HFileContext meta = new HFileContextBuilder().withBlockSize(8 * 1024).withIncludesTags(true) .withCompressTags(true).withDataBlockEncoding(DataBlockEncoding.PREFIX).build(); // Make a store file and write data to it. 
- StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cacheConf, fs).withFilePath(f) + StoreFileWriter writer = new StoreFileWriter.Builder(conf, cacheConf, fs).withFilePath(f) .withFileContext(meta).build(); writeStoreFile(writer); writer.close(); - StoreFile.Reader reader = new StoreFile.Reader(fs, f, cacheConf, conf); + StoreFileReader reader = new StoreFileReader(fs, f, cacheConf, conf); StoreFileScanner s = reader.getStoreFileScanner(false, false); try { // Now do reseek with empty KV to position to the beginning of the file @@ -91,7 +91,7 @@ public class TestStoreFileScannerWithTagCompression { } } - private void writeStoreFile(final StoreFile.Writer writer) throws IOException { + private void writeStoreFile(final StoreFileWriter writer) throws IOException { byte[] fam = Bytes.toBytes("f"); byte[] qualifier = Bytes.toBytes("q"); long now = System.currentTimeMillis(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStripeCompactor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStripeCompactor.java index 10f8af2..b966b26 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStripeCompactor.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStripeCompactor.java @@ -218,7 +218,7 @@ public class TestStripeCompactor { // "Files" are totally unused, it's Scanner class below that gives compactor fake KVs. // But compaction depends on everything under the sun, so stub everything with dummies. StoreFile sf = mock(StoreFile.class); - StoreFile.Reader r = mock(StoreFile.Reader.class); + StoreFileReader r = mock(StoreFileReader.class); when(r.length()).thenReturn(1L); when(r.getBloomFilterType()).thenReturn(BloomType.NONE); when(r.getHFileReader()).thenReturn(mock(HFile.Reader.class)); @@ -255,7 +255,7 @@ public class TestStripeCompactor { // StoreFile.Writer has private ctor and is unwieldy, so this has to be convoluted. 
   public static class StoreFileWritersCapture implements
-      Answer<StoreFile.Writer>, StripeMultiFileWriter.WriterFactory {
+      Answer<StoreFileWriter>, StripeMultiFileWriter.WriterFactory {
     public static class Writer {
       public ArrayList<KeyValue> kvs = new ArrayList<KeyValue>();
       public TreeMap<byte[], byte[]> data = new TreeMap<byte[], byte[]>(Bytes.BYTES_COMPARATOR);
     }
@@ -264,10 +264,10 @@ public class TestStripeCompactor {
     private List<Writer> writers = new ArrayList<Writer>();
 
     @Override
-    public StoreFile.Writer createWriter() throws IOException {
+    public StoreFileWriter createWriter() throws IOException {
       final Writer realWriter = new Writer();
       writers.add(realWriter);
-      StoreFile.Writer writer = mock(StoreFile.Writer.class);
+      StoreFileWriter writer = mock(StoreFileWriter.class);
       doAnswer(new Answer<Object>() {
         public Object answer(InvocationOnMock invocation) {
           return realWriter.kvs.add((KeyValue)invocation.getArguments()[0]);
@@ -281,7 +281,7 @@ public class TestStripeCompactor {
     }
 
     @Override
-    public StoreFile.Writer answer(InvocationOnMock invocation) throws Throwable {
+    public StoreFileWriter answer(InvocationOnMock invocation) throws Throwable {
       return createWriter();
     }
 
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStripeStoreEngine.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStripeStoreEngine.java
index e17cb70..058512e 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStripeStoreEngine.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStripeStoreEngine.java
@@ -105,7 +105,7 @@ public class TestStripeStoreEngine {
     StoreFile sf = mock(StoreFile.class);
     when(sf.getMetadataValue(any(byte[].class)))
       .thenReturn(StripeStoreFileManager.INVALID_KEY);
-    when(sf.getReader()).thenReturn(mock(StoreFile.Reader.class));
+    when(sf.getReader()).thenReturn(mock(StoreFileReader.class));
     when(sf.getPath()).thenReturn(new Path("moo"));
     return sf;
   }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/MockStoreFileGenerator.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/MockStoreFileGenerator.java
index 0760736..663714a 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/MockStoreFileGenerator.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/MockStoreFileGenerator.java
@@ -23,10 +23,10 @@ import java.util.List;
 import java.util.Random;
 
 import com.google.common.base.Objects;
-import com.google.common.io.Files;
 import org.apache.commons.lang.RandomStringUtils;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.regionserver.StoreFile;
+import org.apache.hadoop.hbase.regionserver.StoreFileReader;
 import org.apache.hadoop.util.StringUtils;
 
 import static org.mockito.Mockito.mock;
@@ -63,7 +63,7 @@ class MockStoreFileGenerator {
   protected StoreFile createMockStoreFile(final long sizeInBytes, final long seqId) {
     StoreFile mockSf = mock(StoreFile.class);
-    StoreFile.Reader reader = mock(StoreFile.Reader.class);
+    StoreFileReader reader = mock(StoreFileReader.class);
     String stringPath = "/hbase/testTable/regionA/"
         + RandomStringUtils.random(FILENAME_LENGTH, 0, 0, true, true, null, random);
     Path path = new Path(stringPath);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestStripeCompactionPolicy.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestStripeCompactionPolicy.java
index 2abb4dd..7dba7bb 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestStripeCompactionPolicy.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestStripeCompactionPolicy.java
@@ -59,6 +59,7 @@ import org.apache.hadoop.hbase.regionserver.ScannerContext;
 import org.apache.hadoop.hbase.regionserver.Store;
 import org.apache.hadoop.hbase.regionserver.StoreConfigInformation;
 import org.apache.hadoop.hbase.regionserver.StoreFile;
+import org.apache.hadoop.hbase.regionserver.StoreFileReader;
 import org.apache.hadoop.hbase.regionserver.StoreFileScanner;
 import org.apache.hadoop.hbase.regionserver.StripeMultiFileWriter;
 import org.apache.hadoop.hbase.regionserver.StripeStoreConfig;
@@ -717,7 +718,7 @@ public class TestStripeCompactionPolicy {
   private static StoreFile createFile(long size) throws Exception {
     StoreFile sf = mock(StoreFile.class);
     when(sf.getPath()).thenReturn(new Path("moo"));
-    StoreFile.Reader r = mock(StoreFile.Reader.class);
+    StoreFileReader r = mock(StoreFileReader.class);
     when(r.getEntries()).thenReturn(size);
     when(r.length()).thenReturn(size);
     when(r.getBloomFilterType()).thenReturn(BloomType.NONE);
-- 
2.3.2 (Apple Git-55)
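For reviewers, a minimal usage sketch of the renamed classes, assembled from the call patterns the updated tests above exercise: StoreFileWriter.Builder in place of StoreFile.WriterBuilder, and StoreFileReader in place of StoreFile.Reader. Only those class and method names come from this patch; the configuration bootstrap, the output path, and the row/family/qualifier values are illustrative assumptions rather than code from any of the touched files.

// Illustrative sketch only (not part of the patch): exercises the renamed
// StoreFileWriter/StoreFileReader API the same way the updated tests do.
// The path, family, qualifier and block size below are made up for the example.
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.io.hfile.HFileContext;
import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.regionserver.StoreFileReader;
import org.apache.hadoop.hbase.regionserver.StoreFileScanner;
import org.apache.hadoop.hbase.regionserver.StoreFileWriter;
import org.apache.hadoop.hbase.util.Bytes;

public class StoreFileWriterReaderExample {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    FileSystem fs = FileSystem.get(conf);
    CacheConfig cacheConf = new CacheConfig(conf);
    HFileContext meta = new HFileContextBuilder().withBlockSize(8 * 1024).build();
    Path path = new Path("/tmp/example-storefile");  // hypothetical location

    // Was: new StoreFile.WriterBuilder(conf, cacheConf, fs)...
    StoreFileWriter writer = new StoreFileWriter.Builder(conf, cacheConf, fs)
        .withFilePath(path)
        .withBloomType(BloomType.NONE)
        .withFileContext(meta)
        .build();
    try {
      long now = System.currentTimeMillis();
      writer.append(new KeyValue(Bytes.toBytes("row"), Bytes.toBytes("f"),
          Bytes.toBytes("q"), now, Bytes.toBytes("value")));
    } finally {
      writer.close();
    }

    // Was: new StoreFile.Reader(fs, path, cacheConf, conf)
    StoreFileReader reader = new StoreFileReader(fs, path, cacheConf, conf);
    reader.loadFileInfo();
    StoreFileScanner scanner = reader.getStoreFileScanner(false, false);
    try {
      scanner.seek(KeyValue.LOWESTKEY);  // position at the start of the file
      // ... read cells from the scanner as the tests above do ...
    } finally {
      scanner.close();
      reader.close(true);  // close(evictOnClose)
    }
  }
}

The same Builder chain also accepts withOutputDir(...), withMaxKeyCount(...) and the other with* calls exactly as shown in the test hunks above; only the names, not the semantics, change in this patch.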