From c08ca542ce9cefe45b283711f4c19bb55f6894d9 Mon Sep 17 00:00:00 2001
From: mbautin
Date: Wed, 22 Feb 2012 22:42:15 -0800
Subject: [PATCH] HFile/StoreFile builder

---
Note for reviewers: usage sketches for the new builder APIs follow the patch.

 .../org/apache/hadoop/hbase/io/hfile/HFile.java    |   86 ++++++++--
 .../hadoop/hbase/io/hfile/HFileWriterV1.java       |   80 +---------
 .../hadoop/hbase/io/hfile/HFileWriterV2.java       |   86 +---------
 .../hadoop/hbase/mapreduce/HFileOutputFormat.java  |   10 +-
 .../hbase/mapreduce/LoadIncrementalHFiles.java     |   11 +-
 .../apache/hadoop/hbase/regionserver/Store.java    |   11 +-
 .../hadoop/hbase/regionserver/StoreFile.java       |  176 +++++++++++++------
 .../apache/hadoop/hbase/util/CompressionTest.java  |    7 +-
 .../hadoop/hbase/HFilePerformanceEvaluation.java   |    7 +-
 .../hadoop/hbase/io/TestHalfStoreFileReader.java   |    8 +-
 .../hadoop/hbase/io/hfile/TestCacheOnWrite.java    |   12 +-
 .../apache/hadoop/hbase/io/hfile/TestHFile.java    |   54 ++++---
 .../hadoop/hbase/io/hfile/TestHFileBlockIndex.java |    8 +-
 .../hbase/io/hfile/TestHFilePerformance.java       |    8 +-
 .../hadoop/hbase/io/hfile/TestHFileSeek.java       |    8 +-
 .../hadoop/hbase/io/hfile/TestHFileWriterV2.java   |    9 +-
 .../apache/hadoop/hbase/io/hfile/TestReseekTo.java |    6 +-
 .../apache/hadoop/hbase/io/hfile/TestSeekTo.java   |    7 +-
 .../hbase/mapreduce/TestLoadIncrementalHFiles.java |   10 +-
 .../hbase/regionserver/CreateRandomStoreFile.java  |   10 +-
 .../hbase/regionserver/HFileReadWriteTest.java     |   13 +-
 .../regionserver/TestCompoundBloomFilter.java      |    8 +-
 .../hbase/regionserver/TestFSErrorsExposed.java    |   12 +-
 .../regionserver/TestHRegionServerBulkLoad.java    |    6 +-
 .../hadoop/hbase/regionserver/TestStore.java       |    6 +-
 .../hadoop/hbase/regionserver/TestStoreFile.java   |  107 +++++++-----
 .../hbase/regionserver/wal/TestWALReplay.java      |    2 +-
 27 files changed, 415 insertions(+), 353 deletions(-)

diff --git src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java
index d87d9a6..e765e77 100644
--- src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java
+++ src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java
@@ -38,6 +38,7 @@ import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.PathFilter;
+import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.KeyValue.KeyComparator;
@@ -51,6 +52,8 @@ import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.io.RawComparator;
 import org.apache.hadoop.io.Writable;
 
+import com.google.common.base.Preconditions;
+
 /**
  * File format for hbase.
  * A file of sorted key/value pairs. Both keys and values are byte arrays.
@@ -232,33 +235,82 @@ public class HFile {
    * we want to be able to swap writer implementations.
   */
  public static abstract class WriterFactory {
-    protected Configuration conf;
-    protected CacheConfig cacheConf;
+    protected final Configuration conf;
+    protected final CacheConfig cacheConf;
+    protected FileSystem fs;
+    protected Path path;
+    protected FSDataOutputStream ostream;
+    protected int blockSize = HColumnDescriptor.DEFAULT_BLOCKSIZE;
+    protected Compression.Algorithm compression =
+        HFile.DEFAULT_COMPRESSION_ALGORITHM;
+    protected HFileDataBlockEncoder encoder = NoOpDataBlockEncoder.INSTANCE;
+    protected KeyComparator comparator;
 
     WriterFactory(Configuration conf, CacheConfig cacheConf) {
       this.conf = conf;
       this.cacheConf = cacheConf;
     }
 
-    public abstract Writer createWriter(FileSystem fs, Path path)
-        throws IOException;
+    public WriterFactory withPath(FileSystem fs, Path path) {
+      Preconditions.checkNotNull(fs);
+      Preconditions.checkNotNull(path);
+      this.fs = fs;
+      this.path = path;
+      return this;
+    }
 
-    public abstract Writer createWriter(FileSystem fs, Path path,
-        int blockSize, Compression.Algorithm compress,
-        HFileDataBlockEncoder dataBlockEncoder,
-        final KeyComparator comparator) throws IOException;
+    public WriterFactory withOutputStream(FSDataOutputStream ostream) {
+      Preconditions.checkNotNull(ostream);
+      this.ostream = ostream;
+      return this;
+    }
 
-    public abstract Writer createWriter(FileSystem fs, Path path,
-        int blockSize, String compress,
-        final KeyComparator comparator) throws IOException;
+    public WriterFactory withBlockSize(int blockSize) {
+      this.blockSize = blockSize;
+      return this;
+    }
 
-    public abstract Writer createWriter(final FSDataOutputStream ostream,
-        final int blockSize, final String compress,
-        final KeyComparator comparator) throws IOException;
+    public WriterFactory withCompression(Compression.Algorithm compression) {
+      Preconditions.checkNotNull(compression);
+      this.compression = compression;
+      return this;
+    }
 
-    public abstract Writer createWriter(final FSDataOutputStream ostream,
-        final int blockSize, final Compression.Algorithm compress,
-        final KeyComparator c) throws IOException;
+    public WriterFactory withCompression(String compressAlgo) {
+      Preconditions.checkNotNull(compressAlgo);
+      this.compression = AbstractHFileWriter.compressionByName(compressAlgo);
+      return this;
+    }
+
+    public WriterFactory withDataBlockEncoder(HFileDataBlockEncoder encoder) {
+      Preconditions.checkNotNull(encoder);
+      this.encoder = encoder;
+      return this;
+    }
+
+    public WriterFactory withComparator(KeyComparator comparator) {
+      Preconditions.checkNotNull(comparator);
+      this.comparator = comparator;
+      return this;
+    }
+
+    public Writer create() throws IOException {
+      if ((path != null ? 1 : 0) + (ostream != null ? 1 : 0) != 1) {
+        throw new AssertionError("Please specify exactly one of " +
+            "filesystem/path or output stream");
+      }
+      if (path != null) {
+        ostream = AbstractHFileWriter.createOutputStream(conf, fs, path);
+      }
+      return createWriter(fs, path, ostream, blockSize,
+          compression, encoder, comparator);
+    }
+
+    protected abstract Writer createWriter(FileSystem fs, Path path,
+        FSDataOutputStream ostream, int blockSize,
+        Compression.Algorithm compress,
+        HFileDataBlockEncoder dataBlockEncoder,
+        KeyComparator comparator) throws IOException;
   }
 
   /** The configuration key for HFile version to use for new files */
diff --git src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterV1.java src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterV1.java
index 9cd26d5..080a14c 100644
--- src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterV1.java
+++ src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterV1.java
@@ -82,102 +82,32 @@ public class HFileWriterV1 extends AbstractHFileWriter {
   private int blockNumber = 0;
 
   static class WriterFactoryV1 extends HFile.WriterFactory {
-
     WriterFactoryV1(Configuration conf, CacheConfig cacheConf) {
       super(conf, cacheConf);
     }
 
     @Override
-    public Writer createWriter(FileSystem fs, Path path) throws IOException {
-      return new HFileWriterV1(conf, cacheConf, fs, path);
-    }
-
-    @Override
-    public Writer createWriter(FileSystem fs, Path path, int blockSize,
+    public Writer createWriter(FileSystem fs, Path path,
+        FSDataOutputStream ostream, int blockSize,
         Algorithm compressAlgo, HFileDataBlockEncoder dataBlockEncoder,
         KeyComparator comparator) throws IOException {
-      return new HFileWriterV1(conf, cacheConf, fs, path, blockSize,
+      return new HFileWriterV1(conf, cacheConf, fs, path, ostream, blockSize,
           compressAlgo, dataBlockEncoder, comparator);
     }
-
-    @Override
-    public Writer createWriter(FileSystem fs, Path path, int blockSize,
-        String compressAlgoName, KeyComparator comparator)
-        throws IOException {
-      return new HFileWriterV1(conf, cacheConf, fs, path, blockSize,
-          compressAlgoName, comparator);
-    }
-
-    @Override
-    public Writer createWriter(final FSDataOutputStream ostream,
-        final int blockSize, final String compress,
-        final KeyComparator comparator) throws IOException {
-      return new HFileWriterV1(cacheConf, ostream, blockSize, compress,
-          comparator);
-    }
-
-    @Override
-    public Writer createWriter(final FSDataOutputStream ostream,
-        final int blockSize, final Compression.Algorithm compress,
-        final KeyComparator c) throws IOException {
-      return new HFileWriterV1(cacheConf, ostream, blockSize, compress,
-          NoOpDataBlockEncoder.INSTANCE, c);
-    }
-  }
-
-  /** Constructor that uses all defaults for compression and block size. */
-  public HFileWriterV1(Configuration conf, CacheConfig cacheConf,
-      FileSystem fs, Path path)
-      throws IOException {
-    this(conf, cacheConf, fs, path, HFile.DEFAULT_BLOCKSIZE,
-        HFile.DEFAULT_COMPRESSION_ALGORITHM,
-        NoOpDataBlockEncoder.INSTANCE, null);
-  }
-
-  /**
-   * Constructor that takes a path, creates and closes the output stream. Takes
-   * compression algorithm name as string.
-   */
-  public HFileWriterV1(Configuration conf, CacheConfig cacheConf, FileSystem fs,
-      Path path, int blockSize, String compressAlgoName,
-      final KeyComparator comparator) throws IOException {
-    this(conf, cacheConf, fs, path, blockSize,
-        compressionByName(compressAlgoName), NoOpDataBlockEncoder.INSTANCE,
-        comparator);
   }
 
   /** Constructor that takes a path, creates and closes the output stream. */
   public HFileWriterV1(Configuration conf, CacheConfig cacheConf,
-      FileSystem fs, Path path,
+      FileSystem fs, Path path, FSDataOutputStream ostream,
       int blockSize, Compression.Algorithm compress,
       HFileDataBlockEncoder blockEncoder,
       final KeyComparator comparator) throws IOException {
-    super(cacheConf, createOutputStream(conf, fs, path), path,
+    super(cacheConf, ostream == null ? createOutputStream(conf, fs, path) : ostream, path,
         blockSize, compress, blockEncoder, comparator);
     SchemaMetrics.configureGlobally(conf);
   }
 
-  /** Constructor that takes a stream. */
-  public HFileWriterV1(CacheConfig cacheConf,
-      final FSDataOutputStream outputStream, final int blockSize,
-      final String compressAlgoName, final KeyComparator comparator)
-      throws IOException {
-    this(cacheConf, outputStream, blockSize,
-        Compression.getCompressionAlgorithmByName(compressAlgoName),
-        NoOpDataBlockEncoder.INSTANCE, comparator);
-  }
-
-  /** Constructor that takes a stream. */
-  public HFileWriterV1(CacheConfig cacheConf,
-      final FSDataOutputStream outputStream, final int blockSize,
-      final Compression.Algorithm compress,
-      HFileDataBlockEncoder blockEncoder, final KeyComparator comparator)
-      throws IOException {
-    super(cacheConf, outputStream, null, blockSize, compress,
-        blockEncoder, comparator);
-  }
-
   /**
    * If at block boundary, opens new block.
    *
diff --git src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterV2.java src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterV2.java
index a904a55..ae7a134 100644
--- src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterV2.java
+++ src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterV2.java
@@ -83,100 +83,32 @@ public class HFileWriterV2 extends AbstractHFileWriter {
   private long maxMemstoreTS = 0;
 
   static class WriterFactoryV2 extends HFile.WriterFactory {
-
     WriterFactoryV2(Configuration conf, CacheConfig cacheConf) {
       super(conf, cacheConf);
     }
 
     @Override
-    public Writer createWriter(FileSystem fs, Path path)
-        throws IOException {
-      return new HFileWriterV2(conf, cacheConf, fs, path);
-    }
-
-    @Override
-    public Writer createWriter(FileSystem fs, Path path, int blockSize,
+    public Writer createWriter(FileSystem fs, Path path,
+        FSDataOutputStream ostream, int blockSize,
         Compression.Algorithm compress, HFileDataBlockEncoder blockEncoder,
         final KeyComparator comparator) throws IOException {
-      return new HFileWriterV2(conf, cacheConf, fs, path, blockSize,
+      return new HFileWriterV2(conf, cacheConf, fs, path, ostream, blockSize,
           compress, blockEncoder, comparator);
     }
-
-    @Override
-    public Writer createWriter(FileSystem fs, Path path, int blockSize,
-        String compress, final KeyComparator comparator)
-        throws IOException {
-      return new HFileWriterV2(conf, cacheConf, fs, path, blockSize,
-          compress, comparator);
-    }
-
-    @Override
-    public Writer createWriter(final FSDataOutputStream ostream,
-        final int blockSize, final String compress,
-        final KeyComparator comparator) throws IOException {
-      return new HFileWriterV2(conf, cacheConf, ostream, blockSize, compress,
-          comparator);
-    }
-
-    @Override
-    public Writer createWriter(final FSDataOutputStream ostream,
-        final int blockSize, final Compression.Algorithm compress,
-        final KeyComparator c) throws IOException {
-      return new HFileWriterV2(conf, cacheConf, ostream, blockSize, compress,
-          c);
-    }
-  }
-
-  /** Constructor that uses all defaults for compression and block size. */
-  public HFileWriterV2(Configuration conf, CacheConfig cacheConf,
-      FileSystem fs, Path path)
-      throws IOException {
-    this(conf, cacheConf, fs, path, HFile.DEFAULT_BLOCKSIZE,
-        HFile.DEFAULT_COMPRESSION_ALGORITHM, null, null);
-  }
-
-  /**
-   * Constructor that takes a path, creates and closes the output stream. Takes
-   * compression algorithm name as string.
-   */
-  public HFileWriterV2(Configuration conf, CacheConfig cacheConf, FileSystem fs,
-      Path path, int blockSize, String compressAlgoName,
-      final KeyComparator comparator) throws IOException {
-    this(conf, cacheConf, fs, path, blockSize,
-        compressionByName(compressAlgoName), null, comparator);
   }
 
   /** Constructor that takes a path, creates and closes the output stream. */
-  public HFileWriterV2(Configuration conf, CacheConfig cacheConf, FileSystem fs,
-      Path path, int blockSize, Compression.Algorithm compressAlgo,
-      HFileDataBlockEncoder blockEncoder,
+  public HFileWriterV2(Configuration conf, CacheConfig cacheConf,
+      FileSystem fs, Path path, FSDataOutputStream ostream, int blockSize,
+      Compression.Algorithm compressAlgo, HFileDataBlockEncoder blockEncoder,
       final KeyComparator comparator) throws IOException {
-    super(cacheConf, createOutputStream(conf, fs, path), path,
-        blockSize, compressAlgo, blockEncoder, comparator);
+    super(cacheConf,
+        ostream == null ? createOutputStream(conf, fs, path) : ostream,
+        path, blockSize, compressAlgo, blockEncoder, comparator);
     SchemaMetrics.configureGlobally(conf);
     finishInit(conf);
   }
 
-  /** Constructor that takes a stream. */
-  public HFileWriterV2(final Configuration conf, final CacheConfig cacheConf,
-      final FSDataOutputStream outputStream, final int blockSize,
-      final String compressAlgoName, final KeyComparator comparator)
-      throws IOException {
-    this(conf, cacheConf, outputStream, blockSize,
-        Compression.getCompressionAlgorithmByName(compressAlgoName),
-        comparator);
-  }
-
-  /** Constructor that takes a stream. */
-  public HFileWriterV2(final Configuration conf, final CacheConfig cacheConf,
-      final FSDataOutputStream outputStream, final int blockSize,
-      final Compression.Algorithm compress, final KeyComparator comparator)
-      throws IOException {
-    super(cacheConf, outputStream, null, blockSize, compress, null,
-        comparator);
-    finishInit(conf);
-  }
-
   /** Additional initialization steps */
   private void finishInit(final Configuration conf) {
     if (fsBlockWriter != null)
diff --git src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat.java src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat.java
index c5222c2..b0dd625 100644
--- src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat.java
+++ src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat.java
@@ -173,10 +173,12 @@ public class HFileOutputFormat extends FileOutputFormat<ImmutableBytesWritable, KeyValue>
     List<KeyValue> items = genSomeKeys();
diff --git src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.java src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.java
index 995a26e..3918104 100644
--- src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.java
+++ src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.java
@@ -283,9 +283,15 @@ public class TestCacheOnWrite {
   public void writeStoreFile() throws IOException {
     Path storeFileParentDir = new Path(TEST_UTIL.getDataTestDir(),
         "test_cache_on_write");
-    StoreFile.Writer sfw = StoreFile.createWriter(fs, storeFileParentDir,
-        DATA_BLOCK_SIZE, compress, encoder, KeyValue.COMPARATOR, conf,
-        cacheConf, BLOOM_TYPE, NUM_KV);
+    StoreFile.Writer sfw = new StoreFile.WriterBuilder(conf, cacheConf, fs,
+        DATA_BLOCK_SIZE)
+            .withOutputDir(storeFileParentDir)
+            .withCompression(compress)
+            .withDataBlockEncoder(encoder)
+            .withComparator(KeyValue.COMPARATOR)
+            .withBloomType(BLOOM_TYPE)
+            .withMaxKeyCount(NUM_KV)
+            .build();
     final int rowLen = 32;
     for (int i = 0; i < NUM_KV; ++i) {
diff --git src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFile.java src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFile.java
index 3312cfa..bb992b8 100644
--- src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFile.java
+++ src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFile.java
@@ -80,7 +80,8 @@ public class TestHFile extends HBaseTestCase {
   public void testEmptyHFile() throws IOException {
     if (cacheConf == null) cacheConf = new CacheConfig(conf);
     Path f = new Path(ROOT_DIR, getName());
-    Writer w = HFile.getWriterFactory(conf, cacheConf).createWriter(this.fs, f);
+    Writer w =
+        HFile.getWriterFactory(conf, cacheConf).withPath(fs, f).create();
     w.close();
     Reader r = HFile.createReader(fs, f, cacheConf);
     r.loadFileInfo();
@@ -152,8 +153,11 @@ public class TestHFile extends HBaseTestCase {
     if (cacheConf == null) cacheConf = new CacheConfig(conf);
     Path ncTFile = new Path(ROOT_DIR, "basic.hfile." + codec.toString());
     FSDataOutputStream fout = createFSOutput(ncTFile);
-    Writer writer = HFile.getWriterFactory(conf, cacheConf).createWriter(fout,
-        minBlockSize, Compression.getCompressionAlgorithmByName(codec), null);
+    Writer writer = HFile.getWriterFactory(conf, cacheConf)
+        .withOutputStream(fout)
+        .withBlockSize(minBlockSize)
+        .withCompression(codec)
+        .create();
     LOG.info(writer);
     writeRecords(writer);
     fout.close();
@@ -229,9 +233,11 @@ public class TestHFile extends HBaseTestCase {
     if (cacheConf == null) cacheConf = new CacheConfig(conf);
     Path mFile = new Path(ROOT_DIR, "meta.hfile");
     FSDataOutputStream fout = createFSOutput(mFile);
-    Writer writer = HFile.getWriterFactory(conf, cacheConf).createWriter(fout,
-        minBlockSize, Compression.getCompressionAlgorithmByName(compress),
-        null);
+    Writer writer = HFile.getWriterFactory(conf, cacheConf)
+        .withOutputStream(fout)
+        .withBlockSize(minBlockSize)
+        .withCompression(compress)
+        .create();
     someTestingWithMetaBlock(writer);
     writer.close();
     fout.close();
@@ -259,8 +265,11 @@ public class TestHFile extends HBaseTestCase {
         HBaseTestingUtility.COMPRESSION_ALGORITHMS) {
       Path mFile = new Path(ROOT_DIR, "nometa_" + compressAlgo + ".hfile");
       FSDataOutputStream fout = createFSOutput(mFile);
-      Writer writer = HFile.getWriterFactory(conf, cacheConf).createWriter(fout,
-          minBlockSize, compressAlgo, null);
+      Writer writer = HFile.getWriterFactory(conf, cacheConf)
+          .withOutputStream(fout)
+          .withBlockSize(minBlockSize)
+          .withCompression(compressAlgo)
+          .create();
       writer.append("foo".getBytes(), "value".getBytes());
       writer.close();
       fout.close();
@@ -283,19 +292,22 @@ public class TestHFile extends HBaseTestCase {
     if (cacheConf == null) cacheConf = new CacheConfig(conf);
     Path mFile = new Path(ROOT_DIR, "meta.tfile");
     FSDataOutputStream fout = createFSOutput(mFile);
-    Writer writer = HFile.getWriterFactory(conf, cacheConf).createWriter(fout,
-        minBlockSize, (Compression.Algorithm) null, new KeyComparator() {
-          @Override
-          public int compare(byte[] b1, int s1, int l1, byte[] b2, int s2,
-              int l2) {
-            return -Bytes.compareTo(b1, s1, l1, b2, s2, l2);
-
-          }
-          @Override
-          public int compare(byte[] o1, byte[] o2) {
-            return compare(o1, 0, o1.length, o2, 0, o2.length);
-          }
-        });
+    KeyComparator comparator = new KeyComparator() {
+      @Override
+      public int compare(byte[] b1, int s1, int l1, byte[] b2, int s2,
+          int l2) {
+        return -Bytes.compareTo(b1, s1, l1, b2, s2, l2);
+      }
+      @Override
+      public int compare(byte[] o1, byte[] o2) {
+        return compare(o1, 0, o1.length, o2, 0, o2.length);
+      }
+    };
+    Writer writer = HFile.getWriterFactory(conf, cacheConf)
+        .withOutputStream(fout)
+        .withBlockSize(minBlockSize)
+        .withComparator(comparator)
+        .create();
     writer.append("3".getBytes(), "0".getBytes());
     writer.append("2".getBytes(), "0".getBytes());
     writer.append("1".getBytes(), "0".getBytes());
diff --git src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlockIndex.java src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlockIndex.java
index d03a60d..b7d0665 100644
--- src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlockIndex.java
+++ src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlockIndex.java
@@ -483,8 +483,12 @@ public class TestHFileBlockIndex {
     // Write the HFile
     {
       HFile.Writer writer =
-          HFile.getWriterFactory(conf, cacheConf).createWriter(fs,
-              hfilePath, SMALL_BLOCK_SIZE, compr, null, KeyValue.KEY_COMPARATOR);
+          HFile.getWriterFactory(conf, cacheConf)
+              .withPath(fs, hfilePath)
+              .withBlockSize(SMALL_BLOCK_SIZE)
+              .withCompression(compr)
+              .withComparator(KeyValue.KEY_COMPARATOR)
+              .create();
       Random rand = new Random(19231737);
 
       for (int i = 0; i < NUM_KV; ++i) {
diff --git src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFilePerformance.java src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFilePerformance.java
index 339d0b0..86348d7 100644
--- src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFilePerformance.java
+++ src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFilePerformance.java
@@ -161,9 +161,11 @@ public class TestHFilePerformance extends TestCase {
 
     if ("HFile".equals(fileType)){
         System.out.println("HFile write method: ");
-        HFile.Writer writer =
-          HFile.getWriterFactoryNoCache(conf).createWriter(fout,
-             minBlockSize, codecName, null);
+        HFile.Writer writer = HFile.getWriterFactoryNoCache(conf)
+            .withOutputStream(fout)
+            .withBlockSize(minBlockSize)
+            .withCompression(codecName)
+            .create();
 
         // Writing value in one shot.
         for (long l=0; l
 MAX_INT
-    writer = new StoreFile.Writer(fs, f,
-        StoreFile.DEFAULT_BLOCKSIZE_SMALL, HFile.DEFAULT_COMPRESSION_ALGORITHM,
-        null, conf, cacheConf, KeyValue.COMPARATOR, StoreFile.BloomType.ROW,
-        Integer.MAX_VALUE);
+    writer = new StoreFile.WriterBuilder(conf, cacheConf, fs,
+        StoreFile.DEFAULT_BLOCKSIZE_SMALL)
+            .withFilePath(f)
+            .withBloomType(StoreFile.BloomType.ROW)
+            .withMaxKeyCount(Integer.MAX_VALUE)
+            .build();
     assertFalse(writer.hasGeneralBloom());
     writer.close();
     fs.delete(f, true);
@@ -668,8 +683,10 @@ public class TestStoreFile extends HBaseTestCase {
     Path storedir = new Path(new Path(this.testDir, "regionname"), "familyname");
     Path dir = new Path(storedir, "1234567890");
-    StoreFile.Writer writer = StoreFile.createWriter(this.fs, dir, 8 * 1024,
-        conf, cacheConf);
+    StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cacheConf,
+        this.fs, 8 * 1024)
+            .withOutputDir(dir)
+            .build();
 
     List<KeyValue> kvList = getKeyValueSet(timestamps,numRows, family, qualifier);
 
@@ -838,10 +855,11 @@ public class TestStoreFile extends HBaseTestCase {
       totalSize += kv.getLength() + 1;
     }
     int blockSize = totalSize / numBlocks;
-    StoreFile.Writer writer = new StoreFile.Writer(fs, path, blockSize,
-        HFile.DEFAULT_COMPRESSION_ALGORITHM,
-        null, conf, cacheConf, KeyValue.COMPARATOR, StoreFile.BloomType.NONE,
-        2000);
+    StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cacheConf, fs,
+        blockSize)
+            .withFilePath(path)
+            .withMaxKeyCount(2000)
+            .build();
     // We'll write N-1 KVs to ensure we don't write an extra block
     kvs.remove(kvs.size()-1);
     for (KeyValue kv : kvs) {
@@ -867,15 +885,12 @@ public class TestStoreFile extends HBaseTestCase {
         dataBlockEncoderAlgo,
         dataBlockEncoderAlgo);
     cacheConf = new CacheConfig(conf);
-    StoreFile.Writer writer = new StoreFile.Writer(fs,
-        path, HFile.DEFAULT_BLOCKSIZE,
-        HFile.DEFAULT_COMPRESSION_ALGORITHM,
-        dataBlockEncoder,
-        conf,
-        cacheConf,
-        KeyValue.COMPARATOR,
-        StoreFile.BloomType.NONE,
-        2000);
+    StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cacheConf, fs,
+        HFile.DEFAULT_BLOCKSIZE)
+            .withFilePath(path)
+            .withDataBlockEncoder(dataBlockEncoder)
+            .withMaxKeyCount(2000)
+            .build();
     writer.close();
 
     StoreFile storeFile = new StoreFile(fs, writer.getPath(), conf,
diff --git src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALReplay.java src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALReplay.java
index 23d27fd..a11899c 100644
--- src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALReplay.java
+++ src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALReplay.java
@@ -197,7 +197,7 @@ public class TestWALReplay {
     HRegion region = HRegion.openHRegion(hri, htd, wal, this.conf);
     Path f = new Path(basedir, "hfile");
     HFile.Writer writer =
-        HFile.getWriterFactoryNoCache(conf).createWriter(this.fs, f);
+        HFile.getWriterFactoryNoCache(conf).withPath(fs, f).create();
     byte [] family = htd.getFamilies().iterator().next().getName();
     byte [] row = Bytes.toBytes(tableNameStr);
     writer.append(new KeyValue(row, family, family, row));
-- 
1.7.4.4
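
For reviewers who want to try the new API: below is a minimal sketch of the
fluent HFile writer factory this patch introduces, assembled only from the
factory methods visible in the diff. The class name HFileBuilderExample, the
output path, and the key/value bytes are illustrative, not part of the patch.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.io.hfile.Compression;
import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hbase.util.Bytes;

public class HFileBuilderExample {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    CacheConfig cacheConf = new CacheConfig(conf);
    FileSystem fs = FileSystem.get(conf);
    Path path = new Path("/tmp/example.hfile"); // illustrative path

    // Exactly one of withPath()/withOutputStream() must be called before
    // create(); any omitted property keeps the default set up in
    // HFile.WriterFactory (block size, compression, encoder, comparator).
    HFile.Writer writer = HFile.getWriterFactory(conf, cacheConf)
        .withPath(fs, path)
        .withBlockSize(64 * 1024)
        .withCompression(Compression.Algorithm.GZ)
        .create();
    try {
      // Keys must be appended in ascending order.
      writer.append(Bytes.toBytes("row1"), Bytes.toBytes("value1"));
      writer.append(Bytes.toBytes("row2"), Bytes.toBytes("value2"));
    } finally {
      writer.close();
    }
  }
}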
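The stream-backed variant goes through the same create() call, which now
skips createOutputStream() when a stream was supplied (the
"ostream == null ? ... : ostream" change in HFileWriterV1/V2). A short
continuation of the sketch above, reusing conf, cacheConf and fs and adding
an import for org.apache.hadoop.fs.FSDataOutputStream; the path is again
illustrative:

    // The caller owns the stream and closes it after closing the writer,
    // the same order the updated TestHFile uses for its fout streams.
    FSDataOutputStream fout = fs.create(new Path("/tmp/stream.hfile"));
    HFile.Writer streamWriter = HFile.getWriterFactory(conf, cacheConf)
        .withOutputStream(fout)
        .withBlockSize(4 * 1024)
        .withCompression("none") // String overload resolves codec by name
        .create();
    streamWriter.append(Bytes.toBytes("k"), Bytes.toBytes("v"));
    streamWriter.close();
    fout.close();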
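And the store-file side, mirroring how the updated tests drive
StoreFile.WriterBuilder. This again continues the sketch (add imports for
org.apache.hadoop.hbase.KeyValue and
org.apache.hadoop.hbase.regionserver.StoreFile); the output directory,
family/qualifier bytes and key count are illustrative:

    // Either withOutputDir() (the builder picks a unique file name inside
    // the directory, as the old StoreFile.createWriter did) or
    // withFilePath() must be set; withMaxKeyCount() sizes the Bloom filter.
    StoreFile.Writer sfWriter = new StoreFile.WriterBuilder(conf, cacheConf,
        fs, StoreFile.DEFAULT_BLOCKSIZE_SMALL)
            .withOutputDir(new Path("/tmp/store"))
            .withComparator(KeyValue.COMPARATOR)
            .withBloomType(StoreFile.BloomType.ROW)
            .withMaxKeyCount(1000)
            .build();
    try {
      sfWriter.append(new KeyValue(Bytes.toBytes("row1"),
          Bytes.toBytes("fam"), Bytes.toBytes("qual"),
          Bytes.toBytes("value")));
    } finally {
      sfWriter.close();
    }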