From dc1f45cb4c84a01c71fb2c9e152f37119434a94a Mon Sep 17 00:00:00 2001
From: Allan Yang
Date: Sun, 30 Apr 2017 12:01:26 +0800
Subject: [PATCH] HBASE-17757 Unify blocksize after encoding to decrease memory fragmentation

---
 .../apache/hadoop/hbase/io/hfile/HFileBlock.java   | 20 ++++++++++++++++++
 .../hadoop/hbase/io/hfile/HFileWriterV2.java       | 24 ++++++++++++++++------
 2 files changed, 38 insertions(+), 6 deletions(-)

diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java
index b8629da..3b014b9 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java
@@ -844,6 +844,10 @@ public class HFileBlock implements Cacheable {
     // includes the header size also.
     private int unencodedDataSizeWritten;
 
+    // Size of the actual data being written, considering the block encoding. This
+    // includes the header size also.
+    private int encodedDataSizeWritten;
+
     /**
      * Bytes to be written to the file system, including the header. Compressed
      * if compression is turned on. It also includes the checksum data that
@@ -931,6 +935,7 @@ public class HFileBlock implements Cacheable {
         this.dataBlockEncoder.startBlockEncoding(dataBlockEncodingCtx, userDataStream);
       }
       this.unencodedDataSizeWritten = 0;
+      this.encodedDataSizeWritten = 0;
       return userDataStream;
     }
 
@@ -941,8 +946,10 @@ public class HFileBlock implements Cacheable {
      */
     void write(Cell cell) throws IOException{
       expectState(State.WRITING);
+      int posBeforeEncode = this.userDataStream.size();
       this.unencodedDataSizeWritten +=
           this.dataBlockEncoder.encode(cell, dataBlockEncodingCtx, this.userDataStream);
+      this.encodedDataSizeWritten += this.userDataStream.size() - posBeforeEncode;
     }
 
     /**
@@ -1179,6 +1186,19 @@ public class HFileBlock implements Cacheable {
      *
      * @return the number of bytes written
      */
+    public int encodedBlockSizeWritten() {
+      if (state != State.WRITING)
+        return 0;
+      return this.encodedDataSizeWritten;
+    }
+
+    /**
+     * Returns the number of bytes written into the current block so far, or
+     * zero if not writing the block at the moment. Note that this will return
+     * zero in the "block ready" state as well.
+     *
+     * @return the number of bytes written
+     */
     int blockSizeWritten() {
       if (state != State.WRITING) return 0;
       return this.unencodedDataSizeWritten;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterV2.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterV2.java
index 9e68dc3..394cd44 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterV2.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterV2.java
@@ -93,6 +93,14 @@ public class HFileWriterV2 extends AbstractHFileWriter {
   /** warn on cell with tags */
   private static boolean warnCellWithTags = true;
 
+
+  /** If this feature is enabled, pre-calculate the encoded data size before the real encoding happens */
+  public static final String UNIFIED_ENCODED_BLOCKSIZE_RATIO = "hbase.writer.unified.encoded.blocksize.ratio";
+
+  /** Block size limit after encoding, used to unify the encoded block cache entry size */
+  private final int encodedBlockSizeLimit;
+
+
   static class WriterFactoryV2 extends HFile.WriterFactory {
     WriterFactoryV2(Configuration conf, CacheConfig cacheConf) {
       super(conf, cacheConf);
@@ -115,6 +123,8 @@ public class HFileWriterV2 extends AbstractHFileWriter {
     super(cacheConf,
         ostream == null ? createOutputStream(conf, fs, path, null) : ostream,
         path, comparator, context);
+    float encodeBlockSizeRatio = conf.getFloat(UNIFIED_ENCODED_BLOCKSIZE_RATIO, 1f);
+    this.encodedBlockSizeLimit = (int)(hFileContext.getBlocksize() * encodeBlockSizeRatio);
     finishInit(conf);
   }
 
@@ -147,12 +157,14 @@ public class HFileWriterV2 extends AbstractHFileWriter {
    * @throws IOException
    */
   protected void checkBlockBoundary() throws IOException {
-    if (fsBlockWriter.blockSizeWritten() < hFileContext.getBlocksize())
-      return;
-
-    finishBlock();
-    writeInlineBlocks(false);
-    newBlock();
+    // For encoders like prefixTree, the encoded size is not available, so we have to
+    // compare both the encoded size and the unencoded size against the block size limit.
+    if (fsBlockWriter.encodedBlockSizeWritten() >= encodedBlockSizeLimit
+        || fsBlockWriter.blockSizeWritten() >= hFileContext.getBlocksize()) {
+      finishBlock();
+      writeInlineBlocks(false);
+      newBlock();
+    }
   }
 
   /** Clean up the current data block */
-- 
2.7.3.windows.1
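A minimal usage sketch (not part of the patch): it assumes only the names the
patch introduces (UNIFIED_ENCODED_BLOCKSIZE_RATIO on HFileWriterV2) plus the
standard Hadoop Configuration API; the 0.5f ratio and the 64 KB block size are
arbitrary example values:

    // Cap encoded blocks at half the configured block size so that encoded
    // blocks cluster around a single block cache entry size.
    Configuration conf = HBaseConfiguration.create();
    conf.setFloat(HFileWriterV2.UNIFIED_ENCODED_BLOCKSIZE_RATIO, 0.5f);
    // With an HFileContext block size of 65536, the writer computes
    // encodedBlockSizeLimit = (int)(65536 * 0.5f) = 32768, so
    // checkBlockBoundary() finishes the current block once 32 KB of encoded
    // bytes have been written, even while the unencoded bytes are still
    // below the 64 KB block size.
    // With the default ratio of 1f, the limit equals the block size, and the
    // unencoded-size check keeps the old behavior for encoders that do not
    // expand data.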