diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
index c647e4b..d54cfa1 100644
--- hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
@@ -66,6 +66,7 @@ import org.apache.hadoop.hbase.filter.WhileMatchFilter;
 import org.apache.hadoop.hbase.io.compress.Compression;
 import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
 import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
+import org.apache.hadoop.hbase.regionserver.BloomType;
 import org.apache.hadoop.hbase.trace.SpanReceiverHost;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Hash;
@@ -293,6 +294,7 @@ public class PerformanceEvaluation extends Configured implements Tool {
     HColumnDescriptor family = new HColumnDescriptor(FAMILY_NAME);
     family.setDataBlockEncoding(opts.blockEncoding);
     family.setCompressionType(opts.compression);
+    family.setBloomFilterType(opts.bloomType);
     if (opts.inMemoryCF) {
       family.setInMemory(true);
     }
@@ -510,6 +512,7 @@ public class PerformanceEvaluation extends Configured implements Tool {
       this.compression = that.compression;
       this.blockEncoding = that.blockEncoding;
       this.filterAll = that.filterAll;
+      this.bloomType = that.bloomType;
       this.valueRandom = that.valueRandom;
       this.valueSize = that.valueSize;
       this.period = that.period;
@@ -535,6 +538,7 @@ public class PerformanceEvaluation extends Configured implements Tool {
     public boolean inMemoryCF = false;
     public int presplitRegions = 0;
     public Compression.Algorithm compression = Compression.Algorithm.NONE;
+    public BloomType bloomType = BloomType.ROW;
     public DataBlockEncoding blockEncoding = DataBlockEncoding.NONE;
     public boolean valueRandom = false;
     public int valueSize = DEFAULT_VALUE_LENGTH;
@@ -1238,6 +1242,7 @@ public class PerformanceEvaluation extends Configured implements Tool {
         + " there by not returning any thing back to the client. Helps to check the server side"
         + " performance. Uses FilterAllFilter internally. ");
     System.err.println(" latency         Set to report operation latencies. Default: False");
Default: False"); + System.err.println(" bloomFilter Bloom filter type, one of " + Arrays.toString(BloomType.values())); System.err.println(" valueSize Pass value size to use: Default: 1024"); System.err.println(" valueRandom Set if we should vary value size between 0 and " + "'valueSize': Default: Not set."); @@ -1366,7 +1371,7 @@ public class PerformanceEvaluation extends Configured implements Tool { opts.presplitRegions = Integer.parseInt(cmd.substring(presplit.length())); continue; } - + final String inMemory = "--inmemory="; if (cmd.startsWith(inMemory)) { opts.inMemoryCF = Boolean.parseBoolean(cmd.substring(inMemory.length())); @@ -1409,6 +1414,12 @@ public class PerformanceEvaluation extends Configured implements Tool { continue; } + final String bloomFilter = "--bloomFilter"; + if (cmd.startsWith(bloomFilter)) { + opts.bloomType = BloomType.valueOf(cmd.substring(bloomFilter.length())); + continue; + } + final String valueSize = "--valueSize="; if (cmd.startsWith(valueSize)) { opts.valueSize = Integer.parseInt(cmd.substring(valueSize.length())); diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/util/LoadTestTool.java hbase-server/src/test/java/org/apache/hadoop/hbase/util/LoadTestTool.java index 4d17556..a893317 100644 --- hbase-server/src/test/java/org/apache/hadoop/hbase/util/LoadTestTool.java +++ hbase-server/src/test/java/org/apache/hadoop/hbase/util/LoadTestTool.java @@ -39,8 +39,8 @@ import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HTableDescriptor; -import org.apache.hadoop.hbase.PerformanceEvaluation; import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.Durability; import org.apache.hadoop.hbase.client.HBaseAdmin; import org.apache.hadoop.hbase.io.compress.Compression; import org.apache.hadoop.hbase.io.crypto.Cipher; @@ -112,6 +112,8 @@ public class LoadTestTool extends AbstractHBaseTool { private static final String OPT_BLOOM = "bloom"; private static final String OPT_COMPRESSION = "compression"; + private static final String OPT_DEFERRED_LOG_FLUSH = "deferredlogflush"; + public static final String OPT_DEFERRED_LOG_FLUSH_USAGE = "Enable deferred log flush."; public static final String OPT_DATA_BLOCK_ENCODING = HColumnDescriptor.DATA_BLOCK_ENCODING.toLowerCase(); @@ -157,6 +159,7 @@ public class LoadTestTool extends AbstractHBaseTool { protected long startKey, endKey; protected boolean isWrite, isRead, isUpdate; + protected boolean deferredLogFlush; // Column family options protected DataBlockEncoding dataBlockEncodingAlgo; @@ -260,6 +263,7 @@ public class LoadTestTool extends AbstractHBaseTool { } LOG.info("Enabling table " + tableName); admin.enableTable(tableName); + admin.close(); } @Override @@ -306,6 +310,7 @@ public class LoadTestTool extends AbstractHBaseTool { + "tool will create the test table with n regions per server"); addOptWithArg(OPT_ENCRYPTION, OPT_ENCRYPTION_USAGE); + addOptNoArg(OPT_DEFERRED_LOG_FLUSH, OPT_DEFERRED_LOG_FLUSH_USAGE); } @Override @@ -319,6 +324,7 @@ public class LoadTestTool extends AbstractHBaseTool { isRead = cmd.hasOption(OPT_READ); isUpdate = cmd.hasOption(OPT_UPDATE); isInitOnly = cmd.hasOption(OPT_INIT_ONLY); + deferredLogFlush = cmd.hasOption(OPT_DEFERRED_LOG_FLUSH); if (!isWrite && !isRead && !isUpdate && !isInitOnly) { throw new IllegalArgumentException("Either -" + OPT_WRITE + " or " + @@ -434,18 +440,25 @@ public class LoadTestTool extends AbstractHBaseTool { 
           Compression.Algorithm.valueOf(compressStr);
 
     String bloomStr = cmd.getOptionValue(OPT_BLOOM);
-    bloomType = bloomStr == null ? null :
+    bloomType = bloomStr == null ? BloomType.ROW :
         BloomType.valueOf(bloomStr);
 
     inMemoryCF = cmd.hasOption(OPT_INMEMORY);
 
     if (cmd.hasOption(OPT_ENCRYPTION)) {
       cipher = Encryption.getCipher(conf, cmd.getOptionValue(OPT_ENCRYPTION));
     }
+
   }
 
   public void initTestTable() throws IOException {
-    HBaseTestingUtility.createPreSplitLoadTestTable(conf, tableName,
-      COLUMN_FAMILY, compressAlgo, dataBlockEncodingAlgo);
+    HTableDescriptor desc = new HTableDescriptor(tableName);
+    if (deferredLogFlush) {
+      desc.setDurability(Durability.ASYNC_WAL);
+    }
+    HColumnDescriptor hcd = new HColumnDescriptor(COLUMN_FAMILY);
+    hcd.setDataBlockEncoding(dataBlockEncodingAlgo);
+    hcd.setCompressionType(compressAlgo);
+    HBaseTestingUtility.createPreSplitLoadTestTable(conf, desc, hcd);
     applyColumnFamilyOptions(tableName, COLUMN_FAMILIES);
   }
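
For reviewers who want to exercise the new options end to end, below is a rough, untested sketch that drives both tools in-process via ToolRunner. Only the flag spellings (--bloomFilter=, -deferredlogflush) come from the patch above; the ROWCOL bloom type, the "loadtest" table name, the row/key counts, the class name, and the assumption that PerformanceEvaluation has a Configuration constructor and that LoadTestTool's existing -tn/-num_keys/-write/-bloom options are present in the targeted tree are all illustration-only guesses, not part of this change.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.PerformanceEvaluation;
import org.apache.hadoop.hbase.util.LoadTestTool;
import org.apache.hadoop.util.ToolRunner;

// Hypothetical smoke-test driver; equivalent arguments can also be passed to each
// tool's main() from the command line via "hbase <classname> <args>".
public class BloomAndDeferredFlushSmokeTest {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();

    // PerformanceEvaluation: create the test table with a ROWCOL bloom filter via the
    // new --bloomFilter= option, writing a small number of rows with a single local client.
    ToolRunner.run(conf, new PerformanceEvaluation(conf), new String[] {
        "--nomapred", "--rows=1000", "--bloomFilter=ROWCOL", "sequentialWrite", "1" });

    // LoadTestTool: write with the existing -bloom option plus the new -deferredlogflush
    // flag, which per this patch sets Durability.ASYNC_WAL on the created table.
    ToolRunner.run(conf, new LoadTestTool(), new String[] {
        "-tn", "loadtest", "-num_keys", "100000", "-write", "3:1024",
        "-bloom", "ROWCOL", "-deferredlogflush" });
  }
}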