Index: hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
===================================================================
--- hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java	(revision 1463118)
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java	(working copy)
@@ -136,6 +136,7 @@
   private DataBlockEncoding blockEncoding = DataBlockEncoding.NONE;
   private boolean flushCommits = true;
   private boolean writeToWAL = true;
+  private boolean inMemoryCF = false;
   private int presplitRegions = 0;
 
   private static final Path PERF_EVAL_DIR = new Path("performance_evaluation");
@@ -525,6 +526,9 @@
       HColumnDescriptor family = new HColumnDescriptor(FAMILY_NAME);
       family.setDataBlockEncoding(blockEncoding);
       family.setCompressionType(compression);
+      if (inMemoryCF) {
+        family.setInMemory(true);
+      }
       TABLE_DESCRIPTOR.addFamily(family);
     }
     return TABLE_DESCRIPTOR;
@@ -1576,6 +1580,9 @@
     System.err.println(" flushCommits Used to determine if the test should flush the table. Default: false");
     System.err.println(" writeToWAL Set writeToWAL on puts. Default: True");
     System.err.println(" presplit Create presplit table. Recommended for accurate perf analysis (see guide). Default: disabled");
+    System.err
+        .println(" inmemory Load the CF in memory. HBase tries to serve reads from HFile " +
+            "blocks loaded in memory as far as possible. Default: false");
     System.err.println();
     System.err.println(" Note: -D properties will be applied to the conf used. ");
     System.err.println(" For example: ");
@@ -1680,7 +1687,13 @@
       this.presplitRegions = Integer.parseInt(cmd.substring(presplit.length()));
       continue;
     }
-
+
+    final String inMemory = "--inmemory=";
+    if (cmd.startsWith(inMemory)) {
+      this.inMemoryCF = Boolean.parseBoolean(cmd.substring(inMemory.length()));
+      continue;
+    }
+
     Class cmdClass = determineCommandClass(cmd);
     if (cmdClass != null) {
       getArgs(i + 1, args);
Index: hbase-server/src/test/java/org/apache/hadoop/hbase/util/LoadTestTool.java
===================================================================
--- hbase-server/src/test/java/org/apache/hadoop/hbase/util/LoadTestTool.java	(revision 1463118)
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/util/LoadTestTool.java	(working copy)
@@ -86,6 +86,10 @@
   public static final String OPT_ENCODE_IN_CACHE_ONLY_USAGE =
       "If this is specified, data blocks will only be encoded in block " +
       "cache but not on disk";
+
+  public static final String OPT_INMEMORY = "in_memory";
+  public static final String OPT_USAGE_IN_MEMORY = "If this is specified, then the column family " +
+      "is kept in memory. HBase tries to serve the HFile blocks from memory as far as possible";
 
   private static final String OPT_KEY_WINDOW = "key_window";
   private static final String OPT_WRITE = "write";
@@ -116,7 +120,7 @@
   private boolean encodeInCacheOnly;
   private Compression.Algorithm compressAlgo;
   private BloomType bloomType;
-
+  private boolean inMemoryCF;
   // Writer options
   private int numWriterThreads = DEFAULT_NUM_THREADS;
   private int minColsPerKey, maxColsPerKey;
@@ -177,6 +181,9 @@
       columnDesc.setDataBlockEncoding(dataBlockEncodingAlgo);
       columnDesc.setEncodeOnDisk(!encodeInCacheOnly);
     }
+    if (inMemoryCF) {
+      columnDesc.setInMemory(inMemoryCF);
+    }
     if (isNewCf) {
       admin.addColumn(tableName, columnDesc);
     } else {
@@ -208,6 +215,7 @@
     addOptNoArg(OPT_MULTIPUT, "Whether to use multi-puts as opposed to " +
         "separate puts for every column in a row");
     addOptNoArg(OPT_ENCODE_IN_CACHE_ONLY, OPT_ENCODE_IN_CACHE_ONLY_USAGE);
+    addOptNoArg(OPT_INMEMORY, OPT_USAGE_IN_MEMORY);
 
     addOptWithArg(OPT_NUM_KEYS, "The number of keys to read/write");
     addOptWithArg(OPT_START_KEY, "The first key to read/write " +
@@ -318,6 +326,9 @@
     String bloomStr = cmd.getOptionValue(OPT_BLOOM);
     bloomType = bloomStr == null ? null :
         BloomType.valueOf(bloomStr);
+
+    inMemoryCF = cmd.hasOption(OPT_INMEMORY);
+
   }
 
   public void initTestTable() throws IOException {
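For illustration, here is a minimal standalone sketch of what the patch's setInMemory(true) call amounts to when done directly through the client API of this era, outside the two test tools. The table name "TestTable" and family name "info" are made up for the example; note that IN_MEMORY is a block-cache priority hint, so reads are preferentially served from cached blocks rather than guaranteed to never touch disk.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.client.HBaseAdmin;

public class InMemoryCfSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    HBaseAdmin admin = new HBaseAdmin(conf);
    try {
      HTableDescriptor desc = new HTableDescriptor("TestTable");   // example table name
      HColumnDescriptor family = new HColumnDescriptor("info");    // example family name
      // The same call the patch makes in getTableDescriptor() and
      // applyColumnFamilyOptions(): flag the family's blocks for
      // in-memory priority in the block cache.
      family.setInMemory(true);
      desc.addFamily(family);
      admin.createTable(desc);
    } finally {
      admin.close();
    }
  }
}

Likely invocations of the new options, inferred from the parsing code above (the surrounding arguments are examples only; required options depend on the tool version):

  hbase org.apache.hadoop.hbase.PerformanceEvaluation --inmemory=true sequentialWrite 1
  hbase org.apache.hadoop.hbase.util.LoadTestTool -tn TestTable -write 1:64 -num_keys 100000 -in_memory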