Index: hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
===================================================================
--- hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java	(revision 1466569)
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java	(working copy)
@@ -59,6 +59,7 @@
 import org.apache.hadoop.hbase.filter.BinaryComparator;
 import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
 import org.apache.hadoop.hbase.io.compress.Compression;
+import org.apache.hadoop.hbase.io.compress.Compression.Algorithm;
 import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.FSUtils;
@@ -574,6 +575,12 @@
     final List threads = new ArrayList(this.N);
     final long[] timings = new long[this.N];
     final int perClientRows = R/N;
+    final byte[] tableName = this.tableName;
+    final DataBlockEncoding encoding = this.blockEncoding;
+    final boolean flushCommits = this.flushCommits;
+    final Compression.Algorithm compression = this.compression;
+    final boolean writeToWal = this.writeToWAL;
+    final int preSplitRegions = this.presplitRegions;
     for (int i = 0; i < this.N; i++) {
       final int index = i;
       Thread t = new Thread ("TestClient-" + i) {
@@ -581,6 +588,12 @@
         public void run() {
           super.run();
           PerformanceEvaluation pe = new PerformanceEvaluation(getConf());
+          pe.setTableName(tableName);
+          pe.setBlockEncoding(encoding);
+          pe.setFlushCommits(flushCommits);
+          pe.setCompression(compression);
+          pe.setWriteToWAL(writeToWal);
+          pe.setPresplitRegions(preSplitRegions);
           pe.N = N;
           try {
             long elapsedTime = pe.runOneClient(cmd, index * perClientRows,
@@ -626,6 +639,30 @@
       + "\tAvg: " + (total / this.N) + "ms");
   }

+  protected void setPresplitRegions(int preSplitRegions) {
+    this.presplitRegions = preSplitRegions;
+  }
+
+  protected void setWriteToWAL(boolean writeToWAL) {
+    this.writeToWAL = writeToWAL;
+  }
+
+  protected void setCompression(Algorithm compression) {
+    this.compression = compression;
+  }
+
+  protected void setFlushCommits(boolean flushCommits) {
+    this.flushCommits = flushCommits;
+  }
+
+  protected void setBlockEncoding(DataBlockEncoding blockEncoding) {
+    this.blockEncoding = blockEncoding;
+  }
+
+  protected void setTableName(byte[] tableName) {
+    this.tableName = tableName;
+  }
+
   /*
    * Run a mapreduce job.  Run as many maps as asked-for clients.
    * Before we start up the job, write out an input file with instruction