Index: src/test/java/org/apache/hadoop/hbase/regionserver/wal/HLogPerformanceEvaluation.java
===================================================================
--- src/test/java/org/apache/hadoop/hbase/regionserver/wal/HLogPerformanceEvaluation.java	(revision 1327302)
+++ src/test/java/org/apache/hadoop/hbase/regionserver/wal/HLogPerformanceEvaluation.java	(working copy)
@@ -36,6 +36,7 @@
 
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
@@ -129,7 +130,6 @@
     // Process command line args
     for (int i = 0; i < args.length; i++) {
       String cmd = args[i];
-
       try {
         if (cmd.equals("-threads")) {
           numThreads = Integer.parseInt(args[++i]);
@@ -153,7 +153,12 @@
           verbose = true;
         } else if (cmd.equals("-roll")) {
           roll = Long.parseLong(args[++i]);
+        } else if (cmd.equals("-h")) {
+          printUsageAndExit();
+        } else if (cmd.equals("--help")) {
+          printUsageAndExit();
         } else {
+          System.err.println("UNEXPECTED: " + cmd);
           printUsageAndExit();
         }
       } catch (Exception e) {
@@ -163,6 +168,7 @@
 
     // Run HLog Performance Evaluation
     FileSystem fs = FileSystem.get(getConf());
+    LOG.info("" + fs);
     try {
       if (rootRegionDir == null) {
         rootRegionDir = TEST_UTIL.getDataTestDir("HLogPerformanceEvaluation");
@@ -280,6 +286,12 @@
     System.err.println("  -verify          Verify edits written in sequence");
     System.err.println("  -verbose         Output extra info; e.g. all edit seq ids when verifying");
     System.err.println("  -roll <N>        Roll the way every N appends");
+    System.err.println("");
+    System.err.println("Examples:");
+    System.err.println("");
+    System.err.println(" To run 100 threads on hdfs with log rolling every 10k edits and verification afterward do:");
+    System.err.println(" $ ./bin/hbase org.apache.hadoop.hbase.regionserver.wal.HLogPerformanceEvaluation \\");
+    System.err.println("    -conf ./core-site.xml -path hdfs://example.org:7000/tmp -threads 100 -roll 10000 -verify");
     System.exit(1);
   }
 
@@ -337,7 +349,7 @@
   }
 
   public static void main(String[] args) throws Exception {
-    int exitCode = ToolRunner.run(new HLogPerformanceEvaluation(), args);
+    int exitCode = ToolRunner.run(HBaseConfiguration.create(), new HLogPerformanceEvaluation(), args);
     System.exit(exitCode);
   }
-}
\ No newline at end of file
+}
Index: src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestHLog.java
===================================================================
--- src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestHLog.java	(revision 1327302)
+++ src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestHLog.java	(working copy)
@@ -533,8 +533,9 @@
         KeyValue kv = val.getKeyValues().get(0);
         assertTrue(Bytes.equals(HLog.METAROW, kv.getRow()));
         assertTrue(Bytes.equals(HLog.METAFAMILY, kv.getFamily()));
-        assertEquals(0, Bytes.compareTo(HLog.COMPLETE_CACHE_FLUSH,
-          val.getKeyValues().get(0).getValue()));
+        assertTrue(Bytes.startsWith(
+          val.getKeyValues().get(0).getValue(),
+          HLog.COMPLETE_CACHE_FLUSH));
         System.out.println(key + " " + val);
       }
     } finally {
@@ -601,8 +602,9 @@
       assertTrue(Bytes.equals(tableName, entry.getKey().getTablename()));
       assertTrue(Bytes.equals(HLog.METAROW, val.getRow()));
       assertTrue(Bytes.equals(HLog.METAFAMILY, val.getFamily()));
-      assertEquals(0, Bytes.compareTo(HLog.COMPLETE_CACHE_FLUSH,
-        val.getValue()));
+      assertTrue(Bytes.startsWith(
+        val.getValue(),
+        HLog.COMPLETE_CACHE_FLUSH));
       System.out.println(entry.getKey() + " " + val);
     }
   } finally {
Index: src/test/java/org/apache/hadoop/hbase/io/encoding/TestLoadAndSwitchEncodeOnDisk.java
===================================================================
--- src/test/java/org/apache/hadoop/hbase/io/encoding/TestLoadAndSwitchEncodeOnDisk.java	(revision 1327302)
+++ src/test/java/org/apache/hadoop/hbase/io/encoding/TestLoadAndSwitchEncodeOnDisk.java	(working copy)
@@ -16,12 +16,21 @@
  */
 package org.apache.hadoop.hbase.io.encoding;
 
+import java.io.IOException;
 import java.util.Arrays;
 import java.util.Collection;
+import java.util.Map;
+import java.util.NavigableMap;
 
 import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.MediumTests;
+import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.ResultScanner;
+import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.io.hfile.CacheConfig;
 import org.apache.hadoop.hbase.io.hfile.Compression;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
@@ -64,8 +73,10 @@
     super.loadTest();
 
     HColumnDescriptor hcd = getColumnDesc(admin);
-    System.err.println("\nDisabling encode-on-disk. Old column descriptor: " +
-      hcd + "\n");
+    System.err.println("\nDisabling encode-on-disk. Old column descriptor: " + hcd + "\n");
+    HTable t = new HTable(this.conf, TABLE);
+    assertAllOnLine(t);
+
     admin.disableTable(TABLE);
     hcd.setEncodeOnDisk(false);
     admin.modifyColumn(TABLE, hcd);
@@ -76,6 +87,10 @@
     System.err.println("\nNew column descriptor: " +
       getColumnDesc(admin) + "\n");
 
+    // The table may not have all regions on line yet.  Assert online before
+    // moving to major compact.
+    assertAllOnLine(t);
+
     System.err.println("\nCompacting the table\n");
     admin.majorCompact(TABLE);
     // Wait until compaction completes
@@ -88,4 +103,15 @@
     System.err.println("\nDone with the test, shutting down the cluster\n");
   }
 
+  private void assertAllOnLine(final HTable t) throws IOException {
+    NavigableMap<HRegionInfo, ServerName> regions = t.getRegionLocations();
+    for (Map.Entry<HRegionInfo, ServerName> e: regions.entrySet()) {
+      byte [] startkey = e.getKey().getStartKey();
+      Scan s = new Scan(startkey);
+      ResultScanner scanner = t.getScanner(s);
+      Result r = scanner.next();
+      org.junit.Assert.assertTrue(r != null && r.size() > 0);
+      scanner.close();
+    }
+  }
 }
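
Note on the Bytes.startsWith change in the TestHLog hunks: Bytes.startsWith(bytes, prefix) passes whenever the stored value merely begins with HLog.COMPLETE_CACHE_FLUSH, whereas the old Bytes.compareTo(...) == 0 assertion required byte-for-byte equality and fails as soon as anything trails the marker. A minimal standalone sketch of the difference, assuming "HBASE::CACHEFLUSH" as an illustrative stand-in for the HLog.COMPLETE_CACHE_FLUSH bytes (not a value taken from this patch):

  import org.apache.hadoop.hbase.util.Bytes;

  public class StartsWithSketch {
    public static void main(String[] args) {
      // Illustrative stand-in for HLog.COMPLETE_CACHE_FLUSH.
      byte[] marker = Bytes.toBytes("HBASE::CACHEFLUSH");
      byte[] exact = Bytes.toBytes("HBASE::CACHEFLUSH");
      byte[] suffixed = Bytes.toBytes("HBASE::CACHEFLUSH plus trailing bytes");

      // Old-style check: exact equality breaks once a suffix is appended.
      System.out.println(Bytes.compareTo(marker, exact) == 0);    // true
      System.out.println(Bytes.compareTo(marker, suffixed) == 0); // false

      // New-style check: a prefix match passes in both cases.
      System.out.println(Bytes.startsWith(exact, marker));        // true
      System.out.println(Bytes.startsWith(suffixed, marker));     // true
    }
  }

Asserting on the prefix keeps the test stable if extra payload ever follows the flush marker in the edit's value.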