commit 05bacdd66add21d81bfb963bced21a79da2294b0
Author: Enis Soztutar
Date:   Wed Jan 22 17:18:37 2014 -0800

    HBASE-10572 Add IntegrationTest for timebounded reads from region replicas with ChaosMonkey

diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java
index dfeb9ef..6394d66 100644
--- hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java
+++ hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java
@@ -561,7 +561,7 @@ public class HRegionInfo implements Comparable {
         break;
       }
     }
-    if(offset == -1) throw new IOException("Invalid regionName format");
+    if(offset == -1) throw new IOException("Invalid regionName format: " + Bytes.toString(regionName));
     byte[] tableName = new byte[offset];
     System.arraycopy(regionName, 0, tableName, 0, offset);
     offset = -1;
@@ -590,7 +590,7 @@ public class HRegionInfo implements Comparable {
         break;
       }
     }
-    if(offset == -1) throw new IOException("Invalid regionName format");
+    if(offset == -1) throw new IOException("Invalid regionName format: " + Bytes.toString(regionName));
     byte [] startKey = HConstants.EMPTY_BYTE_ARRAY;
     if(offset != tableName.length + 1) {
       startKey = new byte[offset - tableName.length - 1];
diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/catalog/MetaReader.java hbase-client/src/main/java/org/apache/hadoop/hbase/catalog/MetaReader.java
index fc30cbc..6d3e400 100644
--- hbase-client/src/main/java/org/apache/hadoop/hbase/catalog/MetaReader.java
+++ hbase-client/src/main/java/org/apache/hadoop/hbase/catalog/MetaReader.java
@@ -237,7 +237,7 @@ public class MetaReader {
       parsedInfo = parseRegionInfoFromRegionName(regionName);
       row = getMetaKeyForRegion(parsedInfo);
     } catch (Exception parseEx) {
-      LOG.warn("Received parse exception:" + parseEx);
+      // Ignore. This is used with tableName passed as regionName.
     }
     Get get = new Get(row);
     get.addFamily(HConstants.CATALOG_FAMILY);
diff --git hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestBase.java hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestBase.java
index c1669c2..39b821e 100644
--- hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestBase.java
+++ hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestBase.java
@@ -100,13 +100,21 @@ public abstract class IntegrationTestBase extends AbstractHBaseTool {
     util = getTestingUtil(getConf());
     MonkeyFactory fact = MonkeyFactory.getFactory(monkeyToUse);
     if (fact == null) {
-      // Run with no monkey in distributed context, with real monkey in local test context.
-      fact = MonkeyFactory.getFactory(
-        util.isDistributedCluster() ? MonkeyFactory.CALM : MonkeyFactory.SLOW_DETERMINISTIC);
+      fact = getDefaultMonkeyFactory();
     }
     monkey = fact.setUtil(util)
         .setTableName(getTablename())
         .setColumnFamilies(getColumnFamilies()).build();
+    startMonkey();
+  }
+
+  protected MonkeyFactory getDefaultMonkeyFactory() {
+    // Run with no monkey in distributed context, with real monkey in local test context.
+    return MonkeyFactory.getFactory(
+      util.isDistributedCluster() ? MonkeyFactory.CALM : MonkeyFactory.SLOW_DETERMINISTIC);
+  }
+
+  protected void startMonkey() throws Exception {
     monkey.start();
   }
@@ -125,6 +133,7 @@ public abstract class IntegrationTestBase extends AbstractHBaseTool {
     if (this.util == null) {
       if (conf == null) {
         this.util = new IntegrationTestingUtility();
+        this.setConf(util.getConfiguration());
       } else {
         this.util = new IntegrationTestingUtility(conf);
       }
diff --git hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestIngest.java hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestIngest.java
index 6717933..f71ff20 100644
--- hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestIngest.java
+++ hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestIngest.java
@@ -42,21 +42,39 @@ import com.google.common.collect.Sets;
 @Category(IntegrationTests.class)
 public class IntegrationTestIngest extends IntegrationTestBase {
   public static final char HIPHEN = '-';
-  private static final int SERVER_COUNT = 4; // number of slaves for the smallest cluster
+  private static final int SERVER_COUNT = 1; // number of slaves for the smallest cluster
   private static final long DEFAULT_RUN_TIME = 20 * 60 * 1000;
   private static final long JUNIT_RUN_TIME = 10 * 60 * 1000;

   /** A soft limit on how long we should run */
-  private static final String RUN_TIME_KEY = "hbase.%s.runtime";
+  protected static final String RUN_TIME_KEY = "hbase.%s.runtime";
+
+  protected static final String NUM_KEYS_PER_SERVER_KEY = "num_keys_per_server";
+  protected static final long DEFAULT_NUM_KEYS_PER_SERVER = 2500;
+
+  protected static final String NUM_WRITE_THREADS_KEY = "num_write_threads";
+  protected static final int DEFAULT_NUM_WRITE_THREADS = 20;
+
+  protected static final String NUM_READ_THREADS_KEY = "num_read_threads";
+  protected static final int DEFAULT_NUM_READ_THREADS = 20;

   protected static final Log LOG = LogFactory.getLog(IntegrationTestIngest.class);
   protected IntegrationTestingUtility util;
   protected HBaseCluster cluster;
   protected LoadTestTool loadTool;
+
+  protected String[] LOAD_TEST_TOOL_INIT_ARGS = {
+    LoadTestTool.OPT_COMPRESSION,
+    LoadTestTool.OPT_DATA_BLOCK_ENCODING,
+    LoadTestTool.OPT_INMEMORY,
+    LoadTestTool.OPT_ENCRYPTION,
+    LoadTestTool.OPT_NUM_REGIONS_PER_SERVER,
+    LoadTestTool.OPT_REGION_REPLICATION,
+  };
+
   @Override
   public void setUpCluster() throws Exception {
-    util = getTestingUtil(null);
+    util = getTestingUtil(getConf());
     LOG.debug("Initializing/checking cluster has " + SERVER_COUNT + " servers");
     util.initializeCluster(SERVER_COUNT);
     LOG.debug("Done initializing/checking cluster");
@@ -70,7 +88,7 @@ public class IntegrationTestIngest extends IntegrationTestBase {
   }

   protected void initTable() throws IOException {
-    int ret = loadTool.run(new String[] { "-tn", getTablename(), "-init_only" });
+    int ret = loadTool.run(getArgsForLoadTestToolInitTable());
     Assert.assertEquals("Failed to initialize LoadTestTool", 0, ret);
   }
@@ -82,16 +100,24 @@ public class IntegrationTestIngest extends IntegrationTestBase {

   @Test
   public void testIngest() throws Exception {
-    runIngestTest(JUNIT_RUN_TIME, 2500, 10, 1024, 10);
+    runIngestTest(JUNIT_RUN_TIME, 2500, 10, 1024, 10, 20);
   }

-  private void internalRunIngestTest(long runTime) throws Exception {
-    runIngestTest(runTime, 2500, 10, 1024, 10);
+  protected void internalRunIngestTest(long runTime) throws Exception {
+    String clazz = this.getClass().getSimpleName();
+    long numKeysPerServer = conf.getLong(String.format("%s.%s", clazz, NUM_KEYS_PER_SERVER_KEY),
+      DEFAULT_NUM_KEYS_PER_SERVER);
+    int numWriteThreads = conf.getInt(
+      String.format("%s.%s", clazz, NUM_WRITE_THREADS_KEY), DEFAULT_NUM_WRITE_THREADS);
+    int numReadThreads = conf.getInt(
+      String.format("%s.%s", clazz, NUM_READ_THREADS_KEY), DEFAULT_NUM_READ_THREADS);
+    runIngestTest(runTime, numKeysPerServer, 10, 1024, numWriteThreads, numReadThreads);
   }

   @Override
   public String getTablename() {
-    return this.getClass().getSimpleName();
+    String clazz = this.getClass().getSimpleName();
+    return conf.get(String.format("%s.%s", clazz, LoadTestTool.OPT_TABLE_NAME), clazz);
   }

   @Override
@@ -105,8 +131,9 @@ public class IntegrationTestIngest extends IntegrationTestBase {
     }
   }

-  protected void runIngestTest(long defaultRunTime, int keysPerServerPerIter, int colsPerKey,
-      int recordSize, int writeThreads) throws Exception {
+  protected void runIngestTest(long defaultRunTime, long keysPerServerPerIter, int colsPerKey,
+      int recordSize, int writeThreads, int readThreads) throws Exception {
+    LOG.info("Running ingest");
     LOG.info("Cluster size:" + util.getHBaseClusterInterface().getClusterStatus().getServersSize());
@@ -137,7 +164,8 @@ public class IntegrationTestIngest extends IntegrationTestBase {
         Assert.fail(errorMsg);
       }

-      ret = loadTool.run(getArgsForLoadTestTool("-read", "100:20", startKey, numKeys));
+      ret = loadTool.run(getArgsForLoadTestTool("-read", String.format("100:%d", readThreads)
+        , startKey, numKeys));
       if (0 != ret) {
         String errorMsg = "Verification failed with error code " + ret;
         LOG.error(errorMsg);
@@ -147,6 +175,23 @@ public class IntegrationTestIngest extends IntegrationTestBase {
     }
   }

+  protected String[] getArgsForLoadTestToolInitTable() {
+    List args = new ArrayList();
+    args.add("-tn");
+    args.add(getTablename());
+    // pass all remaining args from conf with keys .
+    String clazz = this.getClass().getSimpleName();
+    for (String arg : LOAD_TEST_TOOL_INIT_ARGS) {
+      String val = conf.get(String.format("%s.%s", clazz, arg));
+      if (val != null) {
+        args.add("-" + arg);
+        args.add(val);
+      }
+    }
+    args.add("-init_only");
+    return args.toArray(new String[args.size()]);
+  }
+
   protected String[] getArgsForLoadTestTool(String mode, String modeSpecificArg, long startKey,
       long numKeys) {
     List args = new ArrayList();
@@ -159,11 +204,12 @@ public class IntegrationTestIngest extends IntegrationTestBase {
     args.add("-num_keys");
     args.add(String.valueOf(numKeys));
     args.add("-skip_init");
+
     return args.toArray(new String[args.size()]);
   }

   /** Estimates a data size based on the cluster size */
-  private long getNumKeys(int keysPerServer)
+  protected long getNumKeys(long keysPerServer)
       throws IOException {
     int numRegionServers = cluster.getClusterStatus().getServersSize();
     return keysPerServer * numRegionServers;
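Note on the change above: IntegrationTestIngest now pulls its knobs from the job Configuration under keys of the form "<simple class name>.<suffix>" (for example "IntegrationTestIngest.num_keys_per_server"), and forwards any option listed in LOAD_TEST_TOOL_INIT_ARGS to LoadTestTool when it creates the table. A minimal sketch of driving those keys programmatically; the launcher class name and the concrete values are illustrative, not part of this patch:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.IntegrationTestIngest;
    import org.apache.hadoop.hbase.IntegrationTestingUtility;
    import org.apache.hadoop.util.ToolRunner;

    public class IngestLauncherSketch {                                  // hypothetical launcher
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Keys are "<simple class name>.<suffix>", matching the constants introduced above.
        conf.setLong("IntegrationTestIngest.num_keys_per_server", 10000L);  // NUM_KEYS_PER_SERVER_KEY
        conf.setInt("IntegrationTestIngest.num_write_threads", 30);         // NUM_WRITE_THREADS_KEY
        conf.setInt("IntegrationTestIngest.num_read_threads", 30);          // NUM_READ_THREADS_KEY
        // Any option listed in LOAD_TEST_TOOL_INIT_ARGS can be forwarded the same way:
        conf.set("IntegrationTestIngest.region_replication", "2");
        IntegrationTestingUtility.setUseDistributedCluster(conf);
        System.exit(ToolRunner.run(conf, new IntegrationTestIngest(), args));
      }
    }

Because the test runs through ToolRunner, the same keys can also be supplied as -D properties on the command line.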
diff --git hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/factories/DestructiveMonkeyFactory.java hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/factories/DestructiveMonkeyFactory.java
new file mode 100644
index 0000000..1ef0558
--- /dev/null
+++ hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/factories/DestructiveMonkeyFactory.java
@@ -0,0 +1,62 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.chaos.factories;
+
+import org.apache.hadoop.hbase.chaos.actions.Action;
+import org.apache.hadoop.hbase.chaos.actions.BatchRestartRsAction;
+import org.apache.hadoop.hbase.chaos.actions.DumpClusterStatusAction;
+import org.apache.hadoop.hbase.chaos.actions.RestartActiveMasterAction;
+import org.apache.hadoop.hbase.chaos.actions.RestartRandomRsAction;
+import org.apache.hadoop.hbase.chaos.actions.RestartRsHoldingMetaAction;
+import org.apache.hadoop.hbase.chaos.actions.RollingBatchRestartRsAction;
+import org.apache.hadoop.hbase.chaos.monkies.ChaosMonkey;
+import org.apache.hadoop.hbase.chaos.monkies.PolicyBasedChaosMonkey;
+import org.apache.hadoop.hbase.chaos.policies.CompositeSequentialPolicy;
+import org.apache.hadoop.hbase.chaos.policies.DoActionsOncePolicy;
+import org.apache.hadoop.hbase.chaos.policies.PeriodicRandomActionPolicy;
+
+/**
+ * Creates ChaosMonkeys for doing server restart actions, but not
+ * flush / compact / snapshot kind of actions.
+ */
+public class DestructiveMonkeyFactory extends MonkeyFactory {
+
+  @Override
+  public ChaosMonkey build() {
+    // Destructive actions to mess things around.
+    Action[] actions1 = new Action[] {
+      new RestartRandomRsAction(60000),
+      new BatchRestartRsAction(5000, 0.5f),
+      new RestartActiveMasterAction(5000),
+      new RollingBatchRestartRsAction(5000, 1.0f),
+      new RestartRsHoldingMetaAction(35000)
+    };
+
+    // Action to log more info for debugging
+    Action[] actions2 = new Action[] {
+      new DumpClusterStatusAction()
+    };
+
+    return new PolicyBasedChaosMonkey(util,
+      new CompositeSequentialPolicy(
+        new DoActionsOncePolicy(60 * 1000, actions1),
+        new PeriodicRandomActionPolicy(60 * 1000, actions1)),
+      new PeriodicRandomActionPolicy(60 * 1000, actions2));
+  }
+}
diff --git hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/factories/MonkeyFactory.java hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/factories/MonkeyFactory.java
index 9518689..520c872 100644
--- hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/factories/MonkeyFactory.java
+++ hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/factories/MonkeyFactory.java
@@ -21,10 +21,11 @@ package org.apache.hadoop.hbase.chaos.factories;
 import java.util.Map;
 import java.util.Set;

-import com.google.common.collect.ImmutableMap;
 import org.apache.hadoop.hbase.IntegrationTestingUtility;
 import org.apache.hadoop.hbase.chaos.monkies.ChaosMonkey;

+import com.google.common.collect.ImmutableMap;
+
 /**
  * Base class of the factory that will create a ChaosMonkey.
  */
@@ -56,11 +57,13 @@ public abstract class MonkeyFactory {
   // TODO: the name has become a misnomer since the default (not-slow) monkey has been removed
   public static final String SLOW_DETERMINISTIC = "slowDeterministic";
   public static final String UNBALANCE = "unbalance";
+  public static final String DESTRUCTIVE = "destructive";

   public static Map FACTORIES = ImmutableMap.builder()
     .put(CALM, new CalmMonkeyFactory())
     .put(SLOW_DETERMINISTIC, new SlowDeterministicMonkeyFactory())
     .put(UNBALANCE, new UnbalanceMonkeyFactory())
+    .put(DESTRUCTIVE, new DestructiveMonkeyFactory())
     .build();

   public static MonkeyFactory getFactory(String factoryName) {
diff --git hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestTimeBoundedRequestsWithRegionReplicas.java hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestTimeBoundedRequestsWithRegionReplicas.java
new file mode 100644
index 0000000..964492c
--- /dev/null
+++ hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestTimeBoundedRequestsWithRegionReplicas.java
@@ -0,0 +1,313 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.test;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.concurrent.Executors;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.ScheduledFuture;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicLong;
+
+import org.apache.commons.lang.math.RandomUtils;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.IntegrationTestIngest;
+import org.apache.hadoop.hbase.IntegrationTestingUtility;
+import org.apache.hadoop.hbase.IntegrationTests;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.chaos.factories.MonkeyFactory;
+import org.apache.hadoop.hbase.client.Consistency;
+import org.apache.hadoop.hbase.client.Get;
+import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.util.LoadTestTool;
+import org.apache.hadoop.hbase.util.MultiThreadedReader;
+import org.apache.hadoop.hbase.util.Threads;
+import org.apache.hadoop.hbase.util.test.LoadTestDataGenerator;
+import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.util.ToolRunner;
+import org.junit.Assert;
+import org.junit.experimental.categories.Category;
+
+import com.google.common.collect.Lists;
+
+/**
+ * TODO
+ * -Dhbase.DIntegrationTestTimeBoundedRequestsWithRegionReplicas.runtime
+ * -DIntegrationTestTimeBoundedRequestsWithRegionReplicas.num_regions_per_server
+ * -DIntegrationTestTimeBoundedRequestsWithRegionReplicas.get_timeout_ms
+ * -DIntegrationTestTimeBoundedRequestsWithRegionReplicas.region_replication
+ * -DIntegrationTestTimeBoundedRequestsWithRegionReplicas.chaos_monkey_delay
+ * -DIntegrationTestTimeBoundedRequestsWithRegionReplicas.num_read_threads
+ * -DIntegrationTestTimeBoundedRequestsWithRegionReplicas.num_write_threads
+ * Use this test with "destructive" ChaosMonkey.
+ */
+@Category(IntegrationTests.class)
+public class IntegrationTestTimeBoundedRequestsWithRegionReplicas extends IntegrationTestIngest {
+
+  private static final Log LOG = LogFactory.getLog(
+    IntegrationTestTimeBoundedRequestsWithRegionReplicas.class);
+
+  private static final String TEST_NAME
+    = IntegrationTestTimeBoundedRequestsWithRegionReplicas.class.getSimpleName();
+
+  protected static final long DEFAULT_GET_TIMEOUT = 5000; // 5 sec
+  protected static final String GET_TIMEOUT_KEY = "get_timeout_ms";
+
+  protected static final long DEFAUL_CHAOS_MONKEY_DELAY = 20 * 1000; // 20 sec
+  protected static final String CHAOS_MONKEY_DELAY_KEY = "chaos_monkey_delay";
+
+  protected static final int DEFAULT_REGION_REPLICATION = 3;
+
+  @Override
+  protected void startMonkey() throws Exception {
+    // we do not want to start the monkey at the start of the test.
+  }
+
+  @Override
+  protected MonkeyFactory getDefaultMonkeyFactory() {
+    return MonkeyFactory.getFactory(MonkeyFactory.CALM);
+  }
+
+  @Override
+  public void setConf(Configuration conf) {
+    super.setConf(conf);
+    // default replication for this test is 3
+    String clazz = this.getClass().getSimpleName();
+    conf.setIfUnset(String.format("%s.%s", clazz, LoadTestTool.OPT_REGION_REPLICATION),
+      Integer.toString(DEFAULT_REGION_REPLICATION));
+  }
+
+  protected void writeData(int colsPerKey, int recordSize, int writeThreads,
+      long startKey, long numKeys) throws IOException {
+    int ret = loadTool.run(getArgsForLoadTestTool("-write",
+      String.format("%d:%d:%d", colsPerKey, recordSize, writeThreads), startKey, numKeys));
+    if (0 != ret) {
+      String errorMsg = "Load failed with error code " + ret;
+      LOG.error(errorMsg);
+      Assert.fail(errorMsg);
+    }
+  }
+
+  @Override
+  protected void runIngestTest(long defaultRunTime, long keysPerServerPerIter, int colsPerKey,
+      int recordSize, int writeThreads, int readThreads) throws Exception {
+    LOG.info("Cluster size:"+
+      util.getHBaseClusterInterface().getClusterStatus().getServersSize());
+
+    long start = System.currentTimeMillis();
+    String runtimeKey = String.format(RUN_TIME_KEY, this.getClass().getSimpleName());
+    long runtime = util.getConfiguration().getLong(runtimeKey, defaultRunTime);
+    long startKey = 0;
+
+    long numKeys = getNumKeys(keysPerServerPerIter);
+
+
+    // write data once
+    LOG.info("Writing some data to the table");
+    writeData(colsPerKey, recordSize, writeThreads, startKey, numKeys);
+
+    // flush the table
+    LOG.info("Flushing the table");
+    HBaseAdmin admin = util.getHBaseAdmin();
+    admin.flush(getTablename());
+
+    // re-open the regions to make sure that the replicas are up to date
+    LOG.info("Reopening the table");
+    admin.disableTable(getTablename());
+    admin.enableTable(getTablename());
+
+    // We should only start the ChaosMonkey after the readers are started and have cached
+    // all of the region locations. Because the meta is not replicated, the timebounded reads
+    // will timeout if meta server is killed.
+    // We will start the chaos monkey after 1 minute, and since the readers are reading random
+    // keys, it should be enough to cache every region entry.
+    long chaosMonkeyDelay = conf.getLong(String.format("%s.%s", TEST_NAME, CHAOS_MONKEY_DELAY_KEY)
+      , DEFAUL_CHAOS_MONKEY_DELAY);
+    ScheduledExecutorService executorService = Executors.newSingleThreadScheduledExecutor();
+    LOG.info(String.format("ChaosMonkey delay is : %d seconds. Will start %s " +
+      "ChaosMonkey after delay", chaosMonkeyDelay / 1000, monkeyToUse));
+    ScheduledFuture result = executorService.schedule(new Runnable() {
+      @Override
+      public void run() {
+        try {
+          LOG.info("Starting ChaosMonkey");
+          monkey.start();
+          monkey.waitForStop();
+        } catch (Exception e) {
+          LOG.warn(StringUtils.stringifyException(e));
+        }
+
+      }
+    }, chaosMonkeyDelay, TimeUnit.MILLISECONDS);
+
+    // set the intended run time for the reader. The reader will do read requests
+    // to random keys for this amount of time.
+    long remainingTime = runtime - (System.currentTimeMillis() - start);
+    LOG.info("Reading random keys from the table for " + remainingTime/60000 + " min");
+    this.conf.setLong(
+      String.format(RUN_TIME_KEY, TimeBoundedMultiThreadedReader.class.getSimpleName())
+      , remainingTime); // load tool shares the same conf
+
+    // now start the readers which will run for configured run time
+    try {
+      int ret = loadTool.run(getArgsForLoadTestTool("-read", String.format("100:%d", readThreads)
+        , startKey, numKeys));
+      if (0 != ret) {
+        String errorMsg = "Verification failed with error code " + ret;
+        LOG.error(errorMsg);
+        Assert.fail(errorMsg);
+      }
+    } finally {
+      if (result != null) result.cancel(false);
+      monkey.stop("Stopping the test");
+      monkey.waitForStop();
+      executorService.shutdown();
+    }
+  }
+
+  @Override
+  protected String[] getArgsForLoadTestTool(String mode, String modeSpecificArg, long startKey,
+      long numKeys) {
+    List args = Lists.newArrayList(super.getArgsForLoadTestTool(
+      mode, modeSpecificArg, startKey, numKeys));
+    args.add("-reader");
+    args.add(TimeBoundedMultiThreadedReader.class.getName());
+    return args.toArray(new String[args.size()]);
+  }
+
+  public static class TimeBoundedMultiThreadedReader extends MultiThreadedReader {
+    protected long timeoutNano;
+    protected AtomicLong timedOutReads = new AtomicLong();
+    protected long runTime;
+    protected Thread timeoutThread;
+
+    public TimeBoundedMultiThreadedReader(LoadTestDataGenerator dataGen, Configuration conf,
+        TableName tableName, double verifyPercent) {
+      super(dataGen, conf, tableName, verifyPercent);
+      long timeoutMs = conf.getLong(
+        String.format("%s.%s", TEST_NAME, GET_TIMEOUT_KEY), DEFAULT_GET_TIMEOUT);
+      timeoutNano = timeoutMs * 1000000;
+      LOG.info("Timeout for gets: " + timeoutMs);
+      String runTimeKey = String.format(RUN_TIME_KEY, this.getClass().getSimpleName());
+      this.runTime = conf.getLong(runTimeKey, -1);
+      if (this.runTime <= 0) {
+        throw new IllegalArgumentException("Please configure " + runTimeKey);
+      }
+    }
+
+    @Override
+    public void waitForFinish() {
+      try {
+        this.timeoutThread.join();
+      } catch (InterruptedException e) {
+        e.printStackTrace();
+      }
+      this.aborted = true;
+      super.waitForFinish();
+    }
+
+    @Override
+    protected String progressInfo() {
+      StringBuilder builder = new StringBuilder(super.progressInfo());
+      appendToStatus(builder, "get_timeouts", timedOutReads.get());
+      return builder.toString();
+    }
+
+    @Override
+    public void start(long startKey, long endKey, int numThreads) throws IOException {
+      super.start(startKey, endKey, numThreads);
+      this.timeoutThread = new TimeoutThread(this.runTime);
+      this.timeoutThread.start();
+    }
+
+    @Override
+    protected HBaseReaderThread createReaderThread(int readerId) throws IOException {
+      return new TimeBoundedMultiThreadedReaderThread(readerId);
+    }
+
+    private class TimeoutThread extends Thread {
+      long timeout;
+      long reportInterval = 60000;
+      public TimeoutThread(long timeout) {
+        this.timeout = timeout;
+      }
+
+      @Override
+      public void run() {
+        while (true) {
+          long rem = Math.min(timeout, reportInterval);
+          if (rem <= 0) {
+            break;
+          }
+          LOG.info("Remaining execution time:" + timeout / 60000 + " min");
+          Threads.sleep(rem);
+          timeout -= rem;
+        }
+      }
+    }
+
+    public class TimeBoundedMultiThreadedReaderThread
+        extends MultiThreadedReader.HBaseReaderThread {
+
+      public TimeBoundedMultiThreadedReaderThread(int readerId) throws IOException {
+        super(readerId);
+      }
+
+      @Override
+      protected Get createGet(long keyToRead) throws IOException {
+        Get get = super.createGet(keyToRead);
+        get.setConsistency(Consistency.TIMELINE);
+        return get;
+      }
+
+      @Override
+      protected long getNextKeyToRead() {
+        // always read a random key, assuming that the writer has finished writing all keys
+        long key = startKey + Math.abs(RandomUtils.nextLong())
+          % (endKey - startKey);
+        return key - (key % numThreads) + readerId; // round for this reader
+      }
+
+      @Override
+      protected void verifyResultsAndUpdateMetrics(boolean verify, String rowKey, long elapsedNano,
+          Result result, HTable table, boolean isNullExpected) throws IOException {
+        super.verifyResultsAndUpdateMetrics(verify, rowKey, elapsedNano, result, table, isNullExpected);
+        // we actually do not timeout and cancel the reads after timeout. We just wait for the RPC
+        // to complete, but if the request took longer than timeout, we treat that as error.
+        if (elapsedNano > timeoutNano) {
+          timedOutReads.incrementAndGet();
+          numReadFailures.addAndGet(1); // fail the test
+        }
+      }
+    }
+  }
+
+  public static void main(String[] args) throws Exception {
+    Configuration conf = HBaseConfiguration.create();
+    IntegrationTestingUtility.setUseDistributedCluster(conf);
+    int ret = ToolRunner.run(conf, new IntegrationTestTimeBoundedRequestsWithRegionReplicas(), args);
+    System.exit(ret);
+  }
+}
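Note on the new test above: it writes once, flushes and re-opens the table so the secondary replicas catch up, then spends the remaining runtime issuing timeline-consistent gets that are timed on the client and counted as failures when they exceed get_timeout_ms. The core read path, reduced to a standalone sketch; the table and row key are illustrative, and HTable/HBaseAdmin are simply the client API of this code base:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Consistency;
    import org.apache.hadoop.hbase.client.Get;
    import org.apache.hadoop.hbase.client.HTable;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.util.Bytes;

    public class TimelineGetSketch {                                     // illustration only
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        HTable table = new HTable(conf, "IntegrationTestTimeBoundedRequestsWithRegionReplicas");
        long timeoutNano = 5000L * 1000000L;                             // mirrors DEFAULT_GET_TIMEOUT (5 sec)
        Get get = new Get(Bytes.toBytes("some-row"));                    // row key illustrative
        get.setConsistency(Consistency.TIMELINE);                        // read may be served by a replica
        long start = System.nanoTime();
        Result result = table.get(get);
        long elapsedNano = System.nanoTime() - start;
        // The reader never cancels the RPC; a response slower than the timeout is simply
        // counted as a failed read (timedOutReads / numReadFailures above).
        boolean timedOut = elapsedNano > timeoutNano;
        System.out.println("empty=" + result.isEmpty() + " timedOut=" + timedOut);
        table.close();
      }
    }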
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
index 67f8197..53f4efc 100644
--- hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
@@ -3117,11 +3117,22 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
   public static int createPreSplitLoadTestTable(Configuration conf,
       TableName tableName, byte[] columnFamily, Algorithm compression,
       DataBlockEncoding dataBlockEncoding) throws IOException {
+    return createPreSplitLoadTestTable(conf, tableName,
+      columnFamily, compression, dataBlockEncoding, DEFAULT_REGIONS_PER_SERVER);
+  }
+  /**
+   * Creates a pre-split table for load testing. If the table already exists,
+   * logs a warning and continues.
+   * @return the number of regions the table was split into
+   */
+  public static int createPreSplitLoadTestTable(Configuration conf,
+      TableName tableName, byte[] columnFamily, Algorithm compression,
+      DataBlockEncoding dataBlockEncoding, int numRegionsPerServer) throws IOException {
     HTableDescriptor desc = new HTableDescriptor(tableName);
     HColumnDescriptor hcd = new HColumnDescriptor(columnFamily);
     hcd.setDataBlockEncoding(dataBlockEncoding);
     hcd.setCompressionType(compression);
-    return createPreSplitLoadTestTable(conf, desc, hcd);
+    return createPreSplitLoadTestTable(conf, desc, hcd, numRegionsPerServer);
   }

   /**
@@ -3131,6 +3142,16 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
   */
  public static int createPreSplitLoadTestTable(Configuration conf,
      HTableDescriptor desc, HColumnDescriptor hcd) throws IOException {
+    return createPreSplitLoadTestTable(conf, desc, hcd, DEFAULT_REGIONS_PER_SERVER);
+  }
+
+  /**
+   * Creates a pre-split table for load testing. If the table already exists,
+   * logs a warning and continues.
+   * @return the number of regions the table was split into
+   */
+  public static int createPreSplitLoadTestTable(Configuration conf,
+      HTableDescriptor desc, HColumnDescriptor hcd, int numRegionsPerServer) throws IOException {
     if (!desc.hasFamily(hcd.getName())) {
       desc.addFamily(hcd);
     }
@@ -3146,10 +3167,10 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
       throw new IllegalStateException("No live regionservers");
     }

-    totalNumberOfRegions = numberOfServers * DEFAULT_REGIONS_PER_SERVER;
+    totalNumberOfRegions = numberOfServers * numRegionsPerServer;
     LOG.info("Number of live regionservers: " + numberOfServers + ", " +
       "pre-splitting table into " + totalNumberOfRegions + " regions " +
-      "(default regions per server: " + DEFAULT_REGIONS_PER_SERVER + ")");
+      "(regions per server: " + numRegionsPerServer + ")");

     byte[][] splits = new RegionSplitter.HexStringSplit().split(
       totalNumberOfRegions);
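Note: the new overload simply lets callers choose the regions-per-server factor instead of the fixed DEFAULT_REGIONS_PER_SERVER. A short sketch of calling it; the table and family names and the value 10 are illustrative:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HBaseTestingUtility;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.io.compress.Compression.Algorithm;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.util.Bytes;

    public class PreSplitTableSketch {                                   // illustration only
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Pre-split so that each live region server gets roughly 10 regions.
        int regions = HBaseTestingUtility.createPreSplitLoadTestTable(conf,
            TableName.valueOf("replica_test"), Bytes.toBytes("test_cf"),
            Algorithm.NONE, DataBlockEncoding.NONE, 10);
        System.out.println("created " + regions + " regions");
      }
    }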
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/util/LoadTestTool.java hbase-server/src/test/java/org/apache/hadoop/hbase/util/LoadTestTool.java
index debed23..3b45ae7 100644
--- hbase-server/src/test/java/org/apache/hadoop/hbase/util/LoadTestTool.java
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/util/LoadTestTool.java
@@ -31,6 +31,7 @@ import javax.crypto.spec.SecretKeySpec;
 import org.apache.commons.cli.CommandLine;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HColumnDescriptor;
@@ -106,8 +107,8 @@ public class LoadTestTool extends AbstractHBaseTool {
     + "compression) to use for data blocks in the test column family, "
     + "one of " + Arrays.toString(DataBlockEncoding.values()) + ".";

-  private static final String OPT_BLOOM = "bloom";
-  private static final String OPT_COMPRESSION = "compression";
+  public static final String OPT_BLOOM = "bloom";
+  public static final String OPT_COMPRESSION = "compression";
   public static final String OPT_DATA_BLOCK_ENCODING =
     HColumnDescriptor.DATA_BLOCK_ENCODING.toLowerCase();
@@ -119,6 +120,9 @@ public class LoadTestTool extends AbstractHBaseTool {
   public static final String OPT_GENERATOR_USAGE = "The class which generates load for the tool."
     + " Any args for this class can be passed as colon separated after class name";

+  public static final String OPT_READER = "reader";
+  public static final String OPT_READER_USAGE = "The class for executing the read requests";
+
   protected static final String OPT_KEY_WINDOW = "key_window";
   protected static final String OPT_WRITE = "write";
   protected static final String OPT_MAX_READ_ERRORS = "max_read_errors";
@@ -126,7 +130,7 @@ public class LoadTestTool extends AbstractHBaseTool {
   protected static final String OPT_NUM_KEYS = "num_keys";
   protected static final String OPT_READ = "read";
   protected static final String OPT_START_KEY = "start_key";
-  protected static final String OPT_TABLE_NAME = "tn";
+  public static final String OPT_TABLE_NAME = "tn";
   protected static final String OPT_ZK_QUORUM = "zk";
   protected static final String OPT_ZK_PARENT_NODE = "zk_root";
   protected static final String OPT_SKIP_INIT = "skip_init";
@@ -135,11 +139,20 @@ public class LoadTestTool extends AbstractHBaseTool {
   protected static final String OPT_BATCHUPDATE = "batchupdate";
   protected static final String OPT_UPDATE = "update";

-  protected static final String OPT_ENCRYPTION = "encryption";
+  public static final String OPT_ENCRYPTION = "encryption";
   protected static final String OPT_ENCRYPTION_USAGE =
     "Enables transparent encryption on the test table, one of " +
     Arrays.toString(Encryption.getSupportedCiphers());

+  public static final String OPT_NUM_REGIONS_PER_SERVER = "num_regions_per_server";
+  protected static final String OPT_NUM_REGIONS_PER_SERVER_USAGE
+    = "Desired number of regions per region server. Defaults to 5.";
+  protected static int DEFAULT_NUM_REGIONS_PER_SERVER = 5;
+
+  public static final String OPT_REGION_REPLICATION = "region_replication";
+  protected static final String OPT_REGION_REPLICATION_USAGE =
+    "Desired number of replicas per region";
+
   protected static final long DEFAULT_START_KEY = 0;

   /** This will be removed as we factor out the dependency on command line */
@@ -180,6 +193,9 @@ public class LoadTestTool extends AbstractHBaseTool {

   private int numTables = 1;

+  private int numRegionsPerServer = DEFAULT_NUM_REGIONS_PER_SERVER;
+  private int regionReplication = -1; // not set
+
   // TODO: refactor LoadTestToolImpl somewhere to make the usage from tests less bad,
   //       console tool itself should only be used from console.
   protected boolean isSkipInit = false;
@@ -246,6 +262,10 @@ public class LoadTestTool extends AbstractHBaseTool {
         admin.modifyColumn(tableName, columnDesc);
       }
     }
+    if (regionReplication > 0) {
+      tableDesc.setRegionReplication(regionReplication);
+      admin.modifyTable(tableName, tableDesc);
+    }
     LOG.info("Enabling table " + tableName);
     admin.enableTable(tableName);
   }
@@ -276,6 +296,7 @@ public class LoadTestTool extends AbstractHBaseTool {
       "separate updates for every column in a row");
     addOptNoArg(OPT_INMEMORY, OPT_USAGE_IN_MEMORY);
     addOptWithArg(OPT_GENERATOR, OPT_GENERATOR_USAGE);
+    addOptWithArg(OPT_READER, OPT_READER_USAGE);

     addOptWithArg(OPT_NUM_KEYS, "The number of keys to read/write");
     addOptWithArg(OPT_START_KEY, "The first key to read/write " +
@@ -290,6 +311,8 @@ public class LoadTestTool extends AbstractHBaseTool {
       + "table name prefix. Each table name is in format _1..._n");

     addOptWithArg(OPT_ENCRYPTION, OPT_ENCRYPTION_USAGE);
+    addOptWithArg(OPT_NUM_REGIONS_PER_SERVER, OPT_NUM_REGIONS_PER_SERVER_USAGE);
+    addOptWithArg(OPT_REGION_REPLICATION, OPT_REGION_REPLICATION_USAGE);
   }

   @Override
@@ -399,6 +422,16 @@ public class LoadTestTool extends AbstractHBaseTool {
     if(cmd.hasOption(NUM_TABLES)) {
       numTables = parseInt(cmd.getOptionValue(NUM_TABLES), 1, Short.MAX_VALUE);
     }
+
+    numRegionsPerServer = DEFAULT_NUM_REGIONS_PER_SERVER;
+    if (cmd.hasOption(OPT_NUM_REGIONS_PER_SERVER)) {
+      numRegionsPerServer = Integer.parseInt(cmd.getOptionValue(OPT_NUM_REGIONS_PER_SERVER));
+    }
+
+    regionReplication = -1;
+    if (cmd.hasOption(OPT_REGION_REPLICATION)) {
+      regionReplication = Integer.parseInt(cmd.getOptionValue(OPT_REGION_REPLICATION));
+    }
   }

   private void parseColumnFamilyOptions(CommandLine cmd) {
@@ -422,7 +455,7 @@ public class LoadTestTool extends AbstractHBaseTool {

   public void initTestTable() throws IOException {
     HBaseTestingUtility.createPreSplitLoadTestTable(conf, tableName,
-      COLUMN_FAMILY, compressAlgo, dataBlockEncodingAlgo);
+      COLUMN_FAMILY, compressAlgo, dataBlockEncodingAlgo, numRegionsPerServer);
     applyColumnFamilyOptions(tableName, COLUMN_FAMILIES);
   }
@@ -509,11 +542,17 @@ public class LoadTestTool extends AbstractHBaseTool {
     }

     if (isRead) {
-      if (userOwner != null) {
-        readerThreads = new MultiThreadedReaderWithACL(dataGen, conf, tableName, verifyPercent);
+      String readerClass = null;
+      if (cmd.hasOption(OPT_READER)) {
+        readerClass = cmd.getOptionValue(OPT_READER);
       } else {
-        readerThreads = new MultiThreadedReader(dataGen, conf, tableName, verifyPercent);
+        if (userOwner != null) {
+          readerClass = MultiThreadedReaderWithACL.class.getCanonicalName();
+        } else {
+          readerClass = MultiThreadedReader.class.getCanonicalName();
+        }
       }
+      readerThreads = getMultiThreadedReaderInstance(readerClass, dataGen);
       readerThreads.setMaxErrors(maxReadErrors);
       readerThreads.setKeyWindow(keyWindow);
     }
@@ -586,6 +625,18 @@ public class LoadTestTool extends AbstractHBaseTool {
     }
   }

+  private MultiThreadedReader getMultiThreadedReaderInstance(String clazzName
+      , LoadTestDataGenerator dataGen) throws IOException {
+    try {
+      Class clazz = Class.forName(clazzName);
+      Constructor constructor = clazz.getConstructor(
+        LoadTestDataGenerator.class, Configuration.class, TableName.class, double.class);
+      return (MultiThreadedReader) constructor.newInstance(dataGen, conf, tableName, verifyPercent);
+    } catch (Exception e) {
+      throw new IOException(e);
+    }
+  }
+
   public static byte[] generateData(final Random r, int length) {
     byte [] b = new byte [length];
     int i = 0;
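Note on the LoadTestTool changes above: num_regions_per_server and region_replication shape the table at init time, and -reader swaps in any MultiThreadedReader subclass that exposes the (LoadTestDataGenerator, Configuration, TableName, double) constructor looked up reflectively in getMultiThreadedReaderInstance. A sketch of an init-only invocation; the argument values are illustrative:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.util.LoadTestTool;
    import org.apache.hadoop.util.ToolRunner;

    public class LoadTestToolSketch {                                    // illustration only
      public static void main(String[] args) throws Exception {
        // Create a pre-split table with 3 replicas per region and 10 regions per server,
        // without loading any data.
        int ret = ToolRunner.run(HBaseConfiguration.create(), new LoadTestTool(), new String[] {
            "-tn", "replica_test",
            "-" + LoadTestTool.OPT_NUM_REGIONS_PER_SERVER, "10",
            "-" + LoadTestTool.OPT_REGION_REPLICATION, "3",
            "-init_only" });
        // A later invocation can load and read the table back through a pluggable reader, e.g.
        // -write 10:1024:20, then -read 100:20 -skip_init -reader <MultiThreadedReader subclass>.
        System.exit(ret);
      }
    }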
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedReader.java hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedReader.java
index 0edeea7..b0d44fd 100644
--- hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedReader.java
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedReader.java
@@ -39,7 +39,7 @@ public class MultiThreadedReader extends MultiThreadedAction
   protected Set readers = new HashSet();
   private final double verifyPercent;
-  private volatile boolean aborted;
+  protected volatile boolean aborted;

   protected MultiThreadedWriterBase writer = null;
@@ -104,11 +104,15 @@ public class MultiThreadedReader extends MultiThreadedAction

   protected void addReaderThreads(int numThreads) throws IOException {
     for (int i = 0; i < numThreads; ++i) {
-      HBaseReaderThread reader = new HBaseReaderThread(i);
+      HBaseReaderThread reader = createReaderThread(i);
       readers.add(reader);
     }
   }

+  protected HBaseReaderThread createReaderThread(int readerId) throws IOException {
+    return new HBaseReaderThread(readerId);
+  }
+
   public class HBaseReaderThread extends Thread {
     protected final int readerId;
     protected final HTable table;
@@ -122,6 +126,8 @@ public class MultiThreadedReader extends MultiThreadedAction
     /** If we are ahead of the writer and reading a random key. */
     private boolean readingRandomKey;

+    private boolean printExceptionTrace = true;
+
     /**
      * @param readerId only the keys with this remainder from division by
      *                 {@link #numThreads} will be read by this thread
@@ -204,7 +210,7 @@ public class MultiThreadedReader extends MultiThreadedAction
       return Math.min(endKey - 1, writer.wroteUpToKey() - keyWindow);
     }

-    private long getNextKeyToRead() {
+    protected long getNextKeyToRead() {
       readingRandomKey = false;
       if (writer == null || curKey <= maxKeyWeCanRead()) {
         return curKey++;
@@ -235,6 +241,24 @@ public class MultiThreadedReader extends MultiThreadedAction
     }

     private Get readKey(long keyToRead) {
+      Get get = null;
+      try {
+        get = createGet(keyToRead);
+        queryKey(get, RandomUtils.nextInt(100) < verifyPercent, keyToRead);
+      } catch (IOException e) {
+        numReadFailures.addAndGet(1);
+        LOG.debug("[" + readerId + "] FAILED read, key = " + (keyToRead + "")
+          + ", time from start: "
+          + (System.currentTimeMillis() - startTimeMs) + " ms");
+        if (printExceptionTrace) {
+          LOG.warn(e);
+          printExceptionTrace = false;
+        }
+      }
+      return get;
+    }
+
+    protected Get createGet(long keyToRead) throws IOException {
       Get get = new Get(dataGenerator.getDeterministicUniqueKey(keyToRead));
       String cfsString = "";
       byte[][] columnFamilies = dataGenerator.getColumnFamilies();
@@ -247,18 +271,9 @@ public class MultiThreadedReader extends MultiThreadedAction
           cfsString += "[" + Bytes.toStringBinary(cf) + "]";
         }
       }
-
-      try {
-        get = dataGenerator.beforeGet(keyToRead, get);
-        if (verbose) {
-          LOG.info("[" + readerId + "] " + "Querying key " + keyToRead + ", cfs " + cfsString);
-        }
-        queryKey(get, RandomUtils.nextInt(100) < verifyPercent, keyToRead);
-      } catch (IOException e) {
-        numReadFailures.addAndGet(1);
-        LOG.debug("[" + readerId + "] FAILED read, key = " + (keyToRead + "")
-          + ", time from start: "
-          + (System.currentTimeMillis() - startTimeMs) + " ms");
+      get = dataGenerator.beforeGet(keyToRead, get);
+      if (verbose) {
+        LOG.info("[" + readerId + "] " + "Querying key " + keyToRead + ", cfs " + cfsString);
       }
       return get;
     }
@@ -267,15 +282,16 @@ public class MultiThreadedReader extends MultiThreadedAction
       String rowKey = Bytes.toString(get.getRow());

       // read the data
-      long start = System.currentTimeMillis();
+      long start = System.nanoTime();
       Result result = table.get(get);
-      getResultMetricUpdation(verify, rowKey, start, result, table, false);
+      long end = System.nanoTime();
+      verifyResultsAndUpdateMetrics(verify, rowKey, end - start, result, table, false);
     }

-    protected void getResultMetricUpdation(boolean verify, String rowKey, long start,
+    protected void verifyResultsAndUpdateMetrics(boolean verify, String rowKey, long elapsedNano,
         Result result, HTable table, boolean isNullExpected) throws IOException {
-      totalOpTimeMs.addAndGet(System.currentTimeMillis() - start);
+      totalOpTimeMs.addAndGet(elapsedNano / 1000000);
       numKeys.addAndGet(1);
       if (!result.isEmpty()) {
         if (verify) {
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedReaderWithACL.java hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedReaderWithACL.java
index b71ff6d..420ae75 100644
--- hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedReaderWithACL.java
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedReaderWithACL.java
@@ -89,7 +89,7 @@ public class MultiThreadedReaderWithACL extends MultiThreadedReader {
       final String rowKey = Bytes.toString(get.getRow());

       // read the data
-      final long start = System.currentTimeMillis();
+      final long start = System.nanoTime();
       PrivilegedExceptionAction action = new PrivilegedExceptionAction() {
         @Override
         public Object run() throws Exception {
@@ -109,7 +109,8 @@ public class MultiThreadedReaderWithACL extends MultiThreadedReader {
           }
           boolean isNullExpected = ((((int) keyToRead % specialPermCellInsertionFactor)) == 0);
           LOG.info("Read happening from ACL " + isNullExpected);
-          getResultMetricUpdation(verify, rowKey, start, result, localTable, isNullExpected);
+          long end = System.nanoTime();
+          verifyResultsAndUpdateMetrics(verify, rowKey, end - start, result, localTable, isNullExpected);
         } catch (IOException e) {
           recordFailure(keyToRead);
         }
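Note: the createReaderThread/createGet/verifyResultsAndUpdateMetrics hooks added above are what TimeBoundedMultiThreadedReader builds on, and together with LoadTestTool's -reader option they make other pluggable readers possible. A minimal sketch of one, assuming only the hooks shown in this patch; the class name is hypothetical:

    import java.io.IOException;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Consistency;
    import org.apache.hadoop.hbase.client.Get;
    import org.apache.hadoop.hbase.util.MultiThreadedReader;
    import org.apache.hadoop.hbase.util.test.LoadTestDataGenerator;

    public class TimelineReaderSketch extends MultiThreadedReader {      // class name hypothetical
      // LoadTestTool's -reader reflection looks up exactly this four-argument constructor.
      public TimelineReaderSketch(LoadTestDataGenerator dataGen, Configuration conf,
          TableName tableName, double verifyPercent) {
        super(dataGen, conf, tableName, verifyPercent);
      }

      @Override
      protected HBaseReaderThread createReaderThread(int readerId) throws IOException {
        return new HBaseReaderThread(readerId) {
          @Override
          protected Get createGet(long keyToRead) throws IOException {
            Get get = super.createGet(keyToRead);
            get.setConsistency(Consistency.TIMELINE);  // same idea as the time-bounded reader above
            return get;
          }
        };
      }
    }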