diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java index eb536a3..662e718 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java @@ -160,12 +160,12 @@ public final class Canary implements Tool { if (index >= 0) { // command line args must be in the form: [opts] [table 1 [table 2 ...]] System.err.println("Invalid command line options"); - printUsageAndExit(); + return printUsageAndExit(); } if (cmd.equals("-help")) { // user asked for help, print the help and quit. - printUsageAndExit(); + return printUsageAndExit(); } else if (cmd.equals("-daemon") && interval == 0) { // user asked for daemon mode, set a default interval between checks interval = DEFAULT_INTERVAL; @@ -175,14 +175,14 @@ public final class Canary implements Tool { if (i == args.length) { System.err.println("-interval needs a numeric value argument."); - printUsageAndExit(); + return printUsageAndExit(); } try { interval = Long.parseLong(args[i]) * 1000; } catch (NumberFormatException e) { System.err.println("-interval needs a numeric value argument."); - printUsageAndExit(); + return printUsageAndExit(); } } else if(cmd.equals("-regionserver")) { this.regionServerMode = true; @@ -193,14 +193,14 @@ public final class Canary implements Tool { if (i == args.length) { System.err.println("-t needs a numeric value argument."); - printUsageAndExit(); + return printUsageAndExit(); } try { this.timeout = Long.parseLong(args[i]); } catch (NumberFormatException e) { System.err.println("-t needs a numeric value argument."); - printUsageAndExit(); + return printUsageAndExit(); } } else if (cmd.equals("-f")) { @@ -209,14 +209,14 @@ public final class Canary implements Tool { if (i == args.length) { System.err .println("-f needs a boolean value argument (true|false)."); - printUsageAndExit(); + return 
printUsageAndExit(); } this.failOnError = Boolean.parseBoolean(args[i]); } else { // no options match System.err.println(cmd + " options is invalid."); - printUsageAndExit(); + return printUsageAndExit(); } } else if (index < 0) { // keep track of first table name specified by the user @@ -242,7 +242,7 @@ public final class Canary implements Tool { // exit if any error occurs if (this.failOnError && monitor.hasError()) { monitorThread.interrupt(); - System.exit(monitor.errorCode); + return monitor.errorCode; } currentTimeLength = System.currentTimeMillis() - startTime; if (currentTimeLength > this.timeout) { @@ -256,7 +256,7 @@ public final class Canary implements Tool { if (this.failOnError && monitor.hasError()) { monitorThread.interrupt(); - System.exit(monitor.errorCode); + return monitor.errorCode; } Thread.sleep(interval); @@ -265,7 +265,7 @@ public final class Canary implements Tool { return(monitor.errorCode); } - private void printUsageAndExit() { + private int printUsageAndExit() { System.err.printf( "Usage: bin/hbase %s [opts] [table1 [table2]...] | [regionserver1 [regionserver2]..]%n", getClass().getName()); @@ -280,7 +280,7 @@ public final class Canary implements Tool { System.err.println(" -f stop whole program if first error occurs," + " default is true"); System.err.println(" -t timeout for a check, default is 600000 (milisecs)"); - System.exit(USAGE_EXIT_CODE); + return USAGE_EXIT_CODE; } /** diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/tool/TestCanary.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/tool/TestCanary.java new file mode 100644 index 0000000..d85e791 --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/tool/TestCanary.java @@ -0,0 +1,272 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.tool; + +import java.io.IOException; +import java.util.List; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.commons.logging.impl.Log4JLogger; +import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.HColumnDescriptor; +import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.LargeTests; +import org.apache.hadoop.hbase.MiniHBaseCluster; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.HBaseAdmin; +import org.apache.hadoop.hbase.client.HTable; +import org.apache.hadoop.hbase.client.Put; +import org.apache.hadoop.hbase.client.ScannerCallable; +import org.apache.hadoop.hbase.ipc.RpcClient; +import org.apache.hadoop.hbase.ipc.RpcServer; +import org.apache.hadoop.hbase.regionserver.HRegion; +import org.apache.hadoop.hbase.regionserver.HRegionServer; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.util.ToolRunner; +import org.apache.log4j.Level; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.Assert; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +/** + * Run tests for {@link Canary}. + * Sets up the HBase mini cluster once at start and runs through all client tests. 
+ */ +@Category(LargeTests.class) +public class TestCanary { + static final Log LOG = LogFactory.getLog(TestCanary.class.getName()); + protected final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); + protected static int SLAVES = 3; + + private static final String TEST_TABLE_01_S = "test-01"; + private static final String TEST_TABLE_02_S = "test-02"; + + private static final String TEST_CF_01_S = "cf1"; + private static final String TEST_CF_02_S = "cf2"; + + private static final String TEST_CQ_01_S = "c1"; + private static final String TEST_CQ_02_S = "c2"; + + private static final byte[] TEST_TABLE_01_B = Bytes.toBytes(TEST_TABLE_01_S); + private static final byte[] TEST_TABLE_02_B = Bytes.toBytes(TEST_TABLE_02_S); + + private static final byte[] TEST_CF_01_B = Bytes.toBytes(TEST_CF_01_S); + private static final byte[] TEST_CF_02_B = Bytes.toBytes(TEST_CF_02_S); + + private static final byte[] TEST_CQ_01_B = Bytes.toBytes(TEST_CQ_01_S); + private static final byte[] TEST_CQ_02_B = Bytes.toBytes(TEST_CQ_02_S); + + /** + * @throws java.lang.Exception + */ + @BeforeClass + public static void setUpBeforeClass() throws Exception { + ((Log4JLogger) RpcServer.LOG).getLogger().setLevel(Level.ALL); + ((Log4JLogger) RpcClient.LOG).getLogger().setLevel(Level.ALL); + ((Log4JLogger) ScannerCallable.LOG).getLogger().setLevel(Level.ALL); + // We need more than one region server in this test + TEST_UTIL.startMiniCluster(SLAVES); + + // initialize the test env. 
+ // create test tables + createTestTable(TEST_TABLE_01_B); + createTestTable(TEST_TABLE_02_B); + + // generate dummy test data, then split the table + generateTestDummyDataAndSplit(TEST_TABLE_01_B, 1000, true); + generateTestDummyDataAndSplit(TEST_TABLE_02_B, 1000, true); + + // reassign the split regions to spread on every regionserver + reassignTestTableRegions(TEST_TABLE_01_B, 0, 1); + reassignTestTableRegions(TEST_TABLE_02_B, 2, 0); + } + + /** + * @throws java.lang.Exception + */ + @AfterClass + public static void tearDownAfterClass() throws Exception { + TEST_UTIL.shutdownMiniCluster(); + } + + /** + * @throws java.lang.Exception + */ + @Before + public void setUp() throws Exception { + } + + /** + * @throws java.lang.Exception + */ + @After + public void tearDown() throws Exception { + } + + @Test + public void testDefault() throws Exception { + Canary canary = new Canary(); + canary.setConf(TEST_UTIL.getConfiguration()); + int retCode = ToolRunner.run(canary, null); + Assert.assertEquals(0, retCode); + } + + @Test + public void testTable() throws Exception { + Canary canary = new Canary(); + canary.setConf(TEST_UTIL.getConfiguration()); + int retCode = ToolRunner.run(canary, new String[] { TEST_TABLE_01_S }); + Assert.assertEquals(0, retCode); + } + + @Test + public void testRegionserver() throws Exception { + Canary canary = new Canary(); + canary.setConf(TEST_UTIL.getConfiguration()); + int retCode = ToolRunner.run(canary, new String[] { "-regionserver" }); + Assert.assertEquals(0, retCode); + } + + @Test + public void testOneRegionserver() throws Exception { + Canary canary = new Canary(); + canary.setConf(TEST_UTIL.getConfiguration()); + HRegionServer server = TEST_UTIL.getMiniHBaseCluster().getRegionServer(0); + String hostName = server.getServerName().getHostname(); + int retCode = ToolRunner.run(canary, new String[] { "-regionserver", hostName }); + Assert.assertEquals(0, retCode); + } + + private static void createTestTable(byte[] tableName) throws 
IOException { + HBaseAdmin admin = null; + HTableDescriptor td = null; + HColumnDescriptor hcd = null; + try { + admin = TEST_UTIL.getHBaseAdmin(); + td = new HTableDescriptor(TableName.valueOf(tableName)); + hcd = new HColumnDescriptor(TEST_CF_01_B); + td.addFamily(hcd); + + hcd = new HColumnDescriptor(TEST_CF_02_B); + td.addFamily(hcd); + + admin.createTable(td); + } catch (IOException e) { + LOG.error("createTestTables failed", e); + throw e; + } + } + + private static void generateTestDummyDataAndSplit(byte[] tableName, int count, boolean split) + throws IOException, InterruptedException { + HTable table = null; + Put put = null; + HBaseAdmin admin = null; + TableName tblName = TableName.valueOf(tableName); + + try { + // generate test dummy data + table = new HTable(TEST_UTIL.getConfiguration(), tblName); + table.setAutoFlush(false, false); + for (int a = 0; a < count; a++) { + put = new Put(Bytes.toBytes(("row" + String.format("%05d", a)))); + put.add(TEST_CF_01_B, TEST_CQ_01_B, Bytes.toBytes("test_1_1")); + put.add(TEST_CF_01_B, TEST_CQ_02_B, Bytes.toBytes("test_1_2")); + put.add(TEST_CF_02_B, TEST_CQ_01_B, Bytes.toBytes("test_2_1")); + put.add(TEST_CF_02_B, TEST_CQ_02_B, Bytes.toBytes("test_2_2")); + table.put(put); + } + table.flushCommits(); + } catch (IOException e) { + e.printStackTrace(System.err); + throw e; + } finally { + table.close(); + } + TEST_UTIL.waitUntilAllRegionsAssigned(tblName); + MiniHBaseCluster cluster = TEST_UTIL.getMiniHBaseCluster(); + try { + // split table + admin = TEST_UTIL.getHBaseAdmin(); + admin.split(tableName); + } catch (InterruptedException e) { + System.err.println("split table failed"); + e.printStackTrace(System.err); + throw e; + } + + do { + Thread.sleep(1); + } while (cluster.getRegions(tblName).size() != 2); + } + + private static void reassignTestTableRegions(byte[] tableName, int... 
newRsInices) + throws IOException, InterruptedException { + HBaseAdmin admin = null; + List regions = null; + HRegion region = null; + HRegionServer server = null; + MiniHBaseCluster cluster = TEST_UTIL.getMiniHBaseCluster(); + + try { + admin = TEST_UTIL.getHBaseAdmin(); + } catch (IOException e) { + System.err.println("create HBaseAdmin failed"); + e.printStackTrace(System.err); + throw e; + } + + regions = cluster.getRegions(tableName); + Assert.assertEquals("regions and neRsIndices size not equal", regions.size(), + newRsInices.length); + + int newRsIdx = 0; + String regionEncodedName = null; + String serverName = null; + try { + for (int a = 0; a < newRsInices.length; a++) { + newRsIdx = newRsInices[a]; + region = regions.get(a); + server = cluster.getRegionServer(newRsIdx); + regionEncodedName = region.getRegionInfo().getEncodedName(); + serverName = server.getServerName().getServerName(); + System.out.println("move region:" + region + " to server:" + server); + admin.move(Bytes.toBytes(regionEncodedName), Bytes.toBytes(serverName)); + + do { + Thread.sleep(1); + } while (cluster.getServerWith(region.getRegionInfo().getRegionName()) != newRsIdx); + } + } catch (IOException e) { + System.err.println("move region failed"); + e.printStackTrace(System.err); + throw e; + } catch (InterruptedException e) { + System.err.println("move region interrupted"); + e.printStackTrace(System.err); + throw e; + } + } + +}