diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java
index 2a3b229..727c6d8 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java
@@ -111,10 +111,10 @@ public final class Canary implements Tool {
     }
   }
 
-  private static final int USAGE_EXIT_CODE = 1;
-  private static final int INIT_ERROR_EXIT_CODE = 2;
-  private static final int TIMEOUT_ERROR_EXIT_CODE = 3;
-  private static final int ERROR_EXIT_CODE = 4;
+  static final int USAGE_EXIT_CODE = 1;
+  static final int INIT_ERROR_EXIT_CODE = 2;
+  static final int TIMEOUT_ERROR_EXIT_CODE = 3;
+  static final int ERROR_EXIT_CODE = 4;
 
   private static final long DEFAULT_INTERVAL = 6000;
 
@@ -161,12 +161,12 @@ public final class Canary implements Tool {
         if (index >= 0) {
           // command line args must be in the form: [opts] [table 1 [table 2 ...]]
           System.err.println("Invalid command line options");
-          printUsageAndExit();
+          return printUsageAndExit();
         }
 
         if (cmd.equals("-help")) {
           // user asked for help, print the help and quit.
-          printUsageAndExit();
+          return printUsageAndExit();
         } else if (cmd.equals("-daemon") && interval == 0) {
           // user asked for daemon mode, set a default interval between checks
           interval = DEFAULT_INTERVAL;
@@ -176,14 +176,14 @@ public final class Canary implements Tool {
 
           if (i == args.length) {
             System.err.println("-interval needs a numeric value argument.");
-            printUsageAndExit();
+            return printUsageAndExit();
           }
 
           try {
             interval = Long.parseLong(args[i]) * 1000;
           } catch (NumberFormatException e) {
             System.err.println("-interval needs a numeric value argument.");
-            printUsageAndExit();
+            return printUsageAndExit();
           }
         } else if(cmd.equals("-regionserver")) {
           this.regionServerMode = true;
@@ -194,14 +194,14 @@ public final class Canary implements Tool {
 
           if (i == args.length) {
             System.err.println("-t needs a numeric value argument.");
-            printUsageAndExit();
+            return printUsageAndExit();
           }
 
           try {
             this.timeout = Long.parseLong(args[i]);
           } catch (NumberFormatException e) {
             System.err.println("-t needs a numeric value argument.");
-            printUsageAndExit();
+            return printUsageAndExit();
          }
 
        } else if (cmd.equals("-f")) {
@@ -210,14 +210,14 @@ public final class Canary implements Tool {
 
          if (i == args.length) {
            System.err
                .println("-f needs a boolean value argument (true|false).");
-            printUsageAndExit();
+            return printUsageAndExit();
          }
 
          this.failOnError = Boolean.parseBoolean(args[i]);
        } else {
          // no options match
          System.err.println(cmd + " options is invalid.");
-          printUsageAndExit();
+          return printUsageAndExit();
        }
      } else if (index < 0) {
        // keep track of first table name specified by the user
@@ -243,7 +243,7 @@ public final class Canary implements Tool {
           // exit if any error occurs
           if (this.failOnError && monitor.hasError()) {
             monitorThread.interrupt();
-            System.exit(monitor.errorCode);
+            return monitor.errorCode;
           }
           currentTimeLength = System.currentTimeMillis() - startTime;
           if (currentTimeLength > this.timeout) {
@@ -257,7 +257,7 @@ public final class Canary implements Tool {
 
       if (this.failOnError && monitor.hasError()) {
         monitorThread.interrupt();
-        System.exit(monitor.errorCode);
+        return monitor.errorCode;
       }
 
       Thread.sleep(interval);
@@ -266,7 +266,7 @@ public final class Canary implements Tool {
     return(monitor.errorCode);
   }
 
-  private void printUsageAndExit() {
+  private int printUsageAndExit() {
     System.err.printf(
       "Usage: bin/hbase %s [opts] [table1 [table2]...] | [regionserver1 [regionserver2]..]%n",
         getClass().getName());
@@ -281,7 +281,7 @@ public final class Canary implements Tool {
     System.err.println("   -f <B>         stop whole program if first error occurs," +
         " default is true");
     System.err.println("   -t <N>         timeout for a check, default is 600000 (milisecs)");
-    System.exit(USAGE_EXIT_CODE);
+    return USAGE_EXIT_CODE;
   }
 
   /**
@@ -382,6 +382,9 @@ public final class Canary implements Tool {
         } else {
           sniff();
         }
+      } catch (TableNotFoundException e) {
+        LOG.error(e);
+        this.errorCode = INIT_ERROR_EXIT_CODE;
       } catch (Exception e) {
         LOG.error("Run regionMonitor failed", e);
         this.errorCode = ERROR_EXIT_CODE;
@@ -449,6 +452,7 @@ public final class Canary implements Tool {
 
   /**
    * Canary entry point for specified table.
+   * @throws TableNotFoundException tableName is not available
    * @throws Exception
    */
   private static void sniff(final HBaseAdmin admin, final Sink sink, String tableName)
@@ -456,7 +460,9 @@ public final class Canary implements Tool {
     if (admin.isTableAvailable(tableName)) {
       sniff(admin, sink, admin.getTableDescriptor(tableName.getBytes()));
     } else {
-      LOG.warn(String.format("Table %s is not available", tableName));
+      String msg = String.format("Table %s is not available", tableName);
+      LOG.warn(msg);
+      throw new TableNotFoundException(msg);
     }
   }
 
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/tool/TestCanary.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/tool/TestCanary.java
new file mode 100644
index 0000000..71cf4e8
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/tool/TestCanary.java
@@ -0,0 +1,550 @@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.tool;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.commons.logging.impl.Log4JLogger;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.LargeTests;
+import org.apache.hadoop.hbase.MiniHBaseCluster;
+import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.ScannerCallable;
+import org.apache.hadoop.hbase.ipc.RpcClient;
+import org.apache.hadoop.hbase.ipc.RpcServer;
+import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.regionserver.HRegionServer;
+import org.apache.hadoop.hbase.tool.Canary.RegionServerStdOutSink;
+import org.apache.hadoop.hbase.tool.Canary.Sink;
+import org.apache.hadoop.hbase.tool.Canary.StdOutSink;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.JVMClusterUtil;
+import org.apache.hadoop.hbase.util.Threads;
+import org.apache.hadoop.util.ToolRunner;
+import org.apache.log4j.Level;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+/**
+ * Run tests for {@link Canary}.
+ * Sets up the HBase mini cluster once at start and runs through all client tests.
+ */
+@Category(LargeTests.class)
+public class TestCanary {
+  static final Log LOG = LogFactory.getLog(TestCanary.class.getName());
+  protected final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
+  protected static int SLAVES = 3;
+
+  private static final String TEST_TABLE_01_S = "test-01";
+  private static final String TEST_TABLE_02_S = "test-02";
+
+  private static final String TEST_CF_01_S = "cf1";
+  private static final String TEST_CF_02_S = "cf2";
+
+  private static final String TEST_CQ_01_S = "c1";
+  private static final String TEST_CQ_02_S = "c2";
+
+  private static final byte[] TEST_TABLE_01_B = Bytes.toBytes(TEST_TABLE_01_S);
+  private static final byte[] TEST_TABLE_02_B = Bytes.toBytes(TEST_TABLE_02_S);
+
+  private static final byte[] TEST_CF_01_B = Bytes.toBytes(TEST_CF_01_S);
+  private static final byte[] TEST_CF_02_B = Bytes.toBytes(TEST_CF_02_S);
+
+  private static final byte[] TEST_CQ_01_B = Bytes.toBytes(TEST_CQ_01_S);
+  private static final byte[] TEST_CQ_02_B = Bytes.toBytes(TEST_CQ_02_S);
+
+  /**
+   * @throws java.lang.Exception
+   */
+  @BeforeClass
+  public static void setUpBeforeClass() throws Exception {
+    initialTestCluster();
+  }
+
+  private static void initialTestCluster() throws Exception, IOException, InterruptedException {
+    ((Log4JLogger) RpcServer.LOG).getLogger().setLevel(Level.ALL);
+    ((Log4JLogger) RpcClient.LOG).getLogger().setLevel(Level.ALL);
+    ((Log4JLogger) ScannerCallable.LOG).getLogger().setLevel(Level.ALL);
+    // We need more than one region server in this test
+    TEST_UTIL.startMiniCluster(SLAVES);
+
+    // initialize the test env.
+    // create test tables
+    createTestTable(TEST_TABLE_01_B);
+    createTestTable(TEST_TABLE_02_B);
+
+    // generate dummy test data, then split the table
+    generateTestDummyDataAndSplit(TEST_TABLE_01_B, 1000, true);
+    generateTestDummyDataAndSplit(TEST_TABLE_02_B, 1000, true);
+
+    // reassign the split regions to spread them across every regionserver
+    //   | rs  | test-01 | test-02 |
+    //   | rs1 |   r1    |   r2    |
+    //   | rs2 |   r2    |         |
+    //   | rs3 |         |   r1    |
+    reassignTestTableRegions(TEST_TABLE_01_B, 0, 1);
+    reassignTestTableRegions(TEST_TABLE_02_B, 2, 0);
+  }
+
+  /**
+   * @throws java.lang.Exception
+   */
+  @AfterClass
+  public static void tearDownAfterClass() throws Exception {
+    TEST_UTIL.shutdownMiniCluster();
+  }
+
+  /**
+   * @throws java.lang.Exception
+   */
+  @Before
+  public void setUp() throws Exception {
+  }
+
+  /**
+   * @throws java.lang.Exception
+   */
+  @After
+  public void tearDown() throws Exception {
+  }
+
+  @Test
+  public void testHelp() throws Exception {
+    Canary canary = new Canary();
+    canary.setConf(TEST_UTIL.getConfiguration());
+    int retCode = ToolRunner.run(canary, new String[] { "-help" });
+    Assert.assertEquals(Canary.USAGE_EXIT_CODE, retCode);
+  }
+
+  @Test
+  public void testDefault() throws Exception {
+    final RegionAndCfsResultValidator validator =
+        new RegionAndCfsResultValidator(TEST_TABLE_01_S, TEST_TABLE_02_S);
+
+    // do mock
+    Sink sink = new StdOutSink() {
+      @Override
+      public void publishReadTiming(HRegionInfo region, HColumnDescriptor column, long msTime) {
+        super.publishReadTiming(region, column, msTime);
+        validator.putRegionAndCf(region.getRegionNameAsString(), column.getNameAsString());
+      }
+    };
+    Canary canary = new Canary(sink);
+    canary.setConf(TEST_UTIL.getConfiguration());
+    int retCode = ToolRunner.run(canary, null);
+    Assert.assertEquals(0, retCode);
+    Assert.assertTrue(validator.isPass());
+  }
+
+  @Test
+  public void testTable() throws Exception {
+    final RegionAndCfsResultValidator validator =
+        new RegionAndCfsResultValidator(TEST_TABLE_01_S);
+
+    // do mock
+    Sink sink = new StdOutSink() {
+      @Override
+      public void publishReadTiming(HRegionInfo region, HColumnDescriptor column, long msTime) {
+        super.publishReadTiming(region, column, msTime);
+        validator.putRegionAndCf(region.getRegionNameAsString(), column.getNameAsString());
+      }
+    };
+
+    Canary canary = new Canary(sink);
+    canary.setConf(TEST_UTIL.getConfiguration());
+    int retCode = ToolRunner.run(canary, new String[] { TEST_TABLE_01_S });
+    Assert.assertEquals(0, retCode);
+    Assert.assertTrue(validator.isPass());
+  }
+
+  @Test
+  public void testTableFailed() throws Exception {
+    Canary canary = new Canary();
+    canary.setConf(TEST_UTIL.getConfiguration());
+    int retCode = ToolRunner.run(canary, new String[] { "test-03" });
+    Assert.assertEquals(Canary.INIT_ERROR_EXIT_CODE, retCode);
+  }
+
+  @Test
+  public void testTableRegExp() throws Exception {
+    final RegionAndCfsResultValidator validator =
+        new RegionAndCfsResultValidator(TEST_TABLE_01_S, TEST_TABLE_02_S);
+
+    // do mock
+    Sink sink = new StdOutSink() {
+      @Override
+      public void publishReadTiming(HRegionInfo region, HColumnDescriptor column, long msTime) {
+        super.publishReadTiming(region, column, msTime);
+        validator.putRegionAndCf(region.getRegionNameAsString(), column.getNameAsString());
+      }
+    };
+    Canary canary = new Canary(sink);
+    canary.setConf(TEST_UTIL.getConfiguration());
+    String regexp = generateRegExpPattern(TEST_TABLE_01_S, 0.7D);
+    int retCode = ToolRunner.run(canary, new String[] { "-e", regexp });
+    Assert.assertEquals(0, retCode);
+    Assert.assertTrue(validator.isPass());
+  }
+
+  @Test
+  public void testTableRegExpFailed() throws Exception {
+    Canary canary = new Canary();
+    canary.setConf(TEST_UTIL.getConfiguration());
+    int retCode = ToolRunner.run(canary, new String[] { "-e", "test-03" });
+    Assert.assertEquals(Canary.INIT_ERROR_EXIT_CODE, retCode);
+  }
+
+  @Test
+  public void testRegionservers() throws Exception {
+    final RsResultValidator validator =
+        new RsResultValidator(TEST_UTIL.getMiniHBaseCluster().getRegionServerThreads());
+    // do mock
+    Sink sink = new RegionServerStdOutSink() {
+
+      @Override
+      public void publishReadTiming(String table, String server, long msTime) {
+        super.publishReadTiming(table, server, msTime);
+        validator.putServerName(server);
+      }
+    };
+    Canary canary = new Canary(sink);
+    canary.setConf(TEST_UTIL.getConfiguration());
+    int retCode = ToolRunner.run(canary, new String[] { "-regionserver" });
+    Assert.assertEquals(0, retCode);
+    Assert.assertTrue(validator.isPass());
+  }
+
+  @Test
+  public void testRegionserverRegExp() throws Exception {
+    MiniHBaseCluster cluster = TEST_UTIL.getMiniHBaseCluster();
+    final RsResultValidator validator = new RsResultValidator(cluster.getRegionServerThreads());
+    // do mock
+    Sink sink = new RegionServerStdOutSink() {
+
+      @Override
+      public void publishReadTiming(String table, String server, long msTime) {
+        super.publishReadTiming(table, server, msTime);
+        validator.putServerName(server);
+      }
+    };
+    String regExp =
+        generateRegExpPattern(cluster.getRegionServer(0).getServerName().getHostname(), 0.5D);
+    Canary canary = new Canary(sink);
+    canary.setConf(TEST_UTIL.getConfiguration());
+    int retCode = ToolRunner.run(canary, new String[] { "-e", "-regionserver", regExp });
+    Assert.assertEquals(0, retCode);
+    Assert.assertTrue(validator.isPass());
+  }
+
+  @Test
+  public void testOneRegionserver() throws Exception {
+    HRegionServer server =
+        TEST_UTIL.getMiniHBaseCluster().getRegionServer(0);
+    final RsResultValidator validator = new RsResultValidator(server);
+    // do mock
+    Sink sink = new RegionServerStdOutSink() {
+
+      @Override
+      public void publishReadTiming(String table, String server, long msTime) {
+        super.publishReadTiming(table, server, msTime);
+        validator.putServerName(server);
+      }
+    };
+
+    Canary canary = new Canary(sink);
+    canary.setConf(TEST_UTIL.getConfiguration());
+
+    String hostName = server.getServerName().getHostname();
+    int retCode = ToolRunner.run(canary, new String[] { "-regionserver", hostName });
+    Assert.assertEquals(0, retCode);
+    Assert.assertTrue(validator.isPass());
+  }
+
+  @Test
+  public void testRegionserverWithTableName() throws Exception {
+    Canary canary = new Canary();
+    canary.setConf(TEST_UTIL.getConfiguration());
+
+    int retCode = ToolRunner.run(canary, new String[] { "-regionserver", TEST_TABLE_01_S });
+    Assert.assertEquals(Canary.USAGE_EXIT_CODE, retCode);
+  }
+
+  @Test
+  public void testTimeout() throws Exception {
+    Canary canary = new Canary();
+    canary.setConf(TEST_UTIL.getConfiguration());
+    MiniHBaseCluster cluster = TEST_UTIL.getMiniHBaseCluster();
+    ServerName serverName = null;
+    for (int a = 0; a < SLAVES; a++) {
+      serverName = cluster.getRegionServer(a).getServerName();
+      try {
+        cluster.killRegionServer(serverName);
+      } catch (IOException e) {
+        System.err.println("kill server:" + serverName + " failed");
+        e.printStackTrace(System.err);
+        throw e;
+      }
+    }
+    try {
+      int retCode = ToolRunner.run(canary, new String[] { "-t", "1000" });
+      Assert.assertEquals(Canary.TIMEOUT_ERROR_EXIT_CODE, retCode);
+    } finally {
+      // re-initialize the cluster for the remaining tests
+      TEST_UTIL.shutdownMiniCluster();
+      initialTestCluster();
+    }
+  }
+
+  @Test
+  public void testMasterNotRunning() throws Exception {
+    Canary canary = new Canary();
+    Configuration conf = HBaseConfiguration.create(TEST_UTIL.getConfiguration());
+    conf.set(HConstants.HBASE_CLIENT_RETRIES_NUMBER, "1");
+    canary.setConf(conf);
+    MiniHBaseCluster cluster = TEST_UTIL.getMiniHBaseCluster();
+    cluster.killAll();
+    Threads.sleep(5000);
+    try {
+      int retCode = ToolRunner.run(canary, null);
+      Assert.assertEquals(Canary.ERROR_EXIT_CODE, retCode);
+    } finally {
+      // re-initialize the cluster for the remaining tests
+      TEST_UTIL.shutdownMiniCluster();
+      initialTestCluster();
+    }
+  }
+
+  private static String generateRegExpPattern(String name, double fraction) {
+    int endIdx = (int) (((double) name.length()) * fraction);
+    Assert.assertTrue(endIdx > 0);
+    return (name.substring(0, endIdx) + ".*");
+  }
+
+  private static void createTestTable(byte[] tableName) throws IOException, InterruptedException {
+    try {
+      TEST_UTIL.createTable(tableName, new byte[][]{TEST_CF_01_B, TEST_CF_02_B});
+      TEST_UTIL.waitTableAvailable(tableName);
+    } catch (IOException e) {
+      LOG.error("createTestTables failed", e);
+      throw e;
+    } catch (InterruptedException e) {
+      LOG.error("waitTableAvailable failed", e);
+      throw e;
+    }
+  }
+
+  private static void generateTestDummyDataAndSplit(byte[] tableName, int count, boolean split)
+      throws IOException, InterruptedException {
+    HTable table = null;
+    Put put = null;
+    HBaseAdmin admin = null;
+    TableName tblName = TableName.valueOf(tableName);
+
+    try {
+      // generate test dummy data
+      table = new HTable(TEST_UTIL.getConfiguration(), tblName);
+      table.setAutoFlush(false, false);
+      for (int a = 0; a < count; a++) {
+        put = new Put(Bytes.toBytes(("row" + String.format("%05d", a))));
+        put.add(TEST_CF_01_B, TEST_CQ_01_B, Bytes.toBytes("test_1_1"));
+        put.add(TEST_CF_01_B, TEST_CQ_02_B, Bytes.toBytes("test_1_2"));
+        put.add(TEST_CF_02_B, TEST_CQ_01_B, Bytes.toBytes("test_2_1"));
+        put.add(TEST_CF_02_B, TEST_CQ_02_B, Bytes.toBytes("test_2_2"));
+        table.put(put);
+      }
+      table.flushCommits();
+    } catch (IOException e) {
+      e.printStackTrace(System.err);
+      throw e;
+    } finally {
+      table.close();
+    }
+    TEST_UTIL.waitUntilAllRegionsAssigned(tblName);
+
+    if (split) {
+      MiniHBaseCluster cluster = TEST_UTIL.getMiniHBaseCluster();
+      try {
+        // split table
+        admin = TEST_UTIL.getHBaseAdmin();
+        admin.split(tableName);
+      } catch (InterruptedException e) {
+        System.err.println("split table failed");
+        e.printStackTrace(System.err);
+        throw e;
+      }
+
+      do {
+        Thread.sleep(1);
+      } while (cluster.getRegions(tblName).size() != 2);
+    }
+  }
+
+  private static void reassignTestTableRegions(byte[] tableName, int... newRsIndices)
+      throws IOException, InterruptedException {
+    HBaseAdmin admin = null;
+    List<HRegion> regions = null;
+    HRegion region = null;
+    HRegionServer server = null;
+    MiniHBaseCluster cluster = TEST_UTIL.getMiniHBaseCluster();
+
+    try {
+      admin = TEST_UTIL.getHBaseAdmin();
+    } catch (IOException e) {
+      System.err.println("create HBaseAdmin failed");
+      e.printStackTrace(System.err);
+      throw e;
+    }
+
+    regions = cluster.getRegions(tableName);
+    Assert.assertEquals("regions and newRsIndices size not equal", regions.size(),
+        newRsIndices.length);
+
+    int newRsIdx = 0;
+    String regionEncodedName = null;
+    String serverName = null;
+    try {
+      for (int a = 0; a < newRsIndices.length; a++) {
+        newRsIdx = newRsIndices[a];
+        region = regions.get(a);
+        server = cluster.getRegionServer(newRsIdx);
+        regionEncodedName = region.getRegionInfo().getEncodedName();
+        serverName = server.getServerName().getServerName();
+        System.out.println("move region:" + region + " to server:" + server);
+        admin.move(Bytes.toBytes(regionEncodedName), Bytes.toBytes(serverName));
+
+        do {
+          Thread.sleep(1);
+        } while (cluster.getServerWith(region.getRegionInfo().getRegionName()) != newRsIdx);
+      }
+    } catch (IOException e) {
+      System.err.println("move region failed");
+      e.printStackTrace(System.err);
+      throw e;
+    } catch (InterruptedException e) {
+      System.err.println("move region interrupted");
+      e.printStackTrace(System.err);
+      throw e;
+    }
+  }
+
+  private static class RsResultValidator implements ResultValidator {
+    Map<String, Boolean> serverNamesMap = new HashMap<String, Boolean>();
+
+    RsResultValidator(List<JVMClusterUtil.RegionServerThread> rsts) {
+      HRegionServer rs = null;
+      for (JVMClusterUtil.RegionServerThread rst : rsts) {
+        rs = rst.getRegionServer();
+        prepareServerNamesMap(rs);
+      }
+    }
+
+    RsResultValidator(HRegionServer server) {
+      prepareServerNamesMap(server);
+    }
+
+    private void prepareServerNamesMap(HRegionServer server) {
+      String serverName = server.getServerName().getHostname();
+      serverNamesMap.put(serverName, Boolean.FALSE);
+    }
+
+    void putServerName(String serverName) {
+      serverNamesMap.put(serverName, Boolean.TRUE);
+    }
+
+    @Override
+    public boolean isPass() {
+      for (Boolean pass : serverNamesMap.values()) {
+        if (pass.equals(Boolean.FALSE)) return false;
+      }
+      return true;
+    }
+
+  }
+
+  private static class RegionAndCfsResultValidator implements ResultValidator {
+    Map<String, Map<String, Boolean>> rAndCfsMap = new HashMap<String, Map<String, Boolean>>();
+
+    RegionAndCfsResultValidator(String... tableNames) {
+      for (String tableName : tableNames) {
+        prepareRegionAndCfs(rAndCfsMap, tableName);
+      }
+      Assert.assertTrue(rAndCfsMap.size() > 0);
+    }
+
+    void putRegionAndCf(String region, String cf) {
+      Map<String, Boolean> cfsMap = rAndCfsMap.get(region);
+      if (cfsMap == null) return;
+      cfsMap.put(cf, Boolean.TRUE);
+    }
+
+    @Override
+    public boolean isPass() {
+      for (Map<String, Boolean> cfsMap : rAndCfsMap.values()) {
+        for (Boolean pass : cfsMap.values()) {
+          if (pass.equals(Boolean.FALSE)) return false;
+        }
+      }
+      return true;
+    }
+
+    private static void prepareRegionAndCfs(Map<String, Map<String, Boolean>> rAndCfsMap,
+        String tableName) {
+      List<HRegion> regions = TEST_UTIL.getMiniHBaseCluster().getRegions(Bytes.toBytes(tableName));
+      String regionName = null;
+      Map<String, Boolean> cfsMap = null;
+      HTableDescriptor td = null;
+      for (HRegion region : regions) {
+        regionName = region.getRegionNameAsString();
+        cfsMap = rAndCfsMap.get(regionName);
+        if (cfsMap == null) {
+          cfsMap = new HashMap<String, Boolean>();
+          rAndCfsMap.put(regionName, cfsMap);
+        }
+        td = region.getTableDesc();
+        for (HColumnDescriptor cf : td.getColumnFamilies()) {
+          cfsMap.put(cf.getNameAsString(), Boolean.FALSE);
+        }
+      }
+    }
+  }
+
+  private static interface ResultValidator {
+    boolean isPass();
+  }
+
+}