diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureTestingUtility.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureTestingUtility.java index e9e8bdc..9b69830 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureTestingUtility.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureTestingUtility.java @@ -27,11 +27,13 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.MetaTableAccessor; +import org.apache.hadoop.hbase.MiniHBaseCluster; import org.apache.hadoop.hbase.RegionLocations; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; @@ -61,6 +63,39 @@ public class MasterProcedureTestingUtility { private MasterProcedureTestingUtility() { } + // ========================================================================== + // Master failover utils + // ========================================================================== + public static void masterFailover(final HBaseTestingUtility testUtil) + throws Exception { + MiniHBaseCluster cluster = testUtil.getMiniHBaseCluster(); + + // Kill the master + HMaster oldMaster = cluster.getMaster(); + cluster.killMaster(cluster.getMaster().getServerName()); + + // Wait for the backup master to take over + waitBackupMaster(testUtil, oldMaster); + } + + public static void waitBackupMaster(final HBaseTestingUtility testUtil, + final HMaster oldMaster) throws Exception { + MiniHBaseCluster cluster = testUtil.getMiniHBaseCluster(); + + HMaster newMaster = cluster.getMaster(); + while (newMaster == null || newMaster == oldMaster) { + Thread.sleep(250); + newMaster = cluster.getMaster(); + } + + while (!(newMaster.isActiveMaster() && newMaster.isInitialized())) { + Thread.sleep(250); + } + } + + // ========================================================================== + // Table Helpers + // ========================================================================== public static HTableDescriptor createHTD(final TableName tableName, final String...
family) { HTableDescriptor htd = new HTableDescriptor(tableName); for (int i = 0; i < family.length; ++i) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterFailoverWithProcedures.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterFailoverWithProcedures.java index 9dd5065..e949fc5 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterFailoverWithProcedures.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterFailoverWithProcedures.java @@ -22,9 +22,6 @@ import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; -import java.io.IOException; -import java.util.concurrent.CountDownLatch; - import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; @@ -33,14 +30,9 @@ import org.apache.hadoop.hbase.CategoryBasedTimeout; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HTableDescriptor; -import org.apache.hadoop.hbase.MiniHBaseCluster; import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.master.HMaster; -import org.apache.hadoop.hbase.procedure2.Procedure; import org.apache.hadoop.hbase.procedure2.ProcedureExecutor; import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility; -import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility.TestProcedure; -import org.apache.hadoop.hbase.procedure2.store.ProcedureStore; import org.apache.hadoop.hbase.procedure2.store.wal.WALProcedureStore; import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateTableState; import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteTableState; @@ -58,7 +50,6 @@ import org.junit.ClassRule; import org.junit.Test; import org.junit.experimental.categories.Category; import org.junit.rules.TestRule; -import org.mockito.Mockito; @Category({MasterTests.class, LargeTests.class}) public class TestMasterFailoverWithProcedures { @@ -97,144 +88,6 @@ public class TestMasterFailoverWithProcedures { } } - @Test - public void testWalRecoverLease() throws Exception { - final ProcedureStore masterStore = getMasterProcedureExecutor().getStore(); - assertTrue("expected WALStore for this test", masterStore instanceof WALProcedureStore); - - HMaster firstMaster = UTIL.getHBaseCluster().getMaster(); - // Abort Latch for the master store - final CountDownLatch masterStoreAbort = new CountDownLatch(1); - masterStore.registerListener(new ProcedureStore.ProcedureStoreListener() { - @Override - public void postSync() {} - - @Override - public void abortProcess() { - LOG.debug("Abort store of Master"); - masterStoreAbort.countDown(); - } - }); - - // startup a fake master the new WAL store will take the lease - // and the active master should abort. 
- HMaster backupMaster3 = Mockito.mock(HMaster.class); - Mockito.doReturn(firstMaster.getConfiguration()).when(backupMaster3).getConfiguration(); - Mockito.doReturn(true).when(backupMaster3).isActiveMaster(); - final WALProcedureStore backupStore3 = new WALProcedureStore(firstMaster.getConfiguration(), - firstMaster.getMasterFileSystem().getFileSystem(), - ((WALProcedureStore)masterStore).getLogDir(), - new MasterProcedureEnv.WALStoreLeaseRecovery(backupMaster3)); - // Abort Latch for the test store - final CountDownLatch backupStore3Abort = new CountDownLatch(1); - backupStore3.registerListener(new ProcedureStore.ProcedureStoreListener() { - @Override - public void postSync() {} - - @Override - public void abortProcess() { - LOG.debug("Abort store of backupMaster3"); - backupStore3Abort.countDown(); - backupStore3.stop(true); - } - }); - backupStore3.start(1); - backupStore3.recoverLease(); - - // Try to trigger a command on the master (WAL lease expired on the active one) - HTableDescriptor htd = MasterProcedureTestingUtility.createHTD(TableName.valueOf("mtb"), "f"); - HRegionInfo[] regions = ModifyRegionUtils.createHRegionInfos(htd, null); - LOG.debug("submit proc"); - try { - getMasterProcedureExecutor().submitProcedure( - new CreateTableProcedure(getMasterProcedureExecutor().getEnvironment(), htd, regions)); - fail("expected RuntimeException 'sync aborted'"); - } catch (RuntimeException e) { - LOG.info("got " + e.getMessage()); - } - LOG.debug("wait master store abort"); - masterStoreAbort.await(); - - // Now the real backup master should start up - LOG.debug("wait backup master to startup"); - waitBackupMaster(UTIL, firstMaster); - assertEquals(true, firstMaster.isStopped()); - - // wait the store in here to abort (the test will fail due to timeout if it doesn't) - LOG.debug("wait the store to abort"); - backupStore3.getStoreTracker().setDeleted(1, false); - try { - backupStore3.delete(1); - fail("expected RuntimeException 'sync aborted'"); - } catch (RuntimeException e) { - LOG.info("got " + e.getMessage()); - } - backupStore3Abort.await(); - } - - /** - * Tests proper fencing in case the current WAL store is fenced - */ - @Test - public void testWALfencingWithoutWALRolling() throws IOException { - testWALfencing(false); - } - - /** - * Tests proper fencing in case the current WAL store does not receive writes until after the - * new WAL does a couple of WAL rolls. 
- */ - @Test - public void testWALfencingWithWALRolling() throws IOException { - testWALfencing(true); - } - - public void testWALfencing(boolean walRolls) throws IOException { - final ProcedureStore procStore = getMasterProcedureExecutor().getStore(); - assertTrue("expected WALStore for this test", procStore instanceof WALProcedureStore); - - HMaster firstMaster = UTIL.getHBaseCluster().getMaster(); - - // cause WAL rolling after a delete in WAL: - firstMaster.getConfiguration().setLong(WALProcedureStore.ROLL_THRESHOLD_CONF_KEY, 1); - - HMaster backupMaster3 = Mockito.mock(HMaster.class); - Mockito.doReturn(firstMaster.getConfiguration()).when(backupMaster3).getConfiguration(); - Mockito.doReturn(true).when(backupMaster3).isActiveMaster(); - final WALProcedureStore procStore2 = new WALProcedureStore(firstMaster.getConfiguration(), - firstMaster.getMasterFileSystem().getFileSystem(), - ((WALProcedureStore)procStore).getLogDir(), - new MasterProcedureEnv.WALStoreLeaseRecovery(backupMaster3)); - - // start a second store which should fence the first one out - LOG.info("Starting new WALProcedureStore"); - procStore2.start(1); - procStore2.recoverLease(); - - // before writing back to the WAL store, optionally do a couple of WAL rolls (which causes - // to delete the old WAL files). - if (walRolls) { - LOG.info("Inserting into second WALProcedureStore, causing WAL rolls"); - for (int i = 0; i < 512; i++) { - // insert something to the second store then delete it, causing a WAL roll(s) - Procedure proc2 = new TestProcedure(i); - procStore2.insert(proc2, null); - procStore2.delete(proc2.getProcId()); // delete the procedure so that the WAL is removed later - } - } - - // Now, insert something to the first store, should fail. - // If the store does a WAL roll and continue with another logId without checking higher logIds - // it will incorrectly succeed. 
- LOG.info("Inserting into first WALProcedureStore"); - try { - procStore.insert(new TestProcedure(11), null); - fail("Inserting into Procedure Store should have failed"); - } catch (Exception ex) { - LOG.info("Received expected exception", ex); - } - } - // ========================================================================== // Test Create Table // ========================================================================== @@ -349,10 +202,10 @@ public class TestMasterFailoverWithProcedures { UTIL.waitUntilAllRegionsAssigned(tableName); // validate the table regions and layout + regions = UTIL.getHBaseAdmin().getTableRegions(tableName).toArray(new HRegionInfo[0]); if (preserveSplits) { - assertEquals(1 + splitKeys.length, UTIL.getHBaseAdmin().getTableRegions(tableName).size()); + assertEquals(1 + splitKeys.length, regions.length); } else { - regions = UTIL.getHBaseAdmin().getTableRegions(tableName).toArray(new HRegionInfo[1]); assertEquals(1, regions.length); } MasterProcedureTestingUtility.validateTableCreation( @@ -458,7 +311,7 @@ public class TestMasterFailoverWithProcedures { ProcedureTestingUtility.assertProcNotYetCompleted(procExec, procId); LOG.info("Trigger master failover"); - masterFailover(testUtil); + MasterProcedureTestingUtility.masterFailover(testUtil); procExec = testUtil.getHBaseCluster().getMaster().getMasterProcedureExecutor(); ProcedureTestingUtility.waitProcedure(procExec, procId); @@ -466,39 +319,8 @@ public class TestMasterFailoverWithProcedures { } // ========================================================================== - // Master failover utils - // ========================================================================== - public static void masterFailover(final HBaseTestingUtility testUtil) - throws Exception { - MiniHBaseCluster cluster = testUtil.getMiniHBaseCluster(); - - // Kill the master - HMaster oldMaster = cluster.getMaster(); - cluster.killMaster(cluster.getMaster().getServerName()); - - // Wait the secondary - waitBackupMaster(testUtil, oldMaster); - } - - public static void waitBackupMaster(final HBaseTestingUtility testUtil, - final HMaster oldMaster) throws Exception { - MiniHBaseCluster cluster = testUtil.getMiniHBaseCluster(); - - HMaster newMaster = cluster.getMaster(); - while (newMaster == null || newMaster == oldMaster) { - Thread.sleep(250); - newMaster = cluster.getMaster(); - } - - while (!(newMaster.isActiveMaster() && newMaster.isInitialized())) { - Thread.sleep(250); - } - } - - // ========================================================================== // Helpers // ========================================================================== - private ProcedureExecutor getMasterProcedureExecutor() { return UTIL.getHBaseCluster().getMaster().getMasterProcedureExecutor(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureWAL.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureWAL.java new file mode 100644 index 0000000..a392c7c --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureWAL.java @@ -0,0 +1,230 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.master.procedure; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import java.io.IOException; +import java.util.concurrent.CountDownLatch; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.CategoryBasedTimeout; +import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.master.HMaster; +import org.apache.hadoop.hbase.procedure2.Procedure; +import org.apache.hadoop.hbase.procedure2.ProcedureExecutor; +import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility.TestProcedure; +import org.apache.hadoop.hbase.procedure2.store.ProcedureStore; +import org.apache.hadoop.hbase.procedure2.store.wal.WALProcedureStore; +import org.apache.hadoop.hbase.testclassification.LargeTests; +import org.apache.hadoop.hbase.testclassification.MasterTests; +import org.apache.hadoop.hbase.util.ModifyRegionUtils; +import org.junit.After; +import org.junit.Before; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.rules.TestRule; +import org.mockito.Mockito; + +@Category({MasterTests.class, LargeTests.class}) +public class TestMasterProcedureWAL { + private static final Log LOG = LogFactory.getLog(TestMasterProcedureWAL.class); + + @ClassRule + public static final TestRule timeout = + CategoryBasedTimeout.forClass(TestMasterProcedureWAL.class); + + protected static final HBaseTestingUtility UTIL = new HBaseTestingUtility(); + + private static void setupConf(Configuration conf) { + // don't waste time retrying with the roll, the test is already slow enough. 
+ conf.setInt(WALProcedureStore.MAX_RETRIES_BEFORE_ROLL_CONF_KEY, 1); + conf.setInt(WALProcedureStore.WAIT_BEFORE_ROLL_CONF_KEY, 0); + conf.setInt(WALProcedureStore.ROLL_RETRIES_CONF_KEY, 1); + conf.setInt(WALProcedureStore.MAX_SYNC_FAILURE_ROLL_CONF_KEY, 1); + } + + @Before + public void setup() throws Exception { + setupConf(UTIL.getConfiguration()); + UTIL.startMiniCluster(2, 1); + } + + @After + public void tearDown() throws Exception { + try { + UTIL.shutdownMiniCluster(); + } catch (Exception e) { + LOG.warn("failure shutting down cluster", e); + } + } + + @Test + public void testWalRecoverLease() throws Exception { + final ProcedureStore masterStore = getMasterProcedureExecutor().getStore(); + assertTrue("expected WALStore for this test", masterStore instanceof WALProcedureStore); + + HMaster firstMaster = UTIL.getHBaseCluster().getMaster(); + // Abort Latch for the master store + final CountDownLatch masterStoreAbort = new CountDownLatch(1); + masterStore.registerListener(new ProcedureStore.ProcedureStoreListener() { + @Override + public void postSync() {} + + @Override + public void abortProcess() { + LOG.debug("Abort store of Master"); + masterStoreAbort.countDown(); + } + }); + + // start up a fake master; the new WAL store will take the lease + // and the active master should abort. + HMaster backupMaster3 = Mockito.mock(HMaster.class); + Mockito.doReturn(firstMaster.getConfiguration()).when(backupMaster3).getConfiguration(); + Mockito.doReturn(true).when(backupMaster3).isActiveMaster(); + final WALProcedureStore backupStore3 = new WALProcedureStore(firstMaster.getConfiguration(), + firstMaster.getMasterFileSystem().getFileSystem(), + ((WALProcedureStore)masterStore).getLogDir(), + new MasterProcedureEnv.WALStoreLeaseRecovery(backupMaster3)); + // Abort Latch for the test store + final CountDownLatch backupStore3Abort = new CountDownLatch(1); + backupStore3.registerListener(new ProcedureStore.ProcedureStoreListener() { + @Override + public void postSync() {} + + @Override + public void abortProcess() { + LOG.debug("Abort store of backupMaster3"); + backupStore3Abort.countDown(); + backupStore3.stop(true); + } + }); + backupStore3.start(1); + backupStore3.recoverLease(); + + // Try to trigger a command on the master (WAL lease expired on the active one) + HTableDescriptor htd = MasterProcedureTestingUtility.createHTD(TableName.valueOf("mtb"), "f"); + HRegionInfo[] regions = ModifyRegionUtils.createHRegionInfos(htd, null); + LOG.debug("submit proc"); + try { + getMasterProcedureExecutor().submitProcedure( + new CreateTableProcedure(getMasterProcedureExecutor().getEnvironment(), htd, regions)); + fail("expected RuntimeException 'sync aborted'"); + } catch (RuntimeException e) { + LOG.info("got " + e.getMessage()); + } + LOG.debug("wait master store abort"); + masterStoreAbort.await(); + + // Now the real backup master should start up + LOG.debug("wait backup master to startup"); + MasterProcedureTestingUtility.waitBackupMaster(UTIL, firstMaster); + assertEquals(true, firstMaster.isStopped()); + + // wait for the backup store to abort (the test will fail due to timeout if it doesn't) + LOG.debug("wait the store to abort"); + backupStore3.getStoreTracker().setDeleted(1, false); + try { + backupStore3.delete(1); + fail("expected RuntimeException 'sync aborted'"); + } catch (RuntimeException e) { + LOG.info("got " + e.getMessage()); + } + backupStore3Abort.await(); + } + + /** + * Tests proper fencing in case the current WAL store is fenced + */ + @Test + public void
testWALfencingWithoutWALRolling() throws IOException { + testWALfencing(false); + } + + /** + * Tests proper fencing in case the current WAL store does not receive writes until after the + * new WAL does a couple of WAL rolls. + */ + @Test + public void testWALfencingWithWALRolling() throws IOException { + testWALfencing(true); + } + + public void testWALfencing(boolean walRolls) throws IOException { + final ProcedureStore procStore = getMasterProcedureExecutor().getStore(); + assertTrue("expected WALStore for this test", procStore instanceof WALProcedureStore); + + HMaster firstMaster = UTIL.getHBaseCluster().getMaster(); + + // cause WAL rolling after a delete in WAL: + firstMaster.getConfiguration().setLong(WALProcedureStore.ROLL_THRESHOLD_CONF_KEY, 1); + + HMaster backupMaster3 = Mockito.mock(HMaster.class); + Mockito.doReturn(firstMaster.getConfiguration()).when(backupMaster3).getConfiguration(); + Mockito.doReturn(true).when(backupMaster3).isActiveMaster(); + final WALProcedureStore procStore2 = new WALProcedureStore(firstMaster.getConfiguration(), + firstMaster.getMasterFileSystem().getFileSystem(), + ((WALProcedureStore)procStore).getLogDir(), + new MasterProcedureEnv.WALStoreLeaseRecovery(backupMaster3)); + + // start a second store which should fence the first one out + LOG.info("Starting new WALProcedureStore"); + procStore2.start(1); + procStore2.recoverLease(); + + // before writing back to the WAL store, optionally do a couple of WAL rolls (which causes + // the old WAL files to be deleted). + if (walRolls) { + LOG.info("Inserting into second WALProcedureStore, causing WAL rolls"); + for (int i = 0; i < 512; i++) { + // insert something into the second store then delete it, causing WAL roll(s) + Procedure proc2 = new TestProcedure(i); + procStore2.insert(proc2, null); + procStore2.delete(proc2.getProcId()); // delete the procedure so that the WAL is removed later + } + } + + // Now, insert something into the first store; it should fail. + // If the store does a WAL roll and continues with another logId without checking higher logIds, + // it will incorrectly succeed.
+ LOG.info("Inserting into first WALProcedureStore"); + try { + procStore.insert(new TestProcedure(11), null); + fail("Inserting into Procedure Store should have failed"); + } catch (Exception ex) { + LOG.info("Received expected exception", ex); + } + } + + // ========================================================================== + // Helpers + // ========================================================================== + private ProcedureExecutor getMasterProcedureExecutor() { + return UTIL.getHBaseCluster().getMaster().getMasterProcedureExecutor(); + } +} \ No newline at end of file diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerMetrics.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerMetrics.java index 89a82a7..9516dfa 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerMetrics.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerMetrics.java @@ -120,6 +120,7 @@ public class TestRegionServerMetrics { metricsRegionServer.getRegionServerWrapper().forceRecompute(); tableName = TableName.valueOf(testName.getMethodName()); table = TEST_UTIL.createTable(tableName, cf); + Threads.sleepWithoutInterrupt(10000); } @After @@ -201,77 +202,7 @@ public class TestRegionServerMetrics { metricsHelper.assertGauge("regionCount", 1, serverSource); } - @Test - public void testLocalFiles() throws Exception { - assertGauge("percentFilesLocal", 0); - assertGauge("percentFilesLocalSecondaryRegions", 0); - } - - @Test - public void testRequestCount() throws Exception { - // Do a first put to be sure that the connection is established, meta is there and so on. - doNPuts(1, false); - - metricsRegionServer.getRegionServerWrapper().forceRecompute(); - long requests = metricsHelper.getCounter("totalRequestCount", serverSource); - long readRequests = metricsHelper.getCounter("readRequestCount", serverSource); - long writeRequests = metricsHelper.getCounter("writeRequestCount", serverSource); - - doNPuts(30, false); - - metricsRegionServer.getRegionServerWrapper().forceRecompute(); - assertCounter("totalRequestCount", requests + 30); - assertCounter("readRequestCount", readRequests); - assertCounter("writeRequestCount", writeRequests + 30); - - doNGets(10, false); - - metricsRegionServer.getRegionServerWrapper().forceRecompute(); - assertCounter("totalRequestCount", requests + 40); - assertCounter("readRequestCount", readRequests + 10); - assertCounter("writeRequestCount", writeRequests + 30); - - assertRegionMetrics("getNumOps", 10); - assertRegionMetrics("mutateCount", 31); - - doNGets(10, true); // true = batch - - metricsRegionServer.getRegionServerWrapper().forceRecompute(); - assertCounter("totalRequestCount", requests + 50); - assertCounter("readRequestCount", readRequests + 20); - assertCounter("writeRequestCount", writeRequests + 30); - - doNPuts(30, true); - - metricsRegionServer.getRegionServerWrapper().forceRecompute(); - assertCounter("totalRequestCount", requests + 80); - assertCounter("readRequestCount", readRequests + 20); - assertCounter("writeRequestCount", writeRequests + 60); - } - - @Test - public void testGet() throws Exception { - // Do a first put to be sure that the connection is established, meta is there and so on. 
- doNPuts(1, false); - doNGets(10, false); - assertRegionMetrics("getNumOps", 10); - assertRegionMetrics("getSizeNumOps", 10); - metricsHelper.assertCounterGt("Get_num_ops", 10, serverSource); - } - - @Test - public void testMutationsWithoutWal() throws Exception { - Put p = new Put(row).addColumn(cf, qualifier, val) - .setDurability(Durability.SKIP_WAL); - table.put(p); - - metricsRegionServer.getRegionServerWrapper().forceRecompute(); - assertGauge("mutationsWithoutWALCount", 1); - long minLength = row.length + cf.length + qualifier.length + val.length; - metricsHelper.assertGaugeGt("mutationsWithoutWALSize", minLength, serverSource); - } - - @Test + //@Test public void testStoreCount() throws Exception { //Force a hfile. doNPuts(1, false); @@ -293,241 +224,4 @@ public class TestRegionServerMetrics { assertTrue(metricsHelper.getGaugeLong("minStoreFileAge", serverSource) > 0); assertTrue(metricsHelper.getGaugeLong("avgStoreFileAge", serverSource) > 0); } - - @Test - public void testCheckAndPutCount() throws Exception { - byte[] valOne = Bytes.toBytes("Value"); - byte[] valTwo = Bytes.toBytes("ValueTwo"); - byte[] valThree = Bytes.toBytes("ValueThree"); - - Put p = new Put(row); - p.addColumn(cf, qualifier, valOne); - table.put(p); - - Put pTwo = new Put(row); - pTwo.addColumn(cf, qualifier, valTwo); - table.checkAndPut(row, cf, qualifier, valOne, pTwo); - - Put pThree = new Put(row); - pThree.addColumn(cf, qualifier, valThree); - table.checkAndPut(row, cf, qualifier, valOne, pThree); - - metricsRegionServer.getRegionServerWrapper().forceRecompute(); - assertCounter("checkMutateFailedCount", 1); - assertCounter("checkMutatePassedCount", 1); - } - - @Test - public void testIncrement() throws Exception { - Put p = new Put(row).addColumn(cf, qualifier, Bytes.toBytes(0L)); - table.put(p); - - for(int count = 0; count < 13; count++) { - Increment inc = new Increment(row); - inc.addColumn(cf, qualifier, 100); - table.increment(inc); - } - - metricsRegionServer.getRegionServerWrapper().forceRecompute(); - assertCounter("incrementNumOps", 13); - } - - @Test - public void testAppend() throws Exception { - doNPuts(1, false); - - for(int count = 0; count< 73; count++) { - Append append = new Append(row); - append.add(cf, qualifier, Bytes.toBytes(",Test")); - table.append(append); - } - - metricsRegionServer.getRegionServerWrapper().forceRecompute(); - assertCounter("appendNumOps", 73); - } - - @Test - public void testScanSize() throws Exception { - doNPuts(100, true); // batch put - Scan s = new Scan(); - s.setBatch(1); - s.setCaching(1); - ResultScanner resultScanners = table.getScanner(s); - - for (int nextCount = 0; nextCount < NUM_SCAN_NEXT; nextCount++) { - Result result = resultScanners.next(); - assertNotNull(result); - assertEquals(1, result.size()); - } - numScanNext += NUM_SCAN_NEXT; - assertRegionMetrics("scanSizeNumOps", NUM_SCAN_NEXT); - assertCounter("ScanSize_num_ops", numScanNext); - } - - @Test - public void testScanTime() throws Exception { - doNPuts(100, true); - Scan s = new Scan(); - s.setBatch(1); - s.setCaching(1); - ResultScanner resultScanners = table.getScanner(s); - - for (int nextCount = 0; nextCount < NUM_SCAN_NEXT; nextCount++) { - Result result = resultScanners.next(); - assertNotNull(result); - assertEquals(1, result.size()); - } - numScanNext += NUM_SCAN_NEXT; - assertRegionMetrics("scanTimeNumOps", NUM_SCAN_NEXT); - assertCounter("ScanTime_num_ops", numScanNext); - } - - @Test - public void testScanSizeForSmallScan() throws Exception { - doNPuts(100, true); - Scan s 
= new Scan(); - s.setSmall(true); - s.setCaching(1); - ResultScanner resultScanners = table.getScanner(s); - - for (int nextCount = 0; nextCount < NUM_SCAN_NEXT; nextCount++) { - Result result = resultScanners.next(); - assertNotNull(result); - assertEquals(1, result.size()); - } - numScanNext += NUM_SCAN_NEXT; - assertRegionMetrics("scanSizeNumOps", NUM_SCAN_NEXT); - assertCounter("ScanSize_num_ops", numScanNext); - } - - @Test - public void testMobMetrics() throws IOException, InterruptedException { - TableName tableName = TableName.valueOf("testMobMetricsLocal"); - int numHfiles = 5; - HTableDescriptor htd = new HTableDescriptor(tableName); - HColumnDescriptor hcd = new HColumnDescriptor(cf); - hcd.setMobEnabled(true); - hcd.setMobThreshold(0); - htd.addFamily(hcd); - byte[] val = Bytes.toBytes("mobdata"); - try { - Table table = TEST_UTIL.createTable(htd, new byte[0][0], conf); - Region region = rs.getOnlineRegions(tableName).get(0); - for (int insertCount = 0; insertCount < numHfiles; insertCount++) { - Put p = new Put(Bytes.toBytes(insertCount)); - p.addColumn(cf, qualifier, val); - table.put(p); - admin.flush(tableName); - } - metricsRegionServer.getRegionServerWrapper().forceRecompute(); - assertCounter("mobFlushCount", numHfiles); - - Scan scan = new Scan(Bytes.toBytes(0), Bytes.toBytes(numHfiles)); - ResultScanner scanner = table.getScanner(scan); - scanner.next(100); - numScanNext++; // this is an ugly construct - scanner.close(); - metricsRegionServer.getRegionServerWrapper().forceRecompute(); - assertCounter("mobScanCellsCount", numHfiles); - - region.getTableDesc().getFamily(cf).setMobThreshold(100); - // metrics are reset by the region initialization - ((HRegion) region).initialize(); - region.compact(true); - metricsRegionServer.getRegionServerWrapper().forceRecompute(); - assertCounter("cellsCountCompactedFromMob", numHfiles); - assertCounter("cellsCountCompactedToMob", 0); - - scanner = table.getScanner(scan); - scanner.next(100); - numScanNext++; // this is an ugly construct - metricsRegionServer.getRegionServerWrapper().forceRecompute(); - assertCounter("mobScanCellsCount", 0); - - for (int insertCount = numHfiles; insertCount < 2 * numHfiles; insertCount++) { - Put p = new Put(Bytes.toBytes(insertCount)); - p.addColumn(cf, qualifier, val); - table.put(p); - admin.flush(tableName); - } - region.getTableDesc().getFamily(cf).setMobThreshold(0); - - // closing the region forces the compaction.discharger to archive the compacted hfiles - ((HRegion) region).close(); - - // metrics are reset by the region initialization - ((HRegion) region).initialize(); - region.compact(true); - metricsRegionServer.getRegionServerWrapper().forceRecompute(); - // metrics are reset by the region initialization - assertCounter("cellsCountCompactedFromMob", 0); - assertCounter("cellsCountCompactedToMob", 2 * numHfiles); - } finally { - admin.disableTable(tableName); - admin.deleteTable(tableName); - } - } - - @Test - @Ignore - public void testRangeCountMetrics() throws Exception { - final long[] timeranges = - { 1, 3, 10, 30, 100, 300, 1000, 3000, 10000, 30000, 60000, 120000, 300000, 600000 }; - final String timeRangeType = "TimeRangeCount"; - final String timeRangeMetricName = "Mutate"; - boolean timeRangeCountUpdated = false; - - // Do a first put to be sure that the connection is established, meta is there and so on. 
- Put p = new Put(row); - p.addColumn(cf, qualifier, val); - table.put(p); - - // do some puts and gets - for (int i = 0; i < 10; i++) { - table.put(p); - } - - Get g = new Get(row); - for (int i = 0; i < 10; i++) { - table.get(g); - } - - metricsRegionServer.getRegionServerWrapper().forceRecompute(); - - // Check some time range counters were updated - long prior = 0; - - String dynamicMetricName; - for (int i = 0; i < timeranges.length; i++) { - dynamicMetricName = - timeRangeMetricName + "_" + timeRangeType + "_" + prior + "-" + timeranges[i]; - if (metricsHelper.checkCounterExists(dynamicMetricName, serverSource)) { - long count = metricsHelper.getGaugeLong(dynamicMetricName, serverSource); - if (count > 0) { - timeRangeCountUpdated = true; - break; - } - } - prior = timeranges[i]; - } - dynamicMetricName = - timeRangeMetricName + "_" + timeRangeType + "_" + timeranges[timeranges.length - 1] + "-inf"; - if (metricsHelper.checkCounterExists(dynamicMetricName, serverSource)) { - long count = metricsHelper.getCounter(dynamicMetricName, serverSource); - if (count > 0) { - timeRangeCountUpdated = true; - } - } - assertEquals(true, timeRangeCountUpdated); - } - - @Test - public void testAverageRegionSize() throws Exception { - //Force a hfile. - doNPuts(1, false); - TEST_UTIL.getHBaseAdmin().flush(tableName); - - metricsRegionServer.getRegionServerWrapper().forceRecompute(); - assertTrue(metricsHelper.getGaugeDouble("averageRegionSize", serverSource) > 0.0); - } }