Index: src/test/java/org/apache/hadoop/hbase/HBaseTestCase.java
===================================================================
--- src/test/java/org/apache/hadoop/hbase/HBaseTestCase.java	(revision 1329052)
+++ src/test/java/org/apache/hadoop/hbase/HBaseTestCase.java	(working copy)
@@ -152,6 +152,16 @@
       conf.get(TEST_DIRECTORY_KEY, "target/test/data"), testName);
   }

+  /**
+   * You must close the returned region and then close the log file it
+   * created: call {@link HRegion#close()}, then get the log with
+   * {@link HRegion#getLog()} and close it.
+   * @param desc
+   * @param startKey
+   * @param endKey
+   * @return An {@link HRegion}
+   * @throws IOException
+   */
   protected HRegion createNewHRegion(HTableDescriptor desc, byte [] startKey,
       byte [] endKey)
   throws IOException {
@@ -159,7 +169,6 @@
     Path rootdir = filesystem.makeQualified(
         new Path(conf.get(HConstants.HBASE_DIR)));
     filesystem.mkdirs(rootdir);
-
     return HRegion.createHRegion(new HRegionInfo(desc, startKey, endKey),
         rootdir, conf);
   }
@@ -652,6 +661,11 @@
     }
   }

+  /**
+   * When done with the regions created by this method, you must call
+   * {@link #closeRootAndMeta()}; it does the cleanup.
+   * @throws IOException
+   */
   protected void createRootAndMetaRegions() throws IOException {
     root = HRegion.createHRegion(HRegionInfo.ROOT_REGIONINFO, testDir, conf);
     meta = HRegion.createHRegion(HRegionInfo.FIRST_META_REGIONINFO, testDir,
Index: src/test/java/org/apache/hadoop/hbase/regionserver/TestColumnSeeking.java
===================================================================
--- src/test/java/org/apache/hadoop/hbase/regionserver/TestColumnSeeking.java	(revision 1329052)
+++ src/test/java/org/apache/hadoop/hbase/regionserver/TestColumnSeeking.java	(working copy)
@@ -72,94 +72,97 @@
     HRegion region = HRegion.createHRegion(info, HBaseTestingUtility.getTestDir(), TEST_UTIL
         .getConfiguration());
+    try {
+      List<String> rows = generateRandomWords(10, "row");
+      List<String> allColumns = generateRandomWords(10, "column");
+      List<String> values = generateRandomWords(100, "value");
-    List<String> rows = generateRandomWords(10, "row");
-    List<String> allColumns = generateRandomWords(10, "column");
-    List<String> values = generateRandomWords(100, "value");
+      long maxTimestamp = 2;
+      double selectPercent = 0.5;
+      int numberOfTests = 5;
+      double flushPercentage = 0.2;
+      double minorPercentage = 0.2;
+      double majorPercentage = 0.2;
+      double putPercentage = 0.2;
-    long maxTimestamp = 2;
-    double selectPercent = 0.5;
-    int numberOfTests = 5;
-    double flushPercentage = 0.2;
-    double minorPercentage = 0.2;
-    double majorPercentage = 0.2;
-    double putPercentage = 0.2;
+      HashMap<String, KeyValue> allKVMap = new HashMap<String, KeyValue>();
-    HashMap<String, KeyValue> allKVMap = new HashMap<String, KeyValue>();
+      HashMap<String, KeyValue>[] kvMaps = new HashMap[numberOfTests];
+      ArrayList<String>[] columnLists = new ArrayList[numberOfTests];
-    HashMap<String, KeyValue>[] kvMaps = new HashMap[numberOfTests];
-    ArrayList<String>[] columnLists = new ArrayList[numberOfTests];
-
-    for (int i = 0; i < numberOfTests; i++) {
-      kvMaps[i] = new HashMap<String, KeyValue>();
-      columnLists[i] = new ArrayList<String>();
-      for (String column : allColumns) {
-        if (Math.random() < selectPercent) {
-          columnLists[i].add(column);
-        }
+      for (int i = 0; i < numberOfTests; i++) {
+        kvMaps[i] = new HashMap<String, KeyValue>();
+        columnLists[i] = new ArrayList<String>();
+        for (String column : allColumns) {
+          if (Math.random() < selectPercent) {
+            columnLists[i].add(column);
+          }
+        }
       }
-    }

-    for (String value : values) {
-      for (String row : rows) {
-        Put p = new Put(Bytes.toBytes(row));
-        for (String column : allColumns) {
-          for (long timestamp = 1; timestamp <= maxTimestamp; timestamp++) {
-            KeyValue kv =
-                KeyValueTestUtil.create(row, family, column, timestamp, value);
-            if (Math.random() < putPercentage) {
-              p.add(kv);
-              allKVMap.put(kv.getKeyString(), kv);
-              for (int i = 0; i < numberOfTests; i++) {
-                if (columnLists[i].contains(column)) {
-                  kvMaps[i].put(kv.getKeyString(), kv);
-                }
-              }
-            }
-          }
-        }
-        region.put(p);
-        if (Math.random() < flushPercentage) {
-          LOG.info("Flushing... ");
-          region.flushcache();
-        }
+      for (String value : values) {
+        for (String row : rows) {
+          Put p = new Put(Bytes.toBytes(row));
+          for (String column : allColumns) {
+            for (long timestamp = 1; timestamp <= maxTimestamp; timestamp++) {
+              KeyValue kv =
+                  KeyValueTestUtil.create(row, family, column, timestamp, value);
+              if (Math.random() < putPercentage) {
+                p.add(kv);
+                allKVMap.put(kv.getKeyString(), kv);
+                for (int i = 0; i < numberOfTests; i++) {
+                  if (columnLists[i].contains(column)) {
+                    kvMaps[i].put(kv.getKeyString(), kv);
+                  }
+                }
+              }
+            }
+          }
+          region.put(p);
+          if (Math.random() < flushPercentage) {
+            LOG.info("Flushing... ");
+            region.flushcache();
+          }

-        if (Math.random() < minorPercentage) {
-          LOG.info("Minor compacting... ");
-          region.compactStores(false);
-        }
+          if (Math.random() < minorPercentage) {
+            LOG.info("Minor compacting... ");
+            region.compactStores(false);
+          }

-        if (Math.random() < majorPercentage) {
-          LOG.info("Major compacting... ");
-          region.compactStores(true);
-        }
+          if (Math.random() < majorPercentage) {
+            LOG.info("Major compacting... ");
+            region.compactStores(true);
+          }
+        }
       }
-    }

-    for (int i = 0; i < numberOfTests + 1; i++) {
-      Collection<KeyValue> kvSet;
-      Scan scan = new Scan();
-      scan.setMaxVersions();
-      if (i < numberOfTests) {
-        kvSet = kvMaps[i].values();
-        for (String column : columnLists[i]) {
-          scan.addColumn(familyBytes, Bytes.toBytes(column));
-        }
-        LOG.info("ExplicitColumns scanner");
-        LOG.info("Columns: " + columnLists[i].size() + " Keys: "
-            + kvSet.size());
-      } else {
-        kvSet = allKVMap.values();
-        LOG.info("Wildcard scanner");
-        LOG.info("Columns: " + allColumns.size() + " Keys: " + kvSet.size());
+      for (int i = 0; i < numberOfTests + 1; i++) {
+        Collection<KeyValue> kvSet;
+        Scan scan = new Scan();
+        scan.setMaxVersions();
+        if (i < numberOfTests) {
+          kvSet = kvMaps[i].values();
+          for (String column : columnLists[i]) {
+            scan.addColumn(familyBytes, Bytes.toBytes(column));
+          }
+          LOG.info("ExplicitColumns scanner");
+          LOG.info("Columns: " + columnLists[i].size() + " Keys: "
+              + kvSet.size());
+        } else {
+          kvSet = allKVMap.values();
+          LOG.info("Wildcard scanner");
+          LOG.info("Columns: " + allColumns.size() + " Keys: " + kvSet.size());

-      }
-      InternalScanner scanner = region.getScanner(scan);
-      List<KeyValue> results = new ArrayList<KeyValue>();
-      while (scanner.next(results))
-        ;
-      assertEquals(kvSet.size(), results.size());
-      assertTrue(results.containsAll(kvSet));
+        }
+        InternalScanner scanner = region.getScanner(scan);
+        List<KeyValue> results = new ArrayList<KeyValue>();
+        while (scanner.next(results))
+          ;
+        assertEquals(kvSet.size(), results.size());
+        assertTrue(results.containsAll(kvSet));
+      }
+    } finally {
+      HRegion.closeHRegion(region);
     }
   }
@@ -176,94 +179,97 @@
     HRegion region = HRegion.createHRegion(info, HBaseTestingUtility.getTestDir(), TEST_UTIL
         .getConfiguration());
+    try {
+      List<String> rows = generateRandomWords(10, "row");
+      List<String> allColumns = generateRandomWords(100, "column");
-    List<String> rows = generateRandomWords(10, "row");
-    List<String> allColumns = generateRandomWords(100, "column");
+      long maxTimestamp = 2;
+      double selectPercent = 0.5;
+      int numberOfTests = 5;
+      double flushPercentage = 0.2;
+      double minorPercentage = 0.2;
+      double majorPercentage = 0.2;
+      double putPercentage = 0.2;
-    long maxTimestamp = 2;
-    double selectPercent = 0.5;
-    int numberOfTests = 5;
-    double flushPercentage = 0.2;
-    double minorPercentage = 0.2;
-    double majorPercentage = 0.2;
-    double putPercentage = 0.2;
+      HashMap<String, KeyValue> allKVMap = new HashMap<String, KeyValue>();
-    HashMap<String, KeyValue> allKVMap = new HashMap<String, KeyValue>();
+      HashMap<String, KeyValue>[] kvMaps = new HashMap[numberOfTests];
+      ArrayList<String>[] columnLists = new ArrayList[numberOfTests];
+      String valueString = "Value";
-    HashMap<String, KeyValue>[] kvMaps = new HashMap[numberOfTests];
-    ArrayList<String>[] columnLists = new ArrayList[numberOfTests];
-    String valueString = "Value";
-
-    for (int i = 0; i < numberOfTests; i++) {
-      kvMaps[i] = new HashMap<String, KeyValue>();
-      columnLists[i] = new ArrayList<String>();
-      for (String column : allColumns) {
-        if (Math.random() < selectPercent) {
+      for (int i = 0; i < numberOfTests; i++) {
+        kvMaps[i] = new HashMap<String, KeyValue>();
+        columnLists[i] = new ArrayList<String>();
+        for (String column : allColumns) {
+          if (Math.random() < selectPercent) {
           columnLists[i].add(column);
-        }
+          }
+        }
       }
-    }

-    for (String row : rows) {
-      Put p = new Put(Bytes.toBytes(row));
-      for (String column : allColumns) {
-        for (long timestamp = 1; timestamp <= maxTimestamp; timestamp++) {
-          KeyValue kv =
-              KeyValueTestUtil.create(row, family, column, timestamp,
-                  valueString);
-          if (Math.random() < putPercentage) {
-            p.add(kv);
-            allKVMap.put(kv.getKeyString(), kv);
-            for (int i = 0; i < numberOfTests; i++) {
-              if (columnLists[i].contains(column)) {
-                kvMaps[i].put(kv.getKeyString(), kv);
-              }
-            }
-          }
+      for (String row : rows) {
+        Put p = new Put(Bytes.toBytes(row));
+        for (String column : allColumns) {
+          for (long timestamp = 1; timestamp <= maxTimestamp; timestamp++) {
+            KeyValue kv =
+                KeyValueTestUtil.create(row, family, column, timestamp,
+                    valueString);
+            if (Math.random() < putPercentage) {
+              p.add(kv);
+              allKVMap.put(kv.getKeyString(), kv);
+              for (int i = 0; i < numberOfTests; i++) {
+                if (columnLists[i].contains(column)) {
+                  kvMaps[i].put(kv.getKeyString(), kv);
+                }
+              }
+            }

-        }
-      }
-      region.put(p);
-      if (Math.random() < flushPercentage) {
-        LOG.info("Flushing... ");
-        region.flushcache();
-      }
+          }
+        }
+        region.put(p);
+        if (Math.random() < flushPercentage) {
+          LOG.info("Flushing... ");
+          region.flushcache();
+        }

-      if (Math.random() < minorPercentage) {
-        LOG.info("Minor compacting... ");
-        region.compactStores(false);
-      }
+        if (Math.random() < minorPercentage) {
+          LOG.info("Minor compacting... ");
+          region.compactStores(false);
+        }

       if (Math.random() < majorPercentage) {
-        LOG.info("Major compacting... ");
-        region.compactStores(true);
+          LOG.info("Major compacting... ");
+          region.compactStores(true);
+        }
       }
-    }

-    for (int i = 0; i < numberOfTests + 1; i++) {
-      Collection<KeyValue> kvSet;
-      Scan scan = new Scan();
-      scan.setMaxVersions();
-      if (i < numberOfTests) {
-        kvSet = kvMaps[i].values();
-        for (String column : columnLists[i]) {
-          scan.addColumn(familyBytes, Bytes.toBytes(column));
+      for (int i = 0; i < numberOfTests + 1; i++) {
+        Collection<KeyValue> kvSet;
+        Scan scan = new Scan();
+        scan.setMaxVersions();
+        if (i < numberOfTests) {
+          kvSet = kvMaps[i].values();
+          for (String column : columnLists[i]) {
+            scan.addColumn(familyBytes, Bytes.toBytes(column));
+          }
+          LOG.info("ExplicitColumns scanner");
+          LOG.info("Columns: " + columnLists[i].size() + " Keys: "
+              + kvSet.size());
+        } else {
+          kvSet = allKVMap.values();
+          LOG.info("Wildcard scanner");
+          LOG.info("Columns: " + allColumns.size() + " Keys: " + kvSet.size());

         }
-        LOG.info("ExplicitColumns scanner");
-        LOG.info("Columns: " + columnLists[i].size() + " Keys: "
-            + kvSet.size());
-      } else {
-        kvSet = allKVMap.values();
-        LOG.info("Wildcard scanner");
-        LOG.info("Columns: " + allColumns.size() + " Keys: " + kvSet.size());
-
+        InternalScanner scanner = region.getScanner(scan);
+        List<KeyValue> results = new ArrayList<KeyValue>();
+        while (scanner.next(results))
+          ;
+        assertEquals(kvSet.size(), results.size());
+        assertTrue(results.containsAll(kvSet));
       }
-      InternalScanner scanner = region.getScanner(scan);
-      List<KeyValue> results = new ArrayList<KeyValue>();
-      while (scanner.next(results))
-        ;
-      assertEquals(kvSet.size(), results.size());
-      assertTrue(results.containsAll(kvSet));
+    } finally {
+      HRegion.closeHRegion(region);
     }
   }
Index: src/test/java/org/apache/hadoop/hbase/regionserver/TestResettingCounters.java
===================================================================
--- src/test/java/org/apache/hadoop/hbase/regionserver/TestResettingCounters.java	(revision 1329052)
+++ src/test/java/org/apache/hadoop/hbase/regionserver/TestResettingCounters.java	(working copy)
@@ -70,31 +70,34 @@
       }
     }
     HRegion region = HRegion.createHRegion(hri, path, conf);
+    try {
+      Increment odd = new Increment(rows[0]);
+      Increment even = new Increment(rows[0]);
+      Increment all = new Increment(rows[0]);
+      for (int i=0;i<numQualifiers;i++) {
+        if (i % 2 == 0) even.addColumn(families[0], qualifiers[i], 1);
+        else odd.addColumn(families[0], qualifiers[i], 1);
+        all.addColumn(families[0], qualifiers[i], 1);
+      }
-    Increment odd = new Increment(rows[0]);
-    Increment even = new Increment(rows[0]);
-    Increment all = new Increment(rows[0]);
-    for (int i=0;i<numQualifiers;i++) {
-      if (i % 2 == 0) even.addColumn(families[0], qualifiers[i], 1);
-      else odd.addColumn(families[0], qualifiers[i], 1);
-      all.addColumn(families[0], qualifiers[i], 1);
-    }

-    // increment odd qualifiers 5 times and flush
-    for (int i=0;i<5;i++) region.increment(odd, null, false);
-    region.flushcache();
+      // increment odd qualifiers 5 times and flush
+      for (int i=0;i<5;i++) region.increment(odd, null, false);
+      region.flushcache();

-    // increment even qualifiers 5 times
-    for (int i=0;i<5;i++) region.increment(even, null, false);
+      // increment even qualifiers 5 times
+      for (int i=0;i<5;i++) region.increment(even, null, false);

-    // increment all qualifiers, should have value=6 for all
-    Result result = region.increment(all, null, false);
-    assertEquals(numQualifiers, result.size());
-    KeyValue [] kvs = result.raw();
-    for (int i=0;i<kvs.length;i++) {
-      System.out.println(kvs[i].toString());
-      assertTrue(Bytes.equals(kvs[i].getQualifier(), qualifiers[i]));
-      assertEquals(6, Bytes.toLong(kvs[i].getValue()));
-    }
+      // increment all qualifiers, should have value=6 for all
+      Result result = region.increment(all, null, false);
+      assertEquals(numQualifiers, result.size());
+      KeyValue [] kvs = result.raw();
+      for (int i=0;i<kvs.length;i++) {
+        System.out.println(kvs[i].toString());
+        assertTrue(Bytes.equals(kvs[i].getQualifier(), qualifiers[i]));
+        assertEquals(6, Bytes.toLong(kvs[i].getValue()));
+      }
+    } finally {
+      HRegion.closeHRegion(region);
+    }
   }
 }
Index: src/test/java/org/apache/hadoop/hbase/filter/TestColumnPrefixFilter.java
===================================================================
--- src/test/java/org/apache/hadoop/hbase/filter/TestColumnPrefixFilter.java	(revision 1329052)
+++ src/test/java/org/apache/hadoop/hbase/filter/TestColumnPrefixFilter.java	(working copy)
@@ -53,50 +53,54 @@
     HRegionInfo info = new HRegionInfo(htd, null, null, false);
     HRegion region = HRegion.createHRegion(info, HBaseTestingUtility.
         getTestDir(), TEST_UTIL.getConfiguration());
+    try {
-    List<String> rows = generateRandomWords(100, "row");
-    List<String> columns = generateRandomWords(10000, "column");
-    long maxTimestamp = 2;
+      List<String> rows = generateRandomWords(100, "row");
+      List<String> columns = generateRandomWords(10000, "column");
+      long maxTimestamp = 2;

-    List<KeyValue> kvList = new ArrayList<KeyValue>();
+      List<KeyValue> kvList = new ArrayList<KeyValue>();

-    Map<String, List<KeyValue>> prefixMap = new HashMap<String,
-        List<KeyValue>>();
+      Map<String, List<KeyValue>> prefixMap = new HashMap<String,
+          List<KeyValue>>();

-    prefixMap.put("p", new ArrayList<KeyValue>());
-    prefixMap.put("s", new ArrayList<KeyValue>());
+      prefixMap.put("p", new ArrayList<KeyValue>());
+      prefixMap.put("s", new ArrayList<KeyValue>());

-    String valueString = "ValueString";
+      String valueString = "ValueString";

-    for (String row: rows) {
-      Put p = new Put(Bytes.toBytes(row));
-      for (String column: columns) {
-        for (long timestamp = 1; timestamp <= maxTimestamp; timestamp++) {
-          KeyValue kv = KeyValueTestUtil.create(row, family, column, timestamp,
-              valueString);
-          p.add(kv);
-          kvList.add(kv);
-          for (String s: prefixMap.keySet()) {
-            if (column.startsWith(s)) {
-              prefixMap.get(s).add(kv);
-            }
-          }
-        }
+      for (String row: rows) {
+        Put p = new Put(Bytes.toBytes(row));
+        for (String column: columns) {
+          for (long timestamp = 1; timestamp <= maxTimestamp; timestamp++) {
+            KeyValue kv = KeyValueTestUtil.create(row, family, column, timestamp,
+                valueString);
+            p.add(kv);
+            kvList.add(kv);
+            for (String s: prefixMap.keySet()) {
+              if (column.startsWith(s)) {
+                prefixMap.get(s).add(kv);
+              }
+            }
+          }
+        }
+        region.put(p);
       }
-      region.put(p);
-    }

-    ColumnPrefixFilter filter;
-    Scan scan = new Scan();
-    scan.setMaxVersions();
-    for (String s: prefixMap.keySet()) {
-      filter = new ColumnPrefixFilter(Bytes.toBytes(s));
+      ColumnPrefixFilter filter;
+      Scan scan = new Scan();
+      scan.setMaxVersions();
+      for (String s: prefixMap.keySet()) {
+        filter = new ColumnPrefixFilter(Bytes.toBytes(s));

-      scan.setFilter(filter);
+        scan.setFilter(filter);

-      InternalScanner scanner = region.getScanner(scan);
-      List<KeyValue> results = new ArrayList<KeyValue>();
-      while(scanner.next(results));
-      assertEquals(prefixMap.get(s).size(), results.size());
+        InternalScanner scanner = region.getScanner(scan);
+        List<KeyValue> results = new ArrayList<KeyValue>();
+        while(scanner.next(results));
+        assertEquals(prefixMap.get(s).size(), results.size());
+      }
+    } finally {
+      HRegion.closeHRegion(region);
     }
   }
@@ -109,54 +113,57 @@
     HRegionInfo info = new HRegionInfo(htd, null, null, false);
     HRegion region = HRegion.createHRegion(info, HBaseTestingUtility.
         getTestDir(), TEST_UTIL.getConfiguration());
+    try {
+      List<String> rows = generateRandomWords(100, "row");
+      List<String> columns = generateRandomWords(10000, "column");
+      long maxTimestamp = 2;
-    List<String> rows = generateRandomWords(100, "row");
-    List<String> columns = generateRandomWords(10000, "column");
-    long maxTimestamp = 2;

+      List<KeyValue> kvList = new ArrayList<KeyValue>();
-    List<KeyValue> kvList = new ArrayList<KeyValue>();

-    Map<String, List<KeyValue>> prefixMap = new HashMap<String,
+      Map<String, List<KeyValue>> prefixMap = new HashMap<String,
         List<KeyValue>>();

-    prefixMap.put("p", new ArrayList<KeyValue>());
-    prefixMap.put("s", new ArrayList<KeyValue>());
+      prefixMap.put("p", new ArrayList<KeyValue>());
+      prefixMap.put("s", new ArrayList<KeyValue>());

-    String valueString = "ValueString";
+      String valueString = "ValueString";

-    for (String row: rows) {
-      Put p = new Put(Bytes.toBytes(row));
-      for (String column: columns) {
-        for (long timestamp = 1; timestamp <= maxTimestamp; timestamp++) {
-          KeyValue kv = KeyValueTestUtil.create(row, family, column, timestamp,
-              valueString);
-          p.add(kv);
-          kvList.add(kv);
-          for (String s: prefixMap.keySet()) {
-            if (column.startsWith(s)) {
-              prefixMap.get(s).add(kv);
-            }
-          }
-        }
+      for (String row: rows) {
+        Put p = new Put(Bytes.toBytes(row));
+        for (String column: columns) {
+          for (long timestamp = 1; timestamp <= maxTimestamp; timestamp++) {
+            KeyValue kv = KeyValueTestUtil.create(row, family, column, timestamp,
+                valueString);
+            p.add(kv);
+            kvList.add(kv);
+            for (String s: prefixMap.keySet()) {
+              if (column.startsWith(s)) {
+                prefixMap.get(s).add(kv);
+              }
+            }
+          }
+        }
+        region.put(p);
       }
-      region.put(p);
-    }

-    ColumnPrefixFilter filter;
-    Scan scan = new Scan();
-    scan.setMaxVersions();
-    for (String s: prefixMap.keySet()) {
-      filter = new ColumnPrefixFilter(Bytes.toBytes(s));
+      ColumnPrefixFilter filter;
+      Scan scan = new Scan();
+      scan.setMaxVersions();
+      for (String s: prefixMap.keySet()) {
+        filter = new ColumnPrefixFilter(Bytes.toBytes(s));

-      //this is how this test differs from the one above
-      FilterList filterList = new FilterList(FilterList.Operator.MUST_PASS_ALL);
-      filterList.addFilter(filter);
-      scan.setFilter(filterList);
+        //this is how this test differs from the one above
+        FilterList filterList = new FilterList(FilterList.Operator.MUST_PASS_ALL);
+        filterList.addFilter(filter);
+        scan.setFilter(filterList);

-      InternalScanner scanner = region.getScanner(scan);
-      List<KeyValue> results = new ArrayList<KeyValue>();
-      while(scanner.next(results));
-      assertEquals(prefixMap.get(s).size(), results.size());
+        InternalScanner scanner = region.getScanner(scan);
+        List<KeyValue> results = new ArrayList<KeyValue>();
+        while(scanner.next(results));
+        assertEquals(prefixMap.get(s).size(), results.size());
+      }
+    } finally {
+      HRegion.closeHRegion(region);
     }
   }
Index: src/test/java/org/apache/hadoop/hbase/filter/TestFilter.java
===================================================================
--- src/test/java/org/apache/hadoop/hbase/filter/TestFilter.java	(revision 1329052)
+++ src/test/java/org/apache/hadoop/hbase/filter/TestFilter.java	(working copy)
@@ -163,6 +163,7 @@

   protected void tearDown() throws Exception {
     this.region.close();
+    this.region.getLog().closeAndDelete();
     super.tearDown();
   }

Index: src/test/java/org/apache/hadoop/hbase/filter/TestDependentColumnFilter.java
===================================================================
--- src/test/java/org/apache/hadoop/hbase/filter/TestDependentColumnFilter.java	(revision 1329052)
+++ src/test/java/org/apache/hadoop/hbase/filter/TestDependentColumnFilter.java	(working copy)
@@ -84,6 +84,7 @@
   protected void tearDown() throws Exception {
     super.tearDown();
     this.region.close();
+    this.region.getLog().closeAndDelete();
   }

   private void addData() throws IOException {
Index: src/test/java/org/apache/hadoop/hbase/master/TestOpenedRegionHandler.java
===================================================================
--- src/test/java/org/apache/hadoop/hbase/master/TestOpenedRegionHandler.java	(revision 1329052)
+++ src/test/java/org/apache/hadoop/hbase/master/TestOpenedRegionHandler.java	(working copy)
@@ -118,6 +118,7 @@
   @Test
   public void testShouldNotCompeleteOpenedRegionSuccessfullyIfVersionMismatches()
       throws Exception {
+    HRegion region = null;
     try {
       TEST_UTIL.startMiniZKCluster();
       final Server server = new MockServer();
@@ -125,8 +126,7 @@
           "testShouldNotCompeleteOpenedRegionSuccessfullyIfVersionMismatches");
       final HRegionInfo hri = new HRegionInfo(htd, HConstants.EMPTY_END_ROW,
           HConstants.EMPTY_END_ROW);
-      HRegion region = HRegion.createHRegion(hri, HBaseTestingUtility
-          .getTestDir(), server.getConfiguration());
+      region = HRegion.createHRegion(hri, HBaseTestingUtility.getTestDir(), server.getConfiguration());
       assertNotNull(region);
       AssignmentManager am = Mockito.mock(AssignmentManager.class);
       when(am.isRegionInTransition(hri)).thenReturn(
@@ -165,6 +165,8 @@
       assertEquals("The region should not be opened successfully.", regionName,
           region.getRegionInfo().getEncodedName());
     } finally {
+      region.close();
+      region.getLog().closeAndDelete();
       TEST_UTIL.shutdownMiniZKCluster();
     }
   }
Index: src/test/java/org/apache/hadoop/hbase/master/TestMasterFailover.java
===================================================================
--- src/test/java/org/apache/hadoop/hbase/master/TestMasterFailover.java	(revision 1329052)
+++ src/test/java/org/apache/hadoop/hbase/master/TestMasterFailover.java	(working copy)
@@ -61,6 +61,51 @@
 public class TestMasterFailover {
   private static final Log LOG = LogFactory.getLog(TestMasterFailover.class);

+  @Test (timeout=180000)
+  public void testShouldCheckMasterFailOverWhenMETAIsInOpenedState()
+      throws Exception {
+    final int NUM_MASTERS = 1;
+    final int NUM_RS = 2;
+
+    Configuration conf = HBaseConfiguration.create();
+    conf.setInt("hbase.master.assignment.timeoutmonitor.period", 2000);
+    conf.setInt("hbase.master.assignment.timeoutmonitor.timeout", 8000);
+    // Start the cluster
+    HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(conf);
+    TEST_UTIL.startMiniCluster(NUM_MASTERS, NUM_RS);
+    MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
+
+    // Find regionserver carrying meta.
+    List<RegionServerThread> regionServerThreads = cluster.getRegionServerThreads();
+    int count = -1;
+    HRegion metaRegion = null;
+    for (RegionServerThread regionServerThread : regionServerThreads) {
+      HRegionServer regionServer = regionServerThread.getRegionServer();
+      metaRegion = regionServer.getOnlineRegion(HRegionInfo.FIRST_META_REGIONINFO.getRegionName());
+      count++;
+      regionServer.abort("");
+      if (null != metaRegion) break;
+    }
+    HRegionServer regionServer = cluster.getRegionServer(count);
+
+    TEST_UTIL.shutdownMiniHBaseCluster();
+
+    // Create a ZKW to use in the test
+    ZooKeeperWatcher zkw =
+      HBaseTestingUtility.createAndForceNodeToOpenedState(TEST_UTIL,
+          metaRegion, regionServer.getServerName());
+
+    LOG.info("Starting cluster for second time");
+    TEST_UTIL.startMiniHBaseCluster(1, 1);
+
+    // Failover should be completed, now wait for no RIT
+    log("Waiting for no more RIT");
+    ZKAssign.blockUntilNoRIT(zkw);
+
+    // Stop the cluster
+    TEST_UTIL.shutdownMiniCluster();
+  }
+
   /**
    * Simple test of master failover.
    * <p>
@@ -142,63 +187,9 @@
     TEST_UTIL.shutdownMiniCluster();
   }

-  @Test
-  public void testShouldCheckMasterFailOverWhenMETAIsInOpenedState()
-      throws Exception {
-    final int NUM_MASTERS = 1;
-    final int NUM_RS = 2;
-    Configuration conf = HBaseConfiguration.create();
-    conf.setInt("hbase.master.assignment.timeoutmonitor.period", 2000);
-    conf.setInt("hbase.master.assignment.timeoutmonitor.timeout", 8000);
-    // Start the cluster
-    HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(conf);
-    TEST_UTIL.startMiniCluster(NUM_MASTERS, NUM_RS);
-    MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
-    // get all the master threads
-    List<MasterThread> masterThreads = cluster.getMasterThreads();
-    // wait for each to come online
-    for (MasterThread mt : masterThreads) {
-      assertTrue(mt.isAlive());
-    }
-    assertEquals(NUM_MASTERS, masterThreads.size());
-    assertEquals(1, masterThreads.size());
-
-    List<RegionServerThread> regionServerThreads = cluster
-        .getRegionServerThreads();
-    int count = -1;
-    HRegion metaRegion = null;
-    for (RegionServerThread regionServerThread : regionServerThreads) {
-      HRegionServer regionServer = regionServerThread.getRegionServer();
-      metaRegion = regionServer
-          .getOnlineRegion(HRegionInfo.FIRST_META_REGIONINFO.getRegionName());
-      count++;
-      regionServer.abort("");
-      if (null != metaRegion) {
-        break;
-      }
-    }
-    HRegionServer regionServer = cluster.getRegionServer(count);
-
-    cluster.shutdown();
-    ZooKeeperWatcher zkw =
-      HBaseTestingUtility.createAndForceNodeToOpenedState(TEST_UTIL,
-          metaRegion, regionServer.getServerName());
-
-    TEST_UTIL.startMiniHBaseCluster(1, 1);
-
-    // Failover should be completed, now wait for no RIT
-    log("Waiting for no more RIT");
-    ZKAssign.blockUntilNoRIT(zkw);
-
-    // Stop the cluster
-    TEST_UTIL.shutdownMiniCluster();
-  }
-
-
   /**
    * Complex test of master failover that tests as many permutations of the
    * different possible states that regions in transition could be in within ZK.
Index: src/test/java/org/apache/hadoop/hbase/util/TestMergeTable.java
===================================================================
--- src/test/java/org/apache/hadoop/hbase/util/TestMergeTable.java	(revision 1329052)
+++ src/test/java/org/apache/hadoop/hbase/util/TestMergeTable.java	(working copy)
@@ -165,8 +165,8 @@
       HRegion.addRegionToMETA(meta, r);
     }
     meta.close();
-    meta.getLog().closeAndDelete();
+    if (meta.getLog() != null) meta.getLog().closeAndDelete();
     root.close();
-    root.getLog().closeAndDelete();
+    if (root.getLog() != null) root.getLog().closeAndDelete();
   }
 }
Index: src/test/java/org/apache/hadoop/hbase/util/TestMergeTool.java
===================================================================
--- src/test/java/org/apache/hadoop/hbase/util/TestMergeTool.java	(revision 1329052)
+++ src/test/java/org/apache/hadoop/hbase/util/TestMergeTool.java	(working copy)
@@ -163,6 +163,13 @@
   @Override
   public void tearDown() throws Exception {
     super.tearDown();
+    for (int i = 0; i < sourceRegions.length; i++) {
+      HRegion r = regions[i];
+      if (r != null) {
+        r.close();
+        if (r.getLog() != null) r.getLog().close();
+      }
+    }
     shutdownDfs(dfsCluster);
   }

Index: src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java	(revision 1329052)
+++ src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java	(working copy)
@@ -2584,7 +2584,11 @@
    * bootstrap code in the HMaster constructor.
    * Note, this method creates an {@link HLog} for the created region. It
    * needs to be closed explicitly.  Use {@link HRegion#getLog()} to get
-   * access.
+   * access.  When done with a region created using this method, you will
+   * need to explicitly close the {@link HLog} it created too; it will not
+   * be done for you.  Not closing the log will leave at least one daemon
+   * thread running.  Call {@link #closeHRegion(HRegion)} and it will do
+   * the necessary cleanup for you.
    * @param info Info for region to create.
    * @param rootDir Root directory for HBase instance
    * @param conf
@@ -2608,6 +2612,23 @@
     return region;
   }

+  /**
+   * This will do the necessary cleanup a call to
+   * {@link #createHRegion(HRegionInfo, Path, Configuration, HTableDescriptor)}
+   * requires.  This method will close the region and then close its
+   * associated {@link HLog} file.  You can use it on a region created with
+   * the other createHRegion, the one that takes an {@link HLog} instance,
+   * but do not be surprised by the call to {@link HLog#closeAndDelete()}
+   * on the {@link HLog} the HRegion was carrying.
+   * @param r
+   * @throws IOException
+   */
+  public static void closeHRegion(final HRegion r) throws IOException {
+    if (r == null) return;
+    r.close();
+    if (r.getLog() == null) return;
+    r.getLog().closeAndDelete();
+  }
+
   /**
    * Open a Region.
    * @param info Info for region to be opened.
Index: src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java	(revision 1329052)
+++ src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java	(working copy)
@@ -22,6 +22,7 @@
 import java.lang.ref.WeakReference;
 import java.nio.ByteBuffer;
 import java.util.LinkedList;
+import java.util.List;
 import java.util.PriorityQueue;
 import java.util.concurrent.atomic.AtomicLong;
 import java.util.concurrent.locks.ReentrantLock;
@@ -504,16 +505,17 @@
    */
   private static class EvictionThread extends Thread {
     private WeakReference<LruBlockCache> cache;
+    private boolean go = true;

     public EvictionThread(LruBlockCache cache) {
-      super("LruBlockCache.EvictionThread");
+      super(Thread.currentThread().getName() + ".LruBlockCache.EvictionThread");
       setDaemon(true);
       this.cache = new WeakReference<LruBlockCache>(cache);
     }

     @Override
     public void run() {
-      while(true) {
+      while (this.go) {
         synchronized(this) {
           try {
             this.wait();
@@ -524,11 +526,17 @@
           cache.evict();
         }
       }
+
     public void evict() {
       synchronized(this) {
         this.notify(); // FindBugs NN_NAKED_NOTIFY
       }
     }
+
+    void shutdown() {
+      this.go = false;
+      interrupt();
+    }
   }

   /*
@@ -711,5 +719,20 @@

   public void shutdown() {
     this.scheduleThreadPool.shutdown();
+    for (int i = 0; i < 10; i++) {
+      if (!this.scheduleThreadPool.isShutdown()) {
+        try {
+          Thread.sleep(10);
+        } catch (InterruptedException e) {
+          Thread.currentThread().interrupt();
+          break;
+        }
+      }
+    }
+    if (!this.scheduleThreadPool.isShutdown()) {
+      List<Runnable> runnables = this.scheduleThreadPool.shutdownNow();
+      LOG.debug("Still running " + runnables);
+    }
+    this.evictionThread.shutdown();
   }
 }
Index: src/main/java/org/apache/hadoop/hbase/util/JVMClusterUtil.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/util/JVMClusterUtil.java	(revision 1329052)
+++ src/main/java/org/apache/hadoop/hbase/util/JVMClusterUtil.java	(working copy)
@@ -187,7 +187,7 @@
     }
     // Wait for an active master
     while (true) {
-      for (JVMClusterUtil.MasterThread t : masters) {
+      for (JVMClusterUtil.MasterThread t: masters) {
         if (t.master.isActiveMaster()) {
           return t.master.getMasterAddress().toString();
         }
@@ -208,13 +208,17 @@
       final List<RegionServerThread> regionservers) {
     LOG.debug("Shutting down HBase Cluster");
     if (masters != null) {
+      // Do backups first.
+      JVMClusterUtil.MasterThread activeMaster = null;
       for (JVMClusterUtil.MasterThread t : masters) {
-        if (t.master.isActiveMaster()) {
-          t.master.shutdown();
+        if (!t.master.isActiveMaster()) {
+          t.master.stopMaster();
         } else {
-          t.master.stopMaster();
+          activeMaster = t;
         }
       }
+      // Do active after.
+      if (activeMaster != null) activeMaster.master.shutdown();
     }
     // regionServerThreads can never be null because they are initialized when
     // the class is constructed.
Index: src/main/java/org/apache/hadoop/hbase/client/HTable.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/client/HTable.java	(revision 1329052)
+++ src/main/java/org/apache/hadoop/hbase/client/HTable.java	(working copy)
@@ -1342,7 +1342,7 @@
       SecurityManager s = System.getSecurityManager();
       group = (s != null)? s.getThreadGroup() :
                            Thread.currentThread().getThreadGroup();
-      namePrefix = "pool-" +
+      namePrefix = "htable-pool-" +
         poolNumber.getAndIncrement() +
         "-thread-";
     }
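
Reviewer notes, not part of the patch to apply:

The shape this patch gives every test that creates an HRegion directly is the
following (a minimal sketch; info and TEST_UTIL stand in for whatever
descriptor and testing utility the test already has):

    HRegion region = HRegion.createHRegion(info,
        HBaseTestingUtility.getTestDir(), TEST_UTIL.getConfiguration());
    try {
      // Exercise the region: puts, scans, flushes, compactions...
    } finally {
      // Closes the region, then closeAndDelete()s the HLog that
      // createHRegion made for it; without this the log's daemon
      // thread outlives the test.
      HRegion.closeHRegion(region);
    }

Tests that must not delete the log (TestMergeTool) or that hold the region in
a field (TestFilter, TestDependentColumnFilter) instead do the two steps by
hand: region.close() followed by region.getLog().close() or
region.getLog().closeAndDelete().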
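The EvictionThread change is the usual stop-flag-plus-interrupt pattern for a
thread parked in wait(). A standalone sketch of the idea (class and method
names here are illustrative, not from the patch):

    class Worker extends Thread {
      // The patch's 'go' field is not volatile; the interrupt() issued by
      // shutdown() is what reliably knocks the thread out of wait() so the
      // flag gets rechecked.
      private volatile boolean go = true;

      @Override
      public void run() {
        while (go) {
          synchronized (this) {
            try {
              wait();            // woken by a work request, or by shutdown()
            } catch (InterruptedException e) {
              // fall through and recheck go
            }
          }
          // do one round of work here
        }
      }

      void shutdownWorker() {
        go = false;
        interrupt();
      }
    }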
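Two smaller behavior notes. LruBlockCache.shutdown() now gives its scheduler
pool up to roughly 100ms (ten 10ms sleeps) to wind down before forcing
shutdownNow(), then stops the eviction thread, so the cache no longer leaks
threads across mini-cluster restarts. JVMClusterUtil.shutdown() stops backup
masters before asking the active master to shut down, presumably so that a
backup cannot be promoted to active while the cluster-wide shutdown is in
progress.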