Index: src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java =================================================================== --- src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java (revision 1005894) +++ src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java (working copy) @@ -451,12 +451,27 @@ */ public HTable createTable(byte[] tableName, byte[][] families) throws IOException { + return createTable(tableName, families, + new Configuration(getConfiguration())); + } + + /** + * Create a table. + * @param tableName + * @param families + * @param c Configuration to use + * @return An HTable instance for the created table. + * @throws IOException + */ + public HTable createTable(byte[] tableName, byte[][] families, + final Configuration c) + throws IOException { HTableDescriptor desc = new HTableDescriptor(tableName); for(byte[] family : families) { desc.addFamily(new HColumnDescriptor(family)); } getHBaseAdmin().createTable(desc); - return new HTable(new Configuration(getConfiguration()), tableName); + return new HTable(c, tableName); } /** Index: src/test/java/org/apache/hadoop/hbase/HBaseClusterTestCase.java =================================================================== --- src/test/java/org/apache/hadoop/hbase/HBaseClusterTestCase.java (revision 1005894) +++ src/test/java/org/apache/hadoop/hbase/HBaseClusterTestCase.java (working copy) @@ -25,6 +25,7 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.client.HConnectionManager; @@ -102,13 +103,13 @@ this.zooKeeperCluster = new MiniZooKeeperCluster(); int clientPort = this.zooKeeperCluster.startup(testDir); conf.set("hbase.zookeeper.property.clientPort", Integer.toString(clientPort)); - + Configuration c = new Configuration(this.conf); // start the mini cluster - this.cluster = new 
MiniHBaseCluster(conf, regionServers); + this.cluster = new MiniHBaseCluster(c, regionServers); if (openMetaTable) { // opening the META table ensures that cluster is running - new HTable(conf, HConstants.META_TABLE_NAME); + new HTable(c, HConstants.META_TABLE_NAME); } } Index: src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java =================================================================== --- src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java (revision 1005894) +++ src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java (working copy) @@ -19,6 +19,17 @@ */ package org.apache.hadoop.hbase.regionserver; +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.TreeMap; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicReference; + import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.fs.Path; @@ -50,28 +61,15 @@ import org.apache.hadoop.hbase.regionserver.wal.HLog; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.EnvironmentEdgeManagerTestHelper; -import org.apache.hadoop.hbase.util.FSUtils; import org.apache.hadoop.hbase.util.IncrementingEnvironmentEdge; import org.apache.hadoop.hbase.util.ManualEnvironmentEdge; import org.apache.hadoop.hbase.util.Pair; import org.apache.hadoop.hbase.util.PairOfSameType; import org.apache.hadoop.hbase.util.Threads; -import com.google.common.base.Joiner; import com.google.common.collect.Lists; -import java.io.IOException; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.TreeMap; -import java.util.concurrent.atomic.AtomicBoolean; -import java.util.concurrent.atomic.AtomicInteger; -import 
java.util.concurrent.atomic.AtomicReference; - /** * Basic stand-alone testing of HRegion. * Index: src/test/java/org/apache/hadoop/hbase/MiniHBaseCluster.java =================================================================== --- src/test/java/org/apache/hadoop/hbase/MiniHBaseCluster.java (revision 1005894) +++ src/test/java/org/apache/hadoop/hbase/MiniHBaseCluster.java (working copy) @@ -253,9 +253,12 @@ MiniHBaseCluster.MiniHBaseClusterMaster.class, MiniHBaseCluster.MiniHBaseClusterRegionServer.class); hbaseCluster.startup(); - } catch(IOException e) { + } catch (IOException e) { shutdown(); throw e; + } catch (Throwable t) { + shutdown(); + throw new IOException("Shutting down", t); } } Index: src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java =================================================================== --- src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java (revision 1005894) +++ src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java (working copy) @@ -168,19 +168,14 @@ // Make a key that does not have a regioninfo value. 
kvs.add(new KeyValue(HConstants.EMPTY_BYTE_ARRAY, f, HConstants.REGIONINFO_QUALIFIER, f)); - boolean exception = false; - try { - CatalogJanitor.getHRegionInfo(new Result(kvs)); - } catch (Exception ioe) { - exception = true; - } - assertTrue(exception); + HRegionInfo hri = CatalogJanitor.getHRegionInfo(new Result(kvs)); + assertTrue(hri == null); // OK, give it what it expects kvs.clear(); kvs.add(new KeyValue(HConstants.EMPTY_BYTE_ARRAY, f, HConstants.REGIONINFO_QUALIFIER, Writables.getBytes(HRegionInfo.FIRST_META_REGIONINFO))); - HRegionInfo hri = CatalogJanitor.getHRegionInfo(new Result(kvs)); + hri = CatalogJanitor.getHRegionInfo(new Result(kvs)); assertNotNull(hri); assertTrue(hri.equals(HRegionInfo.FIRST_META_REGIONINFO)); } Index: src/test/java/org/apache/hadoop/hbase/master/TestDeadServer.java =================================================================== --- src/test/java/org/apache/hadoop/hbase/master/TestDeadServer.java (revision 1005894) +++ src/test/java/org/apache/hadoop/hbase/master/TestDeadServer.java (working copy) @@ -17,25 +17,22 @@ */ package org.apache.hadoop.hbase.master; -import static org.junit.Assert.*; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; -import java.util.HashSet; -import java.util.Set; - import org.junit.Test; public class TestDeadServer { @Test public void testIsDead() { - Set deadServers = new HashSet(); DeadServer ds = new DeadServer(); - final String hostname123 = "one,123,3"; + final String hostname123 = "127.0.0.1,123,3"; assertFalse(ds.isDeadServer(hostname123, false)); assertFalse(ds.isDeadServer(hostname123, true)); - deadServers.add(hostname123); + ds.add(hostname123); assertTrue(ds.isDeadServer(hostname123, false)); - assertFalse(ds.isDeadServer("one:1", true)); - assertFalse(ds.isDeadServer("one:1234", true)); - assertTrue(ds.isDeadServer("one:123", true)); + assertFalse(ds.isDeadServer("127.0.0.1:1", true)); + assertFalse(ds.isDeadServer("127.0.0.1:1234", true)); + 
assertTrue(ds.isDeadServer("127.0.0.1:123", true)); } } \ No newline at end of file Index: src/test/java/org/apache/hadoop/hbase/master/TestLoadBalancer.java =================================================================== --- src/test/java/org/apache/hadoop/hbase/master/TestLoadBalancer.java (revision 1005894) +++ src/test/java/org/apache/hadoop/hbase/master/TestLoadBalancer.java (working copy) @@ -352,7 +352,7 @@ server.getLoad().setNumberOfRegions(numRegions); return server; } - String host = RandomStringUtils.random(16); + String host = "127.0.0.1"; int port = rand.nextInt(60000); long startCode = rand.nextLong(); HServerInfo hsi = Index: src/test/java/org/apache/hadoop/hbase/client/TestGetRowVersions.java =================================================================== --- src/test/java/org/apache/hadoop/hbase/client/TestGetRowVersions.java (revision 1005894) +++ src/test/java/org/apache/hadoop/hbase/client/TestGetRowVersions.java (working copy) @@ -24,6 +24,7 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseClusterTestCase; import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HTableDescriptor; @@ -43,35 +44,34 @@ private static final byte [] VALUE2 = Bytes.toBytes("value2"); private static final long TIMESTAMP1 = 100L; private static final long TIMESTAMP2 = 200L; - private HBaseAdmin admin = null; - private HTable table = null; @Override public void setUp() throws Exception { super.setUp(); HTableDescriptor desc = new HTableDescriptor(TABLE_NAME); desc.addFamily(new HColumnDescriptor(CONTENTS)); - this.admin = new HBaseAdmin(conf); - this.admin.createTable(desc); - this.table = new HTable(conf, TABLE_NAME); + HBaseAdmin admin = new HBaseAdmin(conf); + admin.createTable(desc); } /** @throws Exception */ public void testGetRowMultipleVersions() throws Exception { Put put = new Put(ROW, TIMESTAMP1, null); 
put.add(CONTENTS, CONTENTS, VALUE1); -    this.table.put(put); +    HTable table = new HTable(new Configuration(conf), TABLE_NAME); +    table.put(put); // Shut down and restart the HBase cluster this.cluster.shutdown(); this.zooKeeperCluster.shutdown(); LOG.debug("HBase cluster shut down -- restarting"); this.hBaseClusterSetup(); -    // Make a new connection - this.table = new HTable(conf, TABLE_NAME); +    // Make a new connection. Use new Configuration instance because old one + // is tied to an HConnection that has since gone stale. + table = new HTable(new Configuration(conf), TABLE_NAME); // Overwrite previous value put = new Put(ROW, TIMESTAMP2, null); put.add(CONTENTS, CONTENTS, VALUE2); - this.table.put(put); + table.put(put); // Now verify that getRow(row, column, latest) works Get get = new Get(ROW); // Should get one version by default Index: src/test/java/org/apache/hadoop/hbase/client/TestScannerTimeout.java =================================================================== --- src/test/java/org/apache/hadoop/hbase/client/TestScannerTimeout.java (revision 1005894) +++ src/test/java/org/apache/hadoop/hbase/client/TestScannerTimeout.java (working copy) @@ -19,6 +19,9 @@ */ package org.apache.hadoop.hbase.client; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.fail; + import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; @@ -26,15 +29,11 @@ import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.regionserver.HRegionServer; import org.apache.hadoop.hbase.util.Bytes; -import org.junit.After; import org.junit.AfterClass; import org.junit.Before; import org.junit.BeforeClass; import org.junit.Test; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.fail; - /** * Test various scanner timeout issues. 
*/ @@ -47,8 +46,7 @@ private final static byte[] SOME_BYTES = Bytes.toBytes("f"); private final static byte[] TABLE_NAME = Bytes.toBytes("t"); private final static int NB_ROWS = 10; - private final static int SCANNER_TIMEOUT = 1000; - private static HTable table; + private final static int SCANNER_TIMEOUT = 10000; /** * @throws java.lang.Exception @@ -58,7 +56,7 @@ Configuration c = TEST_UTIL.getConfiguration(); c.setInt("hbase.regionserver.lease.period", SCANNER_TIMEOUT); TEST_UTIL.startMiniCluster(2); - table = TEST_UTIL.createTable(Bytes.toBytes("t"), SOME_BYTES); + HTable table = TEST_UTIL.createTable(TABLE_NAME, SOME_BYTES); for (int i = 0; i < NB_ROWS; i++) { Put put = new Put(Bytes.toBytes(i)); put.add(SOME_BYTES, SOME_BYTES, SOME_BYTES); @@ -89,6 +87,8 @@ @Test public void test2481() throws Exception { Scan scan = new Scan(); + HTable table = + new HTable(new Configuration(TEST_UTIL.getConfiguration()), TABLE_NAME); ResultScanner r = table.getScanner(scan); int count = 0; try { @@ -131,6 +131,5 @@ Result[] results = r.next(NB_ROWS); assertEquals(NB_ROWS, results.length); r.close(); - } -} +} \ No newline at end of file Index: src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java =================================================================== --- src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java (revision 1005894) +++ src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java (working copy) @@ -122,7 +122,7 @@ byte[] TABLE = Bytes.toBytes("testGetConfiguration"); byte[][] FAMILIES = new byte[][] { Bytes.toBytes("foo") }; Configuration conf = TEST_UTIL.getConfiguration(); - HTable table = TEST_UTIL.createTable(TABLE, FAMILIES); + HTable table = TEST_UTIL.createTable(TABLE, FAMILIES, conf); assertSame(conf, table.getConfiguration()); } Index: src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransaction.java =================================================================== --- 
src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransaction.java (revision 1005894) +++ src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransaction.java (working copy) @@ -184,7 +184,7 @@ assert !this.parent.lock.writeLock().isHeldByCurrentThread() : "Unsafe to hold write lock while performing RPCs"; // If true, no cluster to write meta edits into. - boolean testing = + boolean testing = server == null? true: server.getConfiguration().getBoolean("hbase.testing.nocluster", false); createSplitDir(this.parent.getFilesystem(), this.splitdir); Index: src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java =================================================================== --- src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java (revision 1005894) +++ src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java (working copy) @@ -120,6 +120,7 @@ * @return Null if not found (and logs fact that expected COL_REGIONINFO * was missing) else deserialized {@link HRegionInfo} * @throws IOException + * @throws NullPointerException if null cell */ static HRegionInfo getHRegionInfo(final Result result) throws IOException { Index: src/main/java/org/apache/hadoop/hbase/catalog/CatalogTracker.java =================================================================== --- src/main/java/org/apache/hadoop/hbase/catalog/CatalogTracker.java (revision 1005894) +++ src/main/java/org/apache/hadoop/hbase/catalog/CatalogTracker.java (working copy) @@ -19,6 +19,7 @@ */ package org.apache.hadoop.hbase.catalog; +import java.io.EOFException; import java.io.IOException; import java.net.ConnectException; import java.util.concurrent.atomic.AtomicBoolean; @@ -355,6 +356,17 @@ } else { throw e; } + } catch (IOException ioe) { + Throwable cause = ioe.getCause(); + if (cause != null && cause instanceof EOFException) { + // Catch. Other end disconnected us. 
+ } else if (cause != null && cause.getMessage() != null && + cause.getMessage().toLowerCase().contains("connection reset")) { + // Catch. Connection reset. + } else { + throw ioe; + } + } return protocol; }