From 71a5df694ad2fe8006757e332a29ad5135947e07 Mon Sep 17 00:00:00 2001 From: Nick Dimiduk Date: Thu, 12 Dec 2013 11:17:11 -0800 Subject: [PATCH] HBASE-9117 Remove HTablePool and all HConnection pooling APIs --- .../hadoop/hbase/catalog/CatalogTracker.java | 2 +- .../apache/hadoop/hbase/client/ClientScanner.java | 2 +- .../hadoop/hbase/client/ClientSmallScanner.java | 2 +- .../org/apache/hadoop/hbase/client/HBaseAdmin.java | 4 +- .../apache/hadoop/hbase/client/HConnection.java | 4 +- .../hadoop/hbase/client/HConnectionManager.java | 284 +--------- .../org/apache/hadoop/hbase/client/HTable.java | 36 +- .../org/apache/hadoop/hbase/client/HTablePool.java | 614 --------------------- .../hbase/client/replication/ReplicationAdmin.java | 2 +- .../hadoop/hbase/client/TestAsyncProcess.java | 1 - .../hadoop/hbase/client/TestClientNoCluster.java | 17 +- .../apache/hadoop/hbase/util/ReflectionUtils.java | 19 + .../hadoop/hbase/DistributedHBaseCluster.java | 10 +- .../hbase/test/IntegrationTestBigLinkedList.java | 3 +- hbase-server/pom.xml | 2 +- .../hbase/tmpl/master/MasterStatusTmpl.jamon | 5 - .../apache/hadoop/hbase/master/ServerManager.java | 3 +- .../hadoop/hbase/regionserver/SplitLogWorker.java | 5 - .../hbase/regionserver/wal/HLogSplitter.java | 2 +- .../replication/master/ReplicationLogCleaner.java | 4 - .../regionserver/ReplicationHLogReaderManager.java | 3 - .../regionserver/ReplicationSource.java | 18 +- .../resources/hbase-webapps/master/snapshot.jsp | 3 - .../main/resources/hbase-webapps/master/table.jsp | 3 - .../java/org/apache/hadoop/hbase/HBaseCluster.java | 2 +- .../apache/hadoop/hbase/HBaseTestingUtility.java | 238 +++----- .../org/apache/hadoop/hbase/MiniHBaseCluster.java | 2 - .../hbase/ServerResourceCheckerJUnitListener.java | 42 -- .../org/apache/hadoop/hbase/TestZooKeeper.java | 14 +- .../hadoop/hbase/catalog/TestCatalogTracker.java | 7 +- .../catalog/TestMetaReaderEditorNoCluster.java | 8 +- .../hbase/client/HConnectionImplForTests.java | 47 ++ .../hbase/client/HConnectionTestingUtility.java | 63 +-- .../org/apache/hadoop/hbase/client/TestAdmin.java | 27 +- .../hadoop/hbase/client/TestFromClientSide.java | 13 +- .../hadoop/hbase/client/TestFromClientSide3.java | 8 +- .../hbase/client/TestHBaseAdminNoCluster.java | 14 +- .../org/apache/hadoop/hbase/client/TestHCM.java | 258 +-------- .../apache/hadoop/hbase/client/TestHTablePool.java | 364 ------------ .../hadoop/hbase/client/TestMultiParallel.java | 2 +- .../client/TestRestoreSnapshotFromClient.java | 2 +- .../hadoop/hbase/master/TestCatalogJanitor.java | 6 +- .../regionserver/TestEndToEndSplitTransaction.java | 5 +- .../apache/hadoop/hbase/util/TestHBaseFsck.java | 4 +- .../util/hbck/OfflineMetaRebuildTestCore.java | 2 - .../util/hbck/TestOfflineMetaRebuildBase.java | 1 - hbase-shell/pom.xml | 2 +- .../hbase/thrift2/ThriftHBaseServiceHandler.java | 66 ++- .../thrift2/TestThriftHBaseServiceHandler.java | 11 +- .../TestThriftHBaseServiceHandlerWithLabels.java | 2 +- 50 files changed, 321 insertions(+), 1937 deletions(-) delete mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTablePool.java delete mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/ServerResourceCheckerJUnitListener.java create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/client/HConnectionImplForTests.java delete mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHTablePool.java diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/catalog/CatalogTracker.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/catalog/CatalogTracker.java index 52d8eeb..c912f8f 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/catalog/CatalogTracker.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/catalog/CatalogTracker.java @@ -146,7 +146,7 @@ public class CatalogTracker { public CatalogTracker(final ZooKeeperWatcher zk, final Configuration conf, Abortable abortable) throws IOException { - this(zk, conf, HConnectionManager.getConnection(conf), abortable); + this(zk, conf, HConnectionManager.createConnection(conf), abortable); } public CatalogTracker(final ZooKeeperWatcher zk, final Configuration conf, diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientScanner.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientScanner.java index 5e28859..be3f2c6 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientScanner.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientScanner.java @@ -79,7 +79,7 @@ public class ClientScanner extends AbstractClientScanner { */ public ClientScanner(final Configuration conf, final Scan scan, final TableName tableName) throws IOException { - this(conf, scan, tableName, HConnectionManager.getConnection(conf)); + this(conf, scan, tableName, HConnectionManager.createConnection(conf)); } /** diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientSmallScanner.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientSmallScanner.java index a17be55..6f4ea6e 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientSmallScanner.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientSmallScanner.java @@ -70,7 +70,7 @@ public class ClientSmallScanner extends ClientScanner { */ public ClientSmallScanner(final Configuration conf, final Scan scan, final TableName tableName) throws IOException { - this(conf, scan, tableName, HConnectionManager.getConnection(conf)); + this(conf, scan, tableName, HConnectionManager.createConnection(conf)); } /** diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java index 2e7db2c..4a884cf 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java @@ -180,7 +180,7 @@ public class HBaseAdmin implements Abortable, Closeable { throws MasterNotRunningException, ZooKeeperConnectionException, IOException { // Will not leak connections, as the new implementation of the constructor // does not throw exceptions anymore. - this(HConnectionManager.getConnection(new Configuration(c))); + this(HConnectionManager.createConnection(new Configuration(c))); this.cleanupConnectionOnClose = true; } @@ -2321,7 +2321,7 @@ public class HBaseAdmin implements Abortable, Closeable { HConnectionManager.HConnectionImplementation connection = (HConnectionManager.HConnectionImplementation) - HConnectionManager.getConnection(copyOfConf); + HConnectionManager.createConnection(copyOfConf); try { // Check ZK first. 
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnection.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnection.java index 4a1cc1f..3d0da7e 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnection.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnection.java @@ -231,9 +231,11 @@ public interface HConnection extends Abortable, Closeable { TableName[] listTableNames() throws IOException; /** + * Connects to the master to get the table descriptor. * @param tableName table name * @return table metadata - * @throws IOException if a remote or network exception occurs + * @throws IOException if the connection to master fails or if the table + * is not found. */ HTableDescriptor getHTableDescriptor(TableName tableName) throws IOException; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java index 533fcaa..a34bcd1 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java @@ -26,7 +26,6 @@ import java.net.SocketException; import java.util.ArrayList; import java.util.Date; import java.util.HashSet; -import java.util.LinkedHashMap; import java.util.List; import java.util.Map; import java.util.Map.Entry; @@ -50,7 +49,6 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Chore; -import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HRegionLocation; @@ -146,50 +144,6 @@ import com.google.protobuf.ServiceException; * connection.close(); * } * } - *

<p>The following logic and API will be removed in the future: - * <p>
This class has a static Map of {@link HConnection} instances keyed by - * {@link Configuration}; all invocations of {@link #getConnection(Configuration)} - * that pass the same {@link Configuration} instance will be returned the same - * {@link HConnection} instance (Adding properties to a Configuration - * instance does not change its object identity; for more on how this is done see - * {@link HConnectionKey}). Sharing {@link HConnection} - * instances is usually what you want; all clients of the {@link HConnection} - * instances share the HConnections' cache of Region locations rather than each - * having to discover for itself the location of meta, etc. It makes - * sense for the likes of the pool of HTables class {@link HTablePool}, for - * instance (If concerned that a single {@link HConnection} is insufficient - * for sharing amongst clients in say an heavily-multithreaded environment, - * in practise its not proven to be an issue. Besides, {@link HConnection} is - * implemented atop Hadoop RPC and as of this writing, Hadoop RPC does a - * connection per cluster-member, exclusively). - * - *

<p>But sharing connections makes clean up of {@link HConnection} instances a little awkward. - * Currently, clients cleanup by calling {@link #deleteConnection(Configuration)}. This will - * shutdown the zookeeper connection the HConnection was using and clean up all - * HConnection resources as well as stopping proxies to servers out on the - * cluster. Not running the cleanup will not end the world; it'll - * just stall the closeup some and spew some zookeeper connection failed - * messages into the log. Running the cleanup on a {@link HConnection} that is - * subsequently used by another will cause breakage so be careful running - * cleanup. - * <p>
To create a {@link HConnection} that is not shared by others, you can - * create a new {@link Configuration} instance, pass this new instance to - * {@link #getConnection(Configuration)}, and then when done, close it up by - * doing something like the following: - * <pre>
- * {@code
- * Configuration newConfig = new Configuration(originalConf);
- * HConnection connection = HConnectionManager.getConnection(newConfig);
- * // Use the connection to your hearts' delight and then when done...
- * HConnectionManager.deleteConnection(newConfig, true);
- * }
- * </pre> - * - * <p>
Cleanup used to be done inside in a shutdown hook. On startup we'd - * register a shutdown hook that called {@link #deleteAllConnections()} - * on its way out but the order in which shutdown hooks run is not defined so - * were problematic for clients of HConnection that wanted to register their - * own shutdown hooks so we removed ours though this shifts the onus for - * cleanup to the client. */ @SuppressWarnings("serial") @InterfaceAudience.Public @@ -197,16 +151,8 @@ import com.google.protobuf.ServiceException; public class HConnectionManager { static final Log LOG = LogFactory.getLog(HConnectionManager.class); - public static final String RETRIES_BY_SERVER_KEY = "hbase.client.retries.by.server"; private static final String CLIENT_NONCES_ENABLED_KEY = "hbase.client.nonces.enabled"; - // An LRU Map of HConnectionKey -> HConnection (TableServer). All - // access must be synchronized. This map is not private because tests - // need to be able to tinker with it. - static final Map CONNECTION_INSTANCES; - - public static final int MAX_CACHED_CONNECTION_INSTANCES; - /** * Global nonceGenerator shared per client.Currently there's no reason to limit its scope. * Once it's set under nonceGeneratorCreateLock, it is never unset or changed. @@ -215,23 +161,6 @@ public class HConnectionManager { /** The nonce generator lock. Only taken when creating HConnection, which gets a private copy. */ private static Object nonceGeneratorCreateLock = new Object(); - static { - // We set instances to one more than the value specified for {@link - // HConstants#ZOOKEEPER_MAX_CLIENT_CNXNS}. By default, the zk default max - // connections to the ensemble from the one client is 30, so in that case we - // should run into zk issues before the LRU hit this value of 31. - MAX_CACHED_CONNECTION_INSTANCES = HBaseConfiguration.create().getInt( - HConstants.ZOOKEEPER_MAX_CLIENT_CNXNS, HConstants.DEFAULT_ZOOKEPER_MAX_CLIENT_CNXNS) + 1; - CONNECTION_INSTANCES = new LinkedHashMap( - (int) (MAX_CACHED_CONNECTION_INSTANCES / 0.75F) + 1, 0.75F, true) { - @Override - protected boolean removeEldestEntry( - Map.Entry eldest) { - return size() > MAX_CACHED_CONNECTION_INSTANCES; - } - }; - } - /* * Non-instantiable. */ @@ -254,38 +183,9 @@ public class HConnectionManager { } /** - * Get the connection that goes with the passed conf configuration instance. - * If no current connection exists, method creates a new connection and keys it using - * connection-specific properties from the passed {@link Configuration}; see - * {@link HConnectionKey}. - * @param conf configuration - * @return HConnection object for conf - * @throws ZooKeeperConnectionException - */ - @Deprecated - public static HConnection getConnection(final Configuration conf) - throws IOException { - HConnectionKey connectionKey = new HConnectionKey(conf); - synchronized (CONNECTION_INSTANCES) { - HConnectionImplementation connection = CONNECTION_INSTANCES.get(connectionKey); - if (connection == null) { - connection = (HConnectionImplementation)createConnection(conf, true); - CONNECTION_INSTANCES.put(connectionKey, connection); - } else if (connection.isClosed()) { - HConnectionManager.deleteConnection(connectionKey, true); - connection = (HConnectionImplementation)createConnection(conf, true); - CONNECTION_INSTANCES.put(connectionKey, connection); - } - connection.incCount(); - return connection; - } - } - - /** * Create a new HConnection instance using the passed conf instance. - *

<p>Note: This bypasses the usual HConnection life cycle management done by - * {@link #getConnection(Configuration)}. The caller is responsible for - * calling {@link HConnection#close()} on the returned connection instance. + * <p>
The caller is responsible for calling {@link HConnection#close()} on the + * returned connection instance. * * This is the recommended way to create HConnections. * {@code @@ -301,18 +201,15 @@ public class HConnectionManager { * @return HConnection object for conf * @throws ZooKeeperConnectionException */ - public static HConnection createConnection(Configuration conf) - throws IOException { + public static HConnection createConnection(Configuration conf) throws IOException { UserProvider provider = UserProvider.instantiate(conf); - return createConnection(conf, false, null, provider.getCurrent()); + return createConnection(conf, null, provider.getCurrent()); } /** * Create a new HConnection instance using the passed conf instance. - *

<p>Note: This bypasses the usual HConnection life cycle management done by - * {@link #getConnection(Configuration)}. The caller is responsible for - * calling {@link HConnection#close()} on the returned connection instance. - * This is the recommended way to create HConnections. + * <p>
The caller is responsible for calling {@link HConnection#close()} on the + * returned connection instance. This is the recommended way to create HConnections. * {@code * ExecutorService pool = ...; * HConnection connection = HConnectionManager.createConnection(conf, pool); @@ -330,15 +227,13 @@ public class HConnectionManager { public static HConnection createConnection(Configuration conf, ExecutorService pool) throws IOException { UserProvider provider = UserProvider.instantiate(conf); - return createConnection(conf, false, pool, provider.getCurrent()); + return createConnection(conf, pool, provider.getCurrent()); } /** * Create a new HConnection instance using the passed conf instance. - *

<p>Note: This bypasses the usual HConnection life cycle management done by - * {@link #getConnection(Configuration)}. The caller is responsible for - * calling {@link HConnection#close()} on the returned connection instance. - * This is the recommended way to create HConnections. + * <p>
The caller is responsible for calling {@link HConnection#close()} on the + * returned connection instance. This is the recommended way to create HConnections. * {@code * ExecutorService pool = ...; * HConnection connection = HConnectionManager.createConnection(conf, pool); @@ -355,15 +250,13 @@ public class HConnectionManager { */ public static HConnection createConnection(Configuration conf, User user) throws IOException { - return createConnection(conf, false, null, user); + return createConnection(conf, null, user); } /** * Create a new HConnection instance using the passed conf instance. - *

<p>Note: This bypasses the usual HConnection life cycle management done by - * {@link #getConnection(Configuration)}. The caller is responsible for - * calling {@link HConnection#close()} on the returned connection instance. - * This is the recommended way to create HConnections. + * <p>
The caller is responsible for calling {@link HConnection#close()} on the + * returned connection instance. This is the recommended way to create HConnections. * {@code * ExecutorService pool = ...; * HConnection connection = HConnectionManager.createConnection(conf, pool); @@ -380,24 +273,10 @@ public class HConnectionManager { * @throws ZooKeeperConnectionException */ public static HConnection createConnection(Configuration conf, ExecutorService pool, User user) - throws IOException { - return createConnection(conf, false, pool, user); - } - - @Deprecated - static HConnection createConnection(final Configuration conf, final boolean managed) throws IOException { - UserProvider provider = UserProvider.instantiate(conf); - return createConnection(conf, managed, null, provider.getCurrent()); - } - - @Deprecated - static HConnection createConnection(final Configuration conf, final boolean managed, - final ExecutorService pool, final User user) - throws IOException { String className = conf.get("hbase.client.connection.impl", HConnectionManager.HConnectionImplementation.class.getName()); - Class clazz = null; + Class clazz; try { clazz = Class.forName(className); } catch (ClassNotFoundException e) { @@ -406,95 +285,15 @@ public class HConnectionManager { try { // Default HCM#HCI is not accessible; make it so before invoking. Constructor constructor = - clazz.getDeclaredConstructor(Configuration.class, - boolean.class, ExecutorService.class, User.class); + clazz.getDeclaredConstructor(Configuration.class, ExecutorService.class, User.class); constructor.setAccessible(true); - return (HConnection) constructor.newInstance(conf, managed, pool, user); + return (HConnection) constructor.newInstance(conf, pool, user); } catch (Exception e) { throw new IOException(e); } } /** - * Delete connection information for the instance specified by passed configuration. - * If there are no more references to the designated connection connection, this method will - * then close connection to the zookeeper ensemble and let go of all associated resources. - * - * @param conf configuration whose identity is used to find {@link HConnection} instance. - * @deprecated - */ - public static void deleteConnection(Configuration conf) { - deleteConnection(new HConnectionKey(conf), false); - } - - /** - * Cleanup a known stale connection. - * This will then close connection to the zookeeper ensemble and let go of all resources. - * - * @param connection - * @deprecated - */ - public static void deleteStaleConnection(HConnection connection) { - deleteConnection(connection, true); - } - - /** - * Delete information for all connections. Close or not the connection, depending on the - * staleConnection boolean and the ref count. By default, you should use it with - * staleConnection to true. - * @deprecated - */ - public static void deleteAllConnections(boolean staleConnection) { - synchronized (CONNECTION_INSTANCES) { - Set connectionKeys = new HashSet(); - connectionKeys.addAll(CONNECTION_INSTANCES.keySet()); - for (HConnectionKey connectionKey : connectionKeys) { - deleteConnection(connectionKey, staleConnection); - } - CONNECTION_INSTANCES.clear(); - } - } - - /** - * Delete information for all connections.. - * @deprecated kept for backward compatibility, but the behavior is broken. 
HBASE-8983 - */ - @Deprecated - public static void deleteAllConnections() { - deleteAllConnections(false); - } - - - @Deprecated - private static void deleteConnection(HConnection connection, boolean staleConnection) { - synchronized (CONNECTION_INSTANCES) { - for (Entry e: CONNECTION_INSTANCES.entrySet()) { - if (e.getValue() == connection) { - deleteConnection(e.getKey(), staleConnection); - break; - } - } - } - } - - @Deprecated - private static void deleteConnection(HConnectionKey connectionKey, boolean staleConnection) { - synchronized (CONNECTION_INSTANCES) { - HConnectionImplementation connection = CONNECTION_INSTANCES.get(connectionKey); - if (connection != null) { - connection.decCount(); - if (connection.isZeroReference() || staleConnection) { - CONNECTION_INSTANCES.remove(connectionKey); - connection.internalClose(); - } - } else { - LOG.error("Connection not found in the list, can't delete it "+ - "(connection key=" + connectionKey + "). May be the key was modified?", new Exception()); - } - } - } - - /** * It is provided for unit test cases which verify the behavior of region * location cache prefetch. * @return Number of cached regions for the table. @@ -544,7 +343,7 @@ public class HConnectionManager { return null; } Configuration conf = connectable.conf; - HConnection connection = HConnectionManager.getConnection(conf); + HConnection connection = HConnectionManager.createConnection(conf); boolean connectSucceeded = false; try { T returnValue = connectable.connect(connection); @@ -624,9 +423,6 @@ public class HConnectionManager { private int refCount; - // indicates whether this connection's life cycle is managed (by us) - private boolean managed; - private User user; /** @@ -634,27 +430,15 @@ public class HConnectionManager { */ Registry registry; - HConnectionImplementation(Configuration conf, boolean managed) throws IOException { - this(conf, managed, null, null); - } - /** * constructor * @param conf Configuration object - * @param managed If true, does not do full shutdown on close; i.e. cleanup of connection - * to zk and shutdown of all services; we just close down the resources this connection was - * responsible for and decrement usage counters. It is up to the caller to do the full - * cleanup. It is set when we want have connection sharing going on -- reuse of zk connection, - * and cached region locations, established regionserver connections, etc. When connections - * are shared, we have reference counting going on and will only do full cleanup when no more - * users of an HConnectionImplementation instance. 
*/ - HConnectionImplementation(Configuration conf, boolean managed, - ExecutorService pool, User user) throws IOException { + HConnectionImplementation(Configuration conf, ExecutorService pool, User user) + throws IOException { this(conf); this.user = user; this.batchPool = pool; - this.managed = managed; this.registry = setupRegistry(); retrieveClusterId(); @@ -754,9 +538,6 @@ public class HConnectionManager { @Override public HTableInterface getTable(TableName tableName, ExecutorService pool) throws IOException { - if (managed) { - throw new IOException("The connection has to be unmanaged."); - } return new HTable(tableName, this, pool); } @@ -813,7 +594,7 @@ public class HConnectionManager { private Registry setupRegistry() throws IOException { String registryClass = this.conf.get("hbase.client.registry.impl", ZooKeeperRegistry.class.getName()); - Registry registry = null; + Registry registry; try { registry = (Registry)Class.forName(registryClass).newInstance(); } catch (Throwable t) { @@ -836,7 +617,6 @@ public class HConnectionManager { /** * An identifier that will remain the same for a given connection. - * @return */ public String toString(){ return "hconnection-0x" + Integer.toHexString(hashCode()); @@ -1015,11 +795,7 @@ public class HConnectionManager { @Override public boolean isDeadServer(ServerName sn) { - if (clusterStatusListener == null) { - return false; - } else { - return clusterStatusListener.isDeadServer(sn); - } + return clusterStatusListener != null && clusterStatusListener.isDeadServer(sn); } @Override @@ -1347,8 +1123,6 @@ public class HConnectionManager { /** * Delete a cached location, no matter what it is. Called when we were told to not use cache. - * @param tableName tableName - * @param row */ void forceDeleteCachedLocation(final TableName tableName, final byte [] row) { HRegionLocation rl = null; @@ -2508,7 +2282,8 @@ public class HConnectionManager { return refCount == 0; } - void internalClose() { + @Override + public void close() { if (this.closed) { return; } @@ -2523,19 +2298,6 @@ public class HConnectionManager { } } - @Override - public void close() { - if (managed) { - if (aborted) { - HConnectionManager.deleteStaleConnection(this); - } else { - HConnectionManager.deleteConnection(this, false); - } - } else { - internalClose(); - } - } - /** * Close the connection for good, regardless of what the current value of * {@link #refCount} is. Ideally, {@link #refCount} should be zero at this @@ -2612,7 +2374,7 @@ public class HConnectionManager { @Override public HTableDescriptor[] getHTableDescriptors( List names) throws IOException { - List tableNames = new ArrayList(names.size()); + List tableNames = new ArrayList(names.size()); for(String name : names) { tableNames.add(TableName.valueOf(name)); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java index 2cdcc77..6c6558d 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java @@ -136,11 +136,7 @@ public class HTable implements HTableInterface { private RpcRetryingCallerFactory rpcCallerFactory; /** - * Creates an object to access a HBase table. - * Shares zookeeper connection and other resources with other HTable instances - * created with the same conf instance. Uses already-populated - * region cache if one is available, populated by any other HTable instances - * sharing this conf instance. 
Recommended. + * Creates an object to access a HBase table. Instantiates its own {@link HConnection}. * @param conf Configuration object to use. * @param tableName Name of the table. * @throws IOException if a remote or network exception occurs @@ -151,11 +147,7 @@ public class HTable implements HTableInterface { } /** - * Creates an object to access a HBase table. - * Shares zookeeper connection and other resources with other HTable instances - * created with the same conf instance. Uses already-populated - * region cache if one is available, populated by any other HTable instances - * sharing this conf instance. Recommended. + * Creates an object to access a HBase table. Instantiates its own {@link HConnection}. * @param conf Configuration object to use. * @param tableName Name of the table. * @throws IOException if a remote or network exception occurs @@ -165,14 +157,8 @@ public class HTable implements HTableInterface { this(conf, TableName.valueOf(tableName)); } - - /** - * Creates an object to access a HBase table. - * Shares zookeeper connection and other resources with other HTable instances - * created with the same conf instance. Uses already-populated - * region cache if one is available, populated by any other HTable instances - * sharing this conf instance. Recommended. + * Creates an object to access a HBase table. Instantiates its own {@link HConnection}. * @param conf Configuration object to use. * @param tableName table name pojo * @throws IOException if a remote or network exception occurs @@ -185,7 +171,7 @@ public class HTable implements HTableInterface { this.connection = null; return; } - this.connection = HConnectionManager.getConnection(conf); + this.connection = HConnectionManager.createConnection(conf); this.configuration = conf; this.pool = getDefaultExecutor(conf); @@ -229,11 +215,7 @@ public class HTable implements HTableInterface { } /** - * Creates an object to access a HBase table. - * Shares zookeeper connection and other resources with other HTable instances - * created with the same conf instance. Uses already-populated - * region cache if one is available, populated by any other HTable instances - * sharing this conf instance. + * Creates an object to access a HBase table. Instantiates its own {@link HConnection}. * Use this constructor when the ExecutorService is externally managed. * @param conf Configuration object to use. * @param tableName Name of the table. @@ -246,11 +228,7 @@ public class HTable implements HTableInterface { } /** - * Creates an object to access a HBase table. - * Shares zookeeper connection and other resources with other HTable instances - * created with the same conf instance. Uses already-populated - * region cache if one is available, populated by any other HTable instances - * sharing this conf instance. + * Creates an object to access a HBase table. Instantiates its own {@link HConnection}. * Use this constructor when the ExecutorService is externally managed. * @param conf Configuration object to use. * @param tableName Name of the table. 
@@ -259,7 +237,7 @@ public class HTable implements HTableInterface { */ public HTable(Configuration conf, final TableName tableName, final ExecutorService pool) throws IOException { - this.connection = HConnectionManager.getConnection(conf); + this.connection = HConnectionManager.createConnection(conf); this.configuration = conf; this.pool = pool; this.tableName = tableName; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTablePool.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTablePool.java deleted file mode 100644 index 7818011..0000000 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTablePool.java +++ /dev/null @@ -1,614 +0,0 @@ -/** - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase.client; - -import java.io.Closeable; -import java.io.IOException; -import java.util.Collection; -import java.util.List; -import java.util.Map; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.HBaseConfiguration; -import org.apache.hadoop.hbase.HTableDescriptor; -import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.client.coprocessor.Batch; -import org.apache.hadoop.hbase.client.coprocessor.Batch.Callback; -import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel; -import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.hbase.util.PoolMap; -import org.apache.hadoop.hbase.util.PoolMap.PoolType; - -import com.google.protobuf.Service; -import com.google.protobuf.ServiceException; - -/** - * A simple pool of HTable instances. - * - * Each HTablePool acts as a pool for all tables. To use, instantiate an - * HTablePool and use {@link #getTable(String)} to get an HTable from the pool. - * - * This method is not needed anymore, clients should call - * HTableInterface.close() rather than returning the tables to the pool - * - * Once you are done with it, close your instance of {@link HTableInterface} - * by calling {@link HTableInterface#close()} rather than returning the tables - * to the pool with (deprecated) {@link #putTable(HTableInterface)}. - * - *

<p>
- * A pool can be created with a maxSize which defines the most HTable - * references that will ever be retained for each table. Otherwise the default - * is {@link Integer#MAX_VALUE}. - * - * <p>
- * Pool will manage its own connections to the cluster. See - * {@link HConnectionManager}. - * @deprecated Use {@link HConnection#getTable(String)} instead. - */ -@InterfaceAudience.Private -public class HTablePool implements Closeable { - private final PoolMap tables; - private final int maxSize; - private final PoolType poolType; - private final Configuration config; - private final HTableInterfaceFactory tableFactory; - - /** - * Default Constructor. Default HBaseConfiguration and no limit on pool size. - */ - public HTablePool() { - this(HBaseConfiguration.create(), Integer.MAX_VALUE); - } - - /** - * Constructor to set maximum versions and use the specified configuration. - * - * @param config - * configuration - * @param maxSize - * maximum number of references to keep for each table - */ - public HTablePool(final Configuration config, final int maxSize) { - this(config, maxSize, null, null); - } - - /** - * Constructor to set maximum versions and use the specified configuration and - * table factory. - * - * @param config - * configuration - * @param maxSize - * maximum number of references to keep for each table - * @param tableFactory - * table factory - */ - public HTablePool(final Configuration config, final int maxSize, - final HTableInterfaceFactory tableFactory) { - this(config, maxSize, tableFactory, PoolType.Reusable); - } - - /** - * Constructor to set maximum versions and use the specified configuration and - * pool type. - * - * @param config - * configuration - * @param maxSize - * maximum number of references to keep for each table - * @param poolType - * pool type which is one of {@link PoolType#Reusable} or - * {@link PoolType#ThreadLocal} - */ - public HTablePool(final Configuration config, final int maxSize, - final PoolType poolType) { - this(config, maxSize, null, poolType); - } - - /** - * Constructor to set maximum versions and use the specified configuration, - * table factory and pool type. The HTablePool supports the - * {@link PoolType#Reusable} and {@link PoolType#ThreadLocal}. If the pool - * type is null or not one of those two values, then it will default to - * {@link PoolType#Reusable}. - * - * @param config - * configuration - * @param maxSize - * maximum number of references to keep for each table - * @param tableFactory - * table factory - * @param poolType - * pool type which is one of {@link PoolType#Reusable} or - * {@link PoolType#ThreadLocal} - */ - public HTablePool(final Configuration config, final int maxSize, - final HTableInterfaceFactory tableFactory, PoolType poolType) { - // Make a new configuration instance so I can safely cleanup when - // done with the pool. - this.config = config == null ? HBaseConfiguration.create() : config; - this.maxSize = maxSize; - this.tableFactory = tableFactory == null ? new HTableFactory() - : tableFactory; - if (poolType == null) { - this.poolType = PoolType.Reusable; - } else { - switch (poolType) { - case Reusable: - case ThreadLocal: - this.poolType = poolType; - break; - default: - this.poolType = PoolType.Reusable; - break; - } - } - this.tables = new PoolMap(this.poolType, - this.maxSize); - } - - /** - * Get a reference to the specified table from the pool. - *

- * <p>
- * - * @param tableName - * table name - * @return a reference to the specified table - * @throws RuntimeException - * if there is a problem instantiating the HTable - */ - public HTableInterface getTable(String tableName) { - // call the old getTable implementation renamed to findOrCreateTable - HTableInterface table = findOrCreateTable(tableName); - // return a proxy table so when user closes the proxy, the actual table - // will be returned to the pool - return new PooledHTable(table); - } - - /** - * Get a reference to the specified table from the pool. - *

- * - * Create a new one if one is not available. - * - * @param tableName - * table name - * @return a reference to the specified table - * @throws RuntimeException - * if there is a problem instantiating the HTable - */ - private HTableInterface findOrCreateTable(String tableName) { - HTableInterface table = tables.get(tableName); - if (table == null) { - table = createHTable(tableName); - } - return table; - } - - /** - * Get a reference to the specified table from the pool. - *

- * - * Create a new one if one is not available. - * - * @param tableName - * table name - * @return a reference to the specified table - * @throws RuntimeException - * if there is a problem instantiating the HTable - */ - public HTableInterface getTable(byte[] tableName) { - return getTable(Bytes.toString(tableName)); - } - - /** - * This method is not needed anymore, clients should call - * HTableInterface.close() rather than returning the tables to the pool - * - * @param table - * the proxy table user got from pool - * @deprecated - */ - public void putTable(HTableInterface table) throws IOException { - // we need to be sure nobody puts a proxy implementation in the pool - // but if the client code is not updated - // and it will continue to call putTable() instead of calling close() - // then we need to return the wrapped table to the pool instead of the - // proxy - // table - if (table instanceof PooledHTable) { - returnTable(((PooledHTable) table).getWrappedTable()); - } else { - // normally this should not happen if clients pass back the same - // table - // object they got from the pool - // but if it happens then it's better to reject it - throw new IllegalArgumentException("not a pooled table: " + table); - } - } - - /** - * Puts the specified HTable back into the pool. - *

- * - * If the pool already contains maxSize references to the table, then - * the table instance gets closed after flushing buffered edits. - * - * @param table - * table - */ - private void returnTable(HTableInterface table) throws IOException { - // this is the old putTable method renamed and made private - String tableName = Bytes.toString(table.getTableName()); - if (tables.size(tableName) >= maxSize) { - // release table instance since we're not reusing it - this.tables.removeValue(tableName, table); - this.tableFactory.releaseHTableInterface(table); - return; - } - tables.put(tableName, table); - } - - protected HTableInterface createHTable(String tableName) { - return this.tableFactory.createHTableInterface(config, - Bytes.toBytes(tableName)); - } - - /** - * Closes all the HTable instances , belonging to the given table, in the - * table pool. - *

- * Note: this is a 'shutdown' of the given table pool and different from - * {@link #putTable(HTableInterface)}, that is used to return the table - * instance to the pool for future re-use. - * - * @param tableName - */ - public void closeTablePool(final String tableName) throws IOException { - Collection tables = this.tables.values(tableName); - if (tables != null) { - for (HTableInterface table : tables) { - this.tableFactory.releaseHTableInterface(table); - } - } - this.tables.remove(tableName); - } - - /** - * See {@link #closeTablePool(String)}. - * - * @param tableName - */ - public void closeTablePool(final byte[] tableName) throws IOException { - closeTablePool(Bytes.toString(tableName)); - } - - /** - * Closes all the HTable instances , belonging to all tables in the table - * pool. - *

- * Note: this is a 'shutdown' of all the table pools. - */ - public void close() throws IOException { - for (String tableName : tables.keySet()) { - closeTablePool(tableName); - } - this.tables.clear(); - } - - public int getCurrentPoolSize(String tableName) { - return tables.size(tableName); - } - - /** - * A proxy class that implements HTableInterface.close method to return the - * wrapped table back to the table pool - * - */ - class PooledHTable implements HTableInterface { - - private boolean open = false; - - private HTableInterface table; // actual table implementation - - public PooledHTable(HTableInterface table) { - this.table = table; - this.open = true; - } - - @Override - public byte[] getTableName() { - checkState(); - return table.getTableName(); - } - - @Override - public TableName getName() { - return table.getName(); - } - - @Override - public Configuration getConfiguration() { - checkState(); - return table.getConfiguration(); - } - - @Override - public HTableDescriptor getTableDescriptor() throws IOException { - checkState(); - return table.getTableDescriptor(); - } - - @Override - public boolean exists(Get get) throws IOException { - checkState(); - return table.exists(get); - } - - @Override - public Boolean[] exists(List gets) throws IOException { - checkState(); - return table.exists(gets); - } - - @Override - public void batch(List actions, Object[] results) throws IOException, - InterruptedException { - checkState(); - table.batch(actions, results); - } - - @Override - public Object[] batch(List actions) throws IOException, - InterruptedException { - checkState(); - return table.batch(actions); - } - - @Override - public Result get(Get get) throws IOException { - checkState(); - return table.get(get); - } - - @Override - public Result[] get(List gets) throws IOException { - checkState(); - return table.get(gets); - } - - @Override - @SuppressWarnings("deprecation") - public Result getRowOrBefore(byte[] row, byte[] family) throws IOException { - checkState(); - return table.getRowOrBefore(row, family); - } - - @Override - public ResultScanner getScanner(Scan scan) throws IOException { - checkState(); - return table.getScanner(scan); - } - - @Override - public ResultScanner getScanner(byte[] family) throws IOException { - checkState(); - return table.getScanner(family); - } - - @Override - public ResultScanner getScanner(byte[] family, byte[] qualifier) - throws IOException { - checkState(); - return table.getScanner(family, qualifier); - } - - @Override - public void put(Put put) throws IOException { - checkState(); - table.put(put); - } - - @Override - public void put(List puts) throws IOException { - checkState(); - table.put(puts); - } - - @Override - public boolean checkAndPut(byte[] row, byte[] family, byte[] qualifier, - byte[] value, Put put) throws IOException { - checkState(); - return table.checkAndPut(row, family, qualifier, value, put); - } - - @Override - public void delete(Delete delete) throws IOException { - checkState(); - table.delete(delete); - } - - @Override - public void delete(List deletes) throws IOException { - checkState(); - table.delete(deletes); - } - - @Override - public boolean checkAndDelete(byte[] row, byte[] family, byte[] qualifier, - byte[] value, Delete delete) throws IOException { - checkState(); - return table.checkAndDelete(row, family, qualifier, value, delete); - } - - @Override - public Result increment(Increment increment) throws IOException { - checkState(); - return table.increment(increment); - } - - @Override - 
public long incrementColumnValue(byte[] row, byte[] family, - byte[] qualifier, long amount) throws IOException { - checkState(); - return table.incrementColumnValue(row, family, qualifier, amount); - } - - @Override - public long incrementColumnValue(byte[] row, byte[] family, - byte[] qualifier, long amount, Durability durability) throws IOException { - checkState(); - return table.incrementColumnValue(row, family, qualifier, amount, - durability); - } - - @Override - public boolean isAutoFlush() { - checkState(); - return table.isAutoFlush(); - } - - @Override - public void flushCommits() throws IOException { - checkState(); - table.flushCommits(); - } - - /** - * Returns the actual table back to the pool - * - * @throws IOException - */ - public void close() throws IOException { - checkState(); - open = false; - returnTable(table); - } - - @Override - public CoprocessorRpcChannel coprocessorService(byte[] row) { - checkState(); - return table.coprocessorService(row); - } - - @Override - public Map coprocessorService(Class service, - byte[] startKey, byte[] endKey, Batch.Call callable) - throws ServiceException, Throwable { - checkState(); - return table.coprocessorService(service, startKey, endKey, callable); - } - - @Override - public void coprocessorService(Class service, - byte[] startKey, byte[] endKey, Batch.Call callable, Callback callback) - throws ServiceException, Throwable { - checkState(); - table.coprocessorService(service, startKey, endKey, callable, callback); - } - - @Override - public String toString() { - return "PooledHTable{" + ", table=" + table + '}'; - } - - /** - * Expose the wrapped HTable to tests in the same package - * - * @return wrapped htable - */ - HTableInterface getWrappedTable() { - return table; - } - - @Override - public void batchCallback(List actions, - Object[] results, Callback callback) throws IOException, - InterruptedException { - checkState(); - table.batchCallback(actions, results, callback); - } - - @Override - public Object[] batchCallback(List actions, - Callback callback) throws IOException, InterruptedException { - checkState(); - return table.batchCallback(actions, callback); - } - - @Override - public void mutateRow(RowMutations rm) throws IOException { - checkState(); - table.mutateRow(rm); - } - - @Override - public Result append(Append append) throws IOException { - checkState(); - return table.append(append); - } - - @Override - public void setAutoFlush(boolean autoFlush) { - checkState(); - table.setAutoFlush(autoFlush, autoFlush); - } - - @Override - public void setAutoFlush(boolean autoFlush, boolean clearBufferOnFail) { - checkState(); - table.setAutoFlush(autoFlush, clearBufferOnFail); - } - - @Override - public void setAutoFlushTo(boolean autoFlush) { - table.setAutoFlushTo(autoFlush); - } - - @Override - public long getWriteBufferSize() { - checkState(); - return table.getWriteBufferSize(); - } - - @Override - public void setWriteBufferSize(long writeBufferSize) throws IOException { - checkState(); - table.setWriteBufferSize(writeBufferSize); - } - - boolean isOpen() { - return open; - } - - private void checkState() { - if (!isOpen()) { - throw new IllegalStateException("Table=" + new String(table.getTableName()) + " already closed"); - } - } - - @Override - public long incrementColumnValue(byte[] row, byte[] family, - byte[] qualifier, long amount, boolean writeToWAL) throws IOException { - return table.incrementColumnValue(row, family, qualifier, amount, writeToWAL); - } - } -} diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationAdmin.java index d2d10f8..2093b7e 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationAdmin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationAdmin.java @@ -95,7 +95,7 @@ public class ReplicationAdmin implements Closeable { throw new RuntimeException("hbase.replication isn't true, please " + "enable it in order to use replication"); } - this.connection = HConnectionManager.getConnection(conf); + this.connection = HConnectionManager.createConnection(conf); ZooKeeperWatcher zkw = createZooKeeperWatcher(); try { this.replicationPeers = ReplicationFactory.getReplicationPeers(zkw, conf, this.connection); diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncProcess.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncProcess.java index 17fb92a..a6e9ec0 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncProcess.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncProcess.java @@ -679,7 +679,6 @@ public class TestAsyncProcess { public void testErrorsServers() throws IOException { HTable ht = new HTable(); Configuration configuration = new Configuration(conf); - configuration.setBoolean(HConnectionManager.RETRIES_BY_SERVER_KEY, true); configuration.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 3); // set default writeBufferSize ht.setWriteBufferSize(configuration.getLong("hbase.client.write.buffer", 2097152)); diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientNoCluster.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientNoCluster.java index dcdb8fd..c9de11f 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientNoCluster.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientNoCluster.java @@ -258,8 +258,8 @@ public class TestClientNoCluster extends Configured implements Tool { final ClientService.BlockingInterface stub; ScanOpenNextThenExceptionThenRecoverConnection(Configuration conf, - boolean managed, ExecutorService pool) throws IOException { - super(conf, managed); + ExecutorService pool) throws IOException { + super(conf); // Mock up my stub so open scanner returns a scanner id and then on next, we throw // exceptions for three times and then after that, we return no more to scan. this.stub = Mockito.mock(ClientService.BlockingInterface.class); @@ -289,9 +289,9 @@ public class TestClientNoCluster extends Configured implements Tool { extends HConnectionManager.HConnectionImplementation { final ClientService.BlockingInterface stub; - RegionServerStoppedOnScannerOpenConnection(Configuration conf, boolean managed, + RegionServerStoppedOnScannerOpenConnection(Configuration conf, ExecutorService pool, User user) throws IOException { - super(conf, managed); + super(conf, pool, user); // Mock up my stub so open scanner returns a scanner id and then on next, we throw // exceptions for three times and then after that, we return no more to scan. 
this.stub = Mockito.mock(ClientService.BlockingInterface.class); @@ -321,9 +321,9 @@ public class TestClientNoCluster extends Configured implements Tool { extends HConnectionManager.HConnectionImplementation { final ClientService.BlockingInterface stub; - RpcTimeoutConnection(Configuration conf, boolean managed, ExecutorService pool, User user) + RpcTimeoutConnection(Configuration conf, ExecutorService pool, User user) throws IOException { - super(conf, managed); + super(conf, pool, user); // Mock up my stub so an exists call -- which turns into a get -- throws an exception this.stub = Mockito.mock(ClientService.BlockingInterface.class); try { @@ -356,10 +356,9 @@ public class TestClientNoCluster extends Configured implements Tool { final AtomicLong sequenceids = new AtomicLong(0); private final Configuration conf; - ManyServersManyRegionsConnection(Configuration conf, boolean managed, - ExecutorService pool, User user) + ManyServersManyRegionsConnection(Configuration conf, ExecutorService pool, User user) throws IOException { - super(conf, managed, pool, user); + super(conf, pool, user); int serverCount = conf.getInt("hbase.test.servers", 10); this.serversByClient = new HashMap(serverCount); diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ReflectionUtils.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ReflectionUtils.java index cda5424..8361375 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ReflectionUtils.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ReflectionUtils.java @@ -18,7 +18,8 @@ */ package org.apache.hadoop.hbase.util; +import java.lang.reflect.Field; import java.lang.reflect.InvocationTargetException; import org.apache.hadoop.classification.InterfaceAudience; @@ -47,4 +48,21 @@ public class ReflectionUtils { "Unable to find suitable constructor for class " + className, e); } } + + /** + * Like {@link Class#getDeclaredField(String)}, but walks the inheritance graph.
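+ * A minimal usage sketch (illustrative only; the connection object and the
+ * "refCount" field name are assumptions for the example, not part of this patch):
+ * <pre>
+ * {@code
+ * Field f = ReflectionUtils.getAllDeclaredField(connection.getClass(), "refCount");
+ * f.setAccessible(true);
+ * int refCount = (Integer) f.get(connection);
+ * }
+ * </pre>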
+ */ + public static Field getAllDeclaredField(Class clazz, String fieldName) + throws NoSuchFieldException { + try { + return clazz.getDeclaredField(fieldName); + } catch (NoSuchFieldException e) { + Class parent = clazz.getSuperclass(); + if (parent != null) { + return getAllDeclaredField(parent, fieldName); + } else { + throw e; + } + } + } } diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/DistributedHBaseCluster.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/DistributedHBaseCluster.java index f1e0b16..13612a5 100644 --- a/hbase-it/src/test/java/org/apache/hadoop/hbase/DistributedHBaseCluster.java +++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/DistributedHBaseCluster.java @@ -45,6 +45,7 @@ import com.google.common.collect.Sets; @InterfaceAudience.Private public class DistributedHBaseCluster extends HBaseCluster { + private HConnection conn; private HBaseAdmin admin; private ClusterManager clusterManager; @@ -53,7 +54,8 @@ public class DistributedHBaseCluster extends HBaseCluster { throws IOException { super(conf); this.clusterManager = clusterManager; - this.admin = new HBaseAdmin(conf); + this.conn = HConnectionManager.createConnection(conf); + this.admin = new HBaseAdmin(conn); this.initialClusterStatus = getClusterStatus(); } @@ -81,9 +83,8 @@ public class DistributedHBaseCluster extends HBaseCluster { @Override public void close() throws IOException { - if (this.admin != null) { - admin.close(); - } + if (this.admin != null) admin.close(); + if (this.conn != null) conn.close(); } @Override @@ -138,7 +139,6 @@ public class DistributedHBaseCluster extends HBaseCluster { @Override public MasterService.BlockingInterface getMaster() throws IOException { - HConnection conn = HConnectionManager.getConnection(conf); return conn.getMaster(); } diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedList.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedList.java index fb1f3e4..2275dc5 100644 --- a/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedList.java +++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedList.java @@ -705,7 +705,7 @@ public class IntegrationTestBigLinkedList extends IntegrationTestBase { if (!success) { Configuration conf = job.getConfiguration(); - HConnection conn = HConnectionManager.getConnection(conf); + HConnection conn = HConnectionManager.createConnection(conf); TableName tableName = getTableName(conf); CounterGroup g = counters.getGroup("undef"); Iterator it = g.iterator(); @@ -723,6 +723,7 @@ public class IntegrationTestBigLinkedList extends IntegrationTestBase { HRegionLocation loc = conn.relocateRegion(tableName, key); LOG.error("unreferred row " + keyString + ", " + loc); } + conn.close(); } return success; } diff --git a/hbase-server/pom.xml b/hbase-server/pom.xml index f5723b2..b36606d 100644 --- a/hbase-server/pom.xml +++ b/hbase-server/pom.xml @@ -207,7 +207,7 @@ listener - org.apache.hadoop.hbase.ServerResourceCheckerJUnitListener + org.apache.hadoop.hbase.ResourceCheckerJUnitListener diff --git a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon index fa3b76b..5449ce3 100644 --- a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon +++ b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon @@ -43,7 +43,6 @@ 
org.apache.hadoop.hbase.NamespaceDescriptor; org.apache.hadoop.hbase.ServerLoad; org.apache.hadoop.hbase.ServerName; org.apache.hadoop.hbase.client.HBaseAdmin; -org.apache.hadoop.hbase.client.HConnectionManager; org.apache.hadoop.hbase.HTableDescriptor; org.apache.hadoop.hbase.HBaseConfiguration; org.apache.hadoop.hbase.TableName; @@ -401,7 +400,6 @@ AssignmentManager assignmentManager = master.getAssignmentManager(); ServerName [] deadServerNames = deadServers.toArray(new ServerName[deadServers.size()]); Arrays.sort(deadServerNames); for (ServerName deadServerName: deadServerNames) { - int infoPort = master.getConfiguration().getInt("hbase.regionserver.info.port", 60030); @@ -418,7 +416,4 @@ AssignmentManager assignmentManager = master.getAssignmentManager(); -<%java> - HConnectionManager.deleteConnection(admin.getConfiguration()); - diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java index 8499520..3771025 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java @@ -189,7 +189,7 @@ public class ServerManager { Configuration c = master.getConfiguration(); maxSkew = c.getLong("hbase.master.maxclockskew", 30000); warningSkew = c.getLong("hbase.master.warningclockskew", 10000); - this.connection = connect ? HConnectionManager.getConnection(c) : null; + this.connection = connect ? HConnectionManager.createConnection(c) : null; } /** @@ -273,7 +273,6 @@ public class ServerManager { * Check is a server of same host and port already exists, * if not, or the existed one got a smaller start code, record it. * - * @param sn the server to check and record * @param sl the server load on the server * @return true if the server is recorded, otherwise, false */ diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitLogWorker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitLogWorker.java index 5ae8642..479017c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitLogWorker.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitLogWorker.java @@ -40,7 +40,6 @@ import org.apache.hadoop.hbase.NotServingRegionException; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.SplitLogCounters; import org.apache.hadoop.hbase.SplitLogTask; -import org.apache.hadoop.hbase.client.HConnectionManager; import org.apache.hadoop.hbase.client.RetriesExhaustedException; import org.apache.hadoop.hbase.exceptions.DeserializationException; import org.apache.hadoop.hbase.executor.ExecutorService; @@ -177,10 +176,6 @@ public class SplitLogWorker extends ZooKeeperListener implements Runnable { boolean distributedLogReplay = this.conf.getBoolean( HConstants.DISTRIBUTED_LOG_REPLAY_KEY, HConstants.DEFAULT_DISTRIBUTED_LOG_REPLAY_CONFIG); - if (distributedLogReplay) { - // initialize a new connection for splitlogworker configuration - HConnectionManager.getConnection(conf); - } // wait for master to create the splitLogZnode int res = -1; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLogSplitter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLogSplitter.java index ddd928c..68ee76c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLogSplitter.java +++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLogSplitter.java @@ -1793,7 +1793,7 @@ public class HLogSplitter { synchronized (this.tableNameToHConnectionMap) { hconn = this.tableNameToHConnectionMap.get(tableName); if (hconn == null) { - hconn = HConnectionManager.getConnection(conf); + hconn = HConnectionManager.createConnection(conf); this.tableNameToHConnectionMap.put(tableName, hconn); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/master/ReplicationLogCleaner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/master/ReplicationLogCleaner.java index cea5750..838d7f9 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/master/ReplicationLogCleaner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/master/ReplicationLogCleaner.java @@ -25,13 +25,11 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.hbase.Abortable; import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.client.HConnectionManager; import org.apache.hadoop.hbase.master.cleaner.BaseLogCleanerDelegate; import org.apache.hadoop.hbase.replication.ReplicationException; import org.apache.hadoop.hbase.replication.ReplicationFactory; import org.apache.hadoop.hbase.replication.ReplicationQueuesClient; import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; -import org.apache.zookeeper.KeeperException; import java.io.IOException; import java.util.List; @@ -137,8 +135,6 @@ public class ReplicationLogCleaner extends BaseLogCleanerDelegate implements Abo LOG.info("Stopping " + this.zkw); this.zkw.close(); } - // Not sure why we're deleting a connection that we never acquired or used - HConnectionManager.deleteConnection(this.getConf()); } @Override diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationHLogReaderManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationHLogReaderManager.java index 6f8b7f5..2ad5f3e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationHLogReaderManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationHLogReaderManager.java @@ -46,8 +46,6 @@ public class ReplicationHLogReaderManager { /** * Creates the helper but doesn't open any file * Use setInitialPosition after using the constructor if some content needs to be skipped - * @param fs - * @param conf */ public ReplicationHLogReaderManager(FileSystem fs, Configuration conf) { this.fs = fs; @@ -56,7 +54,6 @@ public class ReplicationHLogReaderManager { /** * Opens the file at the current position - * @param path * @return an HLog reader. 
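HLogSplitter's tableNameToHConnectionMap above now caches private connections rather than shared ones, which shifts the close obligation onto whoever owns the map. A sketch of that pattern as a standalone class; the class and method names here are hypothetical, not part of the patch:

    import java.io.IOException;
    import java.util.HashMap;
    import java.util.Map;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.HConnection;
    import org.apache.hadoop.hbase.client.HConnectionManager;

    class PerTableConnectionCache {
      private final Map<TableName, HConnection> cache = new HashMap<TableName, HConnection>();

      // Lazily create one connection per table, as HLogSplitter does.
      synchronized HConnection get(Configuration conf, TableName tn) throws IOException {
        HConnection c = cache.get(tn);
        if (c == null) {
          c = HConnectionManager.createConnection(conf); // private instance, owned by this cache
          cache.put(tn, c);
        }
        return c;
      }

      // With no global registry to fall back on, shutdown must close every cached value.
      synchronized void closeAll() throws IOException {
        for (HConnection c : cache.values()) c.close();
        cache.clear();
      }
    }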
* @throws IOException */ diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java index 03bc742e..779a60b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java @@ -79,7 +79,6 @@ public class ReplicationSource extends Thread private HConnection conn; private ReplicationQueues replicationQueues; private ReplicationPeers replicationPeers; - private Configuration conf; private ReplicationQueueInfo replicationQueueInfo; // id of the peer cluster this source replicates to private String peerId; @@ -144,13 +143,12 @@ public class ReplicationSource extends Thread final ReplicationPeers replicationPeers, final Stoppable stopper, final String peerClusterZnode, final UUID clusterId) throws IOException { this.stopper = stopper; - this.conf = conf; this.replicationQueueSizeCapacity = - this.conf.getLong("replication.source.size.capacity", 1024*1024*64); + conf.getLong("replication.source.size.capacity", 1024*1024*64); this.replicationQueueNbCapacity = - this.conf.getInt("replication.source.nb.capacity", 25000); - this.maxRetriesMultiplier = this.conf.getInt("replication.source.maxretriesmultiplier", 10); - this.socketTimeoutMultiplier = this.conf.getInt("replication.source.socketTimeoutMultiplier", + conf.getInt("replication.source.nb.capacity", 25000); + this.maxRetriesMultiplier = conf.getInt("replication.source.maxretriesmultiplier", 10); + this.socketTimeoutMultiplier = conf.getInt("replication.source.socketTimeoutMultiplier", maxRetriesMultiplier * maxRetriesMultiplier); this.queue = new PriorityBlockingQueue( @@ -159,15 +157,15 @@ public class ReplicationSource extends Thread // TODO: This connection is replication specific or we should make it particular to // replication and make replication specific settings such as compression or codec to use // passing Cells. 
- this.conn = HConnectionManager.getConnection(conf); + this.conn = HConnectionManager.createConnection(conf); this.replicationQueues = replicationQueues; this.replicationPeers = replicationPeers; this.manager = manager; this.sleepForRetries = - this.conf.getLong("replication.source.sleepforretries", 1000); + conf.getLong("replication.source.sleepforretries", 1000); this.fs = fs; this.metrics = new MetricsSource(peerClusterZnode); - this.repLogReader = new ReplicationHLogReaderManager(this.fs, this.conf); + this.repLogReader = new ReplicationHLogReaderManager(this.fs, conf); this.clusterId = clusterId; this.peerClusterZnode = peerClusterZnode; @@ -175,7 +173,7 @@ public class ReplicationSource extends Thread // ReplicationQueueInfo parses the peerId out of the znode for us this.peerId = this.replicationQueueInfo.getPeerId(); this.replicationSinkMgr = new ReplicationSinkManager(conn, peerId, replicationPeers, conf); - this.logQueueWarnThreshold = this.conf.getInt("replication.source.log.queue.warn", 2); + this.logQueueWarnThreshold = conf.getInt("replication.source.log.queue.warn", 2); } diff --git a/hbase-server/src/main/resources/hbase-webapps/master/snapshot.jsp b/hbase-server/src/main/resources/hbase-webapps/master/snapshot.jsp index be9c750..d2449fc 100644 --- a/hbase-server/src/main/resources/hbase-webapps/master/snapshot.jsp +++ b/hbase-server/src/main/resources/hbase-webapps/master/snapshot.jsp @@ -21,7 +21,6 @@ import="java.util.Date" import="org.apache.hadoop.conf.Configuration" import="org.apache.hadoop.hbase.client.HBaseAdmin" - import="org.apache.hadoop.hbase.client.HConnectionManager" import="org.apache.hadoop.hbase.master.HMaster" import="org.apache.hadoop.hbase.snapshot.SnapshotInfo" import="org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription" @@ -189,8 +188,6 @@ <% } %> <% } // end else - -HConnectionManager.deleteConnection(hbadmin.getConfiguration()); %> diff --git a/hbase-server/src/main/resources/hbase-webapps/master/table.jsp b/hbase-server/src/main/resources/hbase-webapps/master/table.jsp index e0f5c6a..c195d3a 100644 --- a/hbase-server/src/main/resources/hbase-webapps/master/table.jsp +++ b/hbase-server/src/main/resources/hbase-webapps/master/table.jsp @@ -24,7 +24,6 @@ import="org.apache.hadoop.conf.Configuration" import="org.apache.hadoop.hbase.client.HTable" import="org.apache.hadoop.hbase.client.HBaseAdmin" - import="org.apache.hadoop.hbase.client.HConnectionManager" import="org.apache.hadoop.hbase.HRegionInfo" import="org.apache.hadoop.hbase.ServerName" import="org.apache.hadoop.hbase.ServerLoad" @@ -317,8 +316,6 @@ ex.printStackTrace(System.err); } } // end else - -HConnectionManager.deleteConnection(hbadmin.getConfiguration()); %> diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseCluster.java index 1f6d666..45414ff 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseCluster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseCluster.java @@ -95,7 +95,7 @@ public abstract class HBaseCluster implements Closeable, Configurable { } /** - * Returns an {@link MasterAdminService.BlockingInterface} to the active master + * Returns an {@link MasterService.BlockingInterface} to the active master */ public abstract MasterService.BlockingInterface getMaster() throws IOException; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java index 99a8f02..4abb7c8 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java @@ -59,6 +59,8 @@ import org.apache.hadoop.hbase.client.Durability; import org.apache.hadoop.hbase.client.Get; import org.apache.hadoop.hbase.client.HBaseAdmin; import org.apache.hadoop.hbase.client.HConnection; +import org.apache.hadoop.hbase.client.HConnectionImplForTests; +import org.apache.hadoop.hbase.client.HConnectionManager; import org.apache.hadoop.hbase.client.HTable; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Result; @@ -259,22 +261,6 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { return htu; } - /** - * Returns this classes's instance of {@link Configuration}. Be careful how - * you use the returned Configuration since {@link HConnection} instances - * can be shared. The Map of HConnections is keyed by the Configuration. If - * say, a Connection was being used against a cluster that had been shutdown, - * see {@link #shutdownMiniCluster()}, then the Connection will no longer - * be wholesome. Rather than use the return direct, its usually best to - * make a copy and use that. Do - * Configuration c = new Configuration(INSTANCE.getConfiguration()); - * @return Instance of Configuration. - */ - @Override - public Configuration getConfiguration() { - return super.getConfiguration(); - } - public void setHBaseCluster(HBaseCluster hbaseCluster) { this.hbaseCluster = hbaseCluster; } @@ -962,6 +948,11 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { hbaseAdmin = null; } + if (conn != null) { + ((HConnectionImplForTests) conn).close0(); + conn = null; + } + if (zooKeeperWatcher != null) { zooKeeperWatcher.close(); zooKeeperWatcher = null; @@ -1040,10 +1031,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { /** * Create a table. - * @param tableName - * @param family * @return An HTable instance for the created table. - * @throws IOException */ public HTable createTable(String tableName, String family) throws IOException{ @@ -1052,10 +1040,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { /** * Create a table. - * @param tableName - * @param family * @return An HTable instance for the created table. - * @throws IOException */ public HTable createTable(byte[] tableName, byte[] family) throws IOException{ @@ -1064,10 +1049,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { /** * Create a table. - * @param tableName - * @param families * @return An HTable instance for the created table. - * @throws IOException */ public HTable createTable(TableName tableName, String[] families) throws IOException { @@ -1080,10 +1062,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { /** * Create a table. - * @param tableName - * @param family * @return An HTable instance for the created table. - * @throws IOException */ public HTable createTable(TableName tableName, byte[] family) throws IOException{ @@ -1093,28 +1072,43 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { /** * Create a table. - * @param tableName - * @param families * @return An HTable instance for the created table. 
- * @throws IOException */ public HTable createTable(byte[] tableName, byte[][] families) throws IOException { - return createTable(tableName, families, - new Configuration(getConfiguration())); + return createTable(TableName.valueOf(tableName), families); } /** * Create a table. - * @param tableName - * @param families * @return An HTable instance for the created table. - * @throws IOException */ public HTable createTable(TableName tableName, byte[][] families) throws IOException { - return createTable(tableName, families, - new Configuration(getConfiguration())); + HTableDescriptor desc = new HTableDescriptor(tableName); + for(byte[] family : families) { + HColumnDescriptor hcd = new HColumnDescriptor(family); + // Disable blooms (they are on by default as of 0.95) but we disable them here because + // tests have hard coded counts of what to expect in block cache, etc., and blooms being + // on is interfering. + hcd.setBloomFilterType(BloomType.NONE); + desc.addFamily(hcd); + } + getHBaseAdmin().createTable(desc); + return (HTable) getConnection().getTable(tableName); + } + + public HTable createTable(TableName tableName, byte[][] families, byte[][] splitRows) + throws IOException { + HTableDescriptor desc = new HTableDescriptor(tableName); + for(byte[] family:families) { + HColumnDescriptor hcd = new HColumnDescriptor(family); + desc.addFamily(hcd); + } + getHBaseAdmin().createTable(desc, splitRows); + // HBaseAdmin only waits for regions to appear in hbase:meta we should wait until they are assigned + waitUntilAllRegionsAssigned(tableName); + return (HTable) getConnection().getTable(tableName); } public HTable createTable(byte[] tableName, byte[][] families, @@ -1141,19 +1135,15 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { getHBaseAdmin().createTable(desc, startKey, endKey, numRegions); // HBaseAdmin only waits for regions to appear in hbase:meta we should wait until they are assigned waitUntilAllRegionsAssigned(tableName); - return new HTable(getConfiguration(), tableName); + return (HTable) getConnection().getTable(tableName); } /** * Create a table. - * @param htd - * @param families - * @param c Configuration to use * @return An HTable instance for the created table. - * @throws IOException */ - public HTable createTable(HTableDescriptor htd, byte[][] families, Configuration c) - throws IOException { + public HTable createTable(HTableDescriptor htd, byte[][] families) + throws IOException { for(byte[] family : families) { HColumnDescriptor hcd = new HColumnDescriptor(family); // Disable blooms (they are on by default as of 0.95) but we disable them here because @@ -1165,56 +1155,48 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { getHBaseAdmin().createTable(htd); // HBaseAdmin only waits for regions to appear in hbase:meta we should wait until they are assigned waitUntilAllRegionsAssigned(htd.getTableName()); - return new HTable(c, htd.getTableName()); + return (HTable) getConnection().getTable(htd.getTableName()); } /** * Create a table. - * @param tableName - * @param families - * @param c Configuration to use * @return An HTable instance for the created table. - * @throws IOException + * @deprecated replaced by {@link #createTable(HTableDescriptor, byte[][])} */ + @Deprecated + public HTable createTable(HTableDescriptor htd, byte[][] families, Configuration c) + throws IOException { + return createTable(htd, families); + } + + /** + * Create a table. + * @return An HTable instance for the created table. 
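The rewritten createTable(TableName, byte[][]) above spells out the whole path a test table now takes: build a descriptor, disable blooms, create through the admin, then hand back a table from the shared connection. The same sequence works outside the utility; this sketch assumes a reachable cluster, an already-open HConnection conn, and illustrative table and family names:

    HBaseAdmin admin = new HBaseAdmin(conn);
    HTableDescriptor desc = new HTableDescriptor(TableName.valueOf("demo"));
    HColumnDescriptor hcd = new HColumnDescriptor(Bytes.toBytes("f1"));
    hcd.setBloomFilterType(BloomType.NONE); // keep tests' block-cache expectations deterministic
    desc.addFamily(hcd);
    admin.createTable(desc);
    HTableInterface table = conn.getTable(TableName.valueOf("demo")); // served by the shared connection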
+ * @deprecated replaced by {@link #createTable(byte[], byte[][])} + */ + @Deprecated public HTable createTable(TableName tableName, byte[][] families, final Configuration c) throws IOException { - return createTable(new HTableDescriptor(tableName), families, c); + return createTable(new HTableDescriptor(tableName), families); } /** * Create a table. - * @param tableName - * @param families - * @param c Configuration to use * @return An HTable instance for the created table. - * @throws IOException + * @deprecated replaced by {@link #createTable(byte[], byte[][])} */ + @Deprecated public HTable createTable(byte[] tableName, byte[][] families, final Configuration c) throws IOException { - HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName)); - for(byte[] family : families) { - HColumnDescriptor hcd = new HColumnDescriptor(family); - // Disable blooms (they are on by default as of 0.95) but we disable them here because - // tests have hard coded counts of what to expect in block cache, etc., and blooms being - // on is interfering. - hcd.setBloomFilterType(BloomType.NONE); - desc.addFamily(hcd); - } - getHBaseAdmin().createTable(desc); - return new HTable(c, tableName); + return createTable(tableName, families); } /** * Create a table. - * @param tableName - * @param families - * @param c Configuration to use - * @param numVersions * @return An HTable instance for the created table. - * @throws IOException + * @deprecated Replaced by {@link #createTable(TableName, byte[][], int)} */ + @Deprecated public HTable createTable(TableName tableName, byte[][] families, final Configuration c, int numVersions) throws IOException { @@ -1232,46 +1214,28 @@ /** * Create a table. - * @param tableName - * @param families - * @param c Configuration to use - * @param numVersions * @return An HTable instance for the created table. - * @throws IOException + * @deprecated Replaced by {@link #createTable(byte[], byte[][], int)} */ + @Deprecated public HTable createTable(byte[] tableName, byte[][] families, final Configuration c, int numVersions) throws IOException { - HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName)); - for(byte[] family : families) { - HColumnDescriptor hcd = new HColumnDescriptor(family) - .setMaxVersions(numVersions); - desc.addFamily(hcd); - } - getHBaseAdmin().createTable(desc); - return new HTable(c, tableName); + return createTable(tableName, families, numVersions); } /** * Create a table. - * @param tableName - * @param family - * @param numVersions * @return An HTable instance for the created table. - * @throws IOException */ public HTable createTable(byte[] tableName, byte[] family, int numVersions) throws IOException { - return createTable(tableName, new byte[][]{family}, numVersions); + return createTable(TableName.valueOf(tableName), new byte[][]{family}, numVersions); } /** * Create a table. - * @param tableName - * @param family - * @param numVersions * @return An HTable instance for the created table. - * @throws IOException */ public HTable createTable(TableName tableName, byte[] family, int numVersions) throws IOException { @@ -1280,25 +1244,23 @@ /** * Create a table. - * @param tableName - * @param families - * @param numVersions * @return An HTable instance for the created table.
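For callers of the overloads deprecated above, the migration is mechanical: drop the Configuration argument; the table comes back bound to the utility's shared connection instead of a throwaway one. Roughly, with name and families as whatever the test already uses:

    // before: a fresh Configuration copy per call, and an HTable with its own lifecycle
    // HTable t = TEST_UTIL.createTable(name, families, new Configuration(TEST_UTIL.getConfiguration()));

    // after: the utility routes the table through its shared test connection
    HTable t = TEST_UTIL.createTable(name, families);
    t.close(); // closes the table; the shared connection stays up for other tests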
- * @throws IOException */ - public HTable createTable(byte[] tableName, byte[][] families, - int numVersions) + public HTable createTable(byte[] tableName, byte[][] families, int numVersions) throws IOException { - return createTable(TableName.valueOf(tableName), families, numVersions); + HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName)); + for(byte[] family : families) { + HColumnDescriptor hcd = new HColumnDescriptor(family) + .setMaxVersions(numVersions); + desc.addFamily(hcd); + } + getHBaseAdmin().createTable(desc); + return (HTable) getConnection().getTable(tableName); } /** * Create a table. - * @param tableName - * @param families - * @param numVersions * @return An HTable instance for the created table. - * @throws IOException */ public HTable createTable(TableName tableName, byte[][] families, int numVersions) @@ -1311,16 +1273,12 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { getHBaseAdmin().createTable(desc); // HBaseAdmin only waits for regions to appear in hbase:meta we should wait until they are assigned waitUntilAllRegionsAssigned(tableName); - return new HTable(new Configuration(getConfiguration()), tableName); + return (HTable) getConnection().getTable(tableName); } /** * Create a table. - * @param tableName - * @param families - * @param numVersions * @return An HTable instance for the created table. - * @throws IOException */ public HTable createTable(byte[] tableName, byte[][] families, int numVersions, int blockSize) throws IOException { @@ -1330,11 +1288,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { /** * Create a table. - * @param tableName - * @param families - * @param numVersions * @return An HTable instance for the created table. - * @throws IOException */ public HTable createTable(TableName tableName, byte[][] families, int numVersions, int blockSize) throws IOException { @@ -1348,16 +1302,12 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { getHBaseAdmin().createTable(desc); // HBaseAdmin only waits for regions to appear in hbase:meta we should wait until they are assigned waitUntilAllRegionsAssigned(tableName); - return new HTable(new Configuration(getConfiguration()), tableName); + return (HTable) getConnection().getTable(tableName); } /** * Create a table. - * @param tableName - * @param families - * @param numVersions * @return An HTable instance for the created table. - * @throws IOException */ public HTable createTable(byte[] tableName, byte[][] families, int[] numVersions) @@ -1367,11 +1317,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { /** * Create a table. - * @param tableName - * @param families - * @param numVersions * @return An HTable instance for the created table. - * @throws IOException */ public HTable createTable(TableName tableName, byte[][] families, int[] numVersions) @@ -1387,16 +1333,12 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { getHBaseAdmin().createTable(desc); // HBaseAdmin only waits for regions to appear in hbase:meta we should wait until they are assigned waitUntilAllRegionsAssigned(tableName); - return new HTable(new Configuration(getConfiguration()), tableName); + return (HTable) getConnection().getTable(tableName); } /** * Create a table. - * @param tableName - * @param family - * @param splitRows * @return An HTable instance for the created table. 
- * @throws IOException */ public HTable createTable(byte[] tableName, byte[] family, byte[][] splitRows) throws IOException{ @@ -1405,11 +1347,7 @@ /** * Create a table. - * @param tableName - * @param family - * @param splitRows * @return An HTable instance for the created table. - * @throws IOException */ public HTable createTable(TableName tableName, byte[] family, byte[][] splitRows) throws IOException { @@ -1419,28 +1357,16 @@ getHBaseAdmin().createTable(desc, splitRows); // HBaseAdmin only waits for regions to appear in hbase:meta we should wait until they are assigned waitUntilAllRegionsAssigned(tableName); - return new HTable(getConfiguration(), tableName); + return (HTable) getConnection().getTable(tableName); } /** * Create a table. - * @param tableName - * @param families - * @param splitRows * @return An HTable instance for the created table. - * @throws IOException */ public HTable createTable(byte[] tableName, byte[][] families, byte[][] splitRows) throws IOException { - HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName)); - for(byte[] family:families) { - HColumnDescriptor hcd = new HColumnDescriptor(family); - desc.addFamily(hcd); - } - getHBaseAdmin().createTable(desc, splitRows); - // HBaseAdmin only waits for regions to appear in hbase:meta we should wait until they are assigned - waitUntilAllRegionsAssigned(TableName.valueOf(tableName)); - return new HTable(getConfiguration(), tableName); + return createTable(TableName.valueOf(tableName), families, splitRows); } /** @@ -1488,7 +1414,7 @@ public static final String START_KEY = new String(START_KEY_BYTES, HConstants.UTF8_CHARSET); /** - * Create a table of name name with {@link COLUMNS} for + * Create a table of name name with {@link #COLUMNS} for * families. * @param name Name to give table. * @param versions How many versions to allow per column. @@ -1510,7 +1436,7 @@ } /** - * Create a table of name name with {@link COLUMNS} for + * Create a table of name name with {@link #COLUMNS} for * families. * @param name Name to give table. * @return Column descriptor. @@ -2371,6 +2297,19 @@ return hbaseCluster; } + /** This is intended to be an instance of {@link HConnectionImplForTests} */ + private HConnection conn = null; + public synchronized HConnection getConnection() throws IOException { + if (conn == null) { + // inject our custom HConnection, one that doesn't close when requested. + Configuration c = new Configuration(getConfiguration()); + c.set("hbase.client.connection.impl", + "org.apache.hadoop.hbase.client.HConnectionImplForTests"); + conn = HConnectionManager.createConnection(c); + } + return conn; + } + /** * Returns a HBaseAdmin instance. * This instance is shared between HBaseTestingUtility instance users.
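The getConnection() added above leans on hbase.client.connection.impl, the key HConnectionManager consults when choosing the HConnection implementation class, so the same trick is available to any test that wants a doctored connection. A sketch, where MyTestConnection stands in for a subclass like HConnectionImplForTests and baseConf for an existing Configuration:

    Configuration c = new Configuration(baseConf);
    // HConnectionManager instantiates this class reflectively in place of the default implementation
    c.set("hbase.client.connection.impl", MyTestConnection.class.getName());
    HConnection conn = HConnectionManager.createConnection(c); // actually a MyTestConnection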
@@ -2383,15 +2322,14 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { public synchronized HBaseAdmin getHBaseAdmin() throws IOException { if (hbaseAdmin == null){ - hbaseAdmin = new HBaseAdminForTests(getConfiguration()); + hbaseAdmin = new HBaseAdminForTests(getConnection()); } return hbaseAdmin; } private HBaseAdminForTests hbaseAdmin = null; private static class HBaseAdminForTests extends HBaseAdmin { - public HBaseAdminForTests(Configuration c) throws MasterNotRunningException, - ZooKeeperConnectionException, IOException { + public HBaseAdminForTests(HConnection c) throws IOException { super(c); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/MiniHBaseCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/MiniHBaseCluster.java index 834fc51..968ce49 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/MiniHBaseCluster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/MiniHBaseCluster.java @@ -29,7 +29,6 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.hbase.client.HConnectionManager; import org.apache.hadoop.hbase.master.HMaster; import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.AdminService; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ClientService; @@ -494,7 +493,6 @@ public class MiniHBaseCluster extends HBaseCluster { if (this.hbaseCluster != null) { this.hbaseCluster.shutdown(); } - HConnectionManager.deleteAllConnections(false); } @Override diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/ServerResourceCheckerJUnitListener.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/ServerResourceCheckerJUnitListener.java deleted file mode 100644 index 4e01b5e..0000000 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/ServerResourceCheckerJUnitListener.java +++ /dev/null @@ -1,42 +0,0 @@ -/* - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hbase; - -import org.apache.hadoop.hbase.ResourceChecker.Phase; -import org.apache.hadoop.hbase.client.HConnectionTestingUtility; - -/** - * Monitor the resources. use by the tests All resources in {@link ResourceCheckerJUnitListener} - * plus the number of connection. 
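HBaseAdminForTests switching from a Configuration to an HConnection constructor applies the same ownership rule one level up: an admin built on an existing connection borrows it rather than owning it, so both need closing, admin first. A hedged sketch of the intended shape:

    HConnection conn = HConnectionManager.createConnection(conf);
    HBaseAdmin admin = new HBaseAdmin(conn); // borrows conn; closing the admin should not close it
    try {
      // ... admin operations ...
    } finally {
      admin.close();
      conn.close(); // still the caller's responsibility
    }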
- */ -public class ServerResourceCheckerJUnitListener extends ResourceCheckerJUnitListener { - - static class ConnectionCountResourceAnalyzer extends ResourceChecker.ResourceAnalyzer { - @Override - public int getVal(Phase phase) { - return HConnectionTestingUtility.getConnectionCount(); - } - } - - @Override - protected void addResourceAnalyzer(ResourceChecker rc) { - rc.addResourceAnalyzer(new ConnectionCountResourceAnalyzer()); - } -} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestZooKeeper.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestZooKeeper.java index 3a31c15..5288439 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestZooKeeper.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestZooKeeper.java @@ -69,8 +69,6 @@ import org.junit.BeforeClass; import org.junit.Test; import org.junit.experimental.categories.Category; - - @Category(LargeTests.class) public class TestZooKeeper { private final Log LOG = LogFactory.getLog(this.getClass()); @@ -140,7 +138,7 @@ public class TestZooKeeper { // We don't want to share the connection as we will check its state c.set(HConstants.HBASE_CLIENT_INSTANCE_ID, "1111"); - HConnection connection = HConnectionManager.getConnection(c); + HConnection connection = HConnectionManager.createConnection(c); ZooKeeperWatcher connectionZK = getZooKeeperWatcher(connection); LOG.info("ZooKeeperWatcher= 0x"+ Integer.toHexString( @@ -280,13 +278,15 @@ public class TestZooKeeper { ipMeta.exists(new Get(row)); // make sure they aren't the same - ZooKeeperWatcher z1 = - getZooKeeperWatcher(HConnectionManager.getConnection(localMeta.getConfiguration())); - ZooKeeperWatcher z2 = - getZooKeeperWatcher(HConnectionManager.getConnection(otherConf)); + HConnection c1 = HConnectionManager.createConnection(localMeta.getConfiguration()); + ZooKeeperWatcher z1 = getZooKeeperWatcher(c1); + HConnection c2 = HConnectionManager.createConnection(otherConf); + ZooKeeperWatcher z2 = getZooKeeperWatcher(c2); assertFalse(z1 == z2); assertFalse(z1.getQuorum().equals(z2.getQuorum())); + c1.close(); + c2.close(); localMeta.close(); ipMeta.close(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/catalog/TestCatalogTracker.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/catalog/TestCatalogTracker.java index caeafbd..6008b8f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/catalog/TestCatalogTracker.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/catalog/TestCatalogTracker.java @@ -39,7 +39,6 @@ import org.apache.hadoop.hbase.MediumTests; import org.apache.hadoop.hbase.NotAllMetaRegionsOnlineException; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.client.HConnection; -import org.apache.hadoop.hbase.client.HConnectionManager; import org.apache.hadoop.hbase.client.HConnectionTestingUtility; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.ipc.ServerNotRunningYetException; @@ -111,9 +110,6 @@ public class TestCatalogTracker { LOG.warn("Unable to delete hbase:meta location", e); } - // Clear out our doctored connection or could mess up subsequent tests. 
- HConnectionManager.deleteConnection(UTIL.getConfiguration()); - this.watcher.close(); } @@ -303,8 +299,7 @@ public class TestCatalogTracker { * {@link HConnection#getAdmin(ServerName)} is called, returns the passed * {@link ClientProtos.ClientService.BlockingInterface} instance when * {@link HConnection#getClient(ServerName)} is called (Be sure to call - * {@link HConnectionManager#deleteConnection(org.apache.hadoop.conf.Configuration)} - * when done with this mocked Connection. + * {@link HConnection#close()} when done with this mocked Connection. * @throws IOException */ private HConnection mockConnection(final AdminProtos.AdminService.BlockingInterface admin, diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/catalog/TestMetaReaderEditorNoCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/catalog/TestMetaReaderEditorNoCluster.java index 6ae0ecd..66cad6d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/catalog/TestMetaReaderEditorNoCluster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/catalog/TestMetaReaderEditorNoCluster.java @@ -43,7 +43,6 @@ import org.apache.hadoop.hbase.MediumTests; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.client.HConnection; import org.apache.hadoop.hbase.client.HConnectionManager; -import org.apache.hadoop.hbase.client.HConnectionTestingUtility; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.ipc.PayloadCarryingRpcController; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos; @@ -133,13 +132,14 @@ public class TestMetaReaderEditorNoCluster { @Test public void testRideOverServerNotRunning() throws IOException, InterruptedException, ServiceException { + // TODO: fix me! // Need a zk watcher. ZooKeeperWatcher zkw = new ZooKeeperWatcher(UTIL.getConfiguration(), this.getClass().getSimpleName(), ABORTABLE, true); // This is a servername we use in a few places below. ServerName sn = ServerName.valueOf("example.com", 1234, System.currentTimeMillis()); - HConnection connection; + HConnection connection = null; CatalogTracker ct = null; try { // Mock an ClientProtocol. Our mock implementation will fail a few @@ -185,7 +185,7 @@ public class TestMetaReaderEditorNoCluster { // Associate a spied-upon HConnection with UTIL.getConfiguration. Need // to shove this in here first so it gets picked up all over; e.g. by // HTable. - connection = HConnectionTestingUtility.getSpiedConnection(UTIL.getConfiguration()); + connection = Mockito.spy(HConnectionManager.createConnection(UTIL.getConfiguration())); // Fix the location lookup so it 'works' though no network. First // make an 'any location' object. final HRegionLocation anyLocation = @@ -218,7 +218,7 @@ public class TestMetaReaderEditorNoCluster { scan((RpcController)Mockito.any(), (ScanRequest)Mockito.any()); } finally { if (ct != null) ct.stop(); - HConnectionManager.deleteConnection(UTIL.getConfiguration()); + if (connection != null) connection.close(); zkw.close(); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/HConnectionImplForTests.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/HConnectionImplForTests.java new file mode 100644 index 0000000..2a321ee --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/HConnectionImplForTests.java @@ -0,0 +1,47 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.client; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.security.User; + +import java.io.IOException; +import java.util.concurrent.ExecutorService; + +/** + * This implementation defers closure until the harness is shut down. Modeled + * after {@link HBaseTestingUtility.HBaseAdminForTests} + */ +public class HConnectionImplForTests extends HConnectionManager.HConnectionImplementation { + HConnectionImplForTests(Configuration conf, ExecutorService pool, User user) + throws IOException { + super(conf, pool, user); + } + + @Override + public void close() { + LOG.warn("close() called on HConnection instance returned from HBaseTestingUtility.getConnection()"); + } + + /** Actually closes the connection */ + public void close0() { + super.close(); + } +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/HConnectionTestingUtility.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/HConnectionTestingUtility.java index b5026ff..ba9c13f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/HConnectionTestingUtility.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/HConnectionTestingUtility.java @@ -43,34 +43,24 @@ public class HConnectionTestingUtility { * Get a Mocked {@link HConnection} that goes with the passed conf * configuration instance. Minimally the mock will return * conf when {@link HConnection#getConfiguration()} is invoked. - * Be sure to shutdown the connection when done by calling - * {@link HConnectionManager#deleteConnection(Configuration)} else it - * will stick around; this is probably not what you want. + * Be sure to shutdown the connection when done by calling {@link HConnection#close()} + * else it will stick around; this is probably not what you want. 
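The close()/close0() split in HConnectionImplForTests above is a general trick for sharing one expensive resource across many tests: ordinary callers' close() calls become no-ops, and only the harness performs the real teardown. The same shape generically, with hypothetical names:

    import java.io.Closeable;
    import java.io.IOException;

    class UnclosableHandle implements Closeable {
      private final Closeable delegate;

      UnclosableHandle(Closeable delegate) { this.delegate = delegate; }

      @Override
      public void close() {
        // deliberately a no-op: the resource is shared across tests
      }

      // the harness's equivalent of close0()
      void reallyClose() throws IOException {
        delegate.close();
      }
    }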
* @param conf configuration * @return HConnection object for conf * @throws ZooKeeperConnectionException */ public static HConnection getMockedConnection(final Configuration conf) - throws ZooKeeperConnectionException { - HConnectionKey connectionKey = new HConnectionKey(conf); - synchronized (HConnectionManager.CONNECTION_INSTANCES) { - HConnectionImplementation connection = - HConnectionManager.CONNECTION_INSTANCES.get(connectionKey); - if (connection == null) { - connection = Mockito.mock(HConnectionImplementation.class); - Mockito.when(connection.getConfiguration()).thenReturn(conf); - HConnectionManager.CONNECTION_INSTANCES.put(connectionKey, connection); - } - return connection; - } + throws ZooKeeperConnectionException { + HConnectionImplementation connection = Mockito.mock(HConnectionImplementation.class); + Mockito.when(connection.getConfiguration()).thenReturn(conf); + return connection; } /** * Calls {@link #getMockedConnection(Configuration)} and then mocks a few * more of the popular {@link HConnection} methods so they do 'normal' * operation (see return doc below for list). Be sure to shutdown the - * connection when done by calling - * {@link HConnectionManager#deleteConnection(Configuration)} else it + * connection when done by calling {@link HConnection#close()} else it * will stick around; this is probably not what you want. * * @param conf Configuration to use @@ -88,9 +78,7 @@ public class HConnectionTestingUtility { * and that returns the passed {@link AdminProtos.AdminService.BlockingInterface} instance when * {@link HConnection#getAdmin(ServerName)} is called, returns the passed * {@link ClientProtos.ClientService.BlockingInterface} instance when - * {@link HConnection#getClient(ServerName)} is called (Be sure to call - * {@link HConnectionManager#deleteConnection(Configuration)} - * when done with this mocked Connection. + * {@link HConnection#getClient(ServerName)} is called. * @throws IOException */ public static HConnection getMockedConnectionAndDecorate(final Configuration conf, @@ -119,39 +107,4 @@ public class HConnectionTestingUtility { } return c; } - - /** - * Get a Mockito spied-upon {@link HConnection} that goes with the passed - * conf configuration instance. - * Be sure to shutdown the connection when done by calling - * {@link HConnectionManager#deleteConnection(Configuration)} else it - * will stick around; this is probably not what you want. 
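With the registry bookkeeping gone, getMockedConnection reduces to a plain Mockito mock preloaded with the Configuration. Typical usage inside a test method under that contract (a sketch; only getConfiguration() is stubbed by default):

    Configuration conf = HBaseConfiguration.create();
    HConnection conn = HConnectionTestingUtility.getMockedConnection(conf);
    assertSame(conf, conn.getConfiguration()); // the one behavior the mock guarantees
    conn.close(); // a no-op on a mock, but keeps call sites honest about lifecycle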
- * @param conf configuration - * @return HConnection object for conf - * @throws ZooKeeperConnectionException - * @see @link - * {http://mockito.googlecode.com/svn/branches/1.6/javadoc/org/mockito/Mockito.html#spy(T)} - */ - public static HConnection getSpiedConnection(final Configuration conf) - throws IOException { - HConnectionKey connectionKey = new HConnectionKey(conf); - synchronized (HConnectionManager.CONNECTION_INSTANCES) { - HConnectionImplementation connection = - HConnectionManager.CONNECTION_INSTANCES.get(connectionKey); - if (connection == null) { - connection = Mockito.spy(new HConnectionImplementation(conf, true)); - HConnectionManager.CONNECTION_INSTANCES.put(connectionKey, connection); - } - return connection; - } - } - - /** - * @return Count of extant connection instances - */ - public static int getConnectionCount() { - synchronized (HConnectionManager.CONNECTION_INSTANCES) { - return HConnectionManager.CONNECTION_INSTANCES.size(); - } - } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin.java index c010c22..5d53c8e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin.java @@ -26,8 +26,6 @@ import static org.junit.Assert.fail; import java.io.IOException; import java.util.ArrayList; -import java.util.Collections; -import java.util.Comparator; import java.util.HashMap; import java.util.Iterator; import java.util.List; @@ -58,7 +56,6 @@ import org.apache.hadoop.hbase.TableNotEnabledException; import org.apache.hadoop.hbase.TableNotFoundException; import org.apache.hadoop.hbase.ZooKeeperConnectionException; import org.apache.hadoop.hbase.catalog.CatalogTracker; -import org.apache.hadoop.hbase.catalog.MetaReader; import org.apache.hadoop.hbase.constraint.ConstraintException; import org.apache.hadoop.hbase.executor.EventHandler; import org.apache.hadoop.hbase.master.AssignmentManager; @@ -864,7 +861,7 @@ public class TestAdmin { HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName)); desc.addFamily(new HColumnDescriptor(HConstants.CATALOG_FAMILY)); admin.createTable(desc, splitKeys); - HTable ht = new HTable(TEST_UTIL.getConfiguration(), tableName); + HTable ht = (HTable) TEST_UTIL.getConnection().getTable(tableName); Map regions = ht.getRegionLocations(); ht.close(); assertEquals("Tried to create " + expectedRegions + " regions " @@ -880,7 +877,7 @@ public class TestAdmin { for (Map.Entry entry : regions.entrySet()) { assertEquals(regions2.get(entry.getKey()), entry.getValue()); } - } + } /** * Multi-family scenario. Tests forcing split from client and @@ -1620,20 +1617,6 @@ public class TestAdmin { } /** - * HBASE-4417 checkHBaseAvailable() doesn't close zk connections - */ - @Test (timeout=300000) - public void testCheckHBaseAvailableClosesConnection() throws Exception { - Configuration conf = TEST_UTIL.getConfiguration(); - - int initialCount = HConnectionTestingUtility.getConnectionCount(); - HBaseAdmin.checkHBaseAvailable(conf); - int finalCount = HConnectionTestingUtility.getConnectionCount(); - - Assert.assertEquals(initialCount, finalCount) ; - } - - /** * Check that we have an exception if the cluster is not there. 
*/ @Test (timeout=300000) @@ -1644,8 +1627,6 @@ public class TestAdmin { conf.setInt(HConstants.ZOOKEEPER_CLIENT_PORT, conf.getInt(HConstants.ZOOKEEPER_CLIENT_PORT, 9999)+10); - int initialCount = HConnectionTestingUtility.getConnectionCount(); - long start = System.currentTimeMillis(); try { HBaseAdmin.checkHBaseAvailable(conf); @@ -1657,10 +1638,6 @@ public class TestAdmin { } long end = System.currentTimeMillis(); - int finalCount = HConnectionTestingUtility.getConnectionCount(); - - Assert.assertEquals(initialCount, finalCount) ; - LOG.info("It took "+(end-start)+" ms to find out that" + " HBase was not available"); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java index 4bfeccd..d6650ae 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java @@ -104,7 +104,7 @@ import org.junit.Test; import org.junit.experimental.categories.Category; /** - * Run tests that use the HBase clients; {@link HTable} and {@link HTablePool}. + * Run tests that use the HBase clients; {@link HTable} and {@link HConnection}. * Sets up the HBase mini cluster once at start and runs through all client tests. * Each creates a table named for the method and does its stuff against that. */ @@ -243,7 +243,7 @@ public class TestFromClientSide { // Then a ZooKeeperKeepAliveConnection HConnectionManager.HConnectionImplementation connection1 = (HConnectionManager.HConnectionImplementation) - HConnectionManager.getConnection(newConfig); + HConnectionManager.createConnection(newConfig); ZooKeeperKeepAliveConnection z1 = connection1.getKeepAliveZooKeeperWatcher(); z1.getRecoverableZooKeeper().getZooKeeper().exists("/z1", false); @@ -265,7 +265,7 @@ public class TestFromClientSide { newConfig2.set(HConstants.HBASE_CLIENT_INSTANCE_ID, "6789"); HConnectionManager.HConnectionImplementation connection2 = (HConnectionManager.HConnectionImplementation) - HConnectionManager.getConnection(newConfig2); + HConnectionManager.createConnection(newConfig2); assertTrue("connections should be different ", connection1 != connection2); @@ -291,7 +291,7 @@ public class TestFromClientSide { z4.getRecoverableZooKeeper().getZooKeeper().exists("/z4", false); - HConnectionManager.deleteConnection(newConfig); + connection1.close(); try { z2.getRecoverableZooKeeper().getZooKeeper().exists("/z2", false); assertTrue("We should not have a valid connection for z2", false); @@ -302,7 +302,7 @@ public class TestFromClientSide { // We expect success here. 
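What the reworked TestFromClientSide assertions boil down to: every createConnection call yields an independent connection with its own ZooKeeper watcher (the HBASE_CLIENT_INSTANCE_ID settings just make that visible), and closing one must leave the other fully usable. Distilled into a sketch, with conf as the cluster Configuration:

    Configuration c1 = new Configuration(conf);
    c1.set(HConstants.HBASE_CLIENT_INSTANCE_ID, "1234");
    Configuration c2 = new Configuration(conf);
    c2.set(HConstants.HBASE_CLIENT_INSTANCE_ID, "6789");

    HConnection conn1 = HConnectionManager.createConnection(c1);
    HConnection conn2 = HConnectionManager.createConnection(c2);
    assertTrue(conn1 != conn2); // distinct instances, distinct ZK sessions

    conn1.close(); // tears down conn1's ZooKeeper watcher only
    assertTrue(conn1.isClosed());
    assertFalse(conn2.isClosed()); // conn2 and its watcher are untouched
    conn2.close();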
- HConnectionManager.deleteConnection(newConfig2); + connection2.close(); try { z4.getRecoverableZooKeeper().getZooKeeper().exists("/z4", false); assertTrue("We should not have a valid connection for z4", false); @@ -316,8 +316,7 @@ public class TestFromClientSide { @Test public void testRegionCachePreWarm() throws Exception { LOG.info("Starting testRegionCachePreWarm"); - final TableName TABLENAME = - TableName.valueOf("testCachePrewarm"); + final TableName TABLENAME = TableName.valueOf("testCachePrewarm"); Configuration conf = TEST_UTIL.getConfiguration(); // Set up test table: diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide3.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide3.java index f1c9607..bea2146 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide3.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide3.java @@ -113,8 +113,7 @@ public class TestFromClientSide3 { byte[] row, byte[] family, int nFlushes, int nPuts) throws Exception { // connection needed for poll-wait - HConnection conn = HConnectionManager.getConnection(TEST_UTIL - .getConfiguration()); + HConnection conn = HConnectionManager.createConnection(TEST_UTIL.getConfiguration()); HRegionLocation loc = table.getRegionLocation(row, true); AdminProtos.AdminService.BlockingInterface server = conn.getAdmin(loc.getServerName()); byte[] regName = loc.getRegionInfo().getRegionName(); @@ -133,6 +132,7 @@ public class TestFromClientSide3 { Thread.sleep(40); } } + conn.close(); } // override the config settings at the CF level and ensure priority @@ -154,8 +154,7 @@ public class TestFromClientSide3 { TableName.valueOf(tableName); HTable hTable = TEST_UTIL.createTable(TABLE, FAMILY, 10); HBaseAdmin admin = new HBaseAdmin(TEST_UTIL.getConfiguration()); - HConnection connection = HConnectionManager.getConnection(TEST_UTIL - .getConfiguration()); + HConnection connection = HConnectionManager.createConnection(TEST_UTIL.getConfiguration()); // Create 3 store files. 
byte[] row = Bytes.toBytes(random.nextInt()); @@ -265,6 +264,7 @@ public class TestFromClientSide3 { LOG.info("alter status finished"); assertNull(hTable.getTableDescriptor().getFamily(FAMILY).getValue( "hbase.hstore.compaction.min")); + connection.close(); } @Test diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHBaseAdminNoCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHBaseAdminNoCluster.java index ea92bd5..e5f2ac5 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHBaseAdminNoCluster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHBaseAdminNoCluster.java @@ -27,16 +27,13 @@ import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.SmallTests; -import org.apache.hadoop.hbase.MasterNotRunningException; import org.apache.hadoop.hbase.PleaseHoldException; import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.ZooKeeperConnectionException; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateTableRequest; import org.junit.Test; import org.junit.experimental.categories.Category; import org.mockito.Mockito; import org.mortbay.log.Log; -import org.apache.hadoop.hbase.client.HConnectionTestingUtility; import com.google.protobuf.RpcController; import com.google.protobuf.ServiceException; @@ -47,13 +44,10 @@ public class TestHBaseAdminNoCluster { * Verify that PleaseHoldException gets retried. * HBASE-8764 * @throws IOException - * @throws ZooKeeperConnectionException - * @throws MasterNotRunningException - * @throws ServiceException + * @throws ServiceException */ @Test - public void testMasterMonitorCollableRetries() - throws MasterNotRunningException, ZooKeeperConnectionException, IOException, ServiceException { + public void testMasterMonitorCollableRetries() throws IOException, ServiceException { Configuration configuration = HBaseConfiguration.create(); // Set the pause and retry count way down. 
configuration.setLong(HConstants.HBASE_CLIENT_PAUSE, 1); @@ -71,7 +65,7 @@ thenThrow(new ServiceException("Test fail").initCause(new PleaseHoldException("test"))); Mockito.when(connection.getKeepAliveMasterService()).thenReturn(masterAdmin); // Mock up our admin Interfaces - HBaseAdmin admin = new HBaseAdmin(configuration); + HBaseAdmin admin = new HBaseAdmin(connection); try { HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("testMasterMonitorCollableRetries")); @@ -87,7 +81,7 @@ (CreateTableRequest)Mockito.any()); } finally { admin.close(); - if (connection != null)HConnectionManager.deleteConnection(configuration); + if (connection != null) connection.close(); } } } \ No newline at end of file diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHCM.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHCM.java index 37a59db..9e8fa4c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHCM.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHCM.java @@ -23,16 +23,12 @@ import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertNull; import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; import java.io.IOException; import java.lang.reflect.Field; import java.lang.reflect.Modifier; import java.net.SocketTimeoutException; -import java.util.ArrayList; -import java.util.HashMap; import java.util.List; -import java.util.Map; import java.util.Random; import java.util.concurrent.ExecutorService; import java.util.concurrent.SynchronousQueue; @@ -40,10 +36,9 @@ import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionInfo; @@ -65,8 +61,8 @@ import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.JVMClusterUtil; import org.apache.hadoop.hbase.util.ManualEnvironmentEdge; +import org.apache.hadoop.hbase.util.ReflectionUtils; import org.apache.hadoop.hbase.util.Threads; -import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; import org.junit.AfterClass; import org.junit.Assert; import org.junit.BeforeClass; @@ -108,11 +104,6 @@ public class TestHCM { TEST_UTIL.shutdownMiniCluster(); } - - private static int getHConnectionManagerCacheSize(){ - return HConnectionTestingUtility.getConnectionCount(); - } - @Test public void testClusterConnection() throws IOException { ThreadPoolExecutor otherPool = new ThreadPoolExecutor(1, 1,
HConnectionManager.CONNECTION_INSTANCES.clear(); - - try { - HConnection connection = HConnectionManager.getConnection(TEST_UTIL.getConfiguration()); - connection.abort("test abortingHConnectionRemovesItselfFromHCM", new Exception( - "test abortingHConnectionRemovesItselfFromHCM")); - Assert.assertNotSame(connection, - HConnectionManager.getConnection(TEST_UTIL.getConfiguration())); - } finally { - // Put original HConnections back - HConnectionManager.CONNECTION_INSTANCES.clear(); - HConnectionManager.CONNECTION_INSTANCES.putAll(oldHBaseInstances); - } - } - /** * Test that when we delete a location using the first row of a region * that we really delete it. @@ -586,141 +554,6 @@ public class TestHCM { } /** - * Make sure that {@link Configuration} instances that are essentially the - * same map to the same {@link HConnection} instance. - */ - @Test - public void testConnectionSameness() throws Exception { - HConnection previousConnection = null; - for (int i = 0; i < 2; i++) { - // set random key to differentiate the connection from previous ones - Configuration configuration = TEST_UTIL.getConfiguration(); - configuration.set("some_key", String.valueOf(_randy.nextInt())); - LOG.info("The hash code of the current configuration is: " - + configuration.hashCode()); - HConnection currentConnection = HConnectionManager - .getConnection(configuration); - if (previousConnection != null) { - assertTrue( - "Did not get the same connection even though its key didn't change", - previousConnection == currentConnection); - } - previousConnection = currentConnection; - // change the configuration, so that it is no longer reachable from the - // client's perspective. However, since its part of the LRU doubly linked - // list, it will eventually get thrown out, at which time it should also - // close the corresponding {@link HConnection}. - configuration.set("other_key", String.valueOf(_randy.nextInt())); - } - } - - /** - * Makes sure that there is no leaking of - * {@link HConnectionManager.HConnectionImplementation} in the {@link HConnectionManager} - * class. - */ - @Test - public void testConnectionUniqueness() throws Exception { - int zkmaxconnections = TEST_UTIL.getConfiguration(). - getInt(HConstants.ZOOKEEPER_MAX_CLIENT_CNXNS, - HConstants.DEFAULT_ZOOKEPER_MAX_CLIENT_CNXNS); - // Test up to a max that is < the maximum number of zk connections. If we - // go above zk connections, we just fall into cycle where we are failing - // to set up a session and test runs for a long time. - int maxConnections = Math.min(zkmaxconnections - 1, 20); - List connections = new ArrayList(maxConnections); - HConnection previousConnection = null; - try { - for (int i = 0; i < maxConnections; i++) { - // set random key to differentiate the connection from previous ones - Configuration configuration = new Configuration(TEST_UTIL.getConfiguration()); - configuration.set("some_key", String.valueOf(_randy.nextInt())); - configuration.set(HConstants.HBASE_CLIENT_INSTANCE_ID, - String.valueOf(_randy.nextInt())); - LOG.info("The hash code of the current configuration is: " - + configuration.hashCode()); - HConnection currentConnection = - HConnectionManager.getConnection(configuration); - if (previousConnection != null) { - assertTrue("Got the same connection even though its key changed!", - previousConnection != currentConnection); - } - // change the configuration, so that it is no longer reachable from the - // client's perspective. 
However, since its part of the LRU doubly linked - // list, it will eventually get thrown out, at which time it should also - // close the corresponding {@link HConnection}. - configuration.set("other_key", String.valueOf(_randy.nextInt())); - - previousConnection = currentConnection; - LOG.info("The current HConnectionManager#HBASE_INSTANCES cache size is: " - + getHConnectionManagerCacheSize()); - Thread.sleep(50); - connections.add(currentConnection); - } - } finally { - for (HConnection c: connections) { - // Clean up connections made so we don't interfere w/ subsequent tests. - HConnectionManager.deleteConnection(c.getConfiguration()); - } - } - } - - @Test - public void testClosing() throws Exception { - Configuration configuration = - new Configuration(TEST_UTIL.getConfiguration()); - configuration.set(HConstants.HBASE_CLIENT_INSTANCE_ID, - String.valueOf(_randy.nextInt())); - - HConnection c1 = HConnectionManager.createConnection(configuration); - // We create two connections with the same key. - HConnection c2 = HConnectionManager.createConnection(configuration); - - HConnection c3 = HConnectionManager.getConnection(configuration); - HConnection c4 = HConnectionManager.getConnection(configuration); - assertTrue(c3 == c4); - - c1.close(); - assertTrue(c1.isClosed()); - assertFalse(c2.isClosed()); - assertFalse(c3.isClosed()); - - c3.close(); - // still a reference left - assertFalse(c3.isClosed()); - c3.close(); - assertTrue(c3.isClosed()); - // c3 was removed from the cache - HConnection c5 = HConnectionManager.getConnection(configuration); - assertTrue(c5 != c3); - - assertFalse(c2.isClosed()); - c2.close(); - assertTrue(c2.isClosed()); - c5.close(); - assertTrue(c5.isClosed()); - } - - /** - * Trivial test to verify that nobody messes with - * {@link HConnectionManager#createConnection(Configuration)} - */ - @Test - public void testCreateConnection() throws Exception { - Configuration configuration = TEST_UTIL.getConfiguration(); - HConnection c1 = HConnectionManager.createConnection(configuration); - HConnection c2 = HConnectionManager.createConnection(configuration); - // created from the same configuration, yet they are different - assertTrue(c1 != c2); - assertTrue(c1.getConfiguration() == c2.getConfiguration()); - // make sure these were not cached - HConnection c3 = HConnectionManager.getConnection(configuration); - assertTrue(c1 != c3); - assertTrue(c2 != c3); - } - - - /** * This test checks that one can connect to the cluster with only the * ZooKeeper quorum set. Other stuff like master address will be read * from ZK by the client. 
@@ -735,13 +568,13 @@ public class TestHCM { TEST_UTIL.getConfiguration().get(HConstants.ZOOKEEPER_CLIENT_PORT)); // This should be enough to connect - HConnection conn = HConnectionManager.getConnection(c); + HConnection conn = HConnectionManager.createConnection(c); assertTrue( conn.isMasterRunning() ); conn.close(); } private int setNumTries(HConnectionImplementation hci, int newVal) throws Exception { - Field numTries = hci.getClass().getDeclaredField("numTries"); + Field numTries = ReflectionUtils.getAllDeclaredField(hci.getClass(), "numTries"); numTries.setAccessible(true); Field modifiersField = Field.class.getDeclaredField("modifiers"); modifiersField.setAccessible(true); @@ -757,8 +590,7 @@ public class TestHCM { HTable table = TEST_UTIL.createTable(TABLE_NAME3, FAM_NAM); TEST_UTIL.createMultiRegions(table, FAM_NAM); HConnectionManager.HConnectionImplementation conn = - (HConnectionManager.HConnectionImplementation) - HConnectionManager.getConnection(TEST_UTIL.getConfiguration()); + (HConnectionManager.HConnectionImplementation) TEST_UTIL.getConnection(); // We're now going to move the region and check that it works for the client // First a new put to add the location in the cache @@ -779,6 +611,7 @@ public class TestHCM { // Now moving the region to the second server HRegionLocation toMove = conn.getCachedLocation(TABLE_NAME3, ROW_X); + assertNotNull(toMove); byte[] regionName = toMove.getRegionInfo().getRegionName(); byte[] encodedRegionNameBytes = toMove.getRegionInfo().getEncodedNameAsBytes(); @@ -937,84 +770,5 @@ public class TestHCM { assertTrue("Value not within jitter: " + expected + " vs " + actual, Math.abs(actual - expected) <= (0.01f * jitterBase)); } - - /** - * Tests that a destroyed connection does not have a live zookeeper. - * Below is timing based. We put up a connection to a table and then close the connection while - * having a background thread running that is forcing close of the connection to try and - * provoke a close catastrophe; we are hoping for a car crash so we can see if we are leaking - * zk connections. - * @throws Exception - */ - @Ignore ("Flakey test: See HBASE-8996")@Test - public void testDeleteForZKConnLeak() throws Exception { - TEST_UTIL.createTable(TABLE_NAME4, FAM_NAM); - final Configuration config = HBaseConfiguration.create(TEST_UTIL.getConfiguration()); - config.setInt("zookeeper.recovery.retry", 1); - config.setInt("zookeeper.recovery.retry.intervalmill", 1000); - config.setInt("hbase.rpc.timeout", 2000); - config.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 1); - - ThreadPoolExecutor pool = new ThreadPoolExecutor(1, 10, - 5, TimeUnit.SECONDS, - new SynchronousQueue(), - Threads.newDaemonThreadFactory("test-hcm-delete")); - - pool.submit(new Runnable() { - @Override - public void run() { - while (!Thread.interrupted()) { - try { - HConnection conn = HConnectionManager.getConnection(config); - LOG.info("Connection " + conn); - HConnectionManager.deleteStaleConnection(conn); - LOG.info("Connection closed " + conn); - // TODO: This sleep time should be less than the time that it takes to open and close - // a table. Ideally we would do a few runs first to measure. For now this is - // timing based; hopefully we hit the bad condition. - Threads.sleep(10); - } catch (Exception e) { - } - } - } - }); - - // Use connection multiple times. 
- for (int i = 0; i < 30; i++) { - HConnection c1 = null; - try { - c1 = HConnectionManager.getConnection(config); - LOG.info("HTable connection " + i + " " + c1); - HTable table = new HTable(TABLE_NAME4, c1, pool); - table.close(); - LOG.info("HTable connection " + i + " closed " + c1); - } catch (Exception e) { - LOG.info("We actually want this to happen!!!! So we can see if we are leaking zk", e); - } finally { - if (c1 != null) { - if (c1.isClosed()) { - // cannot use getZooKeeper as method instantiates watcher if null - Field zkwField = c1.getClass().getDeclaredField("keepAliveZookeeper"); - zkwField.setAccessible(true); - Object watcher = zkwField.get(c1); - - if (watcher != null) { - if (((ZooKeeperWatcher)watcher).getRecoverableZooKeeper().getState().isAlive()) { - // non-synchronized access to watcher; sleep and check again in case zk connection - // hasn't been cleaned up yet. - Thread.sleep(1000); - if (((ZooKeeperWatcher) watcher).getRecoverableZooKeeper().getState().isAlive()) { - pool.shutdownNow(); - fail("Live zookeeper in closed connection"); - } - } - } - } - c1.close(); - } - } - } - pool.shutdownNow(); - } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHTablePool.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHTablePool.java deleted file mode 100644 index 322ebaa..0000000 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHTablePool.java +++ /dev/null @@ -1,364 +0,0 @@ -/** - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase.client; - -import java.io.IOException; - -import org.apache.hadoop.hbase.*; -import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.hbase.util.PoolMap.PoolType; -import org.junit.*; -import org.junit.experimental.categories.Category; -import org.junit.runner.RunWith; -import org.junit.runners.Suite; - -/** - * Tests HTablePool. 
- */ -@RunWith(Suite.class) -@Suite.SuiteClasses({TestHTablePool.TestHTableReusablePool.class, TestHTablePool.TestHTableThreadLocalPool.class}) -@Category(MediumTests.class) -public class TestHTablePool { - private static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); - private final static byte[] TABLENAME = Bytes.toBytes("TestHTablePool"); - - public abstract static class TestHTablePoolType { - - @BeforeClass - public static void setUpBeforeClass() throws Exception { - TEST_UTIL.startMiniCluster(1); - TEST_UTIL.createTable(TABLENAME, HConstants.CATALOG_FAMILY); - } - - @AfterClass - public static void tearDownAfterClass() throws Exception { - TEST_UTIL.shutdownMiniCluster(); - } - - protected abstract PoolType getPoolType(); - - @Test - public void testTableWithStringName() throws Exception { - HTablePool pool = new HTablePool(TEST_UTIL.getConfiguration(), - Integer.MAX_VALUE, getPoolType()); - String tableName = Bytes.toString(TABLENAME); - - // Request a table from an empty pool - HTableInterface table = pool.getTable(tableName); - Assert.assertNotNull(table); - - // Close table (returns table to the pool) - table.close(); - - // Request a table of the same name - HTableInterface sameTable = pool.getTable(tableName); - Assert.assertSame( - ((HTablePool.PooledHTable) table).getWrappedTable(), - ((HTablePool.PooledHTable) sameTable).getWrappedTable()); - } - - @Test - public void testTableWithByteArrayName() throws IOException { - HTablePool pool = new HTablePool(TEST_UTIL.getConfiguration(), - Integer.MAX_VALUE, getPoolType()); - - // Request a table from an empty pool - HTableInterface table = pool.getTable(TABLENAME); - Assert.assertNotNull(table); - - // Close table (returns table to the pool) - table.close(); - - // Request a table of the same name - HTableInterface sameTable = pool.getTable(TABLENAME); - Assert.assertSame( - ((HTablePool.PooledHTable) table).getWrappedTable(), - ((HTablePool.PooledHTable) sameTable).getWrappedTable()); - } - - @Test - public void testTablesWithDifferentNames() throws IOException { - HTablePool pool = new HTablePool(TEST_UTIL.getConfiguration(), - Integer.MAX_VALUE, getPoolType()); - // We add the class to the table name as the HBase cluster is reused - // during the tests: this gives naming unicity. 
- byte[] otherTable = Bytes.toBytes( - "OtherTable_" + getClass().getSimpleName() - ); - TEST_UTIL.createTable(otherTable, HConstants.CATALOG_FAMILY); - - // Request a table from an empty pool - HTableInterface table1 = pool.getTable(TABLENAME); - HTableInterface table2 = pool.getTable(otherTable); - Assert.assertNotNull(table2); - - // Close tables (returns tables to the pool) - table1.close(); - table2.close(); - - // Request tables of the same names - HTableInterface sameTable1 = pool.getTable(TABLENAME); - HTableInterface sameTable2 = pool.getTable(otherTable); - Assert.assertSame( - ((HTablePool.PooledHTable) table1).getWrappedTable(), - ((HTablePool.PooledHTable) sameTable1).getWrappedTable()); - Assert.assertSame( - ((HTablePool.PooledHTable) table2).getWrappedTable(), - ((HTablePool.PooledHTable) sameTable2).getWrappedTable()); - } - @Test - public void testProxyImplementationReturned() { - HTablePool pool = new HTablePool(TEST_UTIL.getConfiguration(), - Integer.MAX_VALUE); - String tableName = Bytes.toString(TABLENAME);// Request a table from - // an - // empty pool - HTableInterface table = pool.getTable(tableName); - - // Test if proxy implementation is returned - Assert.assertTrue(table instanceof HTablePool.PooledHTable); - } - - @Test - public void testDeprecatedUsagePattern() throws IOException { - HTablePool pool = new HTablePool(TEST_UTIL.getConfiguration(), - Integer.MAX_VALUE); - String tableName = Bytes.toString(TABLENAME);// Request a table from - // an - // empty pool - - // get table will return proxy implementation - HTableInterface table = pool.getTable(tableName); - - // put back the proxy implementation instead of closing it - pool.putTable(table); - - // Request a table of the same name - HTableInterface sameTable = pool.getTable(tableName); - - // test no proxy over proxy created - Assert.assertSame(((HTablePool.PooledHTable) table).getWrappedTable(), - ((HTablePool.PooledHTable) sameTable).getWrappedTable()); - } - - @Test - public void testReturnDifferentTable() throws IOException { - HTablePool pool = new HTablePool(TEST_UTIL.getConfiguration(), - Integer.MAX_VALUE); - String tableName = Bytes.toString(TABLENAME);// Request a table from - // an - // empty pool - - // get table will return proxy implementation - final HTableInterface table = pool.getTable(tableName); - HTableInterface alienTable = new HTable(TEST_UTIL.getConfiguration(), - TABLENAME) { - // implementation doesn't matter as long the table is not from - // pool - }; - try { - // put the wrong table in pool - pool.putTable(alienTable); - Assert.fail("alien table accepted in pool"); - } catch (IllegalArgumentException e) { - Assert.assertTrue("alien table rejected", true); - } - } - - @Test - public void testHTablePoolCloseTwice() throws Exception { - HTablePool pool = new HTablePool(TEST_UTIL.getConfiguration(), - Integer.MAX_VALUE, getPoolType()); - String tableName = Bytes.toString(TABLENAME); - - // Request a table from an empty pool - HTableInterface table = pool.getTable(tableName); - Assert.assertNotNull(table); - Assert.assertTrue(((HTablePool.PooledHTable) table).isOpen()); - // Close table (returns table to the pool) - table.close(); - // check if the table is closed - Assert.assertFalse(((HTablePool.PooledHTable) table).isOpen()); - try { - table.close(); - Assert.fail("Should not allow table to be closed twice"); - } catch (IllegalStateException ex) { - Assert.assertTrue("table cannot be closed twice", true); - } finally { - pool.close(); - } - - } - - - - } - - 
@Category(MediumTests.class) - public static class TestHTableReusablePool extends TestHTablePoolType { - @Override - protected PoolType getPoolType() { - return PoolType.Reusable; - } - - @Test - public void testTableWithMaxSize() throws Exception { - HTablePool pool = new HTablePool(TEST_UTIL.getConfiguration(), 2, - getPoolType()); - - // Request tables from an empty pool - HTableInterface table1 = pool.getTable(TABLENAME); - HTableInterface table2 = pool.getTable(TABLENAME); - HTableInterface table3 = pool.getTable(TABLENAME); - - // Close tables (returns tables to the pool) - table1.close(); - table2.close(); - // The pool should reject this one since it is already full - table3.close(); - - // Request tables of the same name - HTableInterface sameTable1 = pool.getTable(TABLENAME); - HTableInterface sameTable2 = pool.getTable(TABLENAME); - HTableInterface sameTable3 = pool.getTable(TABLENAME); - Assert.assertSame( - ((HTablePool.PooledHTable) table1).getWrappedTable(), - ((HTablePool.PooledHTable) sameTable1).getWrappedTable()); - Assert.assertSame( - ((HTablePool.PooledHTable) table2).getWrappedTable(), - ((HTablePool.PooledHTable) sameTable2).getWrappedTable()); - Assert.assertNotSame( - ((HTablePool.PooledHTable) table3).getWrappedTable(), - ((HTablePool.PooledHTable) sameTable3).getWrappedTable()); - } - - @Test - public void testCloseTablePool() throws IOException { - HTablePool pool = new HTablePool(TEST_UTIL.getConfiguration(), 4, - getPoolType()); - HBaseAdmin admin = new HBaseAdmin(TEST_UTIL.getConfiguration()); - - if (admin.tableExists(TABLENAME)) { - admin.disableTable(TABLENAME); - admin.deleteTable(TABLENAME); - } - - HTableDescriptor tableDescriptor = new HTableDescriptor(TableName.valueOf(TABLENAME)); - tableDescriptor.addFamily(new HColumnDescriptor("randomFamily")); - admin.createTable(tableDescriptor); - - // Request tables from an empty pool - HTableInterface[] tables = new HTableInterface[4]; - for (int i = 0; i < 4; ++i) { - tables[i] = pool.getTable(TABLENAME); - } - - pool.closeTablePool(TABLENAME); - - for (int i = 0; i < 4; ++i) { - tables[i].close(); - } - - Assert.assertEquals(4, - pool.getCurrentPoolSize(Bytes.toString(TABLENAME))); - - pool.closeTablePool(TABLENAME); - - Assert.assertEquals(0, - pool.getCurrentPoolSize(Bytes.toString(TABLENAME))); - } - } - - @Category(MediumTests.class) - public static class TestHTableThreadLocalPool extends TestHTablePoolType { - @Override - protected PoolType getPoolType() { - return PoolType.ThreadLocal; - } - - @Test - public void testTableWithMaxSize() throws Exception { - HTablePool pool = new HTablePool(TEST_UTIL.getConfiguration(), 2, - getPoolType()); - - // Request tables from an empty pool - HTableInterface table1 = pool.getTable(TABLENAME); - HTableInterface table2 = pool.getTable(TABLENAME); - HTableInterface table3 = pool.getTable(TABLENAME); - - // Close tables (returns tables to the pool) - table1.close(); - table2.close(); - // The pool should not reject this one since the number of threads - // <= 2 - table3.close(); - - // Request tables of the same name - HTableInterface sameTable1 = pool.getTable(TABLENAME); - HTableInterface sameTable2 = pool.getTable(TABLENAME); - HTableInterface sameTable3 = pool.getTable(TABLENAME); - Assert.assertSame( - ((HTablePool.PooledHTable) table3).getWrappedTable(), - ((HTablePool.PooledHTable) sameTable1).getWrappedTable()); - Assert.assertSame( - ((HTablePool.PooledHTable) table3).getWrappedTable(), - ((HTablePool.PooledHTable) sameTable2).getWrappedTable()); - 
Assert.assertSame( - ((HTablePool.PooledHTable) table3).getWrappedTable(), - ((HTablePool.PooledHTable) sameTable3).getWrappedTable()); - } - - @Test - public void testCloseTablePool() throws IOException { - HTablePool pool = new HTablePool(TEST_UTIL.getConfiguration(), 4, - getPoolType()); - HBaseAdmin admin = new HBaseAdmin(TEST_UTIL.getConfiguration()); - - if (admin.tableExists(TABLENAME)) { - admin.disableTable(TABLENAME); - admin.deleteTable(TABLENAME); - } - - HTableDescriptor tableDescriptor = new HTableDescriptor(TableName.valueOf(TABLENAME)); - tableDescriptor.addFamily(new HColumnDescriptor("randomFamily")); - admin.createTable(tableDescriptor); - - // Request tables from an empty pool - HTableInterface[] tables = new HTableInterface[4]; - for (int i = 0; i < 4; ++i) { - tables[i] = pool.getTable(TABLENAME); - } - - pool.closeTablePool(TABLENAME); - - for (int i = 0; i < 4; ++i) { - tables[i].close(); - } - - Assert.assertEquals(1, - pool.getCurrentPoolSize(Bytes.toString(TABLENAME))); - - pool.closeTablePool(TABLENAME); - - Assert.assertEquals(0, - pool.getCurrentPoolSize(Bytes.toString(TABLENAME))); - } - } - -} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMultiParallel.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMultiParallel.java index 8cdaef6..541a63e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMultiParallel.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMultiParallel.java @@ -95,7 +95,7 @@ public class TestMultiParallel { // Wait until completing balance UTIL.waitFor(15 * 1000, UTIL.predicateNoRegionsInTransition()); } - HConnection conn = HConnectionManager.getConnection(UTIL.getConfiguration()); + HConnection conn = HConnectionManager.createConnection(UTIL.getConfiguration()); conn.clearRegionCache(); conn.close(); LOG.info("before done"); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRestoreSnapshotFromClient.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRestoreSnapshotFromClient.java index 966a594..d72e76c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRestoreSnapshotFromClient.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRestoreSnapshotFromClient.java @@ -173,7 +173,7 @@ public class TestRestoreSnapshotFromClient { public void testRestoreSchemaChange() throws Exception { byte[] TEST_FAMILY2 = Bytes.toBytes("cf2"); - HTable table = new HTable(TEST_UTIL.getConfiguration(), tableName); + HTable table = (HTable) TEST_UTIL.getConnection().getTable(tableName); // Add one column family and put some data in it admin.disableTable(tableName); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java index 9037bb3..5cecdd1 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java @@ -165,7 +165,11 @@ public class TestCatalogJanitor { this.ct.stop(); } if (this.connection != null) { - HConnectionManager.deleteConnection(this.connection.getConfiguration()); + try { + this.connection.close(); + } catch (IOException e) { + // TODO: should be more careful here? 
+ } } } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEndToEndSplitTransaction.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEndToEndSplitTransaction.java index dc0d892..8e300b5 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEndToEndSplitTransaction.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEndToEndSplitTransaction.java @@ -98,8 +98,7 @@ public class TestEndToEndSplitTransaction { byte []firstRow = Bytes.toBytes("aaa"); byte []splitRow = Bytes.toBytes("lll"); byte []lastRow = Bytes.toBytes("zzz"); - HConnection con = HConnectionManager - .getConnection(TEST_UTIL.getConfiguration()); + HConnection con = HConnectionManager.createConnection(TEST_UTIL.getConfiguration()); // this will also cache the region byte[] regionName = con.locateRegion(tableName, splitRow).getRegionInfo() .getRegionName(); @@ -142,6 +141,8 @@ public class TestEndToEndSplitTransaction { regions.getSecond()); assertTrue(test(con, tableName, firstRow, server)); assertTrue(test(con, tableName, lastRow, server)); + + con.close(); } /** diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsck.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsck.java index f8ff7af..ecf78e4 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsck.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsck.java @@ -2110,7 +2110,7 @@ public class TestHBaseFsck { private void deleteMetaRegion(Configuration conf, boolean unassign, boolean hdfs, boolean regionInfoOnly) throws IOException, InterruptedException { - HConnection connection = HConnectionManager.getConnection(conf); + HConnection connection = HConnectionManager.createConnection(conf); HRegionLocation metaLocation = connection.locateRegion(TableName.META_TABLE_NAME, HConstants.EMPTY_START_ROW); ServerName hsa = ServerName.valueOf(metaLocation.getHostnamePort(), 0L); @@ -2141,6 +2141,8 @@ public class TestHBaseFsck { LOG.info("Deleted " + p + " sucessfully? 
" + success); HBaseFsck.debugLsr(conf, p); } + + connection.close(); } @Test diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/hbck/OfflineMetaRebuildTestCore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/hbck/OfflineMetaRebuildTestCore.java index 4a5f84d..c25c65c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/hbck/OfflineMetaRebuildTestCore.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/hbck/OfflineMetaRebuildTestCore.java @@ -42,7 +42,6 @@ import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.catalog.MetaEditor; import org.apache.hadoop.hbase.client.Delete; import org.apache.hadoop.hbase.client.HBaseAdmin; -import org.apache.hadoop.hbase.client.HConnectionManager; import org.apache.hadoop.hbase.client.HTable; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Result; @@ -110,7 +109,6 @@ public class OfflineMetaRebuildTestCore { @After public void tearDownAfter() throws Exception { TEST_UTIL.shutdownMiniCluster(); - HConnectionManager.deleteConnection(conf); } /** diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/hbck/TestOfflineMetaRebuildBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/hbck/TestOfflineMetaRebuildBase.java index 1a85bc5..d73b635 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/hbck/TestOfflineMetaRebuildBase.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/hbck/TestOfflineMetaRebuildBase.java @@ -58,7 +58,6 @@ public class TestOfflineMetaRebuildBase extends OfflineMetaRebuildTestCore { // shutdown the minicluster TEST_UTIL.shutdownMiniHBaseCluster(); TEST_UTIL.shutdownMiniZKCluster(); - HConnectionManager.deleteConnection(conf); // rebuild meta table from scratch HBaseFsck fsck = new HBaseFsck(conf); diff --git a/hbase-shell/pom.xml b/hbase-shell/pom.xml index bc1a47d..81b6a91 100644 --- a/hbase-shell/pom.xml +++ b/hbase-shell/pom.xml @@ -131,7 +131,7 @@ listener - org.apache.hadoop.hbase.ServerResourceCheckerJUnitListener + org.apache.hadoop.hbase.ResourceCheckerJUnitListener diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/ThriftHBaseServiceHandler.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/ThriftHBaseServiceHandler.java index e469f6a..12c730d 100644 --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/ThriftHBaseServiceHandler.java +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/ThriftHBaseServiceHandler.java @@ -37,10 +37,10 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.client.HConnection; +import org.apache.hadoop.hbase.client.HConnectionManager; import org.apache.hadoop.hbase.client.HTableInterface; -import org.apache.hadoop.hbase.client.HTablePool; import org.apache.hadoop.hbase.client.ResultScanner; -import org.apache.hadoop.hbase.client.RowMutations; import org.apache.hadoop.hbase.thrift.ThriftMetrics; import org.apache.hadoop.hbase.thrift2.generated.*; import org.apache.thrift.TException; @@ -52,17 +52,17 @@ import org.apache.thrift.TException; @InterfaceAudience.Private public class ThriftHBaseServiceHandler implements THBaseService.Iface { - // TODO: Size of pool configuraple - private final HTablePool htablePool; private static final Log LOG = LogFactory.getLog(ThriftHBaseServiceHandler.class); + private final 
HConnection conn; + // nextScannerId and scannerMap are used to manage scanner state // TODO: Cleanup thread for Scanners, Scanner id wrap private final AtomicInteger nextScannerId = new AtomicInteger(0); private final Map scannerMap = new ConcurrentHashMap(); - public static THBaseService.Iface newInstance(Configuration conf, ThriftMetrics metrics) { + public static THBaseService.Iface newInstance(Configuration conf, ThriftMetrics metrics) throws IOException { THBaseService.Iface handler = new ThriftHBaseServiceHandler(conf); return (THBaseService.Iface) Proxy.newProxyInstance(handler.getClass().getClassLoader(), new Class[] { THBaseService.Iface.class }, new THBaseServiceMetricsProxy(handler, metrics)); @@ -98,18 +98,19 @@ public class ThriftHBaseServiceHandler implements THBaseService.Iface { return System.nanoTime(); } - ThriftHBaseServiceHandler(Configuration conf) { - int maxPoolSize = conf.getInt("hbase.thrift.htablepool.size.max", 1000); - htablePool = new HTablePool(conf, maxPoolSize); + ThriftHBaseServiceHandler(Configuration conf) throws IOException { + if (conf.getInt("hbase.thrift.htablepool.size.max", -1) != -1) + LOG.warn("hbase.thrift.htablepool.size.max is no longer supported. See HBASE-9117."); + conn = HConnectionManager.createConnection(conf); } - private HTableInterface getTable(ByteBuffer tableName) { - return htablePool.getTable(byteBufferToByteArray(tableName)); + private HTableInterface getTable(ByteBuffer tableName) throws IOException { + return conn.getTable(byteBufferToByteArray(tableName)); } private void closeTable(HTableInterface table) throws TIOError { try { - table.close(); + if (table != null) table.close(); } catch (IOException e) { throw getTIOError(e); } @@ -152,8 +153,9 @@ public class ThriftHBaseServiceHandler implements THBaseService.Iface { @Override public boolean exists(ByteBuffer table, TGet get) throws TIOError, TException { - HTableInterface htable = getTable(table); + HTableInterface htable = null; try { + htable = getTable(table); return htable.exists(getFromThrift(get)); } catch (IOException e) { throw getTIOError(e); @@ -164,8 +166,9 @@ public class ThriftHBaseServiceHandler implements THBaseService.Iface { @Override public TResult get(ByteBuffer table, TGet get) throws TIOError, TException { - HTableInterface htable = getTable(table); + HTableInterface htable = null; try { + htable = getTable(table); return resultFromHBase(htable.get(getFromThrift(get))); } catch (IOException e) { throw getTIOError(e); @@ -176,8 +179,9 @@ public class ThriftHBaseServiceHandler implements THBaseService.Iface { @Override public List getMultiple(ByteBuffer table, List gets) throws TIOError, TException { - HTableInterface htable = getTable(table); + HTableInterface htable = null; try { + htable = getTable(table); return resultsFromHBase(htable.get(getsFromThrift(gets))); } catch (IOException e) { throw getTIOError(e); @@ -188,8 +192,9 @@ public class ThriftHBaseServiceHandler implements THBaseService.Iface { @Override public void put(ByteBuffer table, TPut put) throws TIOError, TException { - HTableInterface htable = getTable(table); + HTableInterface htable = null; try { + htable = getTable(table); htable.put(putFromThrift(put)); } catch (IOException e) { throw getTIOError(e); @@ -201,8 +206,9 @@ public class ThriftHBaseServiceHandler implements THBaseService.Iface { @Override public boolean checkAndPut(ByteBuffer table, ByteBuffer row, ByteBuffer family, ByteBuffer qualifier, ByteBuffer value, TPut put) throws TIOError, TException { - HTableInterface htable 
= getTable(table); + HTableInterface htable = null; try { + htable = getTable(table); return htable.checkAndPut(byteBufferToByteArray(row), byteBufferToByteArray(family), byteBufferToByteArray(qualifier), (value == null) ? null : byteBufferToByteArray(value), putFromThrift(put)); @@ -215,8 +221,9 @@ public class ThriftHBaseServiceHandler implements THBaseService.Iface { @Override public void putMultiple(ByteBuffer table, List puts) throws TIOError, TException { - HTableInterface htable = getTable(table); + HTableInterface htable = null; try { + htable = getTable(table); htable.put(putsFromThrift(puts)); } catch (IOException e) { throw getTIOError(e); @@ -227,8 +234,9 @@ public class ThriftHBaseServiceHandler implements THBaseService.Iface { @Override public void deleteSingle(ByteBuffer table, TDelete deleteSingle) throws TIOError, TException { - HTableInterface htable = getTable(table); + HTableInterface htable = null; try { + htable = getTable(table); htable.delete(deleteFromThrift(deleteSingle)); } catch (IOException e) { throw getTIOError(e); @@ -240,8 +248,9 @@ public class ThriftHBaseServiceHandler implements THBaseService.Iface { @Override public List deleteMultiple(ByteBuffer table, List deletes) throws TIOError, TException { - HTableInterface htable = getTable(table); + HTableInterface htable = null; try { + htable = getTable(table); htable.delete(deletesFromThrift(deletes)); } catch (IOException e) { throw getTIOError(e); @@ -254,9 +263,9 @@ public class ThriftHBaseServiceHandler implements THBaseService.Iface { @Override public boolean checkAndDelete(ByteBuffer table, ByteBuffer row, ByteBuffer family, ByteBuffer qualifier, ByteBuffer value, TDelete deleteSingle) throws TIOError, TException { - HTableInterface htable = getTable(table); - + HTableInterface htable = null; try { + htable = getTable(table); if (value == null) { return htable.checkAndDelete(byteBufferToByteArray(row), byteBufferToByteArray(family), byteBufferToByteArray(qualifier), null, deleteFromThrift(deleteSingle)); @@ -274,8 +283,9 @@ public class ThriftHBaseServiceHandler implements THBaseService.Iface { @Override public TResult increment(ByteBuffer table, TIncrement increment) throws TIOError, TException { - HTableInterface htable = getTable(table); + HTableInterface htable = null; try { + htable = getTable(table); return resultFromHBase(htable.increment(incrementFromThrift(increment))); } catch (IOException e) { throw getTIOError(e); @@ -286,8 +296,9 @@ public class ThriftHBaseServiceHandler implements THBaseService.Iface { @Override public TResult append(ByteBuffer table, TAppend append) throws TIOError, TException { - HTableInterface htable = getTable(table); + HTableInterface htable = null; try { + htable = getTable(table); return resultFromHBase(htable.append(appendFromThrift(append))); } catch (IOException e) { throw getTIOError(e); @@ -298,9 +309,10 @@ public class ThriftHBaseServiceHandler implements THBaseService.Iface { @Override public int openScanner(ByteBuffer table, TScan scan) throws TIOError, TException { - HTableInterface htable = getTable(table); + HTableInterface htable = null; ResultScanner resultScanner = null; try { + htable = getTable(table); resultScanner = htable.getScanner(scanFromThrift(scan)); } catch (IOException e) { throw getTIOError(e); @@ -330,10 +342,11 @@ public class ThriftHBaseServiceHandler implements THBaseService.Iface { @Override public List getScannerResults(ByteBuffer table, TScan scan, int numRows) throws TIOError, TException { - HTableInterface htable = 
getTable(table); + HTableInterface htable = null; List results = null; ResultScanner scanner = null; try { + htable = getTable(table); scanner = htable.getScanner(scanFromThrift(scan)); results = resultsFromHBase(scanner.next(numRows)); } catch (IOException e) { @@ -364,8 +377,9 @@ public class ThriftHBaseServiceHandler implements THBaseService.Iface { @Override public void mutateRow(ByteBuffer table, TRowMutations rowMutations) throws TIOError, TException { - HTableInterface htable = getTable(table); + HTableInterface htable = null; try { + htable = getTable(table); htable.mutateRow(rowMutationsFromThrift(rowMutations)); } catch (IOException e) { throw getTIOError(e); diff --git a/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift2/TestThriftHBaseServiceHandler.java b/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift2/TestThriftHBaseServiceHandler.java index c701119..ffe7df2 100644 --- a/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift2/TestThriftHBaseServiceHandler.java +++ b/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift2/TestThriftHBaseServiceHandler.java @@ -18,8 +18,6 @@ */ package org.apache.hadoop.hbase.thrift2; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.CompatibilityFactory; import org.apache.hadoop.hbase.HBaseTestingUtility; @@ -45,7 +43,6 @@ import org.apache.hadoop.hbase.thrift2.generated.TDelete; import org.apache.hadoop.hbase.thrift2.generated.TDeleteType; import org.apache.hadoop.hbase.thrift2.generated.TGet; import org.apache.hadoop.hbase.thrift2.generated.THBaseService; -import org.apache.hadoop.hbase.thrift2.generated.TIOError; import org.apache.hadoop.hbase.thrift2.generated.TIllegalArgument; import org.apache.hadoop.hbase.thrift2.generated.TIncrement; import org.apache.hadoop.hbase.thrift2.generated.TPut; @@ -62,6 +59,7 @@ import org.junit.BeforeClass; import org.junit.Test; import org.junit.experimental.categories.Category; +import java.io.IOException; import java.nio.ByteBuffer; import java.util.ArrayList; import java.util.Collections; @@ -85,7 +83,6 @@ import static java.nio.ByteBuffer.wrap; @Category(MediumTests.class) public class TestThriftHBaseServiceHandler { - public static final Log LOG = LogFactory.getLog(TestThriftHBaseServiceHandler.class); private static final HBaseTestingUtility UTIL = new HBaseTestingUtility(); // Static names for tables, columns, rows, and values @@ -149,12 +146,12 @@ public class TestThriftHBaseServiceHandler { } - private ThriftHBaseServiceHandler createHandler() { + private ThriftHBaseServiceHandler createHandler() throws IOException { return new ThriftHBaseServiceHandler(UTIL.getConfiguration()); } @Test - public void testExists() throws TIOError, TException { + public void testExists() throws IOException, TException { ThriftHBaseServiceHandler handler = createHandler(); byte[] rowName = "testExists".getBytes(); ByteBuffer table = wrap(tableAname); @@ -669,7 +666,7 @@ public class TestThriftHBaseServiceHandler { // get scanner int scanId = handler.openScanner(table, scan); - List results = null; + List results; for (int i = 0; i < 10; i++) { // get batch for single row (10x10 is what we expect) results = handler.getScannerRows(scanId, 1); diff --git a/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift2/TestThriftHBaseServiceHandlerWithLabels.java b/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift2/TestThriftHBaseServiceHandlerWithLabels.java index dc9e0c3..fc984a7 
100644 --- a/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift2/TestThriftHBaseServiceHandlerWithLabels.java +++ b/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift2/TestThriftHBaseServiceHandlerWithLabels.java @@ -178,7 +178,7 @@ public void setup() throws Exception { } -private ThriftHBaseServiceHandler createHandler() { +private ThriftHBaseServiceHandler createHandler() throws IOException { return new ThriftHBaseServiceHandler(UTIL.getConfiguration()); } -- 1.8.4.2
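
Reviewer note, appended after the patch (not part of the diff): with HTablePool gone, the hunks above converge on one pattern, a single explicitly managed HConnection plus short-lived table handles. Below is a minimal sketch of that pattern against the post-HBASE-9117 client API; the table name "t1", family "f1", and row/qualifier/value literals are hypothetical, chosen only for illustration.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HConnection;
import org.apache.hadoop.hbase.client.HConnectionManager;
import org.apache.hadoop.hbase.client.HTableInterface;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.util.Bytes;

public class ConnectionUsageSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    // One heavyweight, caller-owned connection per application; after this
    // patch there is no internal cache or reference count behind it.
    HConnection connection = HConnectionManager.createConnection(conf);
    try {
      // Table handles are cheap: take one per unit of work and close it,
      // instead of checking instances in and out of an HTablePool.
      HTableInterface table = connection.getTable("t1");
      try {
        table.put(new Put(Bytes.toBytes("row1"))
            .add(Bytes.toBytes("f1"), Bytes.toBytes("q1"), Bytes.toBytes("v1")));
        Result r = table.get(new Get(Bytes.toBytes("row1")));
        System.out.println(Bytes.toString(
            r.getValue(Bytes.toBytes("f1"), Bytes.toBytes("q1"))));
      } finally {
        // Mirrors the per-call getTable()/closeTable() pattern adopted in
        // the thrift2 handler above.
        table.close();
      }
    } finally {
      // Releases zookeeper and rpc resources exactly once.
      connection.close();
    }
  }
}

Unlike the removed getConnection(Configuration), createConnection(Configuration) hands ownership to the caller, so close() is called exactly once per connection rather than once per cached configuration key.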