diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
index 0fe2ff3..8ea8f41 100644
--- hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
+++ hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
@@ -66,9 +66,9 @@ public interface Admin extends Abortable, Closeable {
   boolean isAborted();

   /**
-   * @return HConnection used by this object.
+   * @return Connection used by this object.
    */
-  HConnection getConnection();
+  Connection getConnection();

   /**
    * @return - true if the master server is running. Throws an exception otherwise.
diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/client/Connection.java hbase-client/src/main/java/org/apache/hadoop/hbase/client/Connection.java
new file mode 100644
index 0000000..acf679a
--- /dev/null
+++ hbase-client/src/main/java/org/apache/hadoop/hbase/client/Connection.java
@@ -0,0 +1,127 @@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.client;
+
+import java.io.Closeable;
+import java.io.IOException;
+import java.util.concurrent.ExecutorService;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.Abortable;
+import org.apache.hadoop.hbase.TableName;
+
+/**
+ * A cluster connection encapsulating lower level individual connections to actual servers and
+ * a connection to zookeeper. Connections are instantiated through the {@link ConnectionFactory}
+ * class. The lifecycle of the connection is managed by the caller, who has to {@link #close()}
+ * the connection to release the resources.
+ *
+ * <br>The connection object contains the logic to find the master, locate regions out on the
+ * cluster, keep a cache of locations, and re-calibrate after regions move. The individual
+ * connections to servers, the meta cache, the zookeeper connection, etc. are all shared by the
+ * {@link Table} and {@link Admin} instances obtained from this connection.
+ *
+ * <br>Connection creation is a heavy-weight operation. Connection implementations are
+ * thread-safe, so that the client can create a connection once and share it with different
+ * threads. {@link Table} and {@link Admin} instances, on the other hand, are light-weight and
+ * are not thread-safe. Typically, a single connection per client application is instantiated
+ * and every thread will obtain its own Table instance. Caching or pooling of {@link Table} and
+ * {@link Admin} is not recommended.
+ *
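+ * <br>A minimal usage sketch (the table and row names below are only illustrative):
+ * {@code
+ * // one heavy-weight Connection for the whole application, shared across threads
+ * Connection connection = ConnectionFactory.createConnection(conf);
+ * // per thread: a light-weight, non-thread-safe Table, closed after use
+ * Table table = connection.getTable(TableName.valueOf("mytable"));
+ * try {
+ *   table.get(new Get(Bytes.toBytes("row1")));
+ * } finally {
+ *   table.close();
+ * }
+ * // on application shutdown:
+ * connection.close();
+ * }
+ *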
+ * <br>This class replaces {@link HConnection}, which is now deprecated.
+ * @see ConnectionFactory
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public interface Connection extends Abortable, Closeable {
+
+  /*
+   * Implementation notes:
+   *  - Only allow new style of interfaces:
+   *    -- All table names are passed as TableName. No more byte[] and string arguments
+   *    -- Most of the classes with names starting with H are deprecated in favor of non-H
+   *       versions (Table, Connection vs HConnection, etc)
+   *    -- Only real client-facing public methods are allowed
+   *  - Connection should contain only getTable(), getAdmin() kind of general methods.
+   */
+
+  /**
+   * @return Configuration instance being used by this Connection instance.
+   */
+  Configuration getConfiguration();
+
+  /**
+   * Retrieve a Table implementation for accessing a table.
+   * The returned Table is not thread safe; a new instance should be created for each using thread.
+   * This is a lightweight operation; pooling or caching of the returned Table
+   * is neither required nor desired.
+   *
+   * @param tableName the name of the table
+   * @return a Table to use for interactions with this table
+   */
+  Table getTable(TableName tableName) throws IOException;
+
+  /**
+   * Retrieve a Table implementation for accessing a table.
+   * The returned Table is not thread safe; a new instance should be created for each using thread.
+   * This is a lightweight operation; pooling or caching of the returned Table
+   * is neither required nor desired.
+   *
+   * @param tableName the name of the table
+   * @param pool The thread pool to use for batch operations, null to use a default pool.
+   * @return a Table to use for interactions with this table
+   */
+  Table getTable(TableName tableName, ExecutorService pool) throws IOException;
+
+  /**
+   * Retrieve a RegionLocator implementation to inspect region information on a table. The returned
+   * RegionLocator is not thread-safe, so a new instance should be created for each using thread.
+   *
+   * This is a lightweight operation. Pooling or caching of the returned RegionLocator is neither
+   * required nor desired.
+   *
+   * RegionLocator needs to be unmanaged
+   *
+   * @param tableName Name of the table whose region is to be examined
+   * @return A RegionLocator instance
+   */
+  public RegionLocator getRegionLocator(TableName tableName) throws IOException;
+
+  /**
+   * Retrieve an Admin implementation to administer an HBase cluster.
+   * The returned Admin is not guaranteed to be thread-safe. A new instance should be created for
+   * each using thread. This is a lightweight operation. Pooling or caching of the returned
+   * Admin is not recommended.
+   *
+   * @return an Admin instance for cluster administration
+   */
+  Admin getAdmin() throws IOException;
+
+  @Override
+  public void close() throws IOException;
+
+  /**
+   * Returns whether the connection is closed or not.
+   * @return true if this connection is closed
+   */
+  boolean isClosed();
+
+}
diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionFactory.java hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionFactory.java
new file mode 100644
index 0000000..0c054ed
--- /dev/null
+++ hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionFactory.java
@@ -0,0 +1,186 @@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.client;
+
+import java.io.IOException;
+import java.util.concurrent.ExecutorService;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.security.User;
+import org.apache.hadoop.hbase.security.UserProvider;
+
+
+/**
+ * A non-instantiable class that manages creation of {@link Connection}s.
+ * Managing the lifecycle of the {@link Connection}s to the cluster is the responsibility of
+ * the caller.
+ * From this {@link Connection}, {@link Table} implementations are retrieved
+ * with {@link Connection#getTable(TableName)}. Example:
+ *
+ * {@code
+ * Connection connection = ConnectionFactory.createConnection(config);
+ * Table table = connection.getTable(TableName.valueOf("table1"));
+ * try {
+ * // Use the table as needed, for a single operation and a single thread
+ * } finally {
+ * table.close();
+ * connection.close();
+ * }
+ * }
+ *
+ * Similarly, {@link Connection} also returns {@link RegionLocator} implementations.
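+ * For example, a minimal sketch (the row key is illustrative, and {@code getRegionLocation(byte[])}
+ * is assumed here to be the RegionLocator lookup method):
+ * {@code
+ * RegionLocator locator = connection.getRegionLocator(TableName.valueOf("table1"));
+ * HRegionLocation location = locator.getRegionLocation(Bytes.toBytes("row1"));
+ * }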
+ *
+ * This class replaces {@link HConnectionManager}, which is now deprecated.
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public class ConnectionFactory {
+
+ /** No public c.tors */
+ protected ConnectionFactory() {
+ }
+
+ /**
+ * Create a new Connection instance using the passed conf instance. Connection
+ * encapsulates all housekeeping for a connection to the cluster. All tables and interfaces
+ * created from returned connection share zookeeper connection, meta cache, and connections
+ * to region servers and masters.
+ * The caller is responsible for calling {@link Connection#close()} on the returned
+ * connection instance.
+ *
+ * Typical usage:
+ *
+ * Connection connection = ConnectionFactory.createConnection(conf);
+ * Table table = connection.getTable(TableName.valueOf("mytable"));
+ * try {
+ * table.get(...);
+ * ...
+ * } finally {
+ * table.close();
+ * connection.close();
+ * }
+ *
+ *
+ * @param conf configuration
+ * @return Connection object for conf
+ */
+ public static Connection createConnection(Configuration conf) throws IOException {
+ return createConnection(conf, null, null);
+ }
+
+ /**
+ * Create a new Connection instance using the passed conf instance. Connection
+ * encapsulates all housekeeping for a connection to the cluster. All tables and interfaces
+ * created from returned connection share zookeeper connection, meta cache, and connections
+ * to region servers and masters.
+ * The caller is responsible for calling {@link Connection#close()} on the returned
+ * connection instance.
+ *
+ * Typical usage:
+ *
+ * Connection connection = ConnectionFactory.createConnection(conf);
+ * Table table = connection.getTable(TableName.valueOf("mytable"));
+ * try {
+ * table.get(...);
+ * ...
+ * } finally {
+ * table.close();
+ * connection.close();
+ * }
+ *
+ *
+ * @param conf configuration
+ * @param pool the thread pool to use for batch operations
+ * @return Connection object for conf
+ */
+ public static Connection createConnection(Configuration conf, ExecutorService pool)
+ throws IOException {
+ return createConnection(conf, pool, null);
+ }
+
+ /**
+ * Create a new Connection instance using the passed conf instance. Connection
+ * encapsulates all housekeeping for a connection to the cluster. All tables and interfaces
+ * created from returned connection share zookeeper connection, meta cache, and connections
+ * to region servers and masters.
+ * The caller is responsible for calling {@link Connection#close()} on the returned
+ * connection instance.
+ *
+ * Typical usage:
+ *
+ * Connection connection = ConnectionFactory.createConnection(conf);
+ * Table table = connection.getTable(TableName.valueOf("table1"));
+ * try {
+ * table.get(...);
+ * ...
+ * } finally {
+ * table.close();
+ * connection.close();
+ * }
+ *
+ *
+ * @param conf configuration
+ * @param user the user the connection is for
+ * @return Connection object for conf
+ */
+ public static Connection createConnection(Configuration conf, User user)
+ throws IOException {
+ return createConnection(conf, null, user);
+ }
+
+ /**
+ * Create a new Connection instance using the passed conf instance. Connection
+ * encapsulates all housekeeping for a connection to the cluster. All tables and interfaces
+ * created from returned connection share zookeeper connection, meta cache, and connections
+ * to region servers and masters.
+ * The caller is responsible for calling {@link Connection#close()} on the returned
+ * connection instance.
+ *
+ * Typical usage:
+ *
+ * Connection connection = ConnectionFactory.createConnection(conf);
+ * Table table = connection.getTable(TableName.valueOf("table1"));
+ * try {
+ * table.get(...);
+ * ...
+ * } finally {
+ * table.close();
+ * connection.close();
+ * }
+ *
+ *
+ * @param conf configuration
+ * @param user the user the connection is for
+ * @param pool the thread pool to use for batch operations
+ * @return Connection object for conf
+ */
+ public static Connection createConnection(Configuration conf, ExecutorService pool, User user)
+ throws IOException {
+ if (user == null) {
+ UserProvider provider = UserProvider.instantiate(conf);
+ user = provider.getCurrent();
+ }
+
+ return ConnectionManager.createConnection(conf, pool, user);
+ }
+
+}
diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnection.java hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnection.java
index cd11a52..67df350 100644
--- hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnection.java
+++ hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnection.java
@@ -18,7 +18,6 @@
*/
package org.apache.hadoop.hbase.client;
-import java.io.Closeable;
import java.io.IOException;
import java.util.List;
import java.util.concurrent.ExecutorService;
@@ -26,7 +25,6 @@ import java.util.concurrent.ExecutorService;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.Abortable;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.MasterNotRunningException;
@@ -57,10 +55,12 @@ import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MasterService;
* HConnections awkward. See {@link HConnectionManager} for cleanup discussion.
*
* @see HConnectionManager
+ * @deprecated in favor of {@link Connection} and {@link ConnectionFactory}
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
-public interface HConnection extends Abortable, Closeable {
+@Deprecated
+public interface HConnection extends Connection {
/**
* Key for configuration in Configuration whose value is the class we implement making a
* new HConnection instance.
@@ -70,6 +70,7 @@ public interface HConnection extends Abortable, Closeable {
/**
* @return Configuration instance being used by this HConnection instance.
*/
+ @Override
Configuration getConfiguration();
/**
@@ -109,6 +110,7 @@ public interface HConnection extends Abortable, Closeable {
* @param tableName
* @return an HTable to use for interactions with this table
*/
+ @Override
public HTableInterface getTable(TableName tableName) throws IOException;
/**
@@ -151,6 +153,7 @@ public interface HConnection extends Abortable, Closeable {
* @param pool The thread pool to use for batch operations, null to use a default pool.
* @return an HTable to use for interactions with this table
*/
+ @Override
public HTableInterface getTable(TableName tableName, ExecutorService pool) throws IOException;
/**
@@ -166,6 +169,7 @@ public interface HConnection extends Abortable, Closeable {
* @param tableName Name of the table who's region is to be examined
* @return A RegionLocator instance
*/
+ @Override
public RegionLocator getRegionLocator(TableName tableName) throws IOException;
/**
@@ -176,6 +180,7 @@ public interface HConnection extends Abortable, Closeable {
*
* @return an Admin instance for cluster administration
*/
+ @Override
Admin getAdmin() throws IOException;
/** @return - true if the master server is running
@@ -542,6 +547,7 @@ public interface HConnection extends Abortable, Closeable {
/**
* @return true if this connection is closed
*/
+ @Override
boolean isClosed();
diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java
index 3ff8fa2..ef0a69c 100644
--- hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java
+++ hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java
@@ -36,7 +36,7 @@ import org.apache.hadoop.hbase.security.User;
*
* {@code
* HConnection connection = HConnectionManager.createConnection(config);
- * HTableInterface table = connection.getTable("table1");
+ * HTableInterface table = connection.getTable(TableName.valueOf("table1"));
* try {
* // Use the table as needed, for a single operation and a single thread
* } finally {
@@ -82,11 +82,12 @@ import org.apache.hadoop.hbase.security.User;
* were problematic for clients of HConnection that wanted to register their
* own shutdown hooks so we removed ours though this shifts the onus for
* cleanup to the client.
+ * @deprecated Please use ConnectionFactory instead
*/
-@SuppressWarnings("serial")
@InterfaceAudience.Public
@InterfaceStability.Evolving
-public class HConnectionManager {
+@Deprecated
+public class HConnectionManager extends ConnectionFactory {
@Deprecated
public static final String RETRIES_BY_SERVER_KEY =
@@ -112,6 +113,7 @@ public class HConnectionManager {
* @return HConnection object for conf
* @throws ZooKeeperConnectionException
*/
+ @Deprecated
public static HConnection getConnection(final Configuration conf) throws IOException {
return ConnectionManager.getConnectionInternal(conf);
}
@@ -126,16 +128,19 @@ public class HConnectionManager {
* {@code
* HConnection connection = HConnectionManager.createConnection(conf);
* HTableInterface table = connection.getTable("mytable");
- * table.get(...);
- * ...
- * table.close();
- * connection.close();
+ * try {
+ * table.get(...);
+ * ...
+ * } finally {
+ * table.close();
+ * connection.close();
* }
*
* @param conf configuration
* @return HConnection object for conf
* @throws ZooKeeperConnectionException
*/
+ @Deprecated
public static HConnection createConnection(Configuration conf) throws IOException {
return ConnectionManager.createConnectionInternal(conf);
}
@@ -161,6 +166,7 @@ public class HConnectionManager {
* @return HConnection object for conf
* @throws ZooKeeperConnectionException
*/
+ @Deprecated
public static HConnection createConnection(Configuration conf, ExecutorService pool)
throws IOException {
return ConnectionManager.createConnection(conf, pool);
@@ -186,6 +192,7 @@ public class HConnectionManager {
* @return HConnection object for conf
* @throws ZooKeeperConnectionException
*/
+ @Deprecated
public static HConnection createConnection(Configuration conf, User user)
throws IOException {
return ConnectionManager.createConnection(conf, user);
@@ -212,6 +219,7 @@ public class HConnectionManager {
* @return HConnection object for conf
* @throws ZooKeeperConnectionException
*/
+ @Deprecated
public static HConnection createConnection(Configuration conf, ExecutorService pool, User user)
throws IOException {
return ConnectionManager.createConnection(conf, pool, user);
diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
index 40d46c4..d889b65 100644
--- hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
+++ hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
@@ -162,7 +162,10 @@ public class HTable implements HTableInterface, RegionLocator {
* @param conf Configuration object to use.
* @param tableName Name of the table.
* @throws IOException if a remote or network exception occurs
+ * @deprecated Constructing HTable objects manually has been deprecated. Please use
+ * {@link Connection} to instantiate a {@link Table} instead.
*/
+ @Deprecated
public HTable(Configuration conf, final String tableName)
throws IOException {
this(conf, TableName.valueOf(tableName));
@@ -177,7 +180,10 @@ public class HTable implements HTableInterface, RegionLocator {
* @param conf Configuration object to use.
* @param tableName Name of the table.
* @throws IOException if a remote or network exception occurs
+ * @deprecated Constructing HTable objects manually has been deprecated. Please use
+ * {@link Connection} to instantiate a {@link Table} instead.
*/
+ @Deprecated
public HTable(Configuration conf, final byte[] tableName)
throws IOException {
this(conf, TableName.valueOf(tableName));
@@ -194,7 +200,10 @@ public class HTable implements HTableInterface, RegionLocator {
* @param conf Configuration object to use.
* @param tableName table name pojo
* @throws IOException if a remote or network exception occurs
+ * @deprecated Constructing HTable objects manually has been deprecated. Please use
+ * {@link Connection} to instantiate a {@link Table} instead.
*/
+ @Deprecated
public HTable(Configuration conf, final TableName tableName)
throws IOException {
this.tableName = tableName;
@@ -213,14 +222,14 @@ public class HTable implements HTableInterface, RegionLocator {
/**
* Creates an object to access a HBase table. Shares zookeeper connection and other resources with
* other HTable instances created with the same connection instance. Use this
- * constructor when the HConnection instance is externally managed.
+ * constructor when the Connection instance is externally managed.
* @param tableName Name of the table.
* @param connection HConnection to be used.
* @throws IOException if a remote or network exception occurs
* @deprecated Do not use.
*/
@Deprecated
- public HTable(TableName tableName, HConnection connection) throws IOException {
+ public HTable(TableName tableName, Connection connection) throws IOException {
this.tableName = tableName;
this.cleanupPoolOnClose = true;
this.cleanupConnectionOnClose = false;
@@ -259,7 +268,10 @@ public class HTable implements HTableInterface, RegionLocator {
* @param tableName Name of the table.
* @param pool ExecutorService to be used.
* @throws IOException if a remote or network exception occurs
+ * @deprecated Constructing HTable objects manually has been deprecated. Please use
+ * {@link Connection} to instantiate a {@link Table} instead.
*/
+ @Deprecated
public HTable(Configuration conf, final byte[] tableName, final ExecutorService pool)
throws IOException {
this(conf, TableName.valueOf(tableName), pool);
@@ -276,7 +288,10 @@ public class HTable implements HTableInterface, RegionLocator {
* @param tableName Name of the table.
* @param pool ExecutorService to be used.
* @throws IOException if a remote or network exception occurs
+ * @deprecated Constructing HTable objects manually has been deprecated. Please use
+ * {@link Connection} to instantiate a {@link Table} instead.
*/
+ @Deprecated
public HTable(Configuration conf, final TableName tableName, final ExecutorService pool)
throws IOException {
this.connection = ConnectionManager.getConnectionInternal(conf);
diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/client/ResultScanner.java hbase-client/src/main/java/org/apache/hadoop/hbase/client/ResultScanner.java
index aad8403..95c0ccf 100644
--- hbase-client/src/main/java/org/apache/hadoop/hbase/client/ResultScanner.java
+++ hbase-client/src/main/java/org/apache/hadoop/hbase/client/ResultScanner.java
@@ -26,7 +26,7 @@ import java.io.IOException;
/**
* Interface for client-side scanning.
- * Go to {@link HTable} to obtain instances.
+ * Go to {@link Table} to obtain instances.
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
@@ -50,5 +50,6 @@ public interface ResultScanner extends Closeable, Iterable<Result> {
/**
* Closes the scanner and releases any resources it has allocated
*/
+ @Override
void close();
}
diff --git hbase-it/src/test/java/org/apache/hadoop/hbase/DistributedHBaseCluster.java hbase-it/src/test/java/org/apache/hadoop/hbase/DistributedHBaseCluster.java
index f1f5f93..9ae00f9 100644
--- hbase-it/src/test/java/org/apache/hadoop/hbase/DistributedHBaseCluster.java
+++ hbase-it/src/test/java/org/apache/hadoop/hbase/DistributedHBaseCluster.java
@@ -25,7 +25,6 @@ import java.util.List;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.ClusterManager.ServiceType;
-import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HConnection;
import org.apache.hadoop.hbase.client.HConnectionManager;
@@ -46,7 +45,7 @@ import com.google.common.collect.Sets;
@InterfaceAudience.Private
public class DistributedHBaseCluster extends HBaseCluster {
- private Admin admin;
+ private HBaseAdmin admin;
private ClusterManager clusterManager;
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionPlacementMaintainer.java hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionPlacementMaintainer.java
index ef205be..c2d1e72 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionPlacementMaintainer.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionPlacementMaintainer.java
@@ -93,7 +93,7 @@ public class RegionPlacementMaintainer {
private Configuration conf;
private final boolean enforceLocality;
private final boolean enforceMinAssignmentMove;
- private Admin admin;
+ private HBaseAdmin admin;
private RackManager rackManager;
private Set<TableName> targetTableSet;
@@ -128,7 +128,7 @@ public class RegionPlacementMaintainer {
* @return the cached HBaseAdmin
* @throws IOException
*/
- private Admin getHBaseAdmin() throws IOException {
+ private HBaseAdmin getHBaseAdmin() throws IOException {
if (this.admin == null) {
this.admin = new HBaseAdmin(this.conf);
}
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
index 017153a..757a53a 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
@@ -68,6 +68,7 @@ import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.MasterNotRunningException;
+import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.RegionLocations;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
@@ -186,7 +187,7 @@ public class HBaseFsck extends Configured {
private static final Log LOG = LogFactory.getLog(HBaseFsck.class.getName());
private ClusterStatus status;
private HConnection connection;
- private Admin admin;
+ private HBaseAdmin admin;
private Table meta;
// threads to do ||izable tasks: retrieve data from regionservers, handle overlapping regions
protected ExecutorService executor;
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsckRepair.java hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsckRepair.java
index 5566e25..53a71b7 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsckRepair.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsckRepair.java
@@ -36,6 +36,7 @@ import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.ZooKeeperConnectionException;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HConnection;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
@@ -63,7 +64,7 @@ public class HBaseFsckRepair {
* @param region Region to undeploy
* @param servers list of Servers to undeploy from
*/
- public static void fixMultiAssignment(Admin admin, HRegionInfo region,
+ public static void fixMultiAssignment(HBaseAdmin admin, HRegionInfo region,
List<ServerName> servers)
throws IOException, KeeperException, InterruptedException {
HRegionInfo actualRegion = new HRegionInfo(region);
@@ -148,7 +149,7 @@ public class HBaseFsckRepair {
* (default 120s) to close the region. This bypasses the active hmaster.
*/
@SuppressWarnings("deprecation")
- public static void closeRegionSilentlyAndWait(Admin admin,
+ public static void closeRegionSilentlyAndWait(HBaseAdmin admin,
ServerName server, HRegionInfo region) throws IOException, InterruptedException {
HConnection connection = admin.getConnection();
AdminService.BlockingInterface rs = connection.getAdmin(server);
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
index 45bc524..e58ff48 100644
--- hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
@@ -2561,7 +2561,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
* @return An Admin instance.
* @throws IOException
*/
- public synchronized Admin getHBaseAdmin()
+ public synchronized HBaseAdmin getHBaseAdmin()
throws IOException {
if (hbaseAdmin == null){
hbaseAdmin = new HBaseAdminForTests(getConfiguration());
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/TestRegionRebalancing.java hbase-server/src/test/java/org/apache/hadoop/hbase/TestRegionRebalancing.java
index d46d73b..2679c00 100644
--- hbase-server/src/test/java/org/apache/hadoop/hbase/TestRegionRebalancing.java
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/TestRegionRebalancing.java
@@ -98,7 +98,7 @@ public class TestRegionRebalancing {
@Test (timeout=300000)
public void testRebalanceOnRegionServerNumberChange()
throws IOException, InterruptedException {
- Admin admin = new HBaseAdmin(UTIL.getConfiguration());
+ HBaseAdmin admin = new HBaseAdmin(UTIL.getConfiguration());
admin.createTable(this.desc, Arrays.copyOfRange(HBaseTestingUtility.KEYS,
1, HBaseTestingUtility.KEYS.length));
this.table = new HTable(UTIL.getConfiguration(), this.desc.getTableName());
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientTimeouts.java hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientTimeouts.java
index abb1ce1..de13b84 100644
--- hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientTimeouts.java
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientTimeouts.java
@@ -88,7 +88,7 @@ public class TestClientTimeouts {
// Ensure the HBaseAdmin uses a new connection by changing Configuration.
Configuration conf = HBaseConfiguration.create(TEST_UTIL.getConfiguration());
conf.set(HConstants.HBASE_CLIENT_INSTANCE_ID, String.valueOf(-1));
- Admin admin = null;
+ HBaseAdmin admin = null;
try {
admin = new HBaseAdmin(conf);
HConnection connection = admin.getConnection();
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManagerOnCluster.java hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManagerOnCluster.java
index 4acece3..17b4afd 100644
--- hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManagerOnCluster.java
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManagerOnCluster.java
@@ -49,6 +49,7 @@ import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.UnknownRegionException;
import org.apache.hadoop.hbase.Waiter;
import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
@@ -84,7 +85,7 @@ public class TestAssignmentManagerOnCluster {
private final static byte[] FAMILY = Bytes.toBytes("FAMILY");
private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
final static Configuration conf = TEST_UTIL.getConfiguration();
- private static Admin admin;
+ private static HBaseAdmin admin;
@BeforeClass
public static void setUpBeforeClass() throws Exception {
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterOperationsForRegionReplicas.java hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterOperationsForRegionReplicas.java
index 9c89e2b..8f894c8 100644
--- hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterOperationsForRegionReplicas.java
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterOperationsForRegionReplicas.java
@@ -63,7 +63,7 @@ import org.junit.experimental.categories.Category;
public class TestMasterOperationsForRegionReplicas {
final static Log LOG = LogFactory.getLog(TestRegionPlacement.class);
private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
- private static Admin admin;
+ private static HBaseAdmin admin;
private static int numSlaves = 2;
private static Configuration conf;
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsck.java hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsck.java
index c046b22..3dc2868 100644
--- hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsck.java
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsck.java
@@ -269,7 +269,7 @@ public class TestHBaseFsck {
* This method is used to undeploy a region -- close it and attempt to
* remove its state from the Master.
*/
- private void undeployRegion(Admin admin, ServerName sn,
+ private void undeployRegion(HBaseAdmin admin, ServerName sn,
HRegionInfo hri) throws IOException, InterruptedException {
try {
HBaseFsckRepair.closeRegionSilentlyAndWait(admin, sn, hri);
@@ -417,7 +417,7 @@ public class TestHBaseFsck {
* @throws IOException
*/
void deleteTable(TableName tablename) throws IOException {
- Admin admin = new HBaseAdmin(conf);
+ HBaseAdmin admin = new HBaseAdmin(conf);
admin.getConnection().clearRegionCache();
if (admin.isTableEnabled(tablename)) {
admin.disableTableAsync(tablename);
@@ -679,7 +679,7 @@ public class TestHBaseFsck {
/**
* Get region info from local cluster.
*/
- Map> getDeployedHRIs(final Admin admin) throws IOException {
+ Map> getDeployedHRIs(final HBaseAdmin admin) throws IOException {
ClusterStatus status = admin.getClusterStatus();
Collection<ServerName> regionServers = status.getServers();
Map> mm =
@@ -738,7 +738,7 @@ public class TestHBaseFsck {
// different regions with the same start/endkeys since it doesn't
// differentiate on ts/regionId! We actually need to recheck
// deployments!
- Admin admin = TEST_UTIL.getHBaseAdmin();
+ HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
while (findDeployedHSI(getDeployedHRIs(admin), hriDupe) == null) {
Thread.sleep(250);
}
@@ -896,7 +896,7 @@ public class TestHBaseFsck {
}
}
- Admin admin = TEST_UTIL.getHBaseAdmin();
+ HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
HBaseFsckRepair.closeRegionSilentlyAndWait(admin,
cluster.getRegionServer(k).getServerName(), hbi.getHdfsHRI());
admin.offline(regionName);
@@ -1604,7 +1604,7 @@ public class TestHBaseFsck {
HRegionInfo hri = location.getRegionInfo();
// do a regular split
- Admin admin = TEST_UTIL.getHBaseAdmin();
+ HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
byte[] regionName = location.getRegionInfo().getRegionName();
admin.splitRegion(location.getRegionInfo().getRegionName(), Bytes.toBytes("BM"));
TestEndToEndSplitTransaction.blockUntilRegionSplit(
@@ -2378,7 +2378,7 @@ public class TestHBaseFsck {
if (unassign) {
LOG.info("Undeploying meta region " + hri + " from server " + hsa);
HConnection unmanagedConnection = HConnectionManager.createConnection(conf);
- Admin admin = unmanagedConnection.getAdmin();
+ HBaseAdmin admin = (HBaseAdmin) unmanagedConnection.getAdmin();
try {
undeployRegion(admin, hsa, hri);
} finally {