diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java
index 7c7fc3e..7fb9bc2 100644
--- hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java
+++ hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java
@@ -247,7 +247,7 @@ class AsyncProcess {
RpcRetryingCallerFactory rpcCaller, boolean useGlobalErrors,
RpcControllerFactory rpcFactory) {
if (hc == null) {
- throw new IllegalArgumentException("HConnection cannot be null.");
+ throw new IllegalArgumentException("Connection cannot be null.");
}
this.connection = hc;
diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterConnection.java hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterConnection.java
index 99071fa..cf5de47 100644
--- hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterConnection.java
+++ hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterConnection.java
@@ -34,7 +34,7 @@ import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.AdminService;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ClientService;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MasterService;
-/** Internal methods on HConnection that should not be used by user code. */
+/** Internal methods on Connection that should not be used by user code. */
@InterfaceAudience.Private
// NOTE: Although this class is public, this class is meant to be used directly from internal
// classes and unit tests only.
@@ -262,13 +262,13 @@ public interface ClusterConnection extends HConnection {
/**
* @param serverName
* @return true if the server is known as dead, false otherwise.
- * @deprecated internal method, do not use thru HConnection */
+ * @deprecated internal method, do not use thru Connection */
@Override
@Deprecated
boolean isDeadServer(ServerName serverName);
/**
- * @return Nonce generator for this HConnection; may be null if disabled in configuration.
+ * @return Nonce generator for this Connection; may be null if disabled in configuration.
*/
@Override
public NonceGenerator getNonceGenerator();
diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/client/Connection.java hbase-client/src/main/java/org/apache/hadoop/hbase/client/Connection.java
index a3f6fe6..2a72d6a 100644
--- hbase-client/src/main/java/org/apache/hadoop/hbase/client/Connection.java
+++ hbase-client/src/main/java/org/apache/hadoop/hbase/client/Connection.java
@@ -46,7 +46,7 @@ import org.apache.hadoop.hbase.classification.InterfaceStability;
* thread will obtain its own Table instance. Caching or pooling of {@link Table} and {@link Admin}
* is not recommended.
*
- * <p>This class replaces {@link HConnection}, which is now deprecated.
+ * <p>This class replaces HConnection from the pre-1.0 API.
* @see ConnectionFactory
* @since 0.99.0
*/
@@ -54,6 +54,13 @@ import org.apache.hadoop.hbase.classification.InterfaceStability;
@InterfaceStability.Evolving
public interface Connection extends Abortable, Closeable {
+ /**
+ * Key for configuration in Configuration whose value is the class to use when making a new
+ * Connection instance.
+ */
+ public static final String HBASE_CLIENT_CONNECTION_IMPL = "hbase.client.connection.impl";
+
+
/*
* Implementation notes:
* - Only allow new style of interfaces:
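
A minimal, hedged sketch of how client code would pick up the constant added above, now resolved through Connection and ConnectionFactory rather than the deprecated HConnection; the com.example.MyConnectionImpl class name is a hypothetical stand-in for any Connection implementation:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class CustomConnectionExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Name any class implementing Connection; this one is hypothetical.
    conf.set(Connection.HBASE_CLIENT_CONNECTION_IMPL, "com.example.MyConnectionImpl");
    try (Connection connection = ConnectionFactory.createConnection(conf)) {
      System.out.println("connection closed? " + connection.isClosed());
    }
  }
}
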
diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionFactory.java hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionFactory.java
index 3e8ca31..61ba947 100644
--- hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionFactory.java
+++ hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionFactory.java
@@ -214,7 +214,7 @@ public class ConnectionFactory {
user = provider.getCurrent();
}
- String className = conf.get(HConnection.HBASE_CLIENT_CONNECTION_IMPL,
+ String className = conf.get(Connection.HBASE_CLIENT_CONNECTION_IMPL,
ConnectionImplementation.class.getName());
Class<?> clazz;
try {
diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
index f7d4658..0cb2648 100644
--- hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
+++ hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
@@ -125,7 +125,7 @@ class ConnectionImplementation implements ClusterConnection, Closeable {
* Once it's set under nonceGeneratorCreateLock, it is never unset or changed.
*/
private static volatile NonceGenerator nonceGenerator = null;
- /** The nonce generator lock. Only taken when creating HConnection, which gets a private copy. */
+ /** The nonce generator lock. Only taken when creating Connection, which gets a private copy. */
private static Object nonceGeneratorCreateLock = new Object();
private final AsyncProcess asyncProcess;
@@ -1039,11 +1039,11 @@ class ConnectionImplementation implements ClusterConnection, Closeable {
* State of the MasterService connection/setup.
*/
static class MasterServiceState {
- HConnection connection;
+ ClusterConnection connection;
MasterProtos.MasterService.BlockingInterface stub;
int userCount;
- MasterServiceState(final HConnection connection) {
+ MasterServiceState(final ClusterConnection connection) {
super();
this.connection = connection;
}
@@ -2181,7 +2181,7 @@ class ConnectionImplementation implements ClusterConnection, Closeable {
* point, which would be the case if all of its consumers close the
* connection. However, on the off chance that someone is unable to close
* the connection, perhaps because it bailed out prematurely, the method
- * below will ensure that this {@link org.apache.hadoop.hbase.client.HConnection} instance
+ * below will ensure that this {@link org.apache.hadoop.hbase.client.Connection} instance
* is cleaned up.
* Caveat: The JVM may take an unknown amount of time to call finalize on an
* unreachable object, so our hope is that every consumer cleans up after
diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java
index 82c2fc4..d375230 100644
--- hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java
+++ hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java
@@ -87,13 +87,13 @@ public final class ConnectionUtils {
}
/**
- * Changes the configuration to set the number of retries needed when using HConnection
+ * Changes the configuration to set the number of retries needed when using Connection
* internally, e.g. for updating catalog tables, etc.
* Call this method before we create any Connections.
* @param c The Configuration instance to set the retries into.
* @param log Used to log what we set in here.
*/
- public static void setServerSideHConnectionRetriesConfig(
+ public static void setServerSideConnectionRetriesConfig(
final Configuration c, final String sn, final Log log) {
// TODO: Fix this. Not all connections from server side should have 10 times the retries.
int hcRetries = c.getInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER,
@@ -103,7 +103,7 @@ public final class ConnectionUtils {
int serversideMultiplier = c.getInt("hbase.client.serverside.retries.multiplier", 10);
int retries = hcRetries * serversideMultiplier;
c.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, retries);
- log.info(sn + " server-side HConnection retries=" + retries);
+ log.info(sn + " server-side Connection retries=" + retries);
}
/**
@@ -145,7 +145,7 @@ public final class ConnectionUtils {
*/
@VisibleForTesting
public static void setupMasterlessConnection(Configuration conf) {
- conf.set(HConnection.HBASE_CLIENT_CONNECTION_IMPL,
+ conf.set(Connection.HBASE_CLIENT_CONNECTION_IMPL,
MasterlessConnection.class.getName());
}
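
A hedged sketch of the renamed retry-bump helper in use; the server name string is illustrative, and the 10x multiplier is the default read in the hunk above:

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.client.ConnectionUtils;

public class ServerSideRetriesExample {
  private static final Log LOG = LogFactory.getLog(ServerSideRetriesExample.class);

  public static void main(String[] args) {
    Configuration c = HBaseConfiguration.create();
    int before = c.getInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, -1);
    // Multiplies the configured client retries by
    // hbase.client.serverside.retries.multiplier before any Connection is made.
    ConnectionUtils.setServerSideConnectionRetriesConfig(c, "regionserver/example", LOG);
    int after = c.getInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, -1);
    LOG.info("retries bumped from " + before + " to " + after);
  }
}
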
diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
index 82bfee7..6bc8e6c 100644
--- hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
+++ hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
@@ -370,7 +370,7 @@ public class HBaseAdmin implements Admin {
/** @return HConnection used by this object. */
@Override
- public HConnection getConnection() {
+ public ClusterConnection getConnection() {
return connection;
}
@@ -539,7 +539,7 @@ public class HBaseAdmin implements Admin {
}
static HTableDescriptor getTableDescriptor(final TableName tableName,
- HConnection connection, RpcRetryingCallerFactory rpcCallerFactory,
+ ClusterConnection connection, RpcRetryingCallerFactory rpcCallerFactory,
int operationTimeout) throws TableNotFoundException, IOException {
if (tableName == null) return null;
@@ -2668,7 +2668,7 @@ public class HBaseAdmin implements Admin {
* @param regionName Name of a region.
* @return a pair of HRegionInfo and ServerName if regionName is
* a verified region name (we call {@link
- * MetaTableAccessor#getRegion(HConnection, byte[])}
+ * MetaTableAccessor#getRegion(Connection, byte[])}
* else null.
* Throw IllegalArgumentException if regionName is null.
* @throws IOException
diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnectable.java hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnectable.java
deleted file mode 100644
index f5f841d..0000000
--- hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnectable.java
+++ /dev/null
@@ -1,50 +0,0 @@
-/**
- * Copyright The Apache Software Foundation
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.client;
-
-import java.io.IOException;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-
-/**
- * This class makes it convenient for one to execute a command in the context
- * of a {@link HConnection} instance based on the given {@link Configuration}.
- *
- *
- * If you find yourself wanting to use a {@link HConnection} for a relatively
- * short duration of time, and do not want to deal with the hassle of creating
- * and cleaning up that resource, then you should consider using this
- * convenience class.
- *
- * @param <T>
- * the return type of the {@link HConnectable#connect(HConnection)}
- * method.
- */
-@InterfaceAudience.Private
-public abstract class HConnectable<T> {
- protected Configuration conf;
-
- protected HConnectable(Configuration conf) {
- this.conf = conf;
- }
-
- public abstract T connect(HConnection connection) throws IOException;
-}
diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnection.java hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnection.java
index cc5e9fa..e776526 100644
--- hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnection.java
+++ hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnection.java
@@ -57,7 +57,9 @@ public interface HConnection extends Connection {
/**
* Key for configuration in Configuration whose value is the class we implement making a
* new HConnection instance.
+ * @deprecated use {@link Connection#HBASE_CLIENT_CONNECTION_IMPL}
*/
+ @Deprecated
public static final String HBASE_CLIENT_CONNECTION_IMPL = "hbase.client.connection.impl";
/**
diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
index 51a95e4..b7e46a6 100644
--- hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
+++ hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
@@ -145,7 +145,7 @@ public class HTable implements HTableInterface {
* Used by HBase internally. DO NOT USE. See {@link ConnectionFactory} class comment for how to
* get a {@link Table} instance (use {@link Table} instead of {@link HTable}).
* @param tableName Name of the table.
- * @param connection HConnection to be used.
+ * @param connection ClusterConnection to be used.
* @param pool ExecutorService to be used.
* @throws IOException if a remote or network exception occurs
*/
@@ -248,7 +248,8 @@ public class HTable implements HTableInterface {
* INTERNAL Used by unit tests and tools to do low-level
* manipulations.
* @return An HConnection instance.
- * @deprecated This method will be changed from public to package protected.
+ * @deprecated Deprecated in 1.0, but still used by the public bulk load API, which cannot be
+ * removed until 3.0.
*/
// TODO(tsuna): Remove this. Unit tests shouldn't require public helpers.
@Deprecated
@@ -262,7 +263,8 @@ public class HTable implements HTableInterface {
*/
@Override
public HTableDescriptor getTableDescriptor() throws IOException {
- HTableDescriptor htd = HBaseAdmin.getTableDescriptor(tableName, connection, rpcCallerFactory, operationTimeout);
+ HTableDescriptor htd = HBaseAdmin.getTableDescriptor(tableName, connection,
+ rpcCallerFactory, operationTimeout);
if (htd != null) {
return new UnmodifyableHTableDescriptor(htd);
}
diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTableInterface.java hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTableInterface.java
index 4cd81e7..5d52397 100644
--- hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTableInterface.java
+++ hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTableInterface.java
@@ -25,7 +25,6 @@ import org.apache.hadoop.hbase.classification.InterfaceStability;
/**
* Used to communicate with a single HBase table.
- * Obtain an instance from an {@link HConnection}.
*
* @since 0.21.0
* @deprecated use {@link org.apache.hadoop.hbase.client.Table} instead
diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterCallable.java hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterCallable.java
index a636533..47a0e1e 100644
--- hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterCallable.java
+++ hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterCallable.java
@@ -26,10 +26,10 @@ import java.io.IOException;
* @param <V> return type
*/
abstract class MasterCallable<V> implements RetryingCallable<V>, Closeable {
- protected HConnection connection;
+ protected ClusterConnection connection;
protected MasterKeepAliveConnection master;
- public MasterCallable(final HConnection connection) {
+ public MasterCallable(final ClusterConnection connection) {
this.connection = connection;
}
diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiServerCallable.java hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiServerCallable.java
index 72ae829..a5603d2 100644
--- hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiServerCallable.java
+++ hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiServerCallable.java
@@ -151,7 +151,7 @@ class MultiServerCallable<R> extends RegionServerCallable<MultiResponse> impleme
private boolean isCellBlock() {
// This is not exact -- the configuration could have changed on us after connection was set up
// but it will do for now.
- HConnection connection = getConnection();
+ ClusterConnection connection = getConnection();
if (connection == null) return true; // Default is to do cellblocks.
Configuration configuration = connection.getConfiguration();
if (configuration == null) return true;
diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionAdminServiceCallable.java hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionAdminServiceCallable.java
index 189dbaa..3c51c43 100644
--- hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionAdminServiceCallable.java
+++ hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionAdminServiceCallable.java
@@ -136,9 +136,9 @@ public abstract class RegionAdminServiceCallable<T> implements RetryingCallable<
}
/**
- * @return {@link HConnection} instance used by this Callable.
+ * @return {@link ClusterConnection} instance used by this Callable.
*/
- HConnection getConnection() {
+ ClusterConnection getConnection() {
return this.connection;
}
diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionServerCallable.java hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionServerCallable.java
index 9989d56..e06c9db 100644
--- hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionServerCallable.java
+++ hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionServerCallable.java
@@ -48,7 +48,7 @@ import org.apache.hadoop.hbase.util.Bytes;
public abstract class RegionServerCallable<T> implements RetryingCallable<T> {
// Public because used outside of this package over in ipc.
private static final Log LOG = LogFactory.getLog(RegionServerCallable.class);
- protected final Connection connection;
+ protected final ClusterConnection connection;
protected final TableName tableName;
protected final byte[] row;
protected HRegionLocation location;
@@ -61,7 +61,7 @@ public abstract class RegionServerCallable<T> implements RetryingCallable<T> {
* @param tableName Table name to which row belongs.
* @param row The row we want in tableName.
*/
- public RegionServerCallable(Connection connection, TableName tableName, byte [] row) {
+ public RegionServerCallable(ClusterConnection connection, TableName tableName, byte [] row) {
this.connection = connection;
this.tableName = tableName;
this.row = row;
@@ -86,10 +86,10 @@ public abstract class RegionServerCallable<T> implements RetryingCallable<T> {
}
/**
- * @return {@link HConnection} instance used by this Callable.
+ * @return {@link Connection} instance used by this Callable.
*/
- HConnection getConnection() {
- return (HConnection) this.connection;
+ ClusterConnection getConnection() {
+ return this.connection;
}
protected ClientService.BlockingInterface getStub() {
diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZooKeeperKeepAliveConnection.java hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZooKeeperKeepAliveConnection.java
index 04d4b41..78315c5 100644
--- hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZooKeeperKeepAliveConnection.java
+++ hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZooKeeperKeepAliveConnection.java
@@ -28,9 +28,9 @@ import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
/**
* We inherit the current ZooKeeperWatcher implementation to change the semantic
* of the close: the new close won't immediately close the connection but
- * will have a keep alive. See {@link HConnection}.
+ * will have a keep alive. See {@link ClusterConnection}.
* This allows to make it available with a consistent interface. The whole
- * ZooKeeperWatcher use in HConnection will be then changed to remove the
+ * ZooKeeperWatcher use in ClusterConnection will be then changed to remove the
* watcher part.
*
* This class is intended to be used internally by HBase classes; but not by
diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/MasterCoprocessorRpcChannel.java hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/MasterCoprocessorRpcChannel.java
index 98a74ef..4b30471 100644
--- hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/MasterCoprocessorRpcChannel.java
+++ hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/MasterCoprocessorRpcChannel.java
@@ -24,7 +24,7 @@ import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.client.HConnection;
+import org.apache.hadoop.hbase.client.ClusterConnection;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceResponse;
@@ -45,9 +45,9 @@ import com.google.protobuf.Message;
public class MasterCoprocessorRpcChannel extends CoprocessorRpcChannel{
private static final Log LOG = LogFactory.getLog(MasterCoprocessorRpcChannel.class);
- private final HConnection connection;
+ private final ClusterConnection connection;
- public MasterCoprocessorRpcChannel(HConnection conn) {
+ public MasterCoprocessorRpcChannel(ClusterConnection conn) {
this.connection = conn;
}
diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RegionCoprocessorRpcChannel.java hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RegionCoprocessorRpcChannel.java
index 3fcfceb..3ab2b80 100644
--- hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RegionCoprocessorRpcChannel.java
+++ hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RegionCoprocessorRpcChannel.java
@@ -25,7 +25,7 @@ import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.client.HConnection;
+import org.apache.hadoop.hbase.client.ClusterConnection;
import org.apache.hadoop.hbase.client.RegionServerCallable;
import org.apache.hadoop.hbase.client.RpcRetryingCallerFactory;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
@@ -49,7 +49,7 @@ import com.google.protobuf.Message;
public class RegionCoprocessorRpcChannel extends CoprocessorRpcChannel{
private static final Log LOG = LogFactory.getLog(RegionCoprocessorRpcChannel.class);
- private final HConnection connection;
+ private final ClusterConnection connection;
private final TableName table;
private final byte[] row;
private byte[] lastRegion;
@@ -57,7 +57,7 @@ public class RegionCoprocessorRpcChannel extends CoprocessorRpcChannel{
private RpcRetryingCallerFactory rpcFactory;
- public RegionCoprocessorRpcChannel(HConnection conn, TableName table, byte[] row) {
+ public RegionCoprocessorRpcChannel(ClusterConnection conn, TableName table, byte[] row) {
this.connection = conn;
this.table = table;
this.row = row;
diff --git hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/MetaTableLocator.java hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/MetaTableLocator.java
index 3cffd04..a2b4794 100644
--- hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/MetaTableLocator.java
+++ hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/MetaTableLocator.java
@@ -36,7 +36,7 @@ import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.NotAllMetaRegionsOnlineException;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.client.HConnection;
+import org.apache.hadoop.hbase.client.ClusterConnection;
import org.apache.hadoop.hbase.client.RegionReplicaUtil;
import org.apache.hadoop.hbase.client.RetriesExhaustedException;
import org.apache.hadoop.hbase.exceptions.DeserializationException;
@@ -257,7 +257,7 @@ public class MetaTableLocator {
* @throws java.io.IOException
* @throws InterruptedException
*/
- public boolean verifyMetaRegionLocation(HConnection hConnection,
+ public boolean verifyMetaRegionLocation(ClusterConnection hConnection,
ZooKeeperWatcher zkw, final long timeout)
throws InterruptedException, IOException {
return verifyMetaRegionLocation(hConnection, zkw, timeout, HRegionInfo.DEFAULT_REPLICA_ID);
@@ -273,7 +273,7 @@ public class MetaTableLocator {
* @throws InterruptedException
* @throws IOException
*/
- public boolean verifyMetaRegionLocation(HConnection hConnection,
+ public boolean verifyMetaRegionLocation(ClusterConnection hConnection,
ZooKeeperWatcher zkw, final long timeout, int replicaId)
throws InterruptedException, IOException {
AdminProtos.AdminService.BlockingInterface service = null;
@@ -355,7 +355,7 @@ public class MetaTableLocator {
* @throws NotAllMetaRegionsOnlineException if timed out waiting
* @throws IOException
*/
- private AdminService.BlockingInterface getMetaServerConnection(HConnection hConnection,
+ private AdminService.BlockingInterface getMetaServerConnection(ClusterConnection hConnection,
ZooKeeperWatcher zkw, long timeout, int replicaId)
throws InterruptedException, NotAllMetaRegionsOnlineException, IOException {
return getCachedConnection(hConnection, waitMetaRegionLocation(zkw, replicaId, timeout));
@@ -369,7 +369,7 @@ public class MetaTableLocator {
* @throws IOException
*/
@SuppressWarnings("deprecation")
- private static AdminService.BlockingInterface getCachedConnection(HConnection hConnection,
+ private static AdminService.BlockingInterface getCachedConnection(ClusterConnection hConnection,
ServerName sn)
throws IOException {
if (sn == null) {
@@ -482,8 +482,8 @@ public class MetaTableLocator {
try {
int prefixLen = ProtobufUtil.lengthOfPBMagic();
ZooKeeperProtos.MetaRegionServer rl =
- ZooKeeperProtos.MetaRegionServer.PARSER.parseFrom
- (data, prefixLen, data.length - prefixLen);
+ ZooKeeperProtos.MetaRegionServer.PARSER.parseFrom(data, prefixLen,
+ data.length - prefixLen);
if (rl.hasState()) {
state = RegionState.State.convert(rl.getState());
}
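
For illustration, a hedged sketch of the renamed meta-verification entry point; the wrapper method and ten-second timeout are assumptions, not part of this patch:

import java.io.IOException;
import org.apache.hadoop.hbase.client.ClusterConnection;
import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;

public class MetaCheckExample {
  // True when hbase:meta is deployed where ZooKeeper says it is.
  static boolean metaIsUp(ClusterConnection conn, ZooKeeperWatcher zkw)
      throws IOException, InterruptedException {
    return new MetaTableLocator().verifyMetaRegionLocation(conn, zkw, 10000);
  }
}
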
diff --git hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncProcess.java hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncProcess.java
index 067f2ad..99b379c 100644
--- hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncProcess.java
+++ hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncProcess.java
@@ -415,7 +415,7 @@ public class TestAsyncProcess {
@Test
public void testSubmit() throws Exception {
- ClusterConnection hc = createHConnection();
+ ClusterConnection hc = createConnection();
AsyncProcess ap = new MyAsyncProcess(hc, conf);
List<Put> puts = new ArrayList<Put>();
@@ -427,7 +427,7 @@ public class TestAsyncProcess {
@Test
public void testSubmitWithCB() throws Exception {
- ClusterConnection hc = createHConnection();
+ ClusterConnection hc = createConnection();
final AtomicInteger updateCalled = new AtomicInteger(0);
Batch.Callback<Object> cb = new Batch.Callback<Object>() {
@Override
@@ -448,7 +448,7 @@ public class TestAsyncProcess {
@Test
public void testSubmitBusyRegion() throws Exception {
- ClusterConnection hc = createHConnection();
+ ClusterConnection hc = createConnection();
AsyncProcess ap = new MyAsyncProcess(hc, conf);
List<Put> puts = new ArrayList<Put>();
@@ -466,7 +466,7 @@ public class TestAsyncProcess {
@Test
public void testSubmitBusyRegionServer() throws Exception {
- ClusterConnection hc = createHConnection();
+ ClusterConnection hc = createConnection();
AsyncProcess ap = new MyAsyncProcess(hc, conf);
ap.taskCounterPerServer.put(sn2, new AtomicInteger(ap.maxConcurrentTasksPerServer));
@@ -487,7 +487,7 @@ public class TestAsyncProcess {
@Test
public void testFail() throws Exception {
- MyAsyncProcess ap = new MyAsyncProcess(createHConnection(), conf, false);
+ MyAsyncProcess ap = new MyAsyncProcess(createConnection(), conf, false);
List<Put> puts = new ArrayList<Put>();
Put p = createPut(1, false);
@@ -513,7 +513,7 @@ public class TestAsyncProcess {
@Test
public void testSubmitTrue() throws IOException {
- final AsyncProcess ap = new MyAsyncProcess(createHConnection(), conf, false);
+ final AsyncProcess ap = new MyAsyncProcess(createConnection(), conf, false);
ap.tasksInProgress.incrementAndGet();
final AtomicInteger ai = new AtomicInteger(1);
ap.taskCounterPerRegion.put(hri1.getRegionName(), ai);
@@ -552,7 +552,7 @@ public class TestAsyncProcess {
@Test
public void testFailAndSuccess() throws Exception {
- MyAsyncProcess ap = new MyAsyncProcess(createHConnection(), conf, false);
+ MyAsyncProcess ap = new MyAsyncProcess(createConnection(), conf, false);
List<Put> puts = new ArrayList<Put>();
puts.add(createPut(1, false));
@@ -579,7 +579,7 @@ public class TestAsyncProcess {
@Test
public void testFlush() throws Exception {
- MyAsyncProcess ap = new MyAsyncProcess(createHConnection(), conf, false);
+ MyAsyncProcess ap = new MyAsyncProcess(createConnection(), conf, false);
List<Put> puts = new ArrayList<Put>();
puts.add(createPut(1, false));
@@ -596,7 +596,7 @@ public class TestAsyncProcess {
@Test
public void testMaxTask() throws Exception {
- final AsyncProcess ap = new MyAsyncProcess(createHConnection(), conf, false);
+ final AsyncProcess ap = new MyAsyncProcess(createConnection(), conf, false);
for (int i = 0; i < 1000; i++) {
ap.incTaskCounters(Arrays.asList("dummy".getBytes()), sn);
@@ -644,8 +644,8 @@ public class TestAsyncProcess {
Assert.assertTrue(start + 100L + sleepTime > end);
}
- private static ClusterConnection createHConnection() throws IOException {
- ClusterConnection hc = createHConnectionCommon();
+ private static ClusterConnection createConnection() throws IOException {
+ ClusterConnection hc = createConnectionCommon();
setMockLocation(hc, DUMMY_BYTES_1, new RegionLocations(loc1));
setMockLocation(hc, DUMMY_BYTES_2, new RegionLocations(loc2));
setMockLocation(hc, DUMMY_BYTES_3, new RegionLocations(loc3));
@@ -653,8 +653,8 @@ public class TestAsyncProcess {
return hc;
}
- private static ClusterConnection createHConnectionWithReplicas() throws IOException {
- ClusterConnection hc = createHConnectionCommon();
+ private static ClusterConnection createConnectionWithReplicas() throws IOException {
+ ClusterConnection hc = createConnectionCommon();
setMockLocation(hc, DUMMY_BYTES_1, hrls1);
setMockLocation(hc, DUMMY_BYTES_2, hrls2);
setMockLocation(hc, DUMMY_BYTES_3, hrls3);
@@ -667,7 +667,7 @@ public class TestAsyncProcess {
Mockito.anyBoolean(), Mockito.anyBoolean(), Mockito.anyInt())).thenReturn(result);
}
- private static ClusterConnection createHConnectionCommon() {
+ private static ClusterConnection createConnectionCommon() {
ClusterConnection hc = Mockito.mock(ClusterConnection.class);
NonceGenerator ng = Mockito.mock(NonceGenerator.class);
Mockito.when(ng.getNonceGroup()).thenReturn(HConstants.NO_NONCE);
@@ -679,7 +679,7 @@ public class TestAsyncProcess {
@Test
public void testHTablePutSuccess() throws Exception {
BufferedMutatorImpl ht = Mockito.mock(BufferedMutatorImpl.class);
- ht.ap = new MyAsyncProcess(createHConnection(), conf, true);
+ ht.ap = new MyAsyncProcess(createConnection(), conf, true);
Put put = createPut(1, true);
@@ -689,7 +689,7 @@ public class TestAsyncProcess {
}
private void doHTableFailedPut(boolean bufferOn) throws Exception {
- ClusterConnection conn = createHConnection();
+ ClusterConnection conn = createConnection();
HTable ht = new HTable(conn, new BufferedMutatorParams(DUMMY_TABLE));
MyAsyncProcess ap = new MyAsyncProcess(conn, conf, true);
ht.mutator.ap = ap;
@@ -737,7 +737,7 @@ public class TestAsyncProcess {
@Test
public void testHTableFailedPutAndNewPut() throws Exception {
- ClusterConnection conn = createHConnection();
+ ClusterConnection conn = createConnection();
BufferedMutatorImpl mutator = new BufferedMutatorImpl(conn, null, null,
new BufferedMutatorParams(DUMMY_TABLE).writeBufferSize(0));
MyAsyncProcess ap = new MyAsyncProcess(conn, conf, true);
@@ -976,7 +976,7 @@ public class TestAsyncProcess {
// TODO: this is kind of timing dependent... perhaps it should detect from createCaller
// that the replica call has happened and that way control the ordering.
Configuration conf = new Configuration();
- ClusterConnection conn = createHConnectionWithReplicas();
+ ClusterConnection conn = createConnectionWithReplicas();
conf.setInt(AsyncProcess.PRIMARY_CALL_TIMEOUT_KEY, replicaAfterMs * 1000);
if (retries >= 0) {
conf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, retries);
@@ -1076,7 +1076,7 @@ public class TestAsyncProcess {
@Test
public void testUncheckedException() throws Exception {
// Test the case pool.submit throws unchecked exception
- ClusterConnection hc = createHConnection();
+ ClusterConnection hc = createConnection();
MyThreadPoolExecutor myPool =
new MyThreadPoolExecutor(1, 20, 60, TimeUnit.SECONDS,
new LinkedBlockingQueue<Runnable>(200));
diff --git hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientNoCluster.java hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientNoCluster.java
index 0a5a37f..e8135a8 100644
--- hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientNoCluster.java
+++ hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientNoCluster.java
@@ -104,7 +104,7 @@ public class TestClientNoCluster extends Configured implements Tool {
@Before
public void setUp() throws Exception {
this.conf = HBaseConfiguration.create();
- // Run my HConnection overrides. Use my little ConnectionImplementation below which
+ // Run my Connection overrides. Use my little ConnectionImplementation below which
// allows me insert mocks and also use my Registry below rather than the default zk based
// one so tests run faster and don't have zk dependency.
this.conf.set("hbase.client.registry.impl", SimpleRegistry.class.getName());
diff --git hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
index de4964c..341314a 100644
--- hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
+++ hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
@@ -784,9 +784,9 @@ public final class HConstants {
* Parameter name for unique identifier for this {@link org.apache.hadoop.conf.Configuration}
* instance. If there are two or more {@link org.apache.hadoop.conf.Configuration} instances that,
* for all intents and purposes, are the same except for their instance ids, then they will not be
- * able to share the same org.apache.hadoop.hbase.client.HConnection instance. On the other hand,
+ * able to share the same org.apache.hadoop.hbase.client.Connection instance. On the other hand,
* even if the instance ids are the same, it could result in non-shared
- * org.apache.hadoop.hbase.client.HConnection instances if some of the other connection parameters
+ * org.apache.hadoop.hbase.client.Connection instances if some of the other connection parameters
* differ.
*/
public static final String HBASE_CLIENT_INSTANCE_ID = "hbase.client.instance.id";
diff --git hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestRegionReplicaPerf.java hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestRegionReplicaPerf.java
index 3c29f4c..3db6d95 100644
--- hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestRegionReplicaPerf.java
+++ hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestRegionReplicaPerf.java
@@ -103,7 +103,7 @@ public class IntegrationTestRegionReplicaPerf extends IntegrationTestBase {
private final Admin admin;
public PerfEvalCallable(Admin admin, String argv) {
- // TODO: this API is awkward, should take HConnection, not HBaseAdmin
+ // TODO: this API is awkward, should take Connection, not HBaseAdmin
this.admin = admin;
this.argv.addAll(Arrays.asList(argv.split(" ")));
LOG.debug("Created PerformanceEvaluationCallable with args: " + argv);
diff --git hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedListWithVisibility.java hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedListWithVisibility.java
index 2a146b3..90510c4 100644
--- hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedListWithVisibility.java
+++ hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedListWithVisibility.java
@@ -41,10 +41,10 @@ import org.apache.hadoop.hbase.chaos.factories.MonkeyFactory;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.BufferedMutator;
import org.apache.hadoop.hbase.client.BufferedMutatorParams;
+import org.apache.hadoop.hbase.client.ClusterConnection;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Delete;
-import org.apache.hadoop.hbase.client.HConnection;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Scan;
@@ -452,7 +452,8 @@ public class IntegrationTestBigLinkedListWithVisibility extends IntegrationTestB
@Override
protected void handleFailure(Counters counters) throws IOException {
Configuration conf = job.getConfiguration();
- HConnection conn = (HConnection) ConnectionFactory.createConnection(conf);
+ // TODO this is cheating currently, should do another pass to convert to Connection
+ ClusterConnection conn = (ClusterConnection)ConnectionFactory.createConnection(conf);
TableName tableName = TableName.valueOf(COMMON_TABLE_NAME);
CounterGroup g = counters.getGroup("undef");
Iterator<Counter> it = g.iterator();
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/LocalHBaseCluster.java hbase-server/src/main/java/org/apache/hadoop/hbase/LocalHBaseCluster.java
index 9c292cc..5658f20 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/LocalHBaseCluster.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/LocalHBaseCluster.java
@@ -113,7 +113,8 @@ public class LocalHBaseCluster {
}
@SuppressWarnings("unchecked")
- private static Class<? extends HRegionServer> getRegionServerImplementation(final Configuration conf) {
+ private static Class<? extends HRegionServer> getRegionServerImplementation(
+ final Configuration conf) {
return (Class<? extends HRegionServer>)conf.getClass(HConstants.REGION_SERVER_IMPL,
HRegionServer.class);
}
@@ -175,7 +176,7 @@ public class LocalHBaseCluster {
Configuration config, final int index)
throws IOException {
// Create each regionserver with its own Configuration instance so each has
- // its HConnection instance rather than share (see HBASE_INSTANCES down in
+ // its Connection instance rather than share (see HBASE_INSTANCES down in
// the guts of ConnectionManager).
// Also, create separate CoordinatedStateManager instance per Server.
@@ -209,7 +210,7 @@ public class LocalHBaseCluster {
public JVMClusterUtil.MasterThread addMaster(Configuration c, final int index)
throws IOException {
// Create each master with its own Configuration instance so each has
- // its HConnection instance rather than share (see HBASE_INSTANCES down in
+ // its Connection instance rather than share (see HBASE_INSTANCES down in
// the guts of ConnectionManager.
// Also, create separate CoordinatedStateManager instance per Server.
@@ -460,7 +461,8 @@ public class LocalHBaseCluster {
* @return True if a 'local' address in hbase.master value.
*/
public static boolean isLocal(final Configuration c) {
- boolean mode = c.getBoolean(HConstants.CLUSTER_DISTRIBUTED, HConstants.DEFAULT_CLUSTER_DISTRIBUTED);
+ boolean mode = c.getBoolean(HConstants.CLUSTER_DISTRIBUTED,
+ HConstants.DEFAULT_CLUSTER_DISTRIBUTED);
return(mode == HConstants.CLUSTER_IS_LOCAL);
}
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/HFileArchiveManager.java hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/HFileArchiveManager.java
index 85b1135..2a9a626 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/HFileArchiveManager.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/HFileArchiveManager.java
@@ -24,7 +24,7 @@ import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.ZooKeeperConnectionException;
-import org.apache.hadoop.hbase.client.HConnection;
+import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.zookeeper.ZKUtil;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
@@ -44,7 +44,7 @@ class HFileArchiveManager {
private final ZooKeeperWatcher zooKeeper;
private volatile boolean stopped = false;
- public HFileArchiveManager(HConnection connection, Configuration conf)
+ public HFileArchiveManager(Connection connection, Configuration conf)
throws ZooKeeperConnectionException, IOException {
this.zooKeeper = new ZooKeeperWatcher(conf, "hfileArchiveManager-on-" + connection.toString(),
connection);
@@ -67,7 +67,8 @@ class HFileArchiveManager {
/**
* Stop retaining HFiles for the given table in the archive. HFiles will be cleaned up on the next
- * pass of the {@link HFileCleaner}, if the HFiles are retained by another cleaner.
+ * pass of the {@link org.apache.hadoop.hbase.master.cleaner.HFileCleaner}, unless the HFiles
+ * are retained by another cleaner.
* @param table name of the table for which to disable hfile retention.
* @return this for chaining.
* @throws KeeperException if we can't reach zookeeper to update the hfile cleaner.
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/client/CoprocessorClusterConnection.java hbase-server/src/main/java/org/apache/hadoop/hbase/client/CoprocessorClusterConnection.java
new file mode 100644
index 0000000..8eae851
--- /dev/null
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/client/CoprocessorClusterConnection.java
@@ -0,0 +1,106 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.client;
+
+import java.io.IOException;
+
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.CoprocessorEnvironment;
+import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
+import org.apache.hadoop.hbase.regionserver.HRegionServer;
+import org.apache.hadoop.hbase.regionserver.RegionServerServices;
+import org.apache.hadoop.hbase.security.UserProvider;
+
+/**
+ * Connection to an HTable from within a Coprocessor. We can do some nice tricks since we know we
+ * are on a regionserver, for instance skipping the full serialization/deserialization of objects
+ * when talking to the server.
+ *
+ * You should not use this class from any client - it's an internal class meant for use by the
+ * coprocessor framework.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public class CoprocessorClusterConnection extends ConnectionImplementation {
+ private static final NonceGenerator NO_NONCE_GEN = new NoNonceGenerator();
+
+ /**
+ * Create a {@link ClusterConnection} based on the environment in which we are running the
+ * coprocessor. The {@link ClusterConnection} must be externally cleaned up (we bypass the
+ * usual HTable cleanup mechanisms since we own everything).
+ * @param env environment hosting the {@link ClusterConnection}
+ * @return instance of {@link ClusterConnection}.
+ * @throws IOException if we cannot create the connection
+ */
+ public static ClusterConnection getConnectionForEnvironment(CoprocessorEnvironment env)
+ throws IOException {
+ // this bit is a little hacky - just trying to get it going for the moment
+ if (env instanceof RegionCoprocessorEnvironment) {
+ RegionCoprocessorEnvironment e = (RegionCoprocessorEnvironment) env;
+ RegionServerServices services = e.getRegionServerServices();
+ if (services instanceof HRegionServer) {
+ return new CoprocessorClusterConnection((HRegionServer) services);
+ }
+ }
+ return (ClusterConnection) ConnectionFactory.createConnection(env.getConfiguration());
+ }
+
+ private final ServerName serverName;
+ private final HRegionServer server;
+
+ /**
+ * Constructor that uses server configuration
+ * @param server
+ * @throws IOException if we cannot create the connection
+ */
+ public CoprocessorClusterConnection(HRegionServer server) throws IOException {
+ this(server.getConfiguration(), server);
+ }
+
+ /**
+ * Constructor that accepts custom configuration
+ * @param conf
+ * @param server
+ * @throws IOException if we cannot create the connection
+ */
+ public CoprocessorClusterConnection(Configuration conf, HRegionServer server) throws IOException {
+ super(conf, null, UserProvider.instantiate(conf).getCurrent());
+ this.server = server;
+ this.serverName = server.getServerName();
+ }
+
+ @Override
+ public org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ClientService.BlockingInterface
+ getClient(ServerName serverName) throws IOException {
+ // client is trying to reach off-server, so we can't do anything special
+ if (!this.serverName.equals(serverName)) {
+ return super.getClient(serverName);
+ }
+ // the client is attempting to write to the same regionserver, we can short-circuit to our
+ // local regionserver
+ return server.getRSRpcServices();
+ }
+
+ @Override
+ public NonceGenerator getNonceGenerator() {
+ return NO_NONCE_GEN; // don't use nonces for coprocessor connection
+ }
+}
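
A hedged usage sketch of the renamed class from inside a region coprocessor; the observer class, table name, and error handling are illustrative, not part of this patch:

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ClusterConnection;
import org.apache.hadoop.hbase.client.CoprocessorClusterConnection;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.coprocessor.BaseRegionObserver;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;

public class ExampleObserver extends BaseRegionObserver {
  @Override
  public void postOpen(ObserverContext<RegionCoprocessorEnvironment> ctx) {
    try {
      // Requests aimed at the hosting regionserver bypass RPC via the
      // short-circuit getClient() override above.
      ClusterConnection conn =
          CoprocessorClusterConnection.getConnectionForEnvironment(ctx.getEnvironment());
      Table table = conn.getTable(TableName.valueOf("example"));
      table.close();
    } catch (IOException e) {
      // Illustrative only; a real observer should log or surface the failure.
    }
  }
}
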
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/client/CoprocessorHConnection.java hbase-server/src/main/java/org/apache/hadoop/hbase/client/CoprocessorHConnection.java
deleted file mode 100644
index 285737d..0000000
--- hbase-server/src/main/java/org/apache/hadoop/hbase/client/CoprocessorHConnection.java
+++ /dev/null
@@ -1,106 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.client;
-
-import java.io.IOException;
-
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.classification.InterfaceStability;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.CoprocessorEnvironment;
-import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
-import org.apache.hadoop.hbase.regionserver.HRegionServer;
-import org.apache.hadoop.hbase.regionserver.RegionServerServices;
-import org.apache.hadoop.hbase.security.UserProvider;
-
-/**
- * Connection to an HTable from within a Coprocessor. We can do some nice tricks since we know we
- * are on a regionserver, for instance skipping the full serialization/deserialization of objects
- * when talking to the server.
- *
- * You should not use this class from any client - its an internal class meant for use by the
- * coprocessor framework.
- */
-@InterfaceAudience.Private
-@InterfaceStability.Evolving
-public class CoprocessorHConnection extends ConnectionImplementation {
- private static final NonceGenerator NO_NONCE_GEN = new NoNonceGenerator();
-
- /**
- * Create an {@link HConnection} based on the environment in which we are running the
- * coprocessor. The {@link HConnection} must be externally cleaned up (we bypass the usual HTable
- * cleanup mechanisms since we own everything).
- * @param env environment hosting the {@link HConnection}
- * @return instance of {@link HConnection}.
- * @throws IOException if we cannot create the connection
- */
- public static ClusterConnection getConnectionForEnvironment(CoprocessorEnvironment env)
- throws IOException {
- // this bit is a little hacky - just trying to get it going for the moment
- if (env instanceof RegionCoprocessorEnvironment) {
- RegionCoprocessorEnvironment e = (RegionCoprocessorEnvironment) env;
- RegionServerServices services = e.getRegionServerServices();
- if (services instanceof HRegionServer) {
- return new CoprocessorHConnection((HRegionServer) services);
- }
- }
- return (ClusterConnection) ConnectionFactory.createConnection(env.getConfiguration());
- }
-
- private final ServerName serverName;
- private final HRegionServer server;
-
- /**
- * Constructor that uses server configuration
- * @param server
- * @throws IOException if we cannot create the connection
- */
- public CoprocessorHConnection(HRegionServer server) throws IOException {
- this(server.getConfiguration(), server);
- }
-
- /**
- * Constructor that accepts custom configuration
- * @param conf
- * @param server
- * @throws IOException if we cannot create the connection
- */
- public CoprocessorHConnection(Configuration conf, HRegionServer server) throws IOException {
- super(conf, null, UserProvider.instantiate(conf).getCurrent());
- this.server = server;
- this.serverName = server.getServerName();
- }
-
- @Override
- public org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ClientService.BlockingInterface
- getClient(ServerName serverName) throws IOException {
- // client is trying to reach off-server, so we can't do anything special
- if (!this.serverName.equals(serverName)) {
- return super.getClient(serverName);
- }
- // the client is attempting to write to the same regionserver, we can short-circuit to our
- // local regionserver
- return server.getRSRpcServices();
- }
-
- @Override
- public NonceGenerator getNonceGenerator() {
- return NO_NONCE_GEN; // don't use nonces for coprocessor connection
- }
-}
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/client/HTableWrapper.java hbase-server/src/main/java/org/apache/hadoop/hbase/client/HTableWrapper.java
index 7865cc0..f031e6a 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/client/HTableWrapper.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/client/HTableWrapper.java
@@ -71,7 +71,7 @@ public final class HTableWrapper implements Table {
public static Table createWrapper(List<Table> openTables,
TableName tableName, Environment env, ExecutorService pool) throws IOException {
return new HTableWrapper(openTables, tableName,
- CoprocessorHConnection.getConnectionForEnvironment(env), pool);
+ CoprocessorClusterConnection.getConnectionForEnvironment(env), pool);
}
private HTableWrapper(List<Table> openTables, TableName tableName,
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java
index 7a59ea1..12d0d57 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java
@@ -43,6 +43,7 @@ import org.apache.hadoop.hbase.TableNotFoundException;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.classification.InterfaceStability;
import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.ClusterConnection;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.HTable;
@@ -294,14 +295,13 @@ public class LoadIncrementalHFiles extends Configured implements Tool {
* of a job using HFileOutputFormat
* @param table the table to load into
* @throws TableNotFoundException if table does not yet exist
+ * @deprecated use {@link #doBulkLoad(Path, Connection, TableName)} since 2.0; remove in 3.0
*/
+ @Deprecated
@SuppressWarnings("deprecation")
public void doBulkLoad(Path hfofDir, final HTable table)
- throws TableNotFoundException, IOException {
- try (Admin admin = table.getConnection().getAdmin();
- RegionLocator rl = table.getRegionLocator()) {
- doBulkLoad(hfofDir, admin, table, rl);
- }
+ throws TableNotFoundException, IOException {
+ doBulkLoad(hfofDir, table.getConnection(), table.getName());
}
/**
@@ -312,18 +312,37 @@ public class LoadIncrementalHFiles extends Configured implements Tool {
* of a job using HFileOutputFormat
* @param table the table to load into
* @throws TableNotFoundException if table does not yet exist
+ * @deprecated use {@link #doBulkLoad(Path, Connection, TableName)}; remove in 3.0
*/
- @SuppressWarnings("deprecation")
+ @Deprecated
public void doBulkLoad(Path hfofDir, final Admin admin, Table table,
- RegionLocator regionLocator) throws TableNotFoundException, IOException {
+ RegionLocator regionLocator) throws TableNotFoundException, IOException {
+ doBulkLoad(hfofDir, admin.getConnection(), table.getName());
+ }
+
+ /**
+ * Perform a bulk load of the given directory into the given
+ * pre-existing table. This method is not threadsafe.
+ *
+ * @param hfofDir the directory that was provided as the output path
+ * of a job using HFileOutputFormat
+ * @param conn the connection object to use
+ * @param tableName the table to load into
+ * @throws TableNotFoundException if table does not yet exist
+ */
+ public void doBulkLoad(Path hfofDir, final Connection conn, TableName tableName)
+ throws TableNotFoundException, IOException {
+ Admin admin = conn.getAdmin();
+ RegionLocator regionLocator = conn.getRegionLocator(tableName);
+ Table table = conn.getTable(tableName);
if (!admin.isTableAvailable(regionLocator.getName())) {
- throw new TableNotFoundException("Table " + table.getName() + "is not currently available.");
+ throw new TableNotFoundException("Table " + tableName + " is not currently available.");
}
// initialize thread pools
int nrThreads = getConf().getInt("hbase.loadincremental.threads.max",
- Runtime.getRuntime().availableProcessors());
+ Runtime.getRuntime().availableProcessors());
ThreadFactoryBuilder builder = new ThreadFactoryBuilder();
builder.setNameFormat("LoadIncrementalHFiles-%1$d");
ExecutorService pool = new ThreadPoolExecutor(nrThreads, nrThreads,
@@ -415,7 +434,7 @@ public class LoadIncrementalHFiles extends Configured implements Tool {
+ " hfiles to one family of one region");
}
- bulkLoadPhase(table, admin.getConnection(), pool, queue, regionGroups);
+ bulkLoadPhase(table, (ClusterConnection)conn, pool, queue, regionGroups);
// NOTE: The next iteration's split / group could happen in parallel to
// atomic bulkloads assuming that there are splits and no merges, and
@@ -451,7 +470,7 @@ public class LoadIncrementalHFiles extends Configured implements Tool {
* them. Any failures are re-queued for another pass with the
* groupOrSplitPhase.
*/
- protected void bulkLoadPhase(final Table table, final Connection conn,
+ protected void bulkLoadPhase(final Table table, final ClusterConnection conn,
ExecutorService pool, Deque<LoadQueueItem> queue,
final Multimap<ByteBuffer, LoadQueueItem> regionGroups) throws IOException {
// atomically bulk load the groups.
@@ -712,7 +731,7 @@ public class LoadIncrementalHFiles extends Configured implements Tool {
* @return empty list if success, list of items to retry on recoverable
* failure
*/
- protected List<LoadQueueItem> tryAtomicRegionLoad(final Connection conn,
+ protected List<LoadQueueItem> tryAtomicRegionLoad(final ClusterConnection conn,
final TableName tableName, final byte[] first, Collection<LoadQueueItem> lqis)
throws IOException {
final List<Pair<byte[], String>> famPaths =
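
A hedged sketch of the new non-deprecated bulk load entry point added above; the output path and table name are illustrative:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.mapreduce.LoadIncrementalHFiles;

public class BulkLoadExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf)) {
      // The new overload takes a Connection plus TableName instead of HTable/Admin.
      new LoadIncrementalHFiles(conf)
          .doBulkLoad(new Path("/tmp/hfof-output"), conn, TableName.valueOf("example"));
    }
  }
}
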
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
index 2ce2193..9b8f59f 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
@@ -685,7 +685,7 @@ public class HRegionServer extends HasThread implements
}
/**
- * Create a 'smarter' HConnection, one that is capable of by-passing RPC if the request is to
+ * Create a 'smarter' Connection, one that is capable of bypassing RPC if the request is to
* the local server. Safe to use going to local or remote server.
* Create this instance in a method that can be intercepted and mocked in tests.
* @throws IOException
@@ -1048,7 +1048,7 @@ public class HRegionServer extends HasThread implements
} catch (IOException e) {
// Although the {@link Closeable} interface throws an {@link
// IOException}, in reality, the implementation would never do that.
- LOG.warn("Attempt to close server's short circuit HConnection failed.", e);
+ LOG.warn("Attempt to close server's short circuit Connection failed.", e);
}
}
@@ -1753,7 +1753,7 @@ public class HRegionServer extends HasThread implements
// Create the log splitting worker and start it
// set a smaller retries to fast fail otherwise splitlogworker could be blocked for
- // quite a while inside HConnection layer. The worker won't be available for other
+ // quite a while inside the Connection layer. The worker won't be available for other
// tasks even after current task is preempted after a split task times out.
Configuration sinkConf = HBaseConfiguration.create(conf);
sinkConf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER,
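
The retry reduction here is the fast-fail pattern the comment describes: the worker copies the server configuration and lowers the client retry count before creating its sink connection. A sketch of that pattern in isolation (the retry value of 3 is illustrative; the hunk is truncated before the actual value):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HConstants;

    final class FastFailSinkConf {
      // Derive a sink configuration that fails fast instead of retrying for minutes.
      static Configuration create(Configuration serverConf) {
        Configuration sinkConf = HBaseConfiguration.create(serverConf);
        // A worker blocked inside the Connection layer cannot take other split tasks.
        sinkConf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 3); // illustrative value
        return sinkConf;
      }
    }
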
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
index 5729334..97633bc 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
@@ -553,7 +553,8 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
throw new DoNotRetryIOException("Atomic put and/or delete only, not " + type.name());
}
}
- return region.checkAndRowMutate(row, family, qualifier, compareOp, comparator, rm, Boolean.TRUE);
+ return region.checkAndRowMutate(row, family, qualifier, compareOp, comparator, rm,
+ Boolean.TRUE);
}
/**
@@ -947,8 +948,8 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
}
priority = createPriority();
String name = rs.getProcessName() + "/" + initialIsa.toString();
- // Set how many times to retry talking to another server over HConnection.
- ConnectionUtils.setServerSideHConnectionRetriesConfig(rs.conf, name, LOG);
+ // Set how many times to retry talking to another server over Connection.
+ ConnectionUtils.setServerSideConnectionRetriesConfig(rs.conf, name, LOG);
try {
rpcServer = new RpcServer(rs, name, getServices(),
bindAddress, // use final bindAddress for this server.
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALEditsReplaySink.java hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALEditsReplaySink.java
index 9d5e052..e39b2e8 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALEditsReplaySink.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALEditsReplaySink.java
@@ -36,11 +36,14 @@ import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.client.HConnection;
+import org.apache.hadoop.hbase.client.ClusterConnection;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.client.RegionServerCallable;
import org.apache.hadoop.hbase.client.RpcRetryingCallerFactory;
import org.apache.hadoop.hbase.ipc.PayloadCarryingRpcController;
import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
+import org.apache.hadoop.hbase.ipc.RpcServer;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.ReplicationProtbufUtil;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos;
@@ -65,7 +68,7 @@ public class WALEditsReplaySink {
private static final int MAX_BATCH_SIZE = 1024;
private final Configuration conf;
- private final HConnection conn;
+ private final ClusterConnection conn;
private final TableName tableName;
private final MetricsWALEditsReplay metrics;
private final AtomicLong totalReplayedEdits = new AtomicLong();
@@ -80,7 +83,7 @@ public class WALEditsReplaySink {
* @param conn
* @throws IOException
*/
- public WALEditsReplaySink(Configuration conf, TableName tableName, HConnection conn)
+ public WALEditsReplaySink(Configuration conf, TableName tableName, ClusterConnection conn)
throws IOException {
this.conf = conf;
this.metrics = new MetricsWALEditsReplay();
@@ -186,7 +189,7 @@ public class WALEditsReplaySink {
private HRegionInfo regionInfo;
private List<Entry> entries;
- ReplayServerCallable(final HConnection connection, final TableName tableName,
+ ReplayServerCallable(final ClusterConnection connection, final TableName tableName,
final HRegionLocation regionLoc, final HRegionInfo regionInfo,
final List<Entry> entries) {
super(connection, tableName, null);
@@ -235,7 +238,8 @@ public class WALEditsReplaySink {
List<Cell> cells = edit.getCells();
for (Cell cell : cells) {
// filtering WAL meta entries
- setLocation(conn.locateRegion(tableName, CellUtil.cloneRow(cell)));
+ RegionLocator rl = conn.getRegionLocator(tableName);
+ setLocation(rl.getRegionLocation(CellUtil.cloneRow(cell)));
skip = true;
break;
}
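
The replay sink now resolves a row's location through the public RegionLocator instead of the internal locateRegion call. A minimal sketch of the replacement pattern, assuming only what the hunk shows:

    import java.io.IOException;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.RegionLocator;

    final class LocateExample {
      // Equivalent of the removed conn.locateRegion(tableName, row).
      static HRegionLocation locate(Connection conn, TableName table, byte[] row)
          throws IOException {
        try (RegionLocator locator = conn.getRegionLocator(table)) {
          return locator.getRegionLocation(row);
        }
      }
    }
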
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HBaseInterClusterReplicationEndpoint.java hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HBaseInterClusterReplicationEndpoint.java
index 4c719a9..ec84a59 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HBaseInterClusterReplicationEndpoint.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HBaseInterClusterReplicationEndpoint.java
@@ -38,8 +38,8 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableNotFoundException;
+import org.apache.hadoop.hbase.client.ClusterConnection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
-import org.apache.hadoop.hbase.client.HConnection;
import org.apache.hadoop.hbase.protobuf.ReplicationProtbufUtil;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.AdminService.BlockingInterface;
import org.apache.hadoop.hbase.util.Bytes;
@@ -64,7 +64,7 @@ import org.apache.hadoop.ipc.RemoteException;
public class HBaseInterClusterReplicationEndpoint extends HBaseReplicationEndpoint {
private static final Log LOG = LogFactory.getLog(HBaseInterClusterReplicationEndpoint.class);
- private HConnection conn;
+ private ClusterConnection conn;
private Configuration conf;
@@ -94,7 +94,7 @@ public class HBaseInterClusterReplicationEndpoint extends HBaseReplicationEndpoi
// TODO: This connection is replication specific or we should make it particular to
// replication and make replication specific settings such as compression or codec to use
// passing Cells.
- this.conn = (HConnection) ConnectionFactory.createConnection(this.conf);
+ this.conn = (ClusterConnection) ConnectionFactory.createConnection(this.conf);
this.sleepForRetries =
this.conf.getLong("replication.source.sleepforretries", 1000);
this.metrics = context.getMetrics();
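
ConnectionFactory.createConnection returns the public Connection interface, so code that needs cluster-internal methods (replication sinks, admin RPC stubs) downcasts as the endpoint does above. A hedged sketch of the idiom:

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.client.ClusterConnection;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    final class InternalConnections {
      static ClusterConnection open(Configuration conf) throws IOException {
        Connection conn = ConnectionFactory.createConnection(conf);
        // The default implementation implements ClusterConnection; a custom
        // Connection implementation might not, so this cast is internal-only.
        return (ClusterConnection) conn;
      }
    }
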
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RegionReplicaReplicationEndpoint.java hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RegionReplicaReplicationEndpoint.java
index b3db0f6..f8f8416 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RegionReplicaReplicationEndpoint.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RegionReplicaReplicationEndpoint.java
@@ -561,7 +561,7 @@ public class RegionReplicaReplicationEndpoint extends HBaseReplicationEndpoint {
if (cause instanceof IOException) {
// The table can be disabled or dropped at this time. For disabled tables, we have no
// cheap mechanism to detect this case because meta does not contain this information.
- // HConnection.isTableDisabled() is a zk call which we cannot do for every replay RPC.
+ // ClusterConnection.isTableDisabled() is a zk call which we cannot do for every replay RPC.
// So instead we start the replay RPC with retries and
// check whether the table is dropped or disabled which might cause
// SocketTimeoutException, or RetriesExhaustedException or similar if we get IOE.
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSinkManager.java hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSinkManager.java
index 76fa6c2..ee79136 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSinkManager.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSinkManager.java
@@ -27,7 +27,7 @@ import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.client.HConnection;
+import org.apache.hadoop.hbase.client.ClusterConnection;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.AdminService;
import org.apache.hadoop.hbase.replication.HBaseReplicationEndpoint;
import com.google.common.collect.Lists;
@@ -56,7 +56,7 @@ public class ReplicationSinkManager {
static final float DEFAULT_REPLICATION_SOURCE_RATIO = 0.1f;
- private final HConnection conn;
+ private final ClusterConnection conn;
private final String peerClusterId;
@@ -88,7 +88,7 @@ public class ReplicationSinkManager {
* @param conf HBase configuration, used for determining replication source ratio and bad peer
* threshold
*/
- public ReplicationSinkManager(HConnection conn, String peerClusterId,
+ public ReplicationSinkManager(ClusterConnection conn, String peerClusterId,
HBaseReplicationEndpoint endpoint, Configuration conf) {
this.conn = conn;
this.peerClusterId = peerClusterId;
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/util/ConnectionCache.java hbase-server/src/main/java/org/apache/hadoop/hbase/util/ConnectionCache.java
index 1eeeee0..ad48820 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/util/ConnectionCache.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/util/ConnectionCache.java
@@ -40,7 +40,7 @@ import org.apache.hadoop.security.UserGroupInformation;
import org.apache.log4j.Logger;
/**
- * A utility to store user specific HConnections in memory.
+ * A utility to store user specific Connections in memory.
* There is a chore to clean up connections idle for too long.
* This class is used by REST server and Thrift server to
* support authentication and impersonation.
@@ -207,7 +207,7 @@ public class ConnectionCache {
return false;
}
if (connection.isAborted() || connection.isClosed()) {
- LOG.info("Unexpected: cached HConnection is aborted/closed, removed from cache");
+ LOG.info("Unexpected: cached Connection is aborted/closed, removed from cache");
connections.remove(userName);
return false;
}
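
ConnectionCache keeps one Connection per user and drops entries that have aborted or closed, as the hunk above shows. A simplified sketch of that bookkeeping (the real class also handles impersonation, an idle-eviction chore, and synchronized creation; omitted here):

    import java.io.IOException;
    import java.util.concurrent.ConcurrentHashMap;
    import java.util.concurrent.ConcurrentMap;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    final class SimpleConnectionCache {
      private final ConcurrentMap<String, Connection> connections =
          new ConcurrentHashMap<String, Connection>();
      private final Configuration conf;

      SimpleConnectionCache(Configuration conf) { this.conf = conf; }

      Connection get(String userName) throws IOException {
        Connection conn = connections.get(userName);
        if (conn == null || conn.isAborted() || conn.isClosed()) {
          // Stale or missing: drop it and create a fresh connection for this user.
          connections.remove(userName);
          conn = ConnectionFactory.createConnection(conf);
          connections.put(userName, conn);
        }
        return conn;
      }
    }
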
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
index fa138be..86bea0c 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
@@ -102,7 +102,6 @@ import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Get;
-import org.apache.hadoop.hbase.client.HConnection;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.RegionReplicaUtil;
import org.apache.hadoop.hbase.client.Result;
@@ -3981,10 +3980,10 @@ public class HBaseFsck extends Configured implements Closeable {
private HBaseFsck hbck;
private ServerName rsinfo;
private ErrorReporter errors;
- private HConnection connection;
+ private ClusterConnection connection;
WorkItemRegion(HBaseFsck hbck, ServerName info,
- ErrorReporter errors, HConnection connection) {
+ ErrorReporter errors, ClusterConnection connection) {
this.hbck = hbck;
this.rsinfo = info;
this.errors = errors;
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsckRepair.java hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsckRepair.java
index e681789..e3f5f11 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsckRepair.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsckRepair.java
@@ -33,7 +33,6 @@ import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ClusterConnection;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
-import org.apache.hadoop.hbase.client.HConnection;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.master.RegionState;
@@ -64,7 +63,7 @@ public class HBaseFsckRepair {
* @param region Region to undeploy
* @param servers list of Servers to undeploy from
*/
- public static void fixMultiAssignment(HConnection connection, HRegionInfo region,
+ public static void fixMultiAssignment(ClusterConnection connection, HRegionInfo region,
List<ServerName> servers)
throws IOException, KeeperException, InterruptedException {
HRegionInfo actualRegion = new HRegionInfo(region);
@@ -99,14 +98,7 @@ public class HBaseFsckRepair {
}
/**
- * In 0.90, this forces an HRI offline by setting the RegionTransitionData
- * in ZK to have HBCK_CODE_NAME as the server. This is a special case in
- * the AssignmentManager that attempts an assign call by the master.
- *
- * @see org.apache.hadoop.hbase.master.AssignementManager#handleHBCK
- *
- * This doesn't seem to work properly in the updated version of 0.92+'s hbck
- * so we use assign to force the region into transition. This has the
+ * We use assign to force the region into transition. This has the
* side-effect of requiring a HRegionInfo that considers regionId (timestamp)
* in comparators that is addressed by HBASE-5563.
*/
@@ -149,12 +141,11 @@ public class HBaseFsckRepair {
* (default 120s) to close the region. This bypasses the active hmaster.
*/
@SuppressWarnings("deprecation")
- public static void closeRegionSilentlyAndWait(HConnection connection,
+ public static void closeRegionSilentlyAndWait(ClusterConnection connection,
ServerName server, HRegionInfo region) throws IOException, InterruptedException {
long timeout = connection.getConfiguration()
.getLong("hbase.hbck.close.timeout", 120000);
- ServerManager.closeRegionSilentlyAndWait((ClusterConnection)connection, server,
- region, timeout);
+ ServerManager.closeRegionSilentlyAndWait(connection, server, region, timeout);
}
/**
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/util/HMerge.java hbase-server/src/main/java/org/apache/hadoop/hbase/util/HMerge.java
index 53c5cef..e86d32a 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/util/HMerge.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/util/HMerge.java
@@ -37,10 +37,10 @@ import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.TableNotDisabledException;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.ClusterConnection;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Delete;
-import org.apache.hadoop.hbase.client.HConnection;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Table;
@@ -103,10 +103,10 @@ class HMerge {
final TableName tableName, final boolean testMasterRunning)
throws IOException {
boolean masterIsRunning = false;
- HConnection hConnection = null;
+ ClusterConnection hConnection = null;
if (testMasterRunning) {
try {
- hConnection = (HConnection) ConnectionFactory.createConnection(conf);
+ hConnection = (ClusterConnection) ConnectionFactory.createConnection(conf);
masterIsRunning = hConnection.isMasterRunning();
} finally {
if (hConnection != null) {
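
HMerge only needs a connection long enough to probe the master, and isMasterRunning lives on ClusterConnection, hence the cast and the finally-close above. The same probe in isolation (a sketch):

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.MasterNotRunningException;
    import org.apache.hadoop.hbase.ZooKeeperConnectionException;
    import org.apache.hadoop.hbase.client.ClusterConnection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    final class MasterProbe {
      static boolean masterRunning(Configuration conf) throws IOException {
        ClusterConnection conn = (ClusterConnection) ConnectionFactory.createConnection(conf);
        try {
          return conn.isMasterRunning();
        } catch (MasterNotRunningException | ZooKeeperConnectionException e) {
          return false;
        } finally {
          conn.close(); // throwaway connection, used only for the probe
        }
      }
    }
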
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/util/MultiHConnection.java hbase-server/src/main/java/org/apache/hadoop/hbase/util/MultiHConnection.java
index ed72ea2..b917263 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/util/MultiHConnection.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/util/MultiHConnection.java
@@ -34,9 +34,9 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.ClusterConnection;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
-import org.apache.hadoop.hbase.client.HConnection;
import org.apache.hadoop.hbase.client.Row;
import org.apache.hadoop.hbase.client.coprocessor.Batch;
@@ -47,7 +47,7 @@ import org.apache.hadoop.hbase.client.coprocessor.Batch;
@InterfaceAudience.Private
public class MultiHConnection {
private static final Log LOG = LogFactory.getLog(MultiHConnection.class);
- private HConnection[] hConnections;
+ private ClusterConnection[] hConnections;
private final Object hConnectionsLock = new Object();
private int noOfConnections;
private ExecutorService batchPool;
@@ -62,9 +62,9 @@ public class MultiHConnection {
throws IOException {
this.noOfConnections = noOfConnections;
synchronized (this.hConnectionsLock) {
- hConnections = new HConnection[noOfConnections];
+ hConnections = new ClusterConnection[noOfConnections];
for (int i = 0; i < noOfConnections; i++) {
- HConnection conn = (HConnection) ConnectionFactory.createConnection(conf);
+ ClusterConnection conn = (ClusterConnection) ConnectionFactory.createConnection(conf);
hConnections[i] = conn;
}
}
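
MultiHConnection exists to spread batch traffic over several connections; the hunk only retypes the array. A sketch of creating and rotating over such a pool (pool size and class name are illustrative):

    import java.io.IOException;
    import java.util.concurrent.atomic.AtomicInteger;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    final class ConnectionPool {
      private final Connection[] connections;
      private final AtomicInteger index = new AtomicInteger();

      ConnectionPool(Configuration conf, int size) throws IOException {
        connections = new Connection[size];
        for (int i = 0; i < size; i++) {
          connections[i] = ConnectionFactory.createConnection(conf);
        }
      }

      Connection next() {
        // Round-robin so no single connection becomes a hot spot.
        return connections[Math.abs(index.getAndIncrement() % connections.length)];
      }
    }
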
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/ReplicationChecker.java hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/ReplicationChecker.java
index bf44a50..f97c1b6 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/ReplicationChecker.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/ReplicationChecker.java
@@ -32,7 +32,7 @@ import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Abortable;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.client.HConnection;
+import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.replication.ReplicationException;
import org.apache.hadoop.hbase.replication.ReplicationFactory;
import org.apache.hadoop.hbase.replication.ReplicationPeers;
@@ -58,7 +58,7 @@ public class ReplicationChecker {
// replicator with its queueIds for removed peers
private Map<String, List<String>> undeletedQueueIds = new HashMap<String, List<String>>();
- public ReplicationChecker(Configuration conf, ZooKeeperWatcher zkw, HConnection connection,
+ public ReplicationChecker(Configuration conf, ZooKeeperWatcher zkw, Connection connection,
ErrorReporter errorReporter) throws IOException {
try {
this.errorReporter = errorReporter;
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java
index 3741cdf..4e5c30d 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java
@@ -70,11 +70,12 @@ import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.TableNotFoundException;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.client.ClusterConnection;
+import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.ConnectionUtils;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Durability;
-import org.apache.hadoop.hbase.client.HConnection;
import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.TableState;
@@ -1639,8 +1640,8 @@ public class WALSplitter {
private final Map<String, HRegionLocation> onlineRegions =
new ConcurrentHashMap<String, HRegionLocation>();
- private Map<TableName, HConnection> tableNameToHConnectionMap = Collections
- .synchronizedMap(new TreeMap<TableName, HConnection>());
+ private Map<TableName, ClusterConnection> tableNameToConnectionMap = Collections
+ .synchronizedMap(new TreeMap<TableName, ClusterConnection>());
/**
* Map key -> value layout
* <servername>:<table name> -> Queue<Row>
@@ -1747,7 +1748,7 @@ public class WALSplitter {
String locKey = null;
List<Cell> cells = edit.getCells();
List<Cell> skippedCells = new ArrayList<Cell>();
- HConnection hconn = this.getConnectionByTableName(table);
+ ClusterConnection conn = this.getConnectionByTableName(table);
for (Cell cell : cells) {
byte[] row = CellUtil.cloneRow(cell);
@@ -1775,7 +1776,7 @@ public class WALSplitter {
try {
loc =
- locateRegionAndRefreshLastFlushedSequenceId(hconn, table, row,
+ locateRegionAndRefreshLastFlushedSequenceId(conn, table, row,
encodeRegionNameStr);
// skip replaying the compaction if the region is gone
if (isCompactionEntry && !encodeRegionNameStr.equalsIgnoreCase(
@@ -1849,13 +1850,13 @@ public class WALSplitter {
* destination region is online for replay.
* @throws IOException
*/
- private HRegionLocation locateRegionAndRefreshLastFlushedSequenceId(HConnection hconn,
+ private HRegionLocation locateRegionAndRefreshLastFlushedSequenceId(Connection conn,
TableName table, byte[] row, String originalEncodedRegionName) throws IOException {
// fetch location from cache
HRegionLocation loc = onlineRegions.get(originalEncodedRegionName);
if(loc != null) return loc;
// fetch location from hbase:meta directly without using cache to avoid hit old dead server
- loc = hconn.getRegionLocation(table, row, true);
+ loc = conn.getRegionLocator(table).getRegionLocation(row, true);
if (loc == null) {
throw new IOException("Can't locate location for row:" + Bytes.toString(row)
+ " of table:" + table);
@@ -1947,11 +1948,11 @@ public class WALSplitter {
while (endTime > EnvironmentEdgeManager.currentTime()) {
try {
// Try and get regioninfo from the hosting server.
- HConnection hconn = getConnectionByTableName(tableName);
+ ClusterConnection conn = getConnectionByTableName(tableName);
if(reloadLocation) {
- loc = hconn.getRegionLocation(tableName, row, true);
+ loc = conn.getRegionLocation(tableName, row, true);
}
- BlockingInterface remoteSvr = hconn.getAdmin(loc.getServerName());
+ BlockingInterface remoteSvr = conn.getAdmin(loc.getServerName());
HRegionInfo region = loc.getRegionInfo();
try {
GetRegionInfoRequest request =
@@ -2077,13 +2078,13 @@ public class WALSplitter {
}
// close connections
- synchronized (this.tableNameToHConnectionMap) {
- for (Map.Entry<TableName, HConnection> entry :
- this.tableNameToHConnectionMap.entrySet()) {
- HConnection hconn = entry.getValue();
+ synchronized (this.tableNameToConnectionMap) {
+ for (Map.Entry<TableName, ClusterConnection> entry :
+ this.tableNameToConnectionMap.entrySet()) {
+ ClusterConnection conn = entry.getValue();
try {
- hconn.clearRegionCache();
- hconn.close();
+ conn.clearRegionCache();
+ conn.close();
} catch (IOException ioe) {
result.add(ioe);
}
@@ -2147,29 +2148,30 @@ public class WALSplitter {
throw new IOException("Invalid location string:" + loc + " found. Replay aborted.");
}
- HConnection hconn = getConnectionByTableName(tableName);
+ ClusterConnection conn = getConnectionByTableName(tableName);
synchronized (writers) {
ret = writers.get(loc);
if (ret == null) {
- ret = new RegionServerWriter(conf, tableName, hconn);
+ ret = new RegionServerWriter(conf, tableName, conn);
writers.put(loc, ret);
}
}
return ret;
}
- private HConnection getConnectionByTableName(final TableName tableName) throws IOException {
- HConnection hconn = this.tableNameToHConnectionMap.get(tableName);
- if (hconn == null) {
- synchronized (this.tableNameToHConnectionMap) {
- hconn = this.tableNameToHConnectionMap.get(tableName);
- if (hconn == null) {
- hconn = (HConnection) ConnectionFactory.createConnection(conf);
- this.tableNameToHConnectionMap.put(tableName, hconn);
+ private ClusterConnection getConnectionByTableName(final TableName tableName)
+ throws IOException {
+ ClusterConnection conn = this.tableNameToConnectionMap.get(tableName);
+ if (conn == null) {
+ synchronized (this.tableNameToConnectionMap) {
+ conn = this.tableNameToConnectionMap.get(tableName);
+ if (conn == null) {
+ conn = (ClusterConnection) ConnectionFactory.createConnection(conf);
+ this.tableNameToConnectionMap.put(tableName, conn);
}
}
}
- return hconn;
+ return conn;
}
private TableName getTableFromLocationStr(String loc) {
/**
@@ -2190,7 +2192,8 @@ public class WALSplitter {
private final static class RegionServerWriter extends SinkWriter {
final WALEditsReplaySink sink;
- RegionServerWriter(final Configuration conf, final TableName tableName, final HConnection conn)
+ RegionServerWriter(final Configuration conf, final TableName tableName,
+ final ClusterConnection conn)
throws IOException {
this.sink = new WALEditsReplaySink(conf, tableName, conn);
}
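
getConnectionByTableName above lazily creates one ClusterConnection per table: an unsynchronized read first, then a recheck under the map's lock so only one connection is ever created per table. The pattern in isolation (a sketch using the public Connection type):

    import java.io.IOException;
    import java.util.Collections;
    import java.util.Map;
    import java.util.TreeMap;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    final class PerTableConnections {
      private final Map<TableName, Connection> byTable =
          Collections.synchronizedMap(new TreeMap<TableName, Connection>());
      private final Configuration conf;

      PerTableConnections(Configuration conf) { this.conf = conf; }

      Connection get(TableName table) throws IOException {
        Connection conn = byTable.get(table);      // cheap unsynchronized fast path
        if (conn == null) {
          synchronized (byTable) {
            conn = byTable.get(table);             // recheck under the lock
            if (conn == null) {
              conn = ConnectionFactory.createConnection(conf);
              byTable.put(table, conn);
            }
          }
        }
        return conn;
      }
    }
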
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
index 021f626..fb246d4 100644
--- hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
@@ -65,7 +65,6 @@ import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HBaseAdmin;
-import org.apache.hadoop.hbase.client.HConnection;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.RegionLocator;
@@ -359,8 +358,8 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
/**
* Returns this classes's instance of {@link Configuration}. Be careful how
- * you use the returned Configuration since {@link HConnection} instances
- * can be shared. The Map of HConnections is keyed by the Configuration. If
+ * you use the returned Configuration since {@link Connection} instances
+ * can be shared. The Map of Connections is keyed by the Configuration. If
* say, a Connection was being used against a cluster that had been shutdown,
* see {@link #shutdownMiniCluster()}, then the Connection will no longer
* be wholesome. Rather than use the return directly, it's usually best to
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/TestMetaTableLocator.java hbase-server/src/test/java/org/apache/hadoop/hbase/TestMetaTableLocator.java
index 9943749..707f467 100644
--- hbase-server/src/test/java/org/apache/hadoop/hbase/TestMetaTableLocator.java
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/TestMetaTableLocator.java
@@ -218,7 +218,7 @@ public class TestMetaTableLocator {
* HBASE-4470 as there it is thrown from getHConnection() and
* here it is thrown from get() -- but those are both called
* from the same function anyway, and this way is less invasive than
- * throwing from getHConnection would be.
+ * throwing from getConnection would be.
*
* @throws IOException
* @throws InterruptedException
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/client/HConnectionTestingUtility.java hbase-server/src/test/java/org/apache/hadoop/hbase/client/HConnectionTestingUtility.java
index 06fdd7f..a524d41 100644
--- hbase-server/src/test/java/org/apache/hadoop/hbase/client/HConnectionTestingUtility.java
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/client/HConnectionTestingUtility.java
@@ -41,7 +41,7 @@ public class HConnectionTestingUtility {
* {@link ConnectionImplementation} innards to HBaseTestingUtility to give it access.
*/
/**
- * Get a Mocked {@link HConnection} that goes with the passed conf
+ * Get a Mocked {@link ClusterConnection} that goes with the passed conf
* configuration instance. Minimally the mock will return
* conf when {@link ClusterConnection#getConfiguration()} is invoked.
* Be sure to shutdown the connection when done by calling
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java
index 9c9ec87..a83528f 100644
--- hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java
@@ -4038,13 +4038,13 @@ public class TestFromClientSide {
/**
* simple test that just executes parts of the client
- * API that accept a pre-created HConnection instance
+ * API that accept a pre-created Connection instance
*
* @throws IOException
*/
@Test
- public void testUnmanagedHConnection() throws IOException {
- final TableName tableName = TableName.valueOf("testUnmanagedHConnection");
+ public void testUnmanagedConnection() throws IOException {
+ final TableName tableName = TableName.valueOf("testUnmanagedConnection");
TEST_UTIL.createTable(tableName, HConstants.CATALOG_FAMILY);
Connection conn = ConnectionFactory.createConnection(TEST_UTIL.getConfiguration());
Table t = conn.getTable(tableName);
@@ -4055,14 +4055,14 @@ public class TestFromClientSide {
}
/**
- * test of that unmanaged HConnections are able to reconnect
+ * test that unmanaged Connections are able to reconnect
* properly (see HBASE-5058)
*
* @throws Exception
*/
@Test
- public void testUnmanagedHConnectionReconnect() throws Exception {
- final TableName tableName = TableName.valueOf("testUnmanagedHConnectionReconnect");
+ public void testUnmanagedConnectionReconnect() throws Exception {
+ final TableName tableName = TableName.valueOf("testUnmanagedConnectionReconnect");
TEST_UTIL.createTable(tableName, HConstants.CATALOG_FAMILY);
Connection conn = ConnectionFactory.createConnection(TEST_UTIL.getConfiguration());
Table t = conn.getTable(tableName);
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHCM.java hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHCM.java
index 16465d2..9d9f42f 100644
--- hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHCM.java
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHCM.java
@@ -200,7 +200,7 @@ public class TestHCM {
}
/**
- * Naive test to check that HConnection#getAdmin returns a properly constructed HBaseAdmin object
+ * Naive test to check that Connection#getAdmin returns a properly constructed HBaseAdmin object
* @throws IOException Unable to construct admin
*/
@Test
@@ -875,7 +875,7 @@ public class TestHCM {
TEST_UTIL.getConfiguration().get(HConstants.ZOOKEEPER_CLIENT_PORT));
// This should be enough to connect
- HConnection conn = (HConnection) ConnectionFactory.createConnection(c);
+ ClusterConnection conn = (ClusterConnection) ConnectionFactory.createConnection(c);
assertTrue(conn.isMasterRunning());
conn.close();
}
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestReplicaWithCluster.java hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestReplicaWithCluster.java
index b773b46..7c33f55 100644
--- hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestReplicaWithCluster.java
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestReplicaWithCluster.java
@@ -303,7 +303,7 @@ public class TestReplicaWithCluster {
HTU2.deleteTable(hdt.getTableName());
// We shutdown HTU2 minicluster later, in afterClass(), as shutting down
- // the minicluster has negative impact of deleting all HConnections in JVM.
+ // the minicluster has the negative impact of deleting all Connections in the JVM.
}
@Test (timeout=30000)
@@ -332,7 +332,7 @@ public class TestReplicaWithCluster {
// bulk load HFiles
LOG.debug("Loading test data");
@SuppressWarnings("deprecation")
- final HConnection conn = HTU.getHBaseAdmin().getConnection();
+ final ClusterConnection conn = (ClusterConnection) HTU.getHBaseAdmin().getConnection();
RegionServerCallable<Void> callable = new RegionServerCallable<Void>(
conn, hdt.getTableName(), TestHRegionServerBulkLoad.rowkey(0)) {
@Override
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestShortCircuitConnection.java hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestShortCircuitConnection.java
index e84d34c..f84b93e 100644
--- hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestShortCircuitConnection.java
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestShortCircuitConnection.java
@@ -69,7 +69,7 @@ public class TestShortCircuitConnection {
HTableInterface tableIf = connection.getTable(tn);
assertTrue(tableIf instanceof HTable);
HTable table = (HTable) tableIf;
- assertTrue(table.getConnection() == connection);
+ assertTrue(table.getConnection() == connection); // deprecated, remove in 3.0
AdminService.BlockingInterface admin = connection.getAdmin(regionServer.getServerName());
ClientService.BlockingInterface client = connection.getClient(regionServer.getServerName());
assertTrue(admin instanceof RSRpcServices);
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFilesSplitRecovery.java hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFilesSplitRecovery.java
index 052e05c..fcb9989 100644
--- hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFilesSplitRecovery.java
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFilesSplitRecovery.java
@@ -46,9 +46,9 @@ import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableExistsException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.ClusterConnection;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
-import org.apache.hadoop.hbase.client.HConnection;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
@@ -278,12 +278,12 @@ public class TestLoadIncrementalHFilesSplitRecovery {
setupTable(connection, table, 10);
LoadIncrementalHFiles lih = new LoadIncrementalHFiles(util.getConfiguration()) {
@Override
- protected List<LoadQueueItem> tryAtomicRegionLoad(final Connection conn,
+ protected List<LoadQueueItem> tryAtomicRegionLoad(final ClusterConnection conn,
TableName tableName, final byte[] first, Collection<LoadQueueItem> lqis)
throws IOException {
int i = attmptedCalls.incrementAndGet();
if (i == 1) {
- Connection errConn = null;
+ ClusterConnection errConn = null;
try {
errConn = getMockedConnection(util.getConfiguration());
} catch (Exception e) {
@@ -291,10 +291,10 @@ public class TestLoadIncrementalHFilesSplitRecovery {
throw new RuntimeException("mocking cruft, should never happen");
}
failedCalls.incrementAndGet();
- return super.tryAtomicRegionLoad((HConnection)errConn, tableName, first, lqis);
+ return super.tryAtomicRegionLoad(errConn, tableName, first, lqis);
}
- return super.tryAtomicRegionLoad((HConnection)conn, tableName, first, lqis);
+ return super.tryAtomicRegionLoad(conn, tableName, first, lqis);
}
};
try {
@@ -314,9 +314,9 @@ public class TestLoadIncrementalHFilesSplitRecovery {
}
@SuppressWarnings("deprecation")
- private HConnection getMockedConnection(final Configuration conf)
+ private ClusterConnection getMockedConnection(final Configuration conf)
throws IOException, ServiceException {
- HConnection c = Mockito.mock(HConnection.class);
+ ClusterConnection c = Mockito.mock(ClusterConnection.class);
Mockito.when(c.getConfiguration()).thenReturn(conf);
Mockito.doNothing().when(c).close();
// Make it so we return a particular location when asked.
@@ -355,7 +355,7 @@ public class TestLoadIncrementalHFilesSplitRecovery {
final AtomicInteger attemptedCalls = new AtomicInteger();
LoadIncrementalHFiles lih2 = new LoadIncrementalHFiles(util.getConfiguration()) {
@Override
- protected void bulkLoadPhase(final Table htable, final Connection conn,
+ protected void bulkLoadPhase(final Table htable, final ClusterConnection conn,
ExecutorService pool, Deque<LoadQueueItem> queue,
final Multimap<ByteBuffer, LoadQueueItem> regionGroups) throws IOException {
int i = attemptedCalls.incrementAndGet();
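
The split-recovery test now mocks ClusterConnection directly rather than HConnection. A pared-down sketch of the mock setup, limited to the stubbing visible in the hunk:

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.client.ClusterConnection;
    import org.mockito.Mockito;

    final class MockConnections {
      static ClusterConnection mocked(Configuration conf) throws IOException {
        ClusterConnection c = Mockito.mock(ClusterConnection.class);
        Mockito.when(c.getConfiguration()).thenReturn(conf);
        Mockito.doNothing().when(c).close(); // close() becomes a no-op on the mock
        return c;
      }
    }
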
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java
index b5f20f5..b974f97 100644
--- hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java
@@ -133,8 +133,8 @@ public class TestCatalogJanitor {
} catch (ServiceException se) {
throw ProtobufUtil.getRemoteException(se);
}
- // Mock an HConnection and a AdminProtocol implementation. Have the
- // HConnection return the HRI. Have the HRI return a few mocked up responses
+ // Mock a Connection and an AdminProtocol implementation. Have the
+ // Connection return the HRI. Have the HRI return a few mocked up responses
// to make our test work.
this.connection =
HConnectionTestingUtility.getMockedConnectionAndDecorate(this.c,
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterNoCluster.java hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterNoCluster.java
index 972834a..42e2a3e 100644
--- hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterNoCluster.java
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterNoCluster.java
@@ -208,7 +208,7 @@ public class TestMasterNoCluster {
@Override
public ClusterConnection getConnection() {
// Insert a mock for the connection, use TESTUTIL.getConfiguration rather than
- // the conf from the master; the conf will already have an HConnection
+ // the conf from the master; the conf will already have a Connection
// associate so the below mocking of a connection will fail.
try {
return HConnectionTestingUtility.getMockedConnectionAndDecorate(
@@ -284,7 +284,7 @@ public class TestMasterNoCluster {
@Override
public ClusterConnection getConnection() {
// Insert a mock for the connection, use TESTUTIL.getConfiguration rather than
- // the conf from the master; the conf will already have an HConnection
+ // the conf from the master; the conf will already have a Connection
// associate so the below mocking of a connection will fail.
try {
return HConnectionTestingUtility.getMockedConnectionAndDecorate(
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRestartCluster.java hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRestartCluster.java
index 692b5a0..7c41c0f 100644
--- hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRestartCluster.java
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRestartCluster.java
@@ -86,8 +86,8 @@ public class TestRestartCluster {
LOG.info("\n\nStarting cluster the second time");
UTIL.restartHBaseCluster(3);
- // Need to use a new 'Configuration' so we make a new HConnection.
- // Otherwise we're reusing an HConnection that has gone stale because
+ // Need to use a new 'Configuration' so we make a new Connection.
+ // Otherwise we're reusing a Connection that has gone stale because
// the shutdown of the cluster also shut down the connection.
allRegions = MetaTableAccessor.getAllRegions(UTIL.getConnection(), false);
assertEquals(4, allRegions.size());
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionServerBulkLoad.java hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionServerBulkLoad.java
index 22e91f0..2bcff5d 100644
--- hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionServerBulkLoad.java
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionServerBulkLoad.java
@@ -34,7 +34,7 @@ import org.apache.hadoop.hbase.MultithreadedTestUtil.RepeatingTestThread;
import org.apache.hadoop.hbase.MultithreadedTestUtil.TestContext;
import org.apache.hadoop.hbase.TableExistsException;
import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.client.HConnection;
+import org.apache.hadoop.hbase.client.ClusterConnection;
import org.apache.hadoop.hbase.client.RegionServerCallable;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
@@ -170,7 +170,7 @@ public class TestHRegionServerBulkLoad {
}
// bulk load HFiles
- final HConnection conn = UTIL.getHBaseAdmin().getConnection();
+ final ClusterConnection conn = (ClusterConnection)UTIL.getHBaseAdmin().getConnection();
RegionServerCallable<Void> callable =
new RegionServerCallable<Void>(conn, tableName, Bytes.toBytes("aaa")) {
@Override
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSinkManager.java hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSinkManager.java
index 57c3196..e7556a1 100644
--- hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSinkManager.java
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSinkManager.java
@@ -25,9 +25,9 @@ import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.client.ClusterConnection;
import org.apache.hadoop.hbase.testclassification.ReplicationTests;
import org.apache.hadoop.hbase.testclassification.SmallTests;
-import org.apache.hadoop.hbase.client.HConnection;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.AdminService;
import org.apache.hadoop.hbase.replication.HBaseReplicationEndpoint;
import org.apache.hadoop.hbase.replication.ReplicationPeers;
@@ -51,7 +51,7 @@ public class TestReplicationSinkManager {
public void setUp() {
replicationPeers = mock(ReplicationPeers.class);
replicationEndpoint = mock(HBaseReplicationEndpoint.class);
- sinkManager = new ReplicationSinkManager(mock(HConnection.class),
+ sinkManager = new ReplicationSinkManager(mock(ClusterConnection.class),
PEER_CLUSTER_ID, replicationEndpoint, new Configuration());
}
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/util/BaseTestHBaseFsck.java hbase-server/src/test/java/org/apache/hadoop/hbase/util/BaseTestHBaseFsck.java
index e91d0e1..316aea0 100644
--- hbase-server/src/test/java/org/apache/hadoop/hbase/util/BaseTestHBaseFsck.java
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/util/BaseTestHBaseFsck.java
@@ -62,9 +62,7 @@ import org.apache.hadoop.hbase.client.ClusterConnection;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Delete;
-import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HBaseAdmin;
-import org.apache.hadoop.hbase.client.HConnection;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.client.ResultScanner;
@@ -157,10 +155,10 @@ public class BaseTestHBaseFsck {
* This method is used to undeploy a region -- close it and attempt to
* remove its state from the Master.
*/
- protected void undeployRegion(Connection conn, ServerName sn,
+ protected void undeployRegion(ClusterConnection conn, ServerName sn,
HRegionInfo hri) throws IOException, InterruptedException {
try {
- HBaseFsckRepair.closeRegionSilentlyAndWait((HConnection) conn, sn, hri);
+ HBaseFsckRepair.closeRegionSilentlyAndWait(conn, sn, hri);
if (!hri.isMetaTable()) {
admin.offline(hri.getRegionName());
}
@@ -355,7 +353,8 @@ public class BaseTestHBaseFsck {
Map<ServerName, List<String>> mm =
new HashMap<ServerName, List<String>>();
for (ServerName hsi : regionServers) {
- AdminProtos.AdminService.BlockingInterface server = ((HConnection) connection).getAdmin(hsi);
+ AdminProtos.AdminService.BlockingInterface server =
+ ((ClusterConnection) connection).getAdmin(hsi);
// list all online regions from this region server
List<HRegionInfo> regions = ProtobufUtil.getOnlineRegions(server);
@@ -855,7 +854,7 @@ public class BaseTestHBaseFsck {
if (unassign) {
LOG.info("Undeploying meta region " + hri + " from server " + hsa);
try (Connection unmanagedConnection = ConnectionFactory.createConnection(conf)) {
- undeployRegion(unmanagedConnection, hsa, hri);
+ undeployRegion((ClusterConnection)unmanagedConnection, hsa, hri);
}
}
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedAction.java hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedAction.java
index 26f2db9..060a6f4 100644
--- hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedAction.java
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedAction.java
@@ -36,11 +36,9 @@ import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HRegionLocation;
-import org.apache.hadoop.hbase.RegionLocations;
import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.client.ClusterConnection;
+import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
-import org.apache.hadoop.hbase.client.HConnection;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.io.compress.Compression.Algorithm;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
@@ -60,7 +58,7 @@ public abstract class MultiThreadedAction {
protected final TableName tableName;
protected final Configuration conf;
- protected final HConnection connection; // all reader / writer threads will share this connection
+ protected final Connection connection; // all reader / writer threads will share this connection
protected int numThreads = 1;
@@ -83,7 +81,7 @@ public abstract class MultiThreadedAction {
* set of column families, and random number of columns in range. The table for it can
* be created manually or, for example, via
* {@link HBaseTestingUtility#createPreSplitLoadTestTable(
- * org.apache.hadoop.hbase.Configuration, byte[], byte[], Algorithm, DataBlockEncoding)}
+ * org.apache.hadoop.conf.Configuration, TableName, byte[], Algorithm, DataBlockEncoding)}
*/
public static class DefaultDataGenerator extends LoadTestDataGenerator {
private byte[][] columnFamilies = null;
@@ -155,7 +153,7 @@ public abstract class MultiThreadedAction {
this.dataGenerator = dataGen;
this.tableName = tableName;
this.actionLetter = actionLetter;
- this.connection = (HConnection) ConnectionFactory.createConnection(conf);
+ this.connection = ConnectionFactory.createConnection(conf);
}
public void start(long startKey, long endKey, int numThreads) throws IOException {
@@ -493,7 +491,7 @@ public abstract class MultiThreadedAction {
}
private void printLocations(Result r) {
- RegionLocations rl = null;
+ HRegionLocation rl = null;
if (r == null) {
LOG.info("FAILED FOR null Result");
return;
@@ -503,14 +501,11 @@ public abstract class MultiThreadedAction {
return;
}
try {
- rl = ((ClusterConnection)connection).locateRegion(tableName, r.getRow(), true, true);
+ rl = connection.getRegionLocator(tableName).getRegionLocation(r.getRow(), true);
} catch (IOException e) {
LOG.warn("Couldn't get locations for row " + Bytes.toString(r.getRow()));
}
- HRegionLocation locations[] = rl.getRegionLocations();
- for (HRegionLocation h : locations) {
- LOG.info("LOCATION " + h);
- }
+ LOG.info("LOCATION " + rl);
}
private String resultToString(Result result) {
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedReader.java hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedReader.java
index ca06e97..5732632 100644
--- hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedReader.java
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedReader.java
@@ -158,7 +158,7 @@ public class MultiThreadedReader extends MultiThreadedAction
setName(getClass().getSimpleName() + "_" + readerId);
}
- protected HTableInterface createTable() throws IOException {
+ protected Table createTable() throws IOException {
return connection.getTable(tableName);
}
@@ -379,8 +379,8 @@ public class MultiThreadedReader extends MultiThreadedAction
numKeysVerified.incrementAndGet();
}
} else {
- HRegionLocation hloc = connection.getRegionLocation(tableName,
- get.getRow(), false);
+ HRegionLocation hloc = connection.getRegionLocator(tableName)
+ .getRegionLocation(get.getRow(), false);
String rowKey = Bytes.toString(get.getRow());
LOG.info("Key = " + rowKey + ", Region location: " + hloc);
if(isNullExpected) {
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedReaderWithACL.java hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedReaderWithACL.java
index 84cc47d..a4a7df2 100644
--- hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedReaderWithACL.java
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedReaderWithACL.java
@@ -69,7 +69,7 @@ public class MultiThreadedReaderWithACL extends MultiThreadedReader {
}
@Override
- protected HTableInterface createTable() throws IOException {
+ protected Table createTable() throws IOException {
return null;
}
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedUpdater.java hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedUpdater.java
index e28acc6..0f3baf9 100644
--- hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedUpdater.java
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedUpdater.java
@@ -130,7 +130,7 @@ public class MultiThreadedUpdater extends MultiThreadedWriterBase {
table = createTable();
}
- protected HTableInterface createTable() throws IOException {
+ protected Table createTable() throws IOException {
return connection.getTable(tableName);
}
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedWriter.java hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedWriter.java
index 83e207a..32a06bb 100644
--- hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedWriter.java
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedWriter.java
@@ -87,7 +87,7 @@ public class MultiThreadedWriter extends MultiThreadedWriterBase {
table = createTable();
}
- protected HTableInterface createTable() throws IOException {
+ protected Table createTable() throws IOException {
return connection.getTable(tableName);
}
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedWriterBase.java hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedWriterBase.java
index d4e6d80..cc7fbb8 100644
--- hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedWriterBase.java
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedWriterBase.java
@@ -97,8 +97,8 @@ public abstract class MultiThreadedWriterBase extends MultiThreadedAction {
protected String getRegionDebugInfoSafe(Table table, byte[] rowKey) {
HRegionLocation cached = null, real = null;
try {
- cached = connection.getRegionLocation(tableName, rowKey, false);
- real = connection.getRegionLocation(tableName, rowKey, true);
+ cached = connection.getRegionLocator(tableName).getRegionLocation(rowKey, false);
+ real = connection.getRegionLocator(tableName).getRegionLocation(rowKey, true);
} catch (Throwable t) {
// Cannot obtain region information for another catch block - too bad!
}
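
getRegionDebugInfoSafe compares the client's cached location with a forced re-read of hbase:meta; with RegionLocator the reload flag makes that distinction explicit. A sketch of the comparison:

    import java.io.IOException;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.RegionLocator;

    final class StaleLocationCheck {
      static boolean isStale(Connection conn, TableName table, byte[] row) throws IOException {
        try (RegionLocator locator = conn.getRegionLocator(table)) {
          HRegionLocation cached = locator.getRegionLocation(row, false); // client cache
          HRegionLocation real = locator.getRegionLocation(row, true);    // re-read hbase:meta
          return !cached.equals(real);
        }
      }
    }
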
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckTwoRS.java hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckTwoRS.java
index b3bd355..4de147e 100644
--- hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckTwoRS.java
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckTwoRS.java
@@ -34,7 +34,6 @@ import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ClusterConnection;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.HBaseAdmin;
-import org.apache.hadoop.hbase.client.HConnection;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
@@ -352,7 +351,7 @@ public class TestHBaseFsckTwoRS extends BaseTestHBaseFsck {
}
}
- HBaseFsckRepair.closeRegionSilentlyAndWait((HConnection) connection,
+ HBaseFsckRepair.closeRegionSilentlyAndWait(connection,
cluster.getRegionServer(k).getServerName(), hbi.getHdfsHRI());
admin.offline(regionName);
break;