From 414b364b17b33d9c21f63720404615e79d728995 Mon Sep 17 00:00:00 2001
From: Duo Zhang <zhangduo@apache.org>
Date: Mon, 21 Jan 2019 18:15:56 +0800
Subject: [PATCH] HBASE-21585 Use ConnectionImplementation instead of
 ClusterConnection for sync client implementation

---
 .../hadoop/hbase/client/AsyncProcess.java | 4 +-
 .../hbase/client/BufferedMutatorImpl.java | 5 +-
 .../client/ClientAsyncPrefetchScanner.java | 2 +-
 .../hadoop/hbase/client/ClientScanner.java | 22 +-
 .../hbase/client/ClientSimpleScanner.java | 4 +-
 .../hbase/client/ClusterConnection.java | 329 +-----------------
 .../hadoop/hbase/client/Connection.java | 32 ++
 .../hbase/client/ConnectionFactory.java | 2 +-
 .../client/ConnectionImplementation.java | 211 ++++++++---
 .../hadoop/hbase/client/ConnectionUtils.java | 12 +-
 .../hbase/client/FlushRegionCallable.java | 4 +-
 .../hadoop/hbase/client/HBaseAdmin.java | 70 ++--
 .../apache/hadoop/hbase/client/HBaseHbck.java | 17 +-
 .../hadoop/hbase/client/HRegionLocator.java | 27 +-
 .../apache/hadoop/hbase/client/HTable.java | 39 +--
 .../hbase/client/HTableMultiplexer.java | 13 +-
 .../org/apache/hadoop/hbase/client/Hbck.java | 4 +-
 .../hadoop/hbase/client/MasterCallable.java | 7 +-
 .../hbase/client/MultiServerCallable.java | 12 +-
 .../client/RegionAdminServiceCallable.java | 26 +-
 .../client/RegionCoprocessorRpcChannel.java | 4 +-
 .../hbase/client/RegionServerCallable.java | 12 +-
 .../hbase/client/ReversedClientScanner.java | 11 +-
 .../hbase/client/ReversedScannerCallable.java | 8 +-
 .../RpcRetryingCallerWithReadReplicas.java | 36 +-
 .../hadoop/hbase/client/ScannerCallable.java | 11 +-
 .../client/ScannerCallableWithReplicas.java | 25 +-
 .../hadoop/hbase/client/TestAsyncProcess.java | 104 +++---
 .../TestAsyncProcessWithRegionException.java | 10 +-
 .../hbase/client/TestBufferedMutator.java | 5 +-
 .../hbase/client/TestClientScanner.java | 8 +-
 .../client/TestHTableMultiplexerViaMocks.java | 4 +-
 .../client/TestReversedScannerCallable.java | 3 +-
 ...onServerBulkLoadWithOldSecureEndpoint.java | 32 +-
 .../hadoop/hbase/DistributedHBaseCluster.java | 27 +-
 .../TestMultiTableInputFormatBase.java | 4 +-
 .../mapreduce/TestTableInputFormatBase.java | 10 +-
 .../apache/hadoop/hbase/SharedConnection.java | 11 +
 .../hbase/client/AsyncClusterConnection.java | 2 +-
 .../assignment/AssignmentManagerUtil.java | 20 +-
 .../apache/hadoop/hbase/util/HBaseFsck.java | 30 +-
 .../hadoop/hbase/util/MultiHConnection.java | 15 +-
 .../hadoop/hbase/util/RegionSplitter.java | 33 +-
 .../org/apache/hadoop/hbase/HBaseCluster.java | 23 --
 .../hadoop/hbase/HBaseTestingUtility.java | 3 +-
 .../apache/hadoop/hbase/MiniHBaseCluster.java | 24 --
 .../client/HConnectionTestingUtility.java | 69 ++--
 .../hadoop/hbase/client/TestAdmin1.java | 29 +-
 .../hadoop/hbase/client/TestAdmin2.java | 2 +-
 .../hbase/client/TestAsyncTableAdminApi.java | 2 +-
 .../hadoop/hbase/client/TestCISleep.java | 10 +-
 .../hbase/client/TestClientPushback.java | 6 +-
 .../client/TestConnectionImplementation.java | 5 +-
 .../hbase/client/TestFromClientSide3.java | 6 +-
 .../hbase/client/TestHBaseAdminNoCluster.java | 5 +-
 .../TestMetaTableAccessorNoCluster.java | 50 +--
 .../hbase/client/TestMetaWithReplicas.java | 4 +-
 .../hbase/client/TestMultiParallel.java | 5 +-
 .../hbase/client/TestReplicasClient.java | 28 +-
 .../client/TestSeparateClientZKCluster.java | 2 +-
 .../client/TestShortCircuitConnection.java | 3 +-
 .../client/TestSnapshotCloneIndependence.java | 2 +-
 .../hbase/master/TestClockSkewDetection.java | 13 +-
 .../TestMetaAssignmentWithStopMaster.java | 48 +--
.../MasterProcedureTestingUtility.java | 4 +- .../TestHRegionServerBulkLoad.java | 30 +- ...estHRegionServerBulkLoadWithOldClient.java | 29 +- .../regionserver/TestWALEntrySinkFilter.java | 2 + ...estLoadIncrementalHFilesSplitRecovery.java | 35 +- .../hadoop/hbase/util/BaseTestHBaseFsck.java | 12 +- .../hbase/util/MultiThreadedReader.java | 8 +- .../hbase/util/MultiThreadedWriterBase.java | 7 +- .../thrift2/client/ThriftConnection.java | 2 + .../hbase/thrift2/TestThriftConnection.java | 4 +- 74 files changed, 708 insertions(+), 1031 deletions(-) rename hbase-server/src/test/java/org/apache/hadoop/hbase/{ => client}/TestMetaTableAccessorNoCluster.java (85%) diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java index de7449bf28..b0f863f474 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java @@ -136,7 +136,7 @@ class AsyncProcess { // TODO: many of the fields should be made private final long id; - final ClusterConnection connection; + final ConnectionImplementation connection; private final RpcRetryingCallerFactory rpcCallerFactory; final RpcControllerFactory rpcFactory; @@ -161,7 +161,7 @@ class AsyncProcess { public static final String LOG_DETAILS_PERIOD = "hbase.client.log.detail.period.ms"; private static final int DEFAULT_LOG_DETAILS_PERIOD = 10000; private final int periodToLog; - AsyncProcess(ClusterConnection hc, Configuration conf, + AsyncProcess(ConnectionImplementation hc, Configuration conf, RpcRetryingCallerFactory rpcCaller, RpcControllerFactory rpcFactory) { if (hc == null) { throw new IllegalArgumentException("ClusterConnection cannot be null."); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BufferedMutatorImpl.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BufferedMutatorImpl.java index d4bc811c72..3548627531 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BufferedMutatorImpl.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BufferedMutatorImpl.java @@ -95,7 +95,8 @@ public class BufferedMutatorImpl implements BufferedMutator { private final AsyncProcess ap; @VisibleForTesting - BufferedMutatorImpl(ClusterConnection conn, BufferedMutatorParams params, AsyncProcess ap) { + BufferedMutatorImpl(ConnectionImplementation conn, BufferedMutatorParams params, + AsyncProcess ap) { if (conn == null || conn.isClosed()) { throw new IllegalArgumentException("Connection is null or closed."); } @@ -140,7 +141,7 @@ public class BufferedMutatorImpl implements BufferedMutator { params.getOperationTimeout() : conn.getConnectionConfiguration().getOperationTimeout()); this.ap = ap; } - BufferedMutatorImpl(ClusterConnection conn, RpcRetryingCallerFactory rpcCallerFactory, + BufferedMutatorImpl(ConnectionImplementation conn, RpcRetryingCallerFactory rpcCallerFactory, RpcControllerFactory rpcFactory, BufferedMutatorParams params) { this(conn, params, // puts need to track errors globally due to how the APIs currently work. 
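Note on the two hunks above: AsyncProcess and BufferedMutatorImpl now take the concrete ConnectionImplementation instead of the ClusterConnection interface, and the same narrowing repeats through the scanner, locator, and callable classes below. Application code should be unaffected, because it reaches these internals only through the public Connection API. A minimal sketch of that unchanged public path follows; the table, family, and qualifier names are illustrative only.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.BufferedMutator;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.util.Bytes;

public class BufferedMutatorStillWorks {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    // The caller holds only the Connection interface; the BufferedMutatorImpl created
    // underneath now wraps a ConnectionImplementation, which is invisible at this level.
    try (Connection conn = ConnectionFactory.createConnection(conf);
        BufferedMutator mutator = conn.getBufferedMutator(TableName.valueOf("t1"))) {
      mutator.mutate(new Put(Bytes.toBytes("row1"))
          .addColumn(Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("v")));
      // mutate() buffers client-side; close() (via try-with-resources) flushes to the cluster.
    }
  }
}

Only code that previously cast a Connection down to ClusterConnection, such as several tests and tools later in this patch, has to switch to ConnectionImplementation.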
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientAsyncPrefetchScanner.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientAsyncPrefetchScanner.java index e5af8717a8..c4b3162169 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientAsyncPrefetchScanner.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientAsyncPrefetchScanner.java @@ -67,7 +67,7 @@ public class ClientAsyncPrefetchScanner extends ClientSimpleScanner { private final Condition notFull = lock.newCondition(); public ClientAsyncPrefetchScanner(Configuration configuration, Scan scan, TableName name, - ClusterConnection connection, RpcRetryingCallerFactory rpcCallerFactory, + ConnectionImplementation connection, RpcRetryingCallerFactory rpcCallerFactory, RpcControllerFactory rpcControllerFactory, ExecutorService pool, int replicaCallTimeoutMicroSecondScan) throws IOException { super(configuration, scan, name, connection, rpcCallerFactory, rpcControllerFactory, pool, diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientScanner.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientScanner.java index 3dbe5f427d..0b5bb2cb5c 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientScanner.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientScanner.java @@ -21,32 +21,31 @@ import static org.apache.hadoop.hbase.client.ConnectionUtils.calcEstimatedSize; import static org.apache.hadoop.hbase.client.ConnectionUtils.createScanResultCache; import static org.apache.hadoop.hbase.client.ConnectionUtils.incRegionCountMetrics; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; - import java.io.IOException; import java.io.InterruptedIOException; import java.util.ArrayDeque; import java.util.Queue; import java.util.concurrent.ExecutorService; - import org.apache.commons.lang3.mutable.MutableBoolean; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.client.ScannerCallable.MoreResults; import org.apache.hadoop.hbase.DoNotRetryIOException; -import org.apache.hadoop.hbase.exceptions.OutOfOrderScannerNextException; -import org.apache.hadoop.hbase.exceptions.ScannerResetException; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionInfo; -import org.apache.hadoop.hbase.ipc.RpcControllerFactory; import org.apache.hadoop.hbase.NotServingRegionException; -import org.apache.hadoop.hbase.regionserver.RegionServerStoppedException; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.UnknownScannerException; +import org.apache.hadoop.hbase.client.ScannerCallable.MoreResults; +import org.apache.hadoop.hbase.exceptions.OutOfOrderScannerNextException; +import org.apache.hadoop.hbase.exceptions.ScannerResetException; +import org.apache.hadoop.hbase.ipc.RpcControllerFactory; +import org.apache.hadoop.hbase.regionserver.RegionServerStoppedException; import org.apache.hadoop.hbase.util.Bytes; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; + import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; /** @@ -71,7 +70,7 @@ public abstract class ClientScanner extends AbstractClientScanner { // Keep lastResult returned successfully in case we have to reset scanner. 
protected Result lastResult = null; protected final long maxScannerResultSize; - private final ClusterConnection connection; + private final ConnectionImplementation connection; protected final TableName tableName; protected final int scannerTimeout; protected boolean scanMetricsPublished = false; @@ -94,10 +93,9 @@ public abstract class ClientScanner extends AbstractClientScanner { * @param scan {@link Scan} to use in this scanner * @param tableName The table that we wish to scan * @param connection Connection identifying the cluster - * @throws IOException */ public ClientScanner(final Configuration conf, final Scan scan, final TableName tableName, - ClusterConnection connection, RpcRetryingCallerFactory rpcFactory, + ConnectionImplementation connection, RpcRetryingCallerFactory rpcFactory, RpcControllerFactory controllerFactory, ExecutorService pool, int primaryOperationTimeout) throws IOException { if (LOG.isTraceEnabled()) { @@ -141,7 +139,7 @@ public abstract class ClientScanner extends AbstractClientScanner { initCache(); } - protected ClusterConnection getConnection() { + protected ConnectionImplementation getConnection() { return this.connection; } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientSimpleScanner.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientSimpleScanner.java index 7e9c4b9cfc..e5d7b97697 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientSimpleScanner.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientSimpleScanner.java @@ -37,11 +37,11 @@ import org.apache.hadoop.hbase.ipc.RpcControllerFactory; @InterfaceAudience.Private public class ClientSimpleScanner extends ClientScanner { public ClientSimpleScanner(Configuration configuration, Scan scan, TableName name, - ClusterConnection connection, RpcRetryingCallerFactory rpcCallerFactory, + ConnectionImplementation connection, RpcRetryingCallerFactory rpcCallerFactory, RpcControllerFactory rpcControllerFactory, ExecutorService pool, int replicaCallTimeoutMicroSecondScan) throws IOException { super(configuration, scan, name, connection, rpcCallerFactory, rpcControllerFactory, pool, - replicaCallTimeoutMicroSecondScan); + replicaCallTimeoutMicroSecondScan); } @Override diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterConnection.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterConnection.java index d3e675c1eb..0d68d9678c 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterConnection.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterConnection.java @@ -20,21 +20,10 @@ package org.apache.hadoop.hbase.client; import java.io.IOException; -import java.util.List; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.HRegionLocation; -import org.apache.hadoop.hbase.MasterNotRunningException; import org.apache.hadoop.hbase.RegionLocations; -import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.ZooKeeperConnectionException; -import org.apache.hadoop.hbase.client.backoff.ClientBackoffPolicy; -import org.apache.hadoop.hbase.ipc.RpcControllerFactory; import org.apache.yetus.audience.InterfaceAudience; -import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService; -import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ClientService; - /** Internal methods on Connection that should not be used by user code. 
*/ @InterfaceAudience.Private // NOTE: Although this class is public, this class is meant to be used directly from internal @@ -42,320 +31,40 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ClientServ public interface ClusterConnection extends Connection { /** - * Key for configuration in Configuration whose value is the class we implement making a - * new Connection instance. - */ - String HBASE_CLIENT_CONNECTION_IMPL = "hbase.client.connection.impl"; - - /** - * @return - true if the master server is running - * @deprecated this has been deprecated without a replacement - */ - @Deprecated - boolean isMasterRunning() - throws MasterNotRunningException, ZooKeeperConnectionException; - - /** - * Use this api to check if the table has been created with the specified number of - * splitkeys which was used while creating the given table. - * Note : If this api is used after a table's region gets splitted, the api may return - * false. - * @param tableName - * tableName - * @param splitKeys - * splitKeys used while creating table - * @throws IOException - * if a remote or network exception occurs - */ - boolean isTableAvailable(TableName tableName, byte[][] splitKeys) throws - IOException; - - /** - * A table that isTableEnabled == false and isTableDisabled == false - * is possible. This happens when a table has a lot of regions - * that must be processed. - * @param tableName table name - * @return true if the table is enabled, false otherwise - * @throws IOException if a remote or network exception occurs - */ - boolean isTableEnabled(TableName tableName) throws IOException; - - /** - * @param tableName table name - * @return true if the table is disabled, false otherwise - * @throws IOException if a remote or network exception occurs - */ - boolean isTableDisabled(TableName tableName) throws IOException; - - /** - * Retrieve TableState, represent current table state. - * @param tableName table state for - * @return state of the table - */ - TableState getTableState(TableName tableName) throws IOException; - - /** - * Find the location of the region of tableName that row - * lives in. - * @param tableName name of the table row is in - * @param row row key you're trying to find the region of - * @return HRegionLocation that describes where to find the region in - * question - * @throws IOException if a remote or network exception occurs - */ - HRegionLocation locateRegion(final TableName tableName, - final byte [] row) throws IOException; - - /** - * Allows flushing the region cache. - */ - void clearRegionCache(); - - void cacheLocation(final TableName tableName, final RegionLocations location); - - /** - * Allows flushing the region cache of all locations that pertain to - * tableName - * @param tableName Name of the table whose regions we are to remove from - * cache. - */ - void clearRegionCache(final TableName tableName); - - /** - * Deletes cached locations for the specific region. - * @param location The location object for the region, to be purged from cache. - */ - void deleteCachedRegionLocation(final HRegionLocation location); - - /** - * Find the location of the region of tableName that row - * lives in, ignoring any value that might be in the cache. 
- * @param tableName name of the table row is in - * @param row row key you're trying to find the region of - * @return HRegionLocation that describes where to find the region in - * question - * @throws IOException if a remote or network exception occurs - */ - HRegionLocation relocateRegion(final TableName tableName, - final byte [] row) throws IOException; - - /** - * Find the location of the region of tableName that row - * lives in, ignoring any value that might be in the cache. - * @param tableName name of the table row is in - * @param row row key you're trying to find the region of - * @param replicaId the replicaId of the region - * @return RegionLocations that describe where to find the region in - * question - * @throws IOException if a remote or network exception occurs - */ - RegionLocations relocateRegion(final TableName tableName, - final byte [] row, int replicaId) throws IOException; - - /** - * Update the location cache. This is used internally by HBase, in most cases it should not be - * used by the client application. - * @param tableName the table name - * @param regionName the region name - * @param rowkey the row - * @param exception the exception if any. Can be null. - * @param source the previous location - */ - void updateCachedLocations(TableName tableName, byte[] regionName, byte[] rowkey, - Object exception, ServerName source); - - /** - * Gets the location of the region of regionName. - * @param regionName name of the region to locate - * @return HRegionLocation that describes where to find the region in - * question - * @throws IOException if a remote or network exception occurs - */ - HRegionLocation locateRegion(final byte[] regionName) - throws IOException; - - /** - * Gets the locations of all regions in the specified table, tableName. - * @param tableName table to get regions of - * @return list of region locations for all regions of table - * @throws IOException if IO failure occurs - */ - List locateRegions(final TableName tableName) throws IOException; - - /** - * Gets the locations of all regions in the specified table, tableName. - * @param tableName table to get regions of - * @param useCache Should we use the cache to retrieve the region information. - * @param offlined True if we are to include offlined regions, false and we'll leave out offlined - * regions from returned list. - * @return list of region locations for all regions of table - * @throws IOException if IO failure occurs + * Return all the replicas for a region. */ - List locateRegions(final TableName tableName, - final boolean useCache, - final boolean offlined) throws IOException; + default RegionLocations getRegionLocations(TableName tableName, byte[] row, boolean reload) + throws IOException { + return locateRegion(tableName, row, !reload, true); + } /** - * * @param tableName table to get regions of * @param row the row * @param useCache Should we use the cache to retrieve the region information. * @param retry do we retry * @return region locations for this row. * @throws IOException if IO failure occurs + * @deprecated Since 3.0.0. Will be removed in 4.0.0. the retry parameter is meaningless, we will + * always retry, you can control it by setting the retry configurations. 
+ * @see #getRegionLocations(TableName, byte[], boolean) */ - RegionLocations locateRegion(TableName tableName, - byte[] row, boolean useCache, boolean retry) throws IOException; - - /** - * - * @param tableName table to get regions of - * @param row the row - * @param useCache Should we use the cache to retrieve the region information. - * @param retry do we retry - * @param replicaId the replicaId for the region - * @return region locations for this row. - * @throws IOException if IO failure occurs - */ - RegionLocations locateRegion(TableName tableName, byte[] row, boolean useCache, boolean retry, - int replicaId) throws IOException; - - /** - * Returns a {@link MasterKeepAliveConnection} to the active master - */ - MasterKeepAliveConnection getMaster() throws IOException; - - /** - * Get the admin service for master. - */ - AdminService.BlockingInterface getAdminForMaster() throws IOException; - - /** - * Establishes a connection to the region server at the specified address. - * @param serverName the region server to connect to - * @return proxy for HRegionServer - * @throws IOException if a remote or network exception occurs - */ - AdminService.BlockingInterface getAdmin(final ServerName serverName) throws IOException; - - /** - * Establishes a connection to the region server at the specified address, and returns - * a region client protocol. - * - * @param serverName the region server to connect to - * @return ClientProtocol proxy for RegionServer - * @throws IOException if a remote or network exception occurs - * - */ - ClientService.BlockingInterface getClient(final ServerName serverName) throws IOException; - - /** - * Find region location hosting passed row - * @param tableName table name - * @param row Row to find. - * @param reload If true do not use cache, otherwise bypass. - * @return Location of row. - * @throws IOException if a remote or network exception occurs - */ - HRegionLocation getRegionLocation(TableName tableName, byte[] row, boolean reload) + @Deprecated + RegionLocations locateRegion(TableName tableName, byte[] row, boolean useCache, boolean retry) throws IOException; /** - * Clear any caches that pertain to server name sn. - * @param sn A server name - */ - void clearCaches(final ServerName sn); - - /** - * @return Nonce generator for this ClusterConnection; may be null if disabled in configuration. - */ - NonceGenerator getNonceGenerator(); - - /** - * @return Default AsyncProcess associated with this connection. - */ - AsyncProcess getAsyncProcess(); - - /** - * Returns a new RpcRetryingCallerFactory from the given {@link Configuration}. - * This RpcRetryingCallerFactory lets the users create {@link RpcRetryingCaller}s which can be - * intercepted with the configured {@link RetryingCallerInterceptor} - * @param conf configuration - * @return RpcRetryingCallerFactory - */ - RpcRetryingCallerFactory getNewRpcRetryingCallerFactory(Configuration conf); - - /** - * @return Connection's RpcRetryingCallerFactory instance + * Clear the cached region locations. Mainly used to testing. 
*/ - RpcRetryingCallerFactory getRpcRetryingCallerFactory(); + default void clearRegionLocationCache() { + clearRegionCache(); + } /** - * @return Connection's RpcControllerFactory instance - */ - RpcControllerFactory getRpcControllerFactory(); - - /** - * @return a ConnectionConfiguration object holding parsed configuration values - */ - ConnectionConfiguration getConnectionConfiguration(); - - /** - * @return the current statistics tracker associated with this connection - */ - ServerStatisticTracker getStatisticsTracker(); - - /** - * @return the configured client backoff policy - */ - ClientBackoffPolicy getBackoffPolicy(); - - /** - * @return the MetricsConnection instance associated with this connection. - */ - MetricsConnection getConnectionMetrics(); - - /** - * @return true when this connection uses a {@link org.apache.hadoop.hbase.codec.Codec} and so - * supports cell blocks. - */ - boolean hasCellBlockSupport(); - - /** - * @return the number of region servers that are currently running - * @throws IOException if a remote or network exception occurs - */ - int getCurrentNrHRS() throws IOException; - - /** - * Retrieve an Hbck implementation to fix an HBase cluster. - * The returned Hbck is not guaranteed to be thread-safe. A new instance should be created by - * each thread. This is a lightweight operation. Pooling or caching of the returned Hbck instance - * is not recommended. - *
-   * The caller is responsible for calling {@link Hbck#close()} on the returned Hbck instance.
-   * <br/>
-   * This will be used mostly by hbck tool.
-   *
-   * @return an Hbck instance for active master. Active master is fetched from the zookeeper.
-   */
-  Hbck getHbck() throws IOException;
-
-  /**
-   * Retrieve an Hbck implementation to fix an HBase cluster.
-   * The returned Hbck is not guaranteed to be thread-safe. A new instance should be created by
-   * each thread. This is a lightweight operation. Pooling or caching of the returned Hbck instance
-   * is not recommended.
-   * <br/>
-   * The caller is responsible for calling {@link Hbck#close()} on the returned Hbck instance.
-   * <br/>
-   * This will be used mostly by hbck tool. This may only be used to by pass getting
-   * registered master from ZK. In situations where ZK is not available or active master is not
-   * registered with ZK and user can get master address by other means, master can be explicitly
-   * specified.
-   *
-   * @param masterServer explicit {@link ServerName} for master server
-   * @return an Hbck instance for a specified master server
+   * Allows flushing the region cache.
+   * @deprecated Since 3.0.0. Will be removed in 4.0.0. The name is a bit confusing.
+   * @see #clearRegionLocationCache()
    */
-  Hbck getHbck(ServerName masterServer) throws IOException;
+  @Deprecated
+  void clearRegionCache();
 }
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Connection.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Connection.java
index 1d28777ee0..928eeec4be 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Connection.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Connection.java
@@ -24,6 +24,8 @@ import java.util.concurrent.ExecutorService;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Abortable;
+import org.apache.hadoop.hbase.HBaseInterfaceAudience;
+import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.yetus.audience.InterfaceAudience;
 
@@ -182,4 +184,34 @@ public interface Connection extends Abortable, Closeable {
    * @param pool the thread pool to use for requests like batch and scan
    */
   TableBuilder getTableBuilder(TableName tableName, ExecutorService pool);
+
+
+  /**
+   * Retrieve an Hbck implementation to fix an HBase cluster. The returned Hbck is not guaranteed
+   * to be thread-safe. A new instance should be created by each thread. This is a lightweight
+   * operation. Pooling or caching of the returned Hbck instance is not recommended.<br/>
+   * The caller is responsible for calling {@link Hbck#close()} on the returned Hbck instance.<br/>
+   * This will be used mostly by hbck tool.
+   * @return an Hbck instance for the active master. The active master is fetched from ZooKeeper.
+   */
+  @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.HBCK)
+  default Hbck getHbck() throws IOException {
+    throw new UnsupportedOperationException("Not implemented");
+  }
+
+  /**
+   * Retrieve an Hbck implementation to fix an HBase cluster. The returned Hbck is not guaranteed
+   * to be thread-safe. A new instance should be created by each thread. This is a lightweight
+   * operation. Pooling or caching of the returned Hbck instance is not recommended.<br/>
+   * The caller is responsible for calling {@link Hbck#close()} on the returned Hbck instance.<br/>
+   * This will be used mostly by hbck tool. This may only be used to bypass getting the registered
+   * master from ZK. In situations where ZK is not available, or the active master is not
+   * registered with ZK and the user can get the master address by other means, the master can be
+   * explicitly specified.
+   * @param masterServer explicit {@link ServerName} for master server
+   * @return an Hbck instance for a specified master server
+   */
+  @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.HBCK)
+  default Hbck getHbck(ServerName masterServer) throws IOException {
+    throw new UnsupportedOperationException("Not implemented");
+  }
 }
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionFactory.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionFactory.java
index 2ba732af51..8cc6782ae5 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionFactory.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionFactory.java
@@ -211,7 +211,7 @@ public class ConnectionFactory {
    */
   public static Connection createConnection(Configuration conf, ExecutorService pool,
       final User user) throws IOException {
-    String className = conf.get(ClusterConnection.HBASE_CLIENT_CONNECTION_IMPL,
+    String className = conf.get(ConnectionUtils.HBASE_CLIENT_CONNECTION_IMPL,
       ConnectionImplementation.class.getName());
     Class<?> clazz;
     try {
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
index 4e3543f4d6..474e5de8b9 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
@@ -89,6 +89,7 @@ import org.apache.hbase.thirdparty.com.google.common.base.Throwables;
 import org.apache.hbase.thirdparty.com.google.protobuf.BlockingRpcChannel;
 import org.apache.hbase.thirdparty.com.google.protobuf.RpcController;
 import org.apache.hbase.thirdparty.com.google.protobuf.ServiceException;
+
 import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos;
@@ -346,9 +347,8 @@ class ConnectionImplementation implements ClusterConnection, Closeable {
    */
  @VisibleForTesting
  static NonceGenerator injectNonceGeneratorForTesting(
-      ClusterConnection conn, NonceGenerator cnm) {
-    ConnectionImplementation connImpl = (ConnectionImplementation)conn;
-    NonceGenerator ng = connImpl.getNonceGenerator();
+      ConnectionImplementation conn, NonceGenerator cnm) {
+    NonceGenerator ng = conn.getNonceGenerator();
     LOG.warn("Nonce generator is being replaced by test code for " +
       cnm.getClass().getName());
     nonceGenerator = cnm;
@@ -448,7 +448,9 @@
       }), rpcControllerFactory);
   }
 
-  @Override
+  /**
+   * @return the MetricsConnection instance associated with this connection.
+ */ public MetricsConnection getConnectionMetrics() { return this.metrics; } @@ -592,7 +594,6 @@ class ConnectionImplementation implements ClusterConnection, Closeable { * @deprecated this has been deprecated without a replacement */ @Deprecated - @Override public boolean isMasterRunning() throws MasterNotRunningException, ZooKeeperConnectionException { // When getting the master connection, we check it's running, // so if there is no exception, it means we've been able to get a @@ -607,24 +608,52 @@ class ConnectionImplementation implements ClusterConnection, Closeable { return true; } - @Override + /** + * Find region location hosting passed row + * @param tableName table name + * @param row Row to find. + * @param reload If true do not use cache, otherwise bypass. + * @return Location of row. + * @throws IOException if a remote or network exception occurs + */ public HRegionLocation getRegionLocation(final TableName tableName, final byte[] row, boolean reload) throws IOException { return reload ? relocateRegion(tableName, row) : locateRegion(tableName, row); } - - @Override + /** + * A table that isTableEnabled == false and isTableDisabled == false + * is possible. This happens when a table has a lot of regions + * that must be processed. + * @param tableName table name + * @return true if the table is enabled, false otherwise + * @throws IOException if a remote or network exception occurs + */ public boolean isTableEnabled(TableName tableName) throws IOException { return getTableState(tableName).inStates(TableState.State.ENABLED); } - @Override + /** + * @param tableName table name + * @return true if the table is disabled, false otherwise + * @throws IOException if a remote or network exception occurs + */ public boolean isTableDisabled(TableName tableName) throws IOException { return getTableState(tableName).inStates(TableState.State.DISABLED); } - @Override + /** + * Use this api to check if the table has been created with the specified number of + * splitkeys which was used while creating the given table. + * Note : If this api is used after a table's region gets splitted, the api may return + * false. + * @param tableName + * tableName + * @param splitKeys + * splitKeys used while creating table + * @throws IOException + * if a remote or network exception occurs + */ public boolean isTableAvailable(final TableName tableName, @Nullable final byte[][] splitKeys) throws IOException { checkClosed(); @@ -679,7 +708,13 @@ class ConnectionImplementation implements ClusterConnection, Closeable { } } - @Override + /** + * Gets the location of the region of regionName. + * @param regionName name of the region to locate + * @return HRegionLocation that describes where to find the region in + * question + * @throws IOException if a remote or network exception occurs + */ public HRegionLocation locateRegion(final byte[] regionName) throws IOException { RegionLocations locations = locateRegion(RegionInfo.getTable(regionName), RegionInfo.getStartKey(regionName), false, true); @@ -694,12 +729,25 @@ class ConnectionImplementation implements ClusterConnection, Closeable { } } - @Override + /** + * Gets the locations of all regions in the specified table, tableName. 
+ * @param tableName table to get regions of + * @return list of region locations for all regions of table + * @throws IOException if IO failure occurs + */ public List locateRegions(TableName tableName) throws IOException { return locateRegions(tableName, false, true); } - @Override + /** + * Gets the locations of all regions in the specified table, tableName. + * @param tableName table to get regions of + * @param useCache Should we use the cache to retrieve the region information. + * @param offlined True if we are to include offlined regions, false and we'll leave out offlined + * regions from returned list. + * @return list of region locations for all regions of table + * @throws IOException if IO failure occurs + */ public List locateRegions(TableName tableName, boolean useCache, boolean offlined) throws IOException { List regions; @@ -725,14 +773,28 @@ class ConnectionImplementation implements ClusterConnection, Closeable { return locations; } - @Override + /** + * Find the location of the region of tableName that row lives in. + * @param tableName name of the table row is in + * @param row row key you're trying to find the region of + * @return HRegionLocation that describes where to find the region in question + * @throws IOException if a remote or network exception occurs + */ public HRegionLocation locateRegion(final TableName tableName, final byte[] row) throws IOException { RegionLocations locations = locateRegion(tableName, row, true, true); return locations == null ? null : locations.getRegionLocation(); } - @Override + /** + * Find the location of the region of tableName that row + * lives in, ignoring any value that might be in the cache. + * @param tableName name of the table row is in + * @param row row key you're trying to find the region of + * @return HRegionLocation that describes where to find the region in + * question + * @throws IOException if a remote or network exception occurs + */ public HRegionLocation relocateRegion(final TableName tableName, final byte[] row) throws IOException { RegionLocations locations = @@ -741,7 +803,16 @@ class ConnectionImplementation implements ClusterConnection, Closeable { : locations.getRegionLocation(RegionReplicaUtil.DEFAULT_REPLICA_ID); } - @Override + /** + * Find the location of the region of tableName that row + * lives in, ignoring any value that might be in the cache. + * @param tableName name of the table row is in + * @param row row key you're trying to find the region of + * @param replicaId the replicaId of the region + * @return RegionLocations that describe where to find the region in + * question + * @throws IOException if a remote or network exception occurs + */ public RegionLocations relocateRegion(final TableName tableName, final byte [] row, int replicaId) throws IOException{ // Since this is an explicit request not to use any caching, finding @@ -760,7 +831,15 @@ class ConnectionImplementation implements ClusterConnection, Closeable { return locateRegion(tableName, row, useCache, retry, RegionReplicaUtil.DEFAULT_REPLICA_ID); } - @Override + /** + * @param tableName table to get regions of + * @param row the row + * @param useCache Should we use the cache to retrieve the region information. + * @param retry do we retry + * @param replicaId the replicaId for the region + * @return region locations for this row. 
+ * @throws IOException if IO failure occurs + */ public RegionLocations locateRegion(final TableName tableName, final byte[] row, boolean useCache, boolean retry, int replicaId) throws IOException { checkClosed(); @@ -965,7 +1044,6 @@ class ConnectionImplementation implements ClusterConnection, Closeable { * @param tableName The table name. * @param location the new location */ - @Override public void cacheLocation(final TableName tableName, final RegionLocations location) { metaCache.cacheLocation(tableName, location); } @@ -984,20 +1062,29 @@ class ConnectionImplementation implements ClusterConnection, Closeable { metaCache.clearCache(tableName, row); } - /* - * Delete all cached entries of a table that maps to a specific location. + /** + * Clear any caches that pertain to server name serverName. + * @param serverName A server name */ - @Override public void clearCaches(final ServerName serverName) { metaCache.clearCache(serverName); } + + /** + * Allows flushing the region cache. + */ @Override public void clearRegionCache() { metaCache.clearCache(); } - @Override + /** + * Allows flushing the region cache of all locations that pertain to + * tableName + * @param tableName Name of the table whose regions we are to remove from + * cache. + */ public void clearRegionCache(final TableName tableName) { metaCache.clearCache(tableName); } @@ -1199,12 +1286,19 @@ class ConnectionImplementation implements ClusterConnection, Closeable { } } - @Override + /** + * Get the admin service for master. + */ public AdminProtos.AdminService.BlockingInterface getAdminForMaster() throws IOException { return getAdmin(get(registry.getMasterAddress())); } - @Override + /** + * Establishes a connection to the region server at the specified address. + * @param serverName the region server to connect to + * @return proxy for HRegionServer + * @throws IOException if a remote or network exception occurs + */ public AdminProtos.AdminService.BlockingInterface getAdmin(ServerName serverName) throws IOException { checkClosed(); @@ -1220,7 +1314,13 @@ class ConnectionImplementation implements ClusterConnection, Closeable { }); } - @Override + /** + * Establishes a connection to the region server at the specified address, and returns a region + * client protocol. + * @param serverName the region server to connect to + * @return ClientProtocol proxy for RegionServer + * @throws IOException if a remote or network exception occurs + */ public BlockingInterface getClient(ServerName serverName) throws IOException { checkClosed(); if (isDeadServer(serverName)) { @@ -1230,14 +1330,13 @@ class ConnectionImplementation implements ClusterConnection, Closeable { serverName, this.hostnamesCanChange); return (ClientProtos.ClientService.BlockingInterface) computeIfAbsentEx(stubs, key, () -> { BlockingRpcChannel channel = - this.rpcClient.createBlockingRpcChannel(serverName, user, rpcTimeout); + this.rpcClient.createBlockingRpcChannel(serverName, user, rpcTimeout); return ClientProtos.ClientService.newBlockingStub(channel); }); } final MasterServiceState masterServiceState = new MasterServiceState(this); - @Override public MasterKeepAliveConnection getMaster() throws IOException { return getKeepAliveMasterService(); } @@ -1831,7 +1930,10 @@ class ConnectionImplementation implements ClusterConnection, Closeable { cacheLocation(hri.getTable(), source, newHrl); } - @Override + /** + * Deletes cached locations for the specific region. + * @param location The location object for the region, to be purged from cache. 
+ */ public void deleteCachedRegionLocation(final HRegionLocation location) { metaCache.clearCache(location); } @@ -1844,7 +1946,6 @@ class ConnectionImplementation implements ClusterConnection, Closeable { * or wrapped or both RegionMovedException * @param source server that is the source of the location update. */ - @Override public void updateCachedLocations(final TableName tableName, byte[] regionName, byte[] rowkey, final Object exception, final ServerName source) { if (rowkey == null || tableName == null) { @@ -1911,17 +2012,23 @@ class ConnectionImplementation implements ClusterConnection, Closeable { metaCache.clearCache(regionInfo); } - @Override + /** + * @return Default AsyncProcess associated with this connection. + */ public AsyncProcess getAsyncProcess() { return asyncProcess; } - @Override + /** + * @return the current statistics tracker associated with this connection + */ public ServerStatisticTracker getStatisticsTracker() { return this.stats; } - @Override + /** + * @return the configured client backoff policy + */ public ClientBackoffPolicy getBackoffPolicy() { return this.backoffPolicy; } @@ -1957,7 +2064,10 @@ class ConnectionImplementation implements ClusterConnection, Closeable { return this.aborted; } - @Override + /** + * @return the number of region servers that are currently running + * @throws IOException if a remote or network exception occurs + */ public int getCurrentNrHRS() throws IOException { return get(this.registry.getCurrentNrHRS()); } @@ -2000,12 +2110,18 @@ class ConnectionImplementation implements ClusterConnection, Closeable { close(); } - @Override + /** + * @return Nonce generator for this ClusterConnection; may be null if disabled in configuration. + */ public NonceGenerator getNonceGenerator() { return nonceGenerator; } - @Override + /** + * Retrieve TableState, represent current table state. + * @param tableName table state for + * @return state of the table + */ public TableState getTableState(TableName tableName) throws IOException { checkClosed(); TableState tableState = MetaTableAccessor.getTableState(this, tableName); @@ -2015,28 +2131,43 @@ class ConnectionImplementation implements ClusterConnection, Closeable { return tableState; } - @Override + /** + * Returns a new RpcRetryingCallerFactory from the given {@link Configuration}. + * This RpcRetryingCallerFactory lets the users create {@link RpcRetryingCaller}s which can be + * intercepted with the configured {@link RetryingCallerInterceptor} + * @param conf configuration + * @return RpcRetryingCallerFactory + */ public RpcRetryingCallerFactory getNewRpcRetryingCallerFactory(Configuration conf) { return RpcRetryingCallerFactory .instantiate(conf, this.interceptor, this.getStatisticsTracker()); } - @Override + /** + * @return true when this connection uses a {@link org.apache.hadoop.hbase.codec.Codec} and so + * supports cell blocks. 
+ */ public boolean hasCellBlockSupport() { return this.rpcClient.hasCellBlockSupport(); } - @Override + /** + * @return a ConnectionConfiguration object holding parsed configuration values + */ public ConnectionConfiguration getConnectionConfiguration() { return this.connectionConfig; } - @Override + /** + * @return Connection's RpcRetryingCallerFactory instance + */ public RpcRetryingCallerFactory getRpcRetryingCallerFactory() { return this.rpcCallerFactory; } - @Override + /** + * @return Connection's RpcControllerFactory instance + */ public RpcControllerFactory getRpcControllerFactory() { return this.rpcControllerFactory; } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java index 8e050df93d..4905ca91f5 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java @@ -72,6 +72,12 @@ public final class ConnectionUtils { private static final Logger LOG = LoggerFactory.getLogger(ConnectionUtils.class); + /** + * Key for configuration in Configuration whose value is the class we implement making a new + * Connection instance. + */ + public static final String HBASE_CLIENT_CONNECTION_IMPL = "hbase.client.connection.impl"; + private ConnectionUtils() { } @@ -101,7 +107,7 @@ public final class ConnectionUtils { * @param cnm Replaces the nonce generator used, for testing. * @return old nonce generator. */ - public static NonceGenerator injectNonceGeneratorForTesting(ClusterConnection conn, + public static NonceGenerator injectNonceGeneratorForTesting(ConnectionImplementation conn, NonceGenerator cnm) { return ConnectionImplementation.injectNonceGeneratorForTesting(conn, cnm); } @@ -178,7 +184,7 @@ public final class ConnectionUtils { * @return an short-circuit connection. 
* @throws IOException if IO failure occurred */ - public static ClusterConnection createShortCircuitConnection(final Configuration conf, + public static ConnectionImplementation createShortCircuitConnection(final Configuration conf, ExecutorService pool, User user, final ServerName serverName, final AdminService.BlockingInterface admin, final ClientService.BlockingInterface client) throws IOException { @@ -194,7 +200,7 @@ public final class ConnectionUtils { */ @VisibleForTesting public static void setupMasterlessConnection(Configuration conf) { - conf.set(ClusterConnection.HBASE_CLIENT_CONNECTION_IMPL, MasterlessConnection.class.getName()); + conf.set(ConnectionUtils.HBASE_CLIENT_CONNECTION_IMPL, MasterlessConnection.class.getName()); } /** diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/FlushRegionCallable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/FlushRegionCallable.java index bb265a43f6..a704b5182d 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/FlushRegionCallable.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/FlushRegionCallable.java @@ -42,7 +42,7 @@ public class FlushRegionCallable extends RegionAdminServiceCallable(connection, rpcControllerFactory) { - @Override - protected TableDescriptor rpcCall() throws Exception { - GetTableDescriptorsRequest req = + executeCallable(new MasterCallable(connection, rpcControllerFactory) { + @Override + protected TableDescriptor rpcCall() throws Exception { + GetTableDescriptorsRequest req = RequestConverter.buildGetTableDescriptorsRequest(tableName); - GetTableDescriptorsResponse htds = master.getTableDescriptors(getRpcController(), req); - if (!htds.getTableSchemaList().isEmpty()) { - return ProtobufUtil.toTableDescriptor(htds.getTableSchemaList().get(0)); + GetTableDescriptorsResponse htds = master.getTableDescriptors(getRpcController(), req); + if (!htds.getTableSchemaList().isEmpty()) { + return ProtobufUtil.toTableDescriptor(htds.getTableSchemaList().get(0)); + } + return null; } - return null; - } - }, rpcCallerFactory, operationTimeout, rpcTimeout); + }, rpcCallerFactory, operationTimeout, rpcTimeout); if (td != null) { return td; } @@ -566,26 +567,27 @@ public class HBaseAdmin implements Admin { * Connection, RpcRetryingCallerFactory,RpcControllerFactory,int,int)} */ @Deprecated - static HTableDescriptor getHTableDescriptor(final TableName tableName, Connection connection, - RpcRetryingCallerFactory rpcCallerFactory, final RpcControllerFactory rpcControllerFactory, - int operationTimeout, int rpcTimeout) throws IOException { + static HTableDescriptor getHTableDescriptor(final TableName tableName, + ConnectionImplementation connection, RpcRetryingCallerFactory rpcCallerFactory, + final RpcControllerFactory rpcControllerFactory, int operationTimeout, int rpcTimeout) + throws IOException { if (tableName == null) { return null; } HTableDescriptor htd = - executeCallable(new MasterCallable(connection, rpcControllerFactory) { - @Override - protected HTableDescriptor rpcCall() throws Exception { - GetTableDescriptorsRequest req = - RequestConverter.buildGetTableDescriptorsRequest(tableName); - GetTableDescriptorsResponse htds = master.getTableDescriptors(getRpcController(), req); - if (!htds.getTableSchemaList().isEmpty()) { - return new ImmutableHTableDescriptor( - ProtobufUtil.toTableDescriptor(htds.getTableSchemaList().get(0))); - } - return null; + executeCallable(new MasterCallable(connection, rpcControllerFactory) { + @Override + protected 
HTableDescriptor rpcCall() throws Exception { + GetTableDescriptorsRequest req = + RequestConverter.buildGetTableDescriptorsRequest(tableName); + GetTableDescriptorsResponse htds = master.getTableDescriptors(getRpcController(), req); + if (!htds.getTableSchemaList().isEmpty()) { + return new ImmutableHTableDescriptor( + ProtobufUtil.toTableDescriptor(htds.getTableSchemaList().get(0))); } - }, rpcCallerFactory, operationTimeout, rpcTimeout); + return null; + } + }, rpcCallerFactory, operationTimeout, rpcTimeout); if (htd != null) { return new ImmutableHTableDescriptor(htd); } @@ -746,7 +748,7 @@ public class HBaseAdmin implements Admin { protected Void postOperationResult(final Void result, final long deadlineTs) throws IOException, TimeoutException { // Delete cached information to prevent clients from using old locations - ((ClusterConnection) getAdmin().getConnection()).clearRegionCache(getTableName()); + ((ConnectionImplementation) getAdmin().getConnection()).clearRegionCache(getTableName()); return super.postOperationResult(result, deadlineTs); } } @@ -2412,8 +2414,8 @@ public class HBaseAdmin implements Admin { // Check ZK first. // If the connection exists, we may have a connection to ZK that does not work anymore - try (ClusterConnection connection = - (ClusterConnection) ConnectionFactory.createConnection(copyOfConf)) { + try (ConnectionImplementation connection = + (ConnectionImplementation) ConnectionFactory.createConnection(copyOfConf)) { // can throw MasterNotRunningException connection.isMasterRunning(); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseHbck.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseHbck.java index a276017b0c..e7d0e5968a 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseHbck.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseHbck.java @@ -21,9 +21,14 @@ import java.io.IOException; import java.util.List; import java.util.concurrent.Callable; import java.util.stream.Collectors; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.ipc.RpcControllerFactory; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import org.apache.hbase.thirdparty.com.google.protobuf.ServiceException; + import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter; import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos; @@ -31,16 +36,9 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableStateResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.HbckService.BlockingInterface; -import org.apache.hbase.thirdparty.com.google.protobuf.ServiceException; - -import org.apache.yetus.audience.InterfaceAudience; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - /** - * Use {@link ClusterConnection#getHbck()} to obtain an instance of {@link Hbck} instead of + * Use {@link Connection#getHbck()} to obtain an instance of {@link Hbck} instead of * constructing an HBaseHbck directly. * *

 * <p>Connection should be an unmanaged connection obtained via
 * {@link ConnectionFactory#createConnection(Configuration)}.
 *
 * <p>An instance of this class is lightweight and not-thread safe. A new instance should be created
 * by each thread. Pooling or caching of the instance is not recommended.

* * @see ConnectionFactory - * @see ClusterConnection * @see Hbck */ @InterfaceAudience.Private diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HRegionLocator.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HRegionLocator.java index 255911471e..6ac99193b9 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HRegionLocator.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HRegionLocator.java @@ -20,10 +20,10 @@ package org.apache.hadoop.hbase.client; import java.io.IOException; import java.util.ArrayList; +import java.util.Collections; import java.util.List; - import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.MetaTableAccessor; import org.apache.hadoop.hbase.RegionLocations; @@ -47,9 +47,9 @@ import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesti public class HRegionLocator implements RegionLocator { private final TableName tableName; - private final ClusterConnection connection; + private final ConnectionImplementation connection; - public HRegionLocator(TableName tableName, ClusterConnection connection) { + public HRegionLocator(TableName tableName, ConnectionImplementation connection) { this.connection = connection; this.tableName = tableName; } @@ -84,14 +84,19 @@ public class HRegionLocator implements RegionLocator { @Override public List getAllRegionLocations() throws IOException { TableName tableName = getName(); - ArrayList regions = new ArrayList<>(); - for (RegionLocations locations : listRegionLocations()) { - for (HRegionLocation location : locations.getRegionLocations()) { - regions.add(location); + if (TableName.isMetaTableName(tableName)) { + return Collections + .singletonList(connection.getRegionLocation(tableName, HConstants.EMPTY_START_ROW, false)); + } else { + ArrayList regions = new ArrayList<>(); + for (RegionLocations locations : listRegionLocations()) { + for (HRegionLocation location : locations.getRegionLocations()) { + regions.add(location); + } + connection.cacheLocation(tableName, locations); } - connection.cacheLocation(tableName, locations); + return regions; } - return regions; } /** @@ -124,7 +129,7 @@ public class HRegionLocator implements RegionLocator { final byte[][] endKeyList = new byte[regions.size()][]; for (int i = 0; i < regions.size(); i++) { - HRegionInfo region = regions.get(i).getRegionLocation().getRegionInfo(); + RegionInfo region = regions.get(i).getRegionLocation().getRegion(); startKeyList[i] = region.getStartKey(); endKeyList[i] = region.getEndKey(); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java index fb69a2530b..2224719ec6 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java @@ -106,7 +106,7 @@ import static org.apache.hadoop.hbase.client.ConnectionUtils.checkHasFamilies; public class HTable implements Table { private static final Logger LOG = LoggerFactory.getLogger(HTable.class); private static final Consistency DEFAULT_CONSISTENCY = Consistency.STRONG; - private final ClusterConnection connection; + private final ConnectionImplementation connection; private final TableName tableName; private final Configuration configuration; private final 
ConnectionConfiguration connConfiguration; @@ -157,8 +157,7 @@ public class HTable implements Table { * @param rpcControllerFactory The RPC controller factory * @param pool ExecutorService to be used. */ - @InterfaceAudience.Private - protected HTable(final ClusterConnection connection, + protected HTable(final ConnectionImplementation connection, final TableBuilderBase builder, final RpcRetryingCallerFactory rpcCallerFactory, final RpcControllerFactory rpcControllerFactory, @@ -464,22 +463,18 @@ public class HTable implements Table { } public static void doBatchWithCallback(List actions, Object[] results, - Callback callback, ClusterConnection connection, ExecutorService pool, TableName tableName) - throws InterruptedIOException, RetriesExhaustedWithDetailsException { - int operationTimeout = connection.getConnectionConfiguration().getOperationTimeout(); + Callback callback, Connection connection, ExecutorService pool, TableName tableName) + throws InterruptedIOException, RetriesExhaustedWithDetailsException { + ConnectionImplementation connImpl = (ConnectionImplementation) connection; + int operationTimeout = connImpl.getConnectionConfiguration().getOperationTimeout(); int writeTimeout = connection.getConfiguration().getInt(HConstants.HBASE_RPC_WRITE_TIMEOUT_KEY, - connection.getConfiguration().getInt(HConstants.HBASE_RPC_TIMEOUT_KEY, - HConstants.DEFAULT_HBASE_RPC_TIMEOUT)); - AsyncProcessTask task = AsyncProcessTask.newBuilder(callback) - .setPool(pool) - .setTableName(tableName) - .setRowAccess(actions) - .setResults(results) - .setOperationTimeout(operationTimeout) - .setRpcTimeout(writeTimeout) - .setSubmittedRows(AsyncProcessTask.SubmittedRows.ALL) - .build(); - AsyncRequestFuture ars = connection.getAsyncProcess().submit(task); + connection.getConfiguration().getInt(HConstants.HBASE_RPC_TIMEOUT_KEY, + HConstants.DEFAULT_HBASE_RPC_TIMEOUT)); + AsyncProcessTask task = + AsyncProcessTask.newBuilder(callback).setPool(pool).setTableName(tableName) + .setRowAccess(actions).setResults(results).setOperationTimeout(operationTimeout) + .setRpcTimeout(writeTimeout).setSubmittedRows(AsyncProcessTask.SubmittedRows.ALL).build(); + AsyncRequestFuture ars = connImpl.getAsyncProcess().submit(task); ars.waitUntilDone(); if (ars.hasError()) { throw ars.getErrors(); @@ -949,14 +944,6 @@ public class HTable implements Table { return this.pool; } - /** - * Explicitly clears the region cache to fetch the latest value from META. - * This is a power user function: avoid unless you know the ramifications. 
- */ - public void clearRegionCache() { - this.connection.clearRegionCache(); - } - @Override public CoprocessorRpcChannel coprocessorService(byte[] row) { return new RegionCoprocessorRpcChannel(connection, tableName, row); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTableMultiplexer.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTableMultiplexer.java index e6b061e45f..c3c509dd63 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTableMultiplexer.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTableMultiplexer.java @@ -34,7 +34,6 @@ import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicLong; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HConstants; @@ -46,6 +45,7 @@ import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; @@ -78,7 +78,7 @@ public class HTableMultiplexer { new ConcurrentHashMap<>(); private final Configuration workerConf; - private final ClusterConnection conn; + private final ConnectionImplementation conn; private final ExecutorService pool; private final int maxAttempts; private final int perRegionServerBufferQueueSize; @@ -104,7 +104,7 @@ public class HTableMultiplexer { */ public HTableMultiplexer(Connection conn, Configuration conf, int perRegionServerBufferQueueSize) { - this.conn = (ClusterConnection) conn; + this.conn = (ConnectionImplementation) conn; this.pool = HTable.getDefaultExecutor(conf); // how many times we could try in total, one more than retry number this.maxAttempts = conf.getInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, @@ -196,7 +196,7 @@ public class HTableMultiplexer { try { HTable.validatePut(put, maxKeyValueSize); // Allow mocking to get at the connection, but don't expose the connection to users. - ClusterConnection conn = (ClusterConnection) getConnection(); + ConnectionImplementation conn = getConnection(); // AsyncProcess in the FlushWorker should take care of refreshing the location cache // as necessary. We shouldn't have to do that here. 
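The HTableMultiplexer changes above follow the same pattern: the constructor now casts the supplied Connection straight to ConnectionImplementation instead of ClusterConnection. A usage sketch under that assumption (table name, column and queue size are illustrative):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.HTableMultiplexer;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.util.Bytes;

Configuration conf = HBaseConfiguration.create();
try (Connection conn = ConnectionFactory.createConnection(conf)) {
  // A ConnectionFactory-created connection is a ConnectionImplementation
  // underneath, which is what the multiplexer now requires.
  HTableMultiplexer mux = new HTableMultiplexer(conn, conf, 1000);
  Put put = new Put(Bytes.toBytes("row"))
      .addColumn(Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("v"));
  boolean queued = mux.put(TableName.valueOf("t1"), put); // false when the buffer is full
}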
HRegionLocation loc = conn.getRegionLocation(tableName, put.getRow(), false); @@ -257,7 +257,7 @@ public class HTableMultiplexer { } @VisibleForTesting - ClusterConnection getConnection() { + ConnectionImplementation getConnection() { return this.conn; } @@ -439,7 +439,8 @@ public class HTableMultiplexer { private final int writeRpcTimeout; // needed to pass in through AsyncProcess constructor private final int operationTimeout; private final ExecutorService pool; - public FlushWorker(Configuration conf, ClusterConnection conn, HRegionLocation addr, + + public FlushWorker(Configuration conf, ConnectionImplementation conn, HRegionLocation addr, HTableMultiplexer htableMultiplexer, int perRegionServerBufferQueueSize, ExecutorService pool, ScheduledExecutorService executor) { this.addr = addr; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Hbck.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Hbck.java index e88805cdcc..fdecde99ce 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Hbck.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Hbck.java @@ -20,7 +20,6 @@ package org.apache.hadoop.hbase.client; import java.io.Closeable; import java.io.IOException; import java.util.List; - import org.apache.hadoop.hbase.Abortable; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.yetus.audience.InterfaceAudience; @@ -28,7 +27,7 @@ import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos; /** - * Hbck fixup tool APIs. Obtain an instance from {@link ClusterConnection#getHbck()} and call + * Hbck fixup tool APIs. Obtain an instance from {@link Connection#getHbck()} and call * {@link #close()} when done. *
<p/>
WARNING: the below methods can damage the cluster. It may leave the cluster in an * indeterminate state, e.g. region not assigned, or some hdfs files left behind. After running @@ -36,7 +35,6 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos; * procedures to get regions back online. DO AT YOUR OWN RISK. For experienced users only. * * @see ConnectionFactory - * @see ClusterConnection * @since 2.0.2, 2.1.1 */ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.HBCK) diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterCallable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterCallable.java index 7ae97314c4..04da2ebe4a 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterCallable.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterCallable.java @@ -43,12 +43,13 @@ import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; */ @InterfaceAudience.Private abstract class MasterCallable implements RetryingCallable, Closeable { - protected final ClusterConnection connection; + protected final ConnectionImplementation connection; protected MasterKeepAliveConnection master; private final HBaseRpcController rpcController; - MasterCallable(final Connection connection, final RpcControllerFactory rpcConnectionFactory) { - this.connection = (ClusterConnection) connection; + MasterCallable(ConnectionImplementation connection, + final RpcControllerFactory rpcConnectionFactory) { + this.connection = connection; this.rpcController = rpcConnectionFactory.newController(); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiServerCallable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiServerCallable.java index bfc161e5c2..bf557faa23 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiServerCallable.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiServerCallable.java @@ -22,7 +22,6 @@ import java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.Map; - import org.apache.hadoop.hbase.CellScannable; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.DoNotRetryIOException; @@ -31,15 +30,16 @@ import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; import org.apache.yetus.audience.InterfaceAudience; + +import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hbase.thirdparty.com.google.protobuf.RpcController; + import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter; import org.apache.hadoop.hbase.shaded.protobuf.ResponseConverter; import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos; import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MultiRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto; import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.RegionAction; -import org.apache.hbase.thirdparty.com.google.protobuf.RpcController; - -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; /** * Callable that handles the multi method call going against a single @@ -52,7 +52,7 @@ class MultiServerCallable extends CancellableRegionServerCallable private MultiAction multiAction; private boolean cellBlock; - MultiServerCallable(final ClusterConnection connection, final TableName tableName, + 
MultiServerCallable(final ConnectionImplementation connection, final TableName tableName, final ServerName location, final MultiAction multi, RpcController rpcController, int rpcTimeout, RetryingTimeTracker tracker, int priority) { super(connection, tableName, null, rpcController, rpcTimeout, tracker, priority); @@ -141,7 +141,7 @@ class MultiServerCallable extends CancellableRegionServerCallable private boolean isCellBlock() { // This is not exact -- the configuration could have changed on us after connection was set up // but it will do for now. - ClusterConnection conn = getConnection(); + ConnectionImplementation conn = getConnection(); return conn.hasCellBlockSupport(); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionAdminServiceCallable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionAdminServiceCallable.java index ece91bd1e0..c07a8e8a9a 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionAdminServiceCallable.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionAdminServiceCallable.java @@ -46,27 +46,27 @@ public abstract class RegionAdminServiceCallable implements RetryingCallable< protected final RpcControllerFactory rpcControllerFactory; private HBaseRpcController controller = null; - protected final ClusterConnection connection; + protected final ConnectionImplementation connection; protected HRegionLocation location; protected final TableName tableName; protected final byte[] row; protected final int replicaId; - public RegionAdminServiceCallable(ClusterConnection connection, + public RegionAdminServiceCallable(ConnectionImplementation connection, RpcControllerFactory rpcControllerFactory, TableName tableName, byte[] row) { this(connection, rpcControllerFactory, null, tableName, row); } - public RegionAdminServiceCallable(ClusterConnection connection, - RpcControllerFactory rpcControllerFactory, HRegionLocation location, - TableName tableName, byte[] row) { - this(connection, rpcControllerFactory, location, - tableName, row, RegionReplicaUtil.DEFAULT_REPLICA_ID); + public RegionAdminServiceCallable(ConnectionImplementation connection, + RpcControllerFactory rpcControllerFactory, HRegionLocation location, TableName tableName, + byte[] row) { + this(connection, rpcControllerFactory, location, tableName, row, + RegionReplicaUtil.DEFAULT_REPLICA_ID); } - public RegionAdminServiceCallable(ClusterConnection connection, - RpcControllerFactory rpcControllerFactory, HRegionLocation location, - TableName tableName, byte[] row, int replicaId) { + public RegionAdminServiceCallable(ConnectionImplementation connection, + RpcControllerFactory rpcControllerFactory, HRegionLocation location, TableName tableName, + byte[] row, int replicaId) { this.connection = connection; this.rpcControllerFactory = rpcControllerFactory; this.location = location; @@ -95,7 +95,7 @@ public abstract class RegionAdminServiceCallable implements RetryingCallable< } public HRegionLocation getLocation(boolean useCache) throws IOException { - RegionLocations rl = getRegionLocations(connection, tableName, row, useCache, replicaId); + RegionLocations rl = getRegionLocations(useCache); if (rl == null) { throw new HBaseIOException(getExceptionMessage()); } @@ -138,9 +138,7 @@ public abstract class RegionAdminServiceCallable implements RetryingCallable< return ConnectionUtils.getPauseTime(pause, tries); } - public static RegionLocations getRegionLocations( - ClusterConnection connection, TableName tableName, byte[] row, - 
boolean useCache, int replicaId) + private RegionLocations getRegionLocations(boolean useCache) throws RetriesExhaustedException, DoNotRetryIOException, InterruptedIOException { RegionLocations rl; try { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionCoprocessorRpcChannel.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionCoprocessorRpcChannel.java index 448302c854..80371b764a 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionCoprocessorRpcChannel.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionCoprocessorRpcChannel.java @@ -46,7 +46,7 @@ class RegionCoprocessorRpcChannel extends SyncCoprocessorRpcChannel { private static final Logger LOG = LoggerFactory.getLogger(RegionCoprocessorRpcChannel.class); private final TableName table; private final byte [] row; - private final ClusterConnection conn; + private final ConnectionImplementation conn; private byte[] lastRegion; private final int operationTimeout; private final RpcRetryingCallerFactory rpcCallerFactory; @@ -57,7 +57,7 @@ class RegionCoprocessorRpcChannel extends SyncCoprocessorRpcChannel { * @param table to connect to * @param row to locate region with */ - RegionCoprocessorRpcChannel(ClusterConnection conn, TableName table, byte[] row) { + RegionCoprocessorRpcChannel(ConnectionImplementation conn, TableName table, byte[] row) { this.table = table; this.row = row; this.conn = conn; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionServerCallable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionServerCallable.java index f709c440d6..407883c934 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionServerCallable.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionServerCallable.java @@ -19,7 +19,6 @@ package org.apache.hadoop.hbase.client; import java.io.IOException; - import org.apache.hadoop.hbase.CellScanner; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionInfo; @@ -27,11 +26,12 @@ import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableNotEnabledException; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.ipc.HBaseRpcController; import org.apache.hadoop.hbase.protobuf.ProtobufUtil; -import org.apache.hbase.thirdparty.com.google.protobuf.RpcController; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.yetus.audience.InterfaceAudience; + +import org.apache.hbase.thirdparty.com.google.protobuf.RpcController; /** * Implementations make a RPC call against a RegionService via a protobuf Service. @@ -160,10 +160,10 @@ public abstract class RegionServerCallable implements RetryingCallable } /** - * @return {@link ClusterConnection} instance used by this Callable. + * @return {@link ConnectionImplementation} instance used by this Callable. 
*/ - protected ClusterConnection getConnection() { - return (ClusterConnection) this.connection; + protected ConnectionImplementation getConnection() { + return (ConnectionImplementation) this.connection; } protected HRegionLocation getLocation() { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ReversedClientScanner.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ReversedClientScanner.java index 53b96413f6..2ed037e8a9 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ReversedClientScanner.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ReversedClientScanner.java @@ -37,20 +37,13 @@ public class ReversedClientScanner extends ClientScanner { /** * Create a new ReversibleClientScanner for the specified table Note that the passed * {@link Scan}'s start row maybe changed. - * @param conf - * @param scan - * @param tableName - * @param connection - * @param pool - * @param primaryOperationTimeout - * @throws IOException */ public ReversedClientScanner(Configuration conf, Scan scan, TableName tableName, - ClusterConnection connection, RpcRetryingCallerFactory rpcFactory, + ConnectionImplementation connection, RpcRetryingCallerFactory rpcFactory, RpcControllerFactory controllerFactory, ExecutorService pool, int primaryOperationTimeout) throws IOException { super(conf, scan, tableName, connection, rpcFactory, controllerFactory, pool, - primaryOperationTimeout); + primaryOperationTimeout); } @Override diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ReversedScannerCallable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ReversedScannerCallable.java index 30e541cefe..6a325b26a9 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ReversedScannerCallable.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ReversedScannerCallable.java @@ -52,8 +52,8 @@ public class ReversedScannerCallable extends ScannerCallable { * @param rpcFactory to create an {@link com.google.protobuf.RpcController} to talk to the * regionserver */ - public ReversedScannerCallable(ClusterConnection connection, TableName tableName, Scan scan, - ScanMetrics scanMetrics, RpcControllerFactory rpcFactory) { + public ReversedScannerCallable(ConnectionImplementation connection, TableName tableName, + Scan scan, ScanMetrics scanMetrics, RpcControllerFactory rpcFactory) { super(connection, tableName, scan, scanMetrics, rpcFactory); } @@ -66,8 +66,8 @@ public class ReversedScannerCallable extends ScannerCallable { * regionserver * @param replicaId the replica id */ - public ReversedScannerCallable(ClusterConnection connection, TableName tableName, Scan scan, - ScanMetrics scanMetrics, RpcControllerFactory rpcFactory, int replicaId) { + public ReversedScannerCallable(ConnectionImplementation connection, TableName tableName, + Scan scan, ScanMetrics scanMetrics, RpcControllerFactory rpcFactory, int replicaId) { super(connection, tableName, scan, scanMetrics, rpcFactory, replicaId); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCallerWithReadReplicas.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCallerWithReadReplicas.java index 4a31cff4a7..b2d06002e0 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCallerWithReadReplicas.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCallerWithReadReplicas.java @@ -18,6 +18,8 @@ package org.apache.hadoop.hbase.client; 
+import static org.apache.hadoop.hbase.HConstants.PRIORITY_UNSET; + import java.io.IOException; import java.io.InterruptedIOException; import java.util.Collections; @@ -27,24 +29,22 @@ import java.util.concurrent.ExecutionException; import java.util.concurrent.ExecutorService; import java.util.concurrent.Future; import java.util.concurrent.TimeUnit; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.DoNotRetryIOException; import org.apache.hadoop.hbase.HBaseIOException; import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.RegionLocations; import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.ipc.HBaseRpcController; +import org.apache.hadoop.hbase.ipc.RpcControllerFactory; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hadoop.hbase.ipc.HBaseRpcController; -import org.apache.hadoop.hbase.ipc.RpcControllerFactory; + import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter; import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos; -import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; - -import static org.apache.hadoop.hbase.HConstants.PRIORITY_UNSET; /** * Caller that goes to replica if the primary region does no answer within a configurable @@ -58,7 +58,7 @@ public class RpcRetryingCallerWithReadReplicas { LoggerFactory.getLogger(RpcRetryingCallerWithReadReplicas.class); protected final ExecutorService pool; - protected final ClusterConnection cConnection; + protected final ConnectionImplementation cConnection; protected final Configuration conf; protected final Get get; protected final TableName tableName; @@ -71,7 +71,7 @@ public class RpcRetryingCallerWithReadReplicas { public RpcRetryingCallerWithReadReplicas( RpcControllerFactory rpcControllerFactory, TableName tableName, - ClusterConnection cConnection, final Get get, + ConnectionImplementation cConnection, final Get get, ExecutorService pool, int retries, int operationTimeout, int rpcTimeout, int timeBeforeReplicas) { this.rpcControllerFactory = rpcControllerFactory; @@ -185,19 +185,14 @@ public class RpcRetryingCallerWithReadReplicas { } else { // We cannot get the primary replica location, it is possible that the region // server hosting meta is down, it needs to proceed to try cached replicas. 
- if (cConnection instanceof ConnectionImplementation) { - rl = ((ConnectionImplementation)cConnection).getCachedLocation(tableName, get.getRow()); - if (rl == null) { - // No cached locations - throw e; - } - - // Primary replica location is not known, skip primary replica - skipPrimary = true; - } else { - // For completeness + rl = cConnection.getCachedLocation(tableName, get.getRow()); + if (rl == null) { + // No cached locations throw e; } + + // Primary replica location is not known, skip primary replica + skipPrimary = true; } } @@ -316,9 +311,8 @@ public class RpcRetryingCallerWithReadReplicas { } static RegionLocations getRegionLocations(boolean useCache, int replicaId, - ClusterConnection cConnection, TableName tableName, byte[] row) + ConnectionImplementation cConnection, TableName tableName, byte[] row) throws RetriesExhaustedException, DoNotRetryIOException, InterruptedIOException { - RegionLocations rl; try { if (useCache) { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScannerCallable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScannerCallable.java index 45b74ef938..bf7135fab4 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScannerCallable.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScannerCallable.java @@ -101,23 +101,24 @@ public class ScannerCallable extends ClientServiceCallable { * @param scanMetrics the ScanMetrics to used, if it is null, ScannerCallable won't collect * metrics * @param rpcControllerFactory factory to use when creating - * {@link com.google.protobuf.RpcController} + * {@link com.google.protobuf.RpcController} */ - public ScannerCallable(ClusterConnection connection, TableName tableName, Scan scan, + public ScannerCallable(ConnectionImplementation connection, TableName tableName, Scan scan, ScanMetrics scanMetrics, RpcControllerFactory rpcControllerFactory) { this(connection, tableName, scan, scanMetrics, rpcControllerFactory, 0); } + /** - * * @param connection * @param tableName * @param scan * @param scanMetrics * @param id the replicaId */ - public ScannerCallable(ClusterConnection connection, TableName tableName, Scan scan, + public ScannerCallable(ConnectionImplementation connection, TableName tableName, Scan scan, ScanMetrics scanMetrics, RpcControllerFactory rpcControllerFactory, int id) { - super(connection, tableName, scan.getStartRow(), rpcControllerFactory.newController(), scan.getPriority()); + super(connection, tableName, scan.getStartRow(), rpcControllerFactory.newController(), + scan.getPriority()); this.id = id; this.scan = scan; this.scanMetrics = scanMetrics; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScannerCallableWithReplicas.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScannerCallableWithReplicas.java index bcb81f7e9d..fb60679b6e 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScannerCallableWithReplicas.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScannerCallableWithReplicas.java @@ -18,8 +18,6 @@ package org.apache.hadoop.hbase.client; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; - import java.io.IOException; import java.io.InterruptedIOException; import java.util.HashSet; @@ -31,17 +29,18 @@ import java.util.concurrent.ExecutorService; import java.util.concurrent.Future; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; - import org.apache.hadoop.conf.Configuration; 
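The RpcRetryingCallerWithReadReplicas hunk above can drop the instanceof check because the field is now typed as ConnectionImplementation, which exposes getCachedLocation directly. A condensed sketch of the resulting fallback (names match the surrounding diff; the branch for an explicitly requested replica id is omitted):

RegionLocations rl;
try {
  // Normal path: resolve the primary replica, possibly from the location cache.
  rl = getRegionLocations(true, RegionReplicaUtil.DEFAULT_REPLICA_ID,
      cConnection, tableName, get.getRow());
} catch (RetriesExhaustedException | DoNotRetryIOException e) {
  // Meta lookup failed, e.g. the server hosting meta is down; fall back to
  // whatever replica locations are still cached on the connection.
  rl = cConnection.getCachedLocation(tableName, get.getRow());
  if (rl == null) {
    throw e; // nothing cached either
  }
  skipPrimary = true; // primary location unknown, go straight to replicas
}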
import org.apache.hadoop.hbase.DoNotRetryIOException; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.RegionLocations; import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.ScannerCallable.MoreResults; +import org.apache.hadoop.hbase.util.Pair; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hadoop.hbase.client.ScannerCallable.MoreResults; -import org.apache.hadoop.hbase.util.Pair; + +import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; /** * This class has the logic for handling scanners for regions with and without replicas. @@ -60,7 +59,7 @@ class ScannerCallableWithReplicas implements RetryingCallable { private static final Logger LOG = LoggerFactory.getLogger(ScannerCallableWithReplicas.class); volatile ScannerCallable currentScannerCallable; AtomicBoolean replicaSwitched = new AtomicBoolean(false); - final ClusterConnection cConnection; + final ConnectionImplementation cConnection; protected final ExecutorService pool; protected final int timeBeforeReplicas; private final Scan scan; @@ -74,10 +73,10 @@ class ScannerCallableWithReplicas implements RetryingCallable { private boolean someRPCcancelled = false; //required for testing purposes only private int regionReplication = 0; - public ScannerCallableWithReplicas(TableName tableName, ClusterConnection cConnection, + public ScannerCallableWithReplicas(TableName tableName, ConnectionImplementation cConnection, ScannerCallable baseCallable, ExecutorService pool, int timeBeforeReplicas, Scan scan, int retries, int scannerTimeout, int caching, Configuration conf, - RpcRetryingCaller caller) { + RpcRetryingCaller caller) { this.currentScannerCallable = baseCallable; this.cConnection = cConnection; this.pool = pool; @@ -156,14 +155,8 @@ class ScannerCallableWithReplicas implements RetryingCallable { } catch (RetriesExhaustedException | DoNotRetryIOException e) { // We cannot get the primary replica region location, it is possible that the region server // hosting meta table is down, it needs to proceed to try cached replicas directly. 
- if (cConnection instanceof ConnectionImplementation) { - rl = ((ConnectionImplementation) cConnection) - .getCachedLocation(tableName, currentScannerCallable.getRow()); - if (rl == null) { - throw e; - } - } else { - // For completeness + rl = cConnection.getCachedLocation(tableName, currentScannerCallable.getRow()); + if (rl == null) { throw e; } } diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncProcess.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncProcess.java index 4a2ed8d747..fe20cec13c 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncProcess.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncProcess.java @@ -169,16 +169,16 @@ public class TestAsyncProcess { return r; } - public MyAsyncProcess(ClusterConnection hc, Configuration conf) { - super(hc, conf, - new RpcRetryingCallerFactory(conf), new RpcControllerFactory(conf)); + public MyAsyncProcess(ConnectionImplementation hc, Configuration conf) { + super(hc, conf, new RpcRetryingCallerFactory(conf), new RpcControllerFactory(conf)); service = Executors.newFixedThreadPool(5); } - public MyAsyncProcess(ClusterConnection hc, Configuration conf, AtomicInteger nbThreads) { + public MyAsyncProcess(ConnectionImplementation hc, Configuration conf, + AtomicInteger nbThreads) { super(hc, conf, new RpcRetryingCallerFactory(conf), new RpcControllerFactory(conf)); - service = new ThreadPoolExecutor(1, 20, 60, TimeUnit.SECONDS, - new SynchronousQueue<>(), new CountingThreadFactory(nbThreads)); + service = new ThreadPoolExecutor(1, 20, 60, TimeUnit.SECONDS, new SynchronousQueue<>(), + new CountingThreadFactory(nbThreads)); } public AsyncRequestFuture submit(ExecutorService pool, TableName tableName, @@ -317,7 +317,8 @@ public class TestAsyncProcess { private final IOException ioe; - public AsyncProcessWithFailure(ClusterConnection hc, Configuration conf, IOException ioe) { + public AsyncProcessWithFailure(ConnectionImplementation hc, Configuration conf, + IOException ioe) { super(hc, conf); this.ioe = ioe; serverTrackerTimeout = 1L; @@ -325,7 +326,7 @@ public class TestAsyncProcess { @Override protected RpcRetryingCaller createCaller( - CancellableRegionServerCallable callable, int rpcTimeout) { + CancellableRegionServerCallable callable, int rpcTimeout) { callsCt.incrementAndGet(); return new CallerWithFailure(ioe); } @@ -367,7 +368,7 @@ public class TestAsyncProcess { customPrimarySleepMs.put(server, primaryMs); } - public MyAsyncProcessWithReplicas(ClusterConnection hc, Configuration conf) { + public MyAsyncProcessWithReplicas(ConnectionImplementation hc, Configuration conf) { super(hc, conf); } @@ -613,7 +614,7 @@ public class TestAsyncProcess { } private void doSubmitRequest(long maxHeapSizePerRequest, long putsHeapSize) throws Exception { - ClusterConnection conn = createHConnection(); + ConnectionImplementation conn = createHConnection(); final String defaultClazz = conn.getConfiguration().get(RequestControllerFactory.REQUEST_CONTROLLER_IMPL_CONF_KEY); final long defaultHeapSizePerRequest = conn.getConfiguration().getLong( @@ -709,7 +710,7 @@ public class TestAsyncProcess { @Test public void testSubmit() throws Exception { - ClusterConnection hc = createHConnection(); + ConnectionImplementation hc = createHConnection(); MyAsyncProcess ap = new MyAsyncProcess(hc, CONF); List puts = new ArrayList<>(1); @@ -721,7 +722,7 @@ public class TestAsyncProcess { @Test public void testSubmitWithCB() throws Exception { - ClusterConnection 
hc = createHConnection(); + ConnectionImplementation hc = createHConnection(); final AtomicInteger updateCalled = new AtomicInteger(0); Batch.Callback cb = new Batch.Callback() { @Override @@ -742,7 +743,7 @@ public class TestAsyncProcess { @Test public void testSubmitBusyRegion() throws Exception { - ClusterConnection conn = createHConnection(); + ConnectionImplementation conn = createHConnection(); final String defaultClazz = conn.getConfiguration().get(RequestControllerFactory.REQUEST_CONTROLLER_IMPL_CONF_KEY); conn.getConfiguration().set(RequestControllerFactory.REQUEST_CONTROLLER_IMPL_CONF_KEY, @@ -770,7 +771,7 @@ public class TestAsyncProcess { @Test public void testSubmitBusyRegionServer() throws Exception { - ClusterConnection conn = createHConnection(); + ConnectionImplementation conn = createHConnection(); MyAsyncProcess ap = new MyAsyncProcess(conn, CONF); final String defaultClazz = conn.getConfiguration().get(RequestControllerFactory.REQUEST_CONTROLLER_IMPL_CONF_KEY); @@ -827,7 +828,7 @@ public class TestAsyncProcess { @Test public void testSubmitTrue() throws IOException { - ClusterConnection conn = createHConnection(); + ConnectionImplementation conn = createHConnection(); final MyAsyncProcess ap = new MyAsyncProcess(conn, CONF); final String defaultClazz = conn.getConfiguration().get(RequestControllerFactory.REQUEST_CONTROLLER_IMPL_CONF_KEY); @@ -920,7 +921,7 @@ public class TestAsyncProcess { @Test public void testTaskCountWithoutClientBackoffPolicy() throws IOException, InterruptedException { - ClusterConnection hc = createHConnection(); + ConnectionImplementation hc = createHConnection(); MyAsyncProcess ap = new MyAsyncProcess(hc, CONF); testTaskCount(ap); } @@ -930,7 +931,7 @@ public class TestAsyncProcess { Configuration copyConf = new Configuration(CONF); copyConf.setBoolean(HConstants.ENABLE_CLIENT_BACKPRESSURE, true); MyClientBackoffPolicy bp = new MyClientBackoffPolicy(); - ClusterConnection conn = createHConnection(); + ConnectionImplementation conn = createHConnection(); Mockito.when(conn.getConfiguration()).thenReturn(copyConf); Mockito.when(conn.getStatisticsTracker()).thenReturn(ServerStatisticTracker.create(copyConf)); Mockito.when(conn.getBackoffPolicy()).thenReturn(bp); @@ -970,7 +971,7 @@ public class TestAsyncProcess { @Test public void testMaxTask() throws Exception { - ClusterConnection conn = createHConnection(); + ConnectionImplementation conn = createHConnection(); final String defaultClazz = conn.getConfiguration().get(RequestControllerFactory.REQUEST_CONTROLLER_IMPL_CONF_KEY); conn.getConfiguration().set(RequestControllerFactory.REQUEST_CONTROLLER_IMPL_CONF_KEY, @@ -1029,19 +1030,20 @@ public class TestAsyncProcess { } } - private static ClusterConnection createHConnection() throws IOException { - ClusterConnection hc = createHConnectionCommon(); + private static ConnectionImplementation createHConnection() throws IOException { + ConnectionImplementation hc = createHConnectionCommon(); setMockLocation(hc, DUMMY_BYTES_1, new RegionLocations(loc1)); setMockLocation(hc, DUMMY_BYTES_2, new RegionLocations(loc2)); setMockLocation(hc, DUMMY_BYTES_3, new RegionLocations(loc3)); - Mockito.when(hc.locateRegions(Mockito.eq(DUMMY_TABLE), Mockito.anyBoolean(), - Mockito.anyBoolean())).thenReturn(Arrays.asList(loc1, loc2, loc3)); + Mockito + .when(hc.locateRegions(Mockito.eq(DUMMY_TABLE), Mockito.anyBoolean(), Mockito.anyBoolean())) + .thenReturn(Arrays.asList(loc1, loc2, loc3)); setMockLocation(hc, FAILS, new RegionLocations(loc2)); return hc; } - private 
static ClusterConnection createHConnectionWithReplicas() throws IOException { - ClusterConnection hc = createHConnectionCommon(); + private static ConnectionImplementation createHConnectionWithReplicas() throws IOException { + ConnectionImplementation hc = createHConnectionCommon(); setMockLocation(hc, DUMMY_BYTES_1, hrls1); setMockLocation(hc, DUMMY_BYTES_2, hrls2); setMockLocation(hc, DUMMY_BYTES_3, hrls3); @@ -1055,12 +1057,13 @@ public class TestAsyncProcess { for (HRegionLocation loc : hrls3.getRegionLocations()) { locations.add(loc); } - Mockito.when(hc.locateRegions(Mockito.eq(DUMMY_TABLE), Mockito.anyBoolean(), - Mockito.anyBoolean())).thenReturn(locations); + Mockito + .when(hc.locateRegions(Mockito.eq(DUMMY_TABLE), Mockito.anyBoolean(), Mockito.anyBoolean())) + .thenReturn(locations); return hc; } - private static void setMockLocation(ClusterConnection hc, byte[] row, + private static void setMockLocation(ConnectionImplementation hc, byte[] row, RegionLocations result) throws IOException { Mockito.when(hc.locateRegion(Mockito.eq(DUMMY_TABLE), Mockito.eq(row), Mockito.anyBoolean(), Mockito.anyBoolean(), Mockito.anyInt())).thenReturn(result); @@ -1068,8 +1071,8 @@ public class TestAsyncProcess { Mockito.anyBoolean(), Mockito.anyBoolean())).thenReturn(result); } - private static ClusterConnection createHConnectionCommon() { - ClusterConnection hc = Mockito.mock(ClusterConnection.class); + private static ConnectionImplementation createHConnectionCommon() { + ConnectionImplementation hc = Mockito.mock(ConnectionImplementation.class); NonceGenerator ng = Mockito.mock(NonceGenerator.class); Mockito.when(ng.getNonceGroup()).thenReturn(HConstants.NO_NONCE); Mockito.when(hc.getNonceGenerator()).thenReturn(ng); @@ -1080,7 +1083,7 @@ public class TestAsyncProcess { @Test public void testHTablePutSuccess() throws Exception { - ClusterConnection conn = createHConnection(); + ConnectionImplementation conn = createHConnection(); MyAsyncProcess ap = new MyAsyncProcess(conn, CONF); BufferedMutatorParams bufferParam = createBufferedMutatorParams(ap, DUMMY_TABLE); BufferedMutatorImpl ht = new BufferedMutatorImpl(conn, bufferParam, ap); @@ -1097,7 +1100,7 @@ public class TestAsyncProcess { @Test public void testSettingWriteBufferPeriodicFlushParameters() throws Exception { - ClusterConnection conn = createHConnection(); + ConnectionImplementation conn = createHConnection(); MyAsyncProcess ap = new MyAsyncProcess(conn, CONF); checkPeriodicFlushParameters(conn, ap, @@ -1114,11 +1117,8 @@ public class TestAsyncProcess { 1, BufferedMutator.MIN_WRITE_BUFFER_PERIODIC_FLUSH_TIMERTICK_MS); } - private void checkPeriodicFlushParameters(ClusterConnection conn, - MyAsyncProcess ap, - long setTO, long expectTO, - long setTT, long expectTT - ) { + private void checkPeriodicFlushParameters(ConnectionImplementation conn, MyAsyncProcess ap, + long setTO, long expectTO, long setTT, long expectTT) { BufferedMutatorParams bufferParam = createBufferedMutatorParams(ap, DUMMY_TABLE); // The BufferedMutatorParams does nothing with the value @@ -1134,16 +1134,15 @@ public class TestAsyncProcess { // The BufferedMutatorImpl corrects illegal values (direct via setter) BufferedMutatorImpl ht2 = - new BufferedMutatorImpl(conn, createBufferedMutatorParams(ap, DUMMY_TABLE), ap); + new BufferedMutatorImpl(conn, createBufferedMutatorParams(ap, DUMMY_TABLE), ap); ht2.setWriteBufferPeriodicFlush(setTO, setTT); Assert.assertEquals(expectTO, ht2.getWriteBufferPeriodicFlushTimeoutMs()); Assert.assertEquals(expectTT, 
ht2.getWriteBufferPeriodicFlushTimerTickMs()); - } @Test public void testWriteBufferPeriodicFlushTimeoutMs() throws Exception { - ClusterConnection conn = createHConnection(); + ConnectionImplementation conn = createHConnection(); MyAsyncProcess ap = new MyAsyncProcess(conn, CONF); BufferedMutatorParams bufferParam = createBufferedMutatorParams(ap, DUMMY_TABLE); @@ -1210,7 +1209,7 @@ public class TestAsyncProcess { @Test public void testBufferedMutatorImplWithSharedPool() throws Exception { - ClusterConnection conn = createHConnection(); + ConnectionImplementation conn = createHConnection(); MyAsyncProcess ap = new MyAsyncProcess(conn, CONF); BufferedMutatorParams bufferParam = createBufferedMutatorParams(ap, DUMMY_TABLE); BufferedMutator ht = new BufferedMutatorImpl(conn, bufferParam, ap); @@ -1221,7 +1220,7 @@ public class TestAsyncProcess { @Test public void testFailedPutAndNewPut() throws Exception { - ClusterConnection conn = createHConnection(); + ConnectionImplementation conn = createHConnection(); MyAsyncProcess ap = new MyAsyncProcess(conn, CONF); BufferedMutatorParams bufferParam = createBufferedMutatorParams(ap, DUMMY_TABLE) .writeBufferSize(0); @@ -1266,7 +1265,7 @@ public class TestAsyncProcess { @Test public void testBatch() throws IOException, InterruptedException { - ClusterConnection conn = new MyConnectionImpl(CONF); + ConnectionImplementation conn = new MyConnectionImpl(CONF); HTable ht = (HTable) conn.getTable(DUMMY_TABLE); ht.multiAp = new MyAsyncProcess(conn, CONF); @@ -1297,7 +1296,7 @@ public class TestAsyncProcess { @Test public void testErrorsServers() throws IOException { Configuration configuration = new Configuration(CONF); - ClusterConnection conn = new MyConnectionImpl(configuration); + ConnectionImplementation conn = new MyConnectionImpl(configuration); MyAsyncProcess ap = new MyAsyncProcess(conn, configuration); BufferedMutatorParams bufferParam = createBufferedMutatorParams(ap, DUMMY_TABLE); BufferedMutatorImpl mutator = new BufferedMutatorImpl(conn, bufferParam, ap); @@ -1328,7 +1327,7 @@ public class TestAsyncProcess { Configuration copyConf = new Configuration(CONF); copyConf.setLong(HConstants.HBASE_RPC_READ_TIMEOUT_KEY, readTimeout); copyConf.setLong(HConstants.HBASE_RPC_WRITE_TIMEOUT_KEY, writeTimeout); - ClusterConnection conn = new MyConnectionImpl(copyConf); + ConnectionImplementation conn = new MyConnectionImpl(copyConf); MyAsyncProcess ap = new MyAsyncProcess(conn, copyConf); try (HTable ht = (HTable) conn.getTable(DUMMY_TABLE)) { ht.multiAp = ap; @@ -1361,7 +1360,7 @@ public class TestAsyncProcess { @Test public void testErrors() throws IOException { - ClusterConnection conn = new MyConnectionImpl(CONF); + ConnectionImplementation conn = new MyConnectionImpl(CONF); AsyncProcessWithFailure ap = new AsyncProcessWithFailure(conn, CONF, new IOException("test")); BufferedMutatorParams bufferParam = createBufferedMutatorParams(ap, DUMMY_TABLE); BufferedMutatorImpl mutator = new BufferedMutatorImpl(conn, bufferParam, ap); @@ -1385,7 +1384,7 @@ public class TestAsyncProcess { @Test public void testCallQueueTooLarge() throws IOException { - ClusterConnection conn = new MyConnectionImpl(CONF); + ConnectionImplementation conn = new MyConnectionImpl(CONF); AsyncProcessWithFailure ap = new AsyncProcessWithFailure(conn, CONF, new CallQueueTooBigException()); BufferedMutatorParams bufferParam = createBufferedMutatorParams(ap, DUMMY_TABLE); @@ -1600,7 +1599,7 @@ public class TestAsyncProcess { // TODO: this is kind of timing dependent... 
perhaps it should detect from createCaller // that the replica call has happened and that way control the ordering. Configuration conf = new Configuration(); - ClusterConnection conn = createHConnectionWithReplicas(); + ConnectionImplementation conn = createHConnectionWithReplicas(); conf.setInt(AsyncProcess.PRIMARY_CALL_TIMEOUT_KEY, replicaAfterMs * 1000); if (retries >= 0) { conf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, retries); @@ -1698,16 +1697,15 @@ public class TestAsyncProcess { } static class AsyncProcessForThrowableCheck extends AsyncProcess { - public AsyncProcessForThrowableCheck(ClusterConnection hc, Configuration conf) { - super(hc, conf, new RpcRetryingCallerFactory(conf), new RpcControllerFactory( - conf)); + public AsyncProcessForThrowableCheck(ConnectionImplementation hc, Configuration conf) { + super(hc, conf, new RpcRetryingCallerFactory(conf), new RpcControllerFactory(conf)); } } @Test public void testUncheckedException() throws Exception { // Test the case pool.submit throws unchecked exception - ClusterConnection hc = createHConnection(); + ConnectionImplementation hc = createHConnection(); MyThreadPoolExecutor myPool = new MyThreadPoolExecutor(1, 20, 60, TimeUnit.SECONDS, new LinkedBlockingQueue<>(200)); @@ -1739,7 +1737,7 @@ public class TestAsyncProcess { final int retries = 1; myConf.setLong(HConstants.HBASE_CLIENT_PAUSE_FOR_CQTBE, specialPause); myConf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, retries); - ClusterConnection conn = new MyConnectionImpl(myConf); + ConnectionImplementation conn = new MyConnectionImpl(myConf); AsyncProcessWithFailure ap = new AsyncProcessWithFailure(conn, myConf, new CallQueueTooBigException()); BufferedMutatorParams bufferParam = createBufferedMutatorParams(ap, DUMMY_TABLE); @@ -1798,7 +1796,7 @@ public class TestAsyncProcess { @Test public void testQueueRowAccess() throws Exception { - ClusterConnection conn = createHConnection(); + ConnectionImplementation conn = createHConnection(); BufferedMutatorImpl mutator = new BufferedMutatorImpl(conn, null, null, new BufferedMutatorParams(DUMMY_TABLE).writeBufferSize(100000)); Put p0 = new Put(DUMMY_BYTES_1).addColumn(DUMMY_BYTES_1, DUMMY_BYTES_1, DUMMY_BYTES_1); diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncProcessWithRegionException.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncProcessWithRegionException.java index ffc4e5192f..2c24aaa7be 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncProcessWithRegionException.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncProcessWithRegionException.java @@ -175,8 +175,8 @@ public class TestAsyncProcessWithRegionException { } } - private static ClusterConnection createHConnection() throws IOException { - ClusterConnection hc = Mockito.mock(ClusterConnection.class); + private static ConnectionImplementation createHConnection() throws IOException { + ConnectionImplementation hc = Mockito.mock(ConnectionImplementation.class); NonceGenerator ng = Mockito.mock(NonceGenerator.class); Mockito.when(ng.getNonceGroup()).thenReturn(HConstants.NO_NONCE); Mockito.when(hc.getNonceGenerator()).thenReturn(ng); @@ -190,8 +190,8 @@ public class TestAsyncProcessWithRegionException { return hc; } - private static void setMockLocation(ClusterConnection hc, byte[] row, RegionLocations result) - throws IOException { + private static void setMockLocation(ConnectionImplementation hc, byte[] row, + RegionLocations result) throws IOException { 
Mockito.when(hc.locateRegion(Mockito.eq(DUMMY_TABLE), Mockito.eq(row), Mockito.anyBoolean(), Mockito.anyBoolean(), Mockito.anyInt())).thenReturn(result); Mockito.when(hc.locateRegion(Mockito.eq(DUMMY_TABLE), Mockito.eq(row), Mockito.anyBoolean(), @@ -201,7 +201,7 @@ public class TestAsyncProcessWithRegionException { private static class MyAsyncProcess extends AsyncProcess { private final ExecutorService service = Executors.newFixedThreadPool(5); - MyAsyncProcess(ClusterConnection hc, Configuration conf) { + MyAsyncProcess(ConnectionImplementation hc, Configuration conf) { super(hc, conf, new RpcRetryingCallerFactory(conf), new RpcControllerFactory(conf)); } diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestBufferedMutator.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestBufferedMutator.java index f8e12954b1..647ea32a5e 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestBufferedMutator.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestBufferedMutator.java @@ -44,11 +44,10 @@ public class TestBufferedMutator { public TestName name = new TestName(); /** - * My BufferedMutator. - * Just to prove that I can insert a BM other than default. + * My BufferedMutator. Just to prove that I can insert a BM other than default. */ public static class MyBufferedMutator extends BufferedMutatorImpl { - MyBufferedMutator(ClusterConnection conn, RpcRetryingCallerFactory rpcCallerFactory, + MyBufferedMutator(ConnectionImplementation conn, RpcRetryingCallerFactory rpcCallerFactory, RpcControllerFactory rpcFactory, BufferedMutatorParams params) { super(conn, rpcCallerFactory, rpcFactory, params); } diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientScanner.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientScanner.java index 48ca751179..9f1f6f3251 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientScanner.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientScanner.java @@ -71,7 +71,7 @@ public class TestClientScanner { ExecutorService pool; Configuration conf; - ClusterConnection clusterConn; + ConnectionImplementation clusterConn; RpcRetryingCallerFactory rpcFactory; RpcControllerFactory controllerFactory; @@ -80,7 +80,7 @@ public class TestClientScanner { @Before public void setup() throws IOException { - clusterConn = Mockito.mock(ClusterConnection.class); + clusterConn = Mockito.mock(ConnectionImplementation.class); rpcFactory = Mockito.mock(RpcRetryingCallerFactory.class); controllerFactory = Mockito.mock(RpcControllerFactory.class); pool = Executors.newSingleThreadExecutor(); @@ -103,11 +103,11 @@ public class TestClientScanner { private boolean initialized = false; public MockClientScanner(final Configuration conf, final Scan scan, final TableName tableName, - ClusterConnection connection, RpcRetryingCallerFactory rpcFactory, + ConnectionImplementation connection, RpcRetryingCallerFactory rpcFactory, RpcControllerFactory controllerFactory, ExecutorService pool, int primaryOperationTimeout) throws IOException { super(conf, scan, tableName, connection, rpcFactory, controllerFactory, pool, - primaryOperationTimeout); + primaryOperationTimeout); } @Override diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestHTableMultiplexerViaMocks.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestHTableMultiplexerViaMocks.java index cce4939279..c84d9bf304 100644 --- 
a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestHTableMultiplexerViaMocks.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestHTableMultiplexerViaMocks.java @@ -42,12 +42,12 @@ public class TestHTableMultiplexerViaMocks { HBaseClassTestRule.forClass(TestHTableMultiplexerViaMocks.class); private HTableMultiplexer mockMultiplexer; - private ClusterConnection mockConnection; + private ConnectionImplementation mockConnection; @Before public void setupTest() { mockMultiplexer = mock(HTableMultiplexer.class); - mockConnection = mock(ClusterConnection.class); + mockConnection = mock(ConnectionImplementation.class); // Call the real put(TableName, Put, int) method when(mockMultiplexer.put(any(TableName.class), any(), anyInt())).thenCallRealMethod(); diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestReversedScannerCallable.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestReversedScannerCallable.java index 1b554f7617..7eb2b94f07 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestReversedScannerCallable.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestReversedScannerCallable.java @@ -19,7 +19,6 @@ package org.apache.hadoop.hbase.client; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseClassTestRule; -import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.RegionLocations; import org.apache.hadoop.hbase.ServerName; @@ -46,7 +45,7 @@ public class TestReversedScannerCallable { HBaseClassTestRule.forClass(TestReversedScannerCallable.class); @Mock - private ClusterConnection connection; + private ConnectionImplementation connection; @Mock private Scan scan; @Mock diff --git a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionServerBulkLoadWithOldSecureEndpoint.java b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionServerBulkLoadWithOldSecureEndpoint.java index 49697b8315..b4a9518632 100644 --- a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionServerBulkLoadWithOldSecureEndpoint.java +++ b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionServerBulkLoadWithOldSecureEndpoint.java @@ -25,11 +25,13 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.MultithreadedTestUtil.RepeatingTestThread; import org.apache.hadoop.hbase.MultithreadedTestUtil.TestContext; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.ClientServiceCallable; -import org.apache.hadoop.hbase.client.ClusterConnection; +import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.client.RegionLocator; import org.apache.hadoop.hbase.client.RpcRetryingCaller; import org.apache.hadoop.hbase.client.RpcRetryingCallerFactory; import org.apache.hadoop.hbase.client.Table; @@ -50,10 +52,6 @@ import org.slf4j.LoggerFactory; import org.apache.hbase.thirdparty.com.google.common.collect.Lists; -import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter; -import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos; -import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest; - /** * Tests bulk loading of HFiles with old secure 
Endpoint client for backward compatibility. Will be * removed when old non-secure client for backward compatibility is not supported. @@ -108,7 +106,7 @@ public class TestHRegionServerBulkLoadWithOldSecureEndpoint extends TestHRegionS } // bulk load HFiles - final ClusterConnection conn = (ClusterConnection) UTIL.getAdmin().getConnection(); + final Connection conn = UTIL.getAdmin().getConnection(); Table table = conn.getTable(tableName); final String bulkToken = new SecureBulkLoadEndpointClient(table).prepareBulkLoad(tableName); RpcControllerFactory rpcControllerFactory = new RpcControllerFactory(UTIL.getConfiguration()); @@ -133,23 +131,11 @@ public class TestHRegionServerBulkLoadWithOldSecureEndpoint extends TestHRegionS // Periodically do compaction to reduce the number of open file handles. if (numBulkLoads.get() % 5 == 0) { // 5 * 50 = 250 open file handles! - callable = new ClientServiceCallable(conn, tableName, Bytes.toBytes("aaa"), - rpcControllerFactory.newController(), HConstants.PRIORITY_UNSET) { - @Override - protected Void rpcCall() throws Exception { - LOG.debug("compacting " + getLocation() + " for row " - + Bytes.toStringBinary(getRow())); - AdminProtos.AdminService.BlockingInterface server = - conn.getAdmin(getLocation().getServerName()); - CompactRegionRequest request = - RequestConverter.buildCompactRegionRequest( - getLocation().getRegionInfo().getRegionName(), true, null); - server.compactRegion(null, request); - numCompactions.incrementAndGet(); - return null; - } - }; - caller.callWithRetries(callable, Integer.MAX_VALUE); + try (RegionLocator locator = conn.getRegionLocator(tableName)) { + HRegionLocation loc = locator.getRegionLocation(Bytes.toBytes("aaa"), true); + conn.getAdmin().compactRegion(loc.getRegion().getRegionName()); + numCompactions.incrementAndGet(); + } } } } diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/DistributedHBaseCluster.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/DistributedHBaseCluster.java index 30a3db9625..2a7b7cd105 100644 --- a/hbase-it/src/test/java/org/apache/hadoop/hbase/DistributedHBaseCluster.java +++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/DistributedHBaseCluster.java @@ -20,6 +20,7 @@ package org.apache.hadoop.hbase; import java.io.IOException; import java.util.ArrayList; import java.util.Comparator; +import java.util.EnumSet; import java.util.HashSet; import java.util.List; import java.util.Objects; @@ -28,7 +29,6 @@ import java.util.TreeSet; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.ClusterManager.ServiceType; import org.apache.hadoop.hbase.client.Admin; -import org.apache.hadoop.hbase.client.ClusterConnection; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.RegionInfo; @@ -37,10 +37,6 @@ import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Threads; import org.apache.yetus.audience.InterfaceAudience; -import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos; -import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos; -import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MasterService; - /** * Manages the interactions with an already deployed distributed cluster (as opposed to * a pseudo-distributed, or mini/local cluster). This is used by integration and system tests. 
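One note on the compaction rewrite in the bulk-load test above: Connection.getAdmin() hands out a new Admin instance on every call, and the replacement block never closes it. A functionally equivalent variant that releases both resources (a sketch, not part of the patch; it additionally needs an Admin import):

try (RegionLocator locator = conn.getRegionLocator(tableName);
    Admin admin = conn.getAdmin()) {
  HRegionLocation loc = locator.getRegionLocation(Bytes.toBytes("aaa"), true);
  admin.compactRegion(loc.getRegion().getRegionName());
  numCompactions.incrementAndGet();
}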
@@ -99,18 +95,6 @@ public class DistributedHBaseCluster extends HBaseCluster { } } - @Override - public AdminProtos.AdminService.BlockingInterface getAdminProtocol(ServerName serverName) - throws IOException { - return ((ClusterConnection)this.connection).getAdmin(serverName); - } - - @Override - public ClientProtos.ClientService.BlockingInterface getClientProtocol(ServerName serverName) - throws IOException { - return ((ClusterConnection)this.connection).getClient(serverName); - } - @Override public void startRegionServer(String hostname, int port) throws IOException { LOG.info("Starting RS on: " + hostname); @@ -262,13 +246,6 @@ public class DistributedHBaseCluster extends HBaseCluster { throw new IOException("did timeout waiting for service to start:" + serverName); } - - @Override - public MasterService.BlockingInterface getMasterAdminService() - throws IOException { - return ((ClusterConnection)this.connection).getMaster(); - } - @Override public void startMaster(String hostname, int port) throws IOException { LOG.info("Starting Master on: " + hostname + ":" + port); @@ -297,7 +274,7 @@ public class DistributedHBaseCluster extends HBaseCluster { long start = System.currentTimeMillis(); while (System.currentTimeMillis() - start < timeout) { try { - getMasterAdminService(); + connection.getAdmin().getClusterMetrics(EnumSet.of(ClusterMetrics.Option.HBASE_VERSION)); return true; } catch (MasterNotRunningException m) { LOG.warn("Master not started yet " + m); diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultiTableInputFormatBase.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultiTableInputFormatBase.java index efc59c7844..2a8ab75b55 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultiTableInputFormatBase.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultiTableInputFormatBase.java @@ -36,8 +36,8 @@ import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.BufferedMutator; import org.apache.hadoop.hbase.client.BufferedMutatorParams; -import org.apache.hadoop.hbase.client.ClusterConnection; import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.client.ConnectionUtils; import org.apache.hadoop.hbase.client.RegionInfoBuilder; import org.apache.hadoop.hbase.client.RegionLocator; import org.apache.hadoop.hbase.client.Result; @@ -98,7 +98,7 @@ public class TestMultiTableInputFormatBase { // canned responses. JobContext mockedJobContext = Mockito.mock(JobContext.class); Configuration c = HBaseConfiguration.create(); - c.set(ClusterConnection.HBASE_CLIENT_CONNECTION_IMPL, MRSplitsConnection.class.getName()); + c.set(ConnectionUtils.HBASE_CLIENT_CONNECTION_IMPL, MRSplitsConnection.class.getName()); Mockito.when(mockedJobContext.getConfiguration()).thenReturn(c); // Invent a bunch of scans. Have each Scan go against a different table so a good spread. 
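This test and TestTableInputFormatBase below track the relocation of the HBASE_CLIENT_CONNECTION_IMPL constant from the removed ClusterConnection interface to ConnectionUtils. The pattern both rely on is sketched here (MyMockConnection is a hypothetical stand-in; a real implementation needs the reflection-friendly constructor ConnectionFactory expects, see MRSplitsConnection in this test for the shape):

Configuration conf = HBaseConfiguration.create();
conf.set(ConnectionUtils.HBASE_CLIENT_CONNECTION_IMPL, MyMockConnection.class.getName());
// ConnectionFactory now reflectively instantiates MyMockConnection instead of
// the default ConnectionImplementation.
try (Connection conn = ConnectionFactory.createConnection(conf)) {
  // test code exercising the mock connection
}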
List scans = new ArrayList<>(); diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatBase.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatBase.java index 29a92ee75c..43a021e180 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatBase.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatBase.java @@ -17,9 +17,9 @@ */ package org.apache.hadoop.hbase.mapreduce; -import static org.junit.Assert.*; -import static org.mockito.Mockito.any; -import static org.mockito.Mockito.anyBoolean; +import static org.junit.Assert.assertEquals; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyBoolean; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -40,8 +40,8 @@ import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.BufferedMutator; import org.apache.hadoop.hbase.client.BufferedMutatorParams; -import org.apache.hadoop.hbase.client.ClusterConnection; import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.client.ConnectionUtils; import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.client.RegionInfoBuilder; import org.apache.hadoop.hbase.client.RegionLocator; @@ -90,7 +90,7 @@ public class TestTableInputFormatBase { public void testNonSuccessiveSplitsAreNotMerged() throws IOException { JobContext context = mock(JobContext.class); Configuration conf = HBaseConfiguration.create(); - conf.set(ClusterConnection.HBASE_CLIENT_CONNECTION_IMPL, + conf.set(ConnectionUtils.HBASE_CLIENT_CONNECTION_IMPL, ConnectionForMergeTesting.class.getName()); conf.set(TableInputFormat.INPUT_TABLE, "testTable"); conf.setBoolean(TableInputFormatBase.MAPREDUCE_INPUT_AUTOBALANCE, true); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/SharedConnection.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/SharedConnection.java index 9ad55b7177..7fe253f9fe 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/SharedConnection.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/SharedConnection.java @@ -25,6 +25,7 @@ import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.BufferedMutator; import org.apache.hadoop.hbase.client.BufferedMutatorParams; import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.client.Hbck; import org.apache.hadoop.hbase.client.RegionLocator; import org.apache.hadoop.hbase.client.TableBuilder; import org.apache.yetus.audience.InterfaceAudience; @@ -90,4 +91,14 @@ public class SharedConnection implements Connection { public TableBuilder getTableBuilder(TableName tableName, ExecutorService pool) { return this.conn.getTableBuilder(tableName, pool); } + + @Override + public Hbck getHbck() throws IOException { + return this.conn.getHbck(); + } + + @Override + public Hbck getHbck(ServerName masterServer) throws IOException { + return this.conn.getHbck(masterServer); + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/AsyncClusterConnection.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/AsyncClusterConnection.java index 0ad77ba4e7..f6e3485d08 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/AsyncClusterConnection.java +++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/AsyncClusterConnection.java @@ -62,7 +62,7 @@ public interface AsyncClusterConnection extends AsyncConnection { List entries, int replicaId, int numRetries, long operationTimeoutNs); /** - * Return all the replicas for a region. Used for regiong replica replication. + * Return all the replicas for a region. Used for region replica replication. */ CompletableFuture getRegionLocations(TableName tableName, byte[] row, boolean reload); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManagerUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManagerUtil.java index 97ae7eadd6..285fc626c8 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManagerUtil.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManagerUtil.java @@ -28,19 +28,16 @@ import java.util.stream.Stream; import org.apache.commons.lang3.ArrayUtils; import org.apache.hadoop.hbase.HBaseIOException; import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.client.AsyncRegionServerAdmin; import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.client.RegionReplicaUtil; import org.apache.hadoop.hbase.favored.FavoredNodesManager; -import org.apache.hadoop.hbase.ipc.HBaseRpcController; import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv; +import org.apache.hadoop.hbase.util.FutureUtils; import org.apache.hadoop.hbase.wal.WALSplitter; import org.apache.yetus.audience.InterfaceAudience; -import org.apache.hbase.thirdparty.com.google.protobuf.ServiceException; - -import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter; -import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse; @@ -66,22 +63,15 @@ final class AssignmentManagerUtil { static GetRegionInfoResponse getRegionInfoResponse(final MasterProcedureEnv env, final ServerName regionLocation, final RegionInfo hri, boolean includeBestSplitRow) throws IOException { - // TODO: There is no timeout on this controller. Set one! 
- HBaseRpcController controller = - env.getMasterServices().getClusterConnection().getRpcControllerFactory().newController(); - final AdminService.BlockingInterface admin = - env.getMasterServices().getClusterConnection().getAdmin(regionLocation); + AsyncRegionServerAdmin admin = + env.getMasterServices().getAsyncClusterConnection().getRegionServerAdmin(regionLocation); GetRegionInfoRequest request = null; if (includeBestSplitRow) { request = RequestConverter.buildGetRegionInfoRequest(hri.getRegionName(), false, true); } else { request = RequestConverter.buildGetRegionInfoRequest(hri.getRegionName()); } - try { - return admin.getRegionInfo(controller, request); - } catch (ServiceException e) { - throw ProtobufUtil.handleRemoteException(e); - } + return FutureUtils.get(admin.getRegionInfo(request)); } private static void lock(List regionNodes) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java index 9e5f9e8c91..74cfead16f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java @@ -159,9 +159,6 @@ import org.apache.hbase.thirdparty.com.google.common.collect.Ordering; import org.apache.hbase.thirdparty.com.google.common.collect.Sets; import org.apache.hbase.thirdparty.com.google.common.collect.TreeMultimap; -import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; -import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService.BlockingInterface; - /** * HBaseFsck (hbck) is a tool for checking and repairing region consistency and * table integrity problems in a corrupted HBase. This tool was written for hbase-1.x. It does not @@ -4347,32 +4344,29 @@ public class HBaseFsck extends Configured implements Closeable { public synchronized Void call() throws IOException { errors.progress(); try { - BlockingInterface server = connection.getAdmin(rsinfo); - // list all online regions from this region server - List regions = ProtobufUtil.getOnlineRegions(server); + List regions = connection.getAdmin().getRegions(rsinfo); regions = filterRegions(regions); if (details) { - errors.detail("RegionServer: " + rsinfo.getServerName() + - " number of regions: " + regions.size()); - for (RegionInfo rinfo: regions) { - errors.detail(" " + rinfo.getRegionNameAsString() + - " id: " + rinfo.getRegionId() + - " encoded_name: " + rinfo.getEncodedName() + - " start: " + Bytes.toStringBinary(rinfo.getStartKey()) + - " end: " + Bytes.toStringBinary(rinfo.getEndKey())); + errors.detail( + "RegionServer: " + rsinfo.getServerName() + " number of regions: " + regions.size()); + for (RegionInfo rinfo : regions) { + errors.detail(" " + rinfo.getRegionNameAsString() + " id: " + rinfo.getRegionId() + + " encoded_name: " + rinfo.getEncodedName() + " start: " + + Bytes.toStringBinary(rinfo.getStartKey()) + " end: " + + Bytes.toStringBinary(rinfo.getEndKey())); } } // check to see if the existence of this region matches the region in META - for (RegionInfo r:regions) { + for (RegionInfo r : regions) { HbckInfo hbi = hbck.getOrCreateInfo(r.getEncodedName()); hbi.addServer(r, rsinfo); } - } catch (IOException e) { // unable to connect to the region server. - errors.reportError(ERROR_CODE.RS_CONNECT_FAILURE, "RegionServer: " + rsinfo.getServerName() + - " Unable to fetch region information. " + e); + } catch (IOException e) { // unable to connect to the region server. 
+ errors.reportError(ERROR_CODE.RS_CONNECT_FAILURE, + "RegionServer: " + rsinfo.getServerName() + " Unable to fetch region information. " + e); throw e; } return null; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/MultiHConnection.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/MultiHConnection.java index 58057932bd..d095fa361d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/MultiHConnection.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/MultiHConnection.java @@ -26,19 +26,17 @@ import java.util.concurrent.LinkedBlockingQueue; import java.util.concurrent.ThreadLocalRandom; import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.TimeUnit; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.TableName; -import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import org.apache.hadoop.hbase.client.ClusterConnection; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.HTable; import org.apache.hadoop.hbase.client.Row; import org.apache.hadoop.hbase.client.coprocessor.Batch; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * Provides ability to create multiple Connection instances and allows to process a batch of @@ -112,14 +110,11 @@ public class MultiHConnection { * @param callback to run when results are in * @throws IOException If IO failure occurs */ - @SuppressWarnings("deprecation") public void processBatchCallback(List actions, TableName tableName, Object[] results, Batch.Callback callback) throws IOException { // Currently used by RegionStateStore - ClusterConnection conn = - (ClusterConnection) connections[ThreadLocalRandom.current().nextInt(noOfConnections)]; - - HTable.doBatchWithCallback(actions, results, callback, conn, batchPool, tableName); + HTable.doBatchWithCallback(actions, results, callback, + connections[ThreadLocalRandom.current().nextInt(noOfConnections)], batchPool, tableName); } // Copied from ConnectionImplementation.getBatchPool() diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java index 1b58634807..1119f3ee1d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java @@ -39,27 +39,26 @@ import org.apache.hadoop.hbase.ClusterMetrics; import org.apache.hadoop.hbase.ClusterMetrics.Option; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.MetaTableAccessor; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor; -import org.apache.hadoop.hbase.client.TableDescriptor; -import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.apache.hadoop.hbase.client.Admin; -import org.apache.hadoop.hbase.client.ClusterConnection; +import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor; +import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; import 
org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.NoServerForRegionException; +import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.client.RegionLocator; import org.apache.hadoop.hbase.client.Table; +import org.apache.hadoop.hbase.client.TableDescriptor; +import org.apache.hadoop.hbase.client.TableDescriptorBuilder; import org.apache.hadoop.hbase.regionserver.HRegionFileSystem; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; -import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; -import org.apache.hadoop.hbase.client.TableDescriptorBuilder; import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; import org.apache.hbase.thirdparty.com.google.common.collect.Lists; import org.apache.hbase.thirdparty.com.google.common.collect.Maps; @@ -551,7 +550,7 @@ public class RegionSplitter { } // make sure this region wasn't already split - byte[] sk = regionLoc.getRegionInfo().getStartKey(); + byte[] sk = regionLoc.getRegion().getStartKey(); if (sk.length != 0) { if (Bytes.equals(split, sk)) { LOG.debug("Region already split on " @@ -706,14 +705,11 @@ public class RegionSplitter { getTableDirAndSplitFile(connection.getConfiguration(), tableName); Path tableDir = tableDirAndSplitFile.getFirst(); FileSystem fs = tableDir.getFileSystem(connection.getConfiguration()); - // Clear the cache to forcibly refresh region information - ((ClusterConnection)connection).clearRegionCache(); TableDescriptor htd = null; try (Table table = connection.getTable(tableName)) { htd = table.getDescriptor(); } try (RegionLocator regionLocator = connection.getRegionLocator(tableName)) { - // for every region that hasn't been verified as a finished split for (Pair region : regionList) { byte[] start = region.getFirst(); @@ -721,7 +717,7 @@ public class RegionSplitter { // see if the new split daughter region has come online try { - HRegionInfo dri = regionLocator.getRegionLocation(split).getRegionInfo(); + RegionInfo dri = regionLocator.getRegionLocation(split, true).getRegion(); if (dri.isOffline() || !Bytes.equals(dri.getStartKey(), split)) { logicalSplitting.add(region); continue; @@ -736,10 +732,10 @@ public class RegionSplitter { try { // when a daughter region is opened, a compaction is triggered // wait until compaction completes for both daughter regions - LinkedList check = Lists.newLinkedList(); - check.add(regionLocator.getRegionLocation(start).getRegionInfo()); - check.add(regionLocator.getRegionLocation(split).getRegionInfo()); - for (HRegionInfo hri : check.toArray(new HRegionInfo[check.size()])) { + LinkedList check = Lists.newLinkedList(); + check.add(regionLocator.getRegionLocation(start).getRegion()); + check.add(regionLocator.getRegionLocation(split).getRegion()); + for (RegionInfo hri : check.toArray(new RegionInfo[check.size()])) { byte[] sk = hri.getStartKey(); if (sk.length == 0) sk = splitAlgo.firstRow(); @@ -768,7 +764,6 @@ public class RegionSplitter { } catch (NoServerForRegionException nsfre) { LOG.debug("No Server Exception thrown for: " + splitAlgo.rowToStr(start)); physicalSplitting.add(region); - ((ClusterConnection)connection).clearRegionCache(); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseCluster.java index d760a7dd8a..8020553e4f 100644 --- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseCluster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseCluster.java @@ -19,7 +19,6 @@ package org.apache.hadoop.hbase; import java.io.Closeable; import java.io.IOException; - import org.apache.hadoop.conf.Configurable; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.client.RegionInfoBuilder; @@ -28,10 +27,6 @@ import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService; -import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ClientService; -import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MasterService; - /** * This class defines methods that can help with managing HBase clusters * from unit tests and system tests. There are 3 types of cluster deployments: @@ -97,24 +92,6 @@ public abstract class HBaseCluster implements Closeable, Configurable { return initialClusterStatus; } - /** - * Returns an {@link MasterService.BlockingInterface} to the active master - */ - public abstract MasterService.BlockingInterface getMasterAdminService() - throws IOException; - - /** - * Returns an AdminProtocol interface to the regionserver - */ - public abstract AdminService.BlockingInterface getAdminProtocol(ServerName serverName) - throws IOException; - - /** - * Returns a ClientProtocol interface to the regionserver - */ - public abstract ClientService.BlockingInterface getClientProtocol(ServerName serverName) - throws IOException; - /** * Starts a new region server on the given hostname or if this is a mini/local cluster, * starts a region server locally. diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java index 796dbc30e4..92ea58ea8e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java @@ -64,7 +64,6 @@ import org.apache.hadoop.hbase.Waiter.ExplainingPredicate; import org.apache.hadoop.hbase.Waiter.Predicate; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.BufferedMutator; -import org.apache.hadoop.hbase.client.ClusterConnection; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; import org.apache.hadoop.hbase.client.Connection; @@ -3072,7 +3071,7 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility { * Returns an {@link Hbck} instance. Needs be closed when done. 
*/ public Hbck getHbck() throws IOException { - return ((ClusterConnection) getConnection()).getHbck(); + return getConnection().getHbck(); } /** diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/MiniHBaseCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/MiniHBaseCluster.java index 473eb74abf..92cfddfad9 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/MiniHBaseCluster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/MiniHBaseCluster.java @@ -24,7 +24,6 @@ import java.util.ArrayList; import java.util.HashSet; import java.util.List; import java.util.Set; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.hbase.master.HMaster; @@ -42,9 +41,6 @@ import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService; -import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ClientService; -import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MasterService; import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerStartupResponse; /** @@ -517,15 +513,6 @@ public class MiniHBaseCluster extends HBaseCluster { return t; } - /** - * Returns the current active master, if available. - * @return the active HMaster, null if none is active. - */ - @Override - public MasterService.BlockingInterface getMasterAdminService() { - return this.hbaseCluster.getActiveMaster().getMasterRpcServices(); - } - /** * Returns the current active master, if available. * @return the active HMaster, null if none is active. @@ -921,15 +908,4 @@ public class MiniHBaseCluster extends HBaseCluster { } return -1; } - - @Override - public AdminService.BlockingInterface getAdminProtocol(ServerName serverName) throws IOException { - return getRegionServer(getRegionServerIndex(serverName)).getRSRpcServices(); - } - - @Override - public ClientService.BlockingInterface getClientProtocol(ServerName serverName) - throws IOException { - return getRegionServer(getRegionServerIndex(serverName)).getRSRpcServices(); - } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/HConnectionTestingUtility.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/HConnectionTestingUtility.java index a8beab65e8..8852ab4af1 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/HConnectionTestingUtility.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/HConnectionTestingUtility.java @@ -20,26 +20,25 @@ package org.apache.hadoop.hbase.client; import java.io.IOException; import java.util.Optional; import java.util.concurrent.atomic.AtomicLong; - import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.RegionLocations; -import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HRegionLocation; +import org.apache.hadoop.hbase.RegionLocations; import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.ZooKeeperConnectionException; -import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor; -import org.apache.hadoop.hbase.coprocessor.RegionObserver; -import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos; -import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos; -import org.apache.hadoop.hbase.util.Threads; import 
org.apache.hadoop.hbase.coprocessor.ObserverContext; +import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor; import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment; +import org.apache.hadoop.hbase.coprocessor.RegionObserver; import org.apache.hadoop.hbase.ipc.RpcControllerFactory; +import org.apache.hadoop.hbase.util.Threads; import org.mockito.Mockito; +import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos; +import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos; + /** - * {@link ClusterConnection} testing utility. + * {@link ConnectionImplementation} testing utility. */ public class HConnectionTestingUtility { /* @@ -48,21 +47,20 @@ public class HConnectionTestingUtility { * {@link ConnectionImplementation} innards to HBaseTestingUtility to give it access. */ /** - * Get a Mocked {@link ClusterConnection} that goes with the passed conf - * configuration instance. Minimally the mock will return - * <code>conf</conf> when {@link ClusterConnection#getConfiguration()} is invoked. - * Be sure to shutdown the connection when done by calling - * {@link Connection#close()} else it will stick around; this is probably not what you want. + * Get a Mocked {@link ConnectionImplementation} that goes with the passed conf + * configuration instance. Minimally the mock will return <code>conf</code> when + * {@link ConnectionImplementation#getConfiguration()} is invoked. Be sure to shut down the + * connection when done by calling {@link Connection#close()} else it will stick around; this is + * probably not what you want. * @param conf configuration * @return ClusterConnection object for conf - * @throws ZooKeeperConnectionException */ - public static ClusterConnection getMockedConnection(final Configuration conf) - throws ZooKeeperConnectionException { + public static ConnectionImplementation getMockedConnection(final Configuration conf) + throws ZooKeeperConnectionException { ConnectionImplementation connection = Mockito.mock(ConnectionImplementation.class); Mockito.when(connection.getConfiguration()).thenReturn(conf); - Mockito.when(connection.getRpcControllerFactory()).thenReturn( - Mockito.mock(RpcControllerFactory.class)); + Mockito.when(connection.getRpcControllerFactory()) + .thenReturn(Mockito.mock(RpcControllerFactory.class)); // we need a real retrying caller RpcRetryingCallerFactory callerFactory = new RpcRetryingCallerFactory(conf); Mockito.when(connection.getRpcRetryingCallerFactory()).thenReturn(callerFactory); @@ -71,7 +69,7 @@ public class HConnectionTestingUtility { /** * Calls {@link #getMockedConnection(Configuration)} and then mocks a few - * more of the popular {@link ClusterConnection} methods so they do 'normal' + * more of the popular {@link ConnectionImplementation} methods so they do 'normal' * operation (see return doc below for list). Be sure to shutdown the * connection when done by calling {@link Connection#close()} else it will stick around; * this is probably not what you want.
@@ -86,17 +84,16 @@ public class HConnectionTestingUtility { * @param hri RegionInfo to include in the location returned when * getRegionLocator is called on the mocked connection * @return Mock up a connection that returns a {@link Configuration} when - * {@link ClusterConnection#getConfiguration()} is called, a 'location' when - * {@link ClusterConnection#getRegionLocation(org.apache.hadoop.hbase.TableName, byte[], boolean)} + * {@link ConnectionImplementation#getConfiguration()} is called, a 'location' when + * {@link ConnectionImplementation#getRegionLocation(TableName, byte[], boolean)} * is called, * and that returns the passed {@link AdminProtos.AdminService.BlockingInterface} instance when - * {@link ClusterConnection#getAdmin(ServerName)} is called, returns the passed + * {@link ConnectionImplementation#getAdmin(ServerName)} is called, returns the passed * {@link ClientProtos.ClientService.BlockingInterface} instance when - * {@link ClusterConnection#getClient(ServerName)} is called (Be sure to call + * {@link ConnectionImplementation#getClient(ServerName)} is called (Be sure to call * {@link Connection#close()} when done with this mocked Connection. - * @throws IOException */ - public static ClusterConnection getMockedConnectionAndDecorate(final Configuration conf, + public static ConnectionImplementation getMockedConnectionAndDecorate(final Configuration conf, final AdminProtos.AdminService.BlockingInterface admin, final ClientProtos.ClientService.BlockingInterface client, final ServerName sn, final RegionInfo hri) @@ -141,18 +138,16 @@ public class HConnectionTestingUtility { } /** - * Get a Mockito spied-upon {@link ClusterConnection} that goes with the passed - * conf configuration instance. - * Be sure to shutdown the connection when done by calling - * {@link Connection#close()} else it will stick around; this is probably not what you want. + * Get a Mockito spied-upon {@link ConnectionImplementation} that goes with the passed + * conf configuration instance. Be sure to shutdown the connection when done by + * calling {@link Connection#close()} else it will stick around; this is probably not what you + * want. 
* @param conf configuration - * @return ClusterConnection object for conf - * @throws ZooKeeperConnectionException - * [Dead link]: See also - * {http://mockito.googlecode.com/svn/branches/1.6/javadoc/org/mockito/Mockito.html#spy(T)} + * @return ConnectionImplementation object for conf [Dead link]: See also + * {http://mockito.googlecode.com/svn/branches/1.6/javadoc/org/mockito/Mockito.html#spy(T)} */ - public static ClusterConnection getSpiedConnection(final Configuration conf) - throws IOException { + public static ConnectionImplementation getSpiedConnection(final Configuration conf) + throws IOException { ConnectionImplementation connection = Mockito.spy(new ConnectionImplementation(conf, null, null)); return connection; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin1.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin1.java index 40de30a59b..f9690b5eae 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin1.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin1.java @@ -632,8 +632,8 @@ public class TestAdmin1 { assertFalse(this.admin.tableExists(tableName)); } - protected void verifyRoundRobinDistribution(ClusterConnection c, RegionLocator regionLocator, int - expectedRegions) throws IOException { + private void verifyRoundRobinDistribution(ConnectionImplementation c, + RegionLocator regionLocator, int expectedRegions) throws IOException { int numRS = c.getCurrentNrHRS(); List regions = regionLocator.getAllRegionLocations(); Map> server2Regions = new HashMap<>(); @@ -652,13 +652,14 @@ // which contains less regions by intention. numRS--; } - float average = (float) expectedRegions/numRS; - int min = (int)Math.floor(average); - int max = (int)Math.ceil(average); + float average = (float) expectedRegions / numRS; + int min = (int) Math.floor(average); + int max = (int) Math.ceil(average); for (List regionList : server2Regions.values()) { - assertTrue("numRS=" + numRS + ", min=" + min + ", max=" + max + - ", size=" + regionList.size() + ", tablesOnMaster=" + tablesOnMaster, - regionList.size() == min || regionList.size() == max); + assertTrue( + "numRS=" + numRS + ", min=" + min + ", max=" + max + ", size=" + regionList.size() + + ", tablesOnMaster=" + tablesOnMaster, + regionList.size() == min || regionList.size() == max); } } @@ -739,7 +740,7 @@ public class TestAdmin1 { List regions; Iterator hris; RegionInfo hri; - ClusterConnection conn = (ClusterConnection) TEST_UTIL.getConnection(); + ConnectionImplementation conn = (ConnectionImplementation) TEST_UTIL.getConnection(); try (RegionLocator l = TEST_UTIL.getConnection().getRegionLocator(tableName)) { regions = l.getAllRegionLocations(); @@ -1257,13 +1258,9 @@ public class TestAdmin1 { byte[][] nameofRegionsToMerge = new byte[2][]; nameofRegionsToMerge[0] = regions.get(1).getFirst().getEncodedNameAsBytes(); nameofRegionsToMerge[1] = regions.get(2).getFirst().getEncodedNameAsBytes(); - MergeTableRegionsRequest request = RequestConverter - .buildMergeTableRegionsRequest( - nameofRegionsToMerge, - true, - HConstants.NO_NONCE, - HConstants.NO_NONCE); - ((ClusterConnection) TEST_UTIL.getAdmin().getConnection()).getMaster() + MergeTableRegionsRequest request = RequestConverter.buildMergeTableRegionsRequest( + nameofRegionsToMerge, true, HConstants.NO_NONCE, HConstants.NO_NONCE); + ((ConnectionImplementation) TEST_UTIL.getAdmin().getConnection()).getMaster() .mergeTableRegions(null, request); } catch
(org.apache.hbase.thirdparty.com.google.protobuf.ServiceException m) { Throwable t = m.getCause(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin2.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin2.java index 97c7b9b579..ecc7cb0a8f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin2.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin2.java @@ -782,7 +782,7 @@ public class TestAdmin2 { Assert.assertNotNull(store); Assert.assertEquals(expectedStoreFilesSize, store.getSize()); - ClusterConnection conn = ((ClusterConnection) admin.getConnection()); + ConnectionImplementation conn = (ConnectionImplementation) admin.getConnection(); HBaseRpcController controller = conn.getRpcControllerFactory().newController(); for (int i = 0; i < 10; i++) { RegionInfo ri = diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableAdminApi.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableAdminApi.java index 1750926785..1c8b4cd2f8 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableAdminApi.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableAdminApi.java @@ -274,7 +274,7 @@ public class TestAsyncTableAdminApi extends TestAsyncAdminBase { private void verifyRoundRobinDistribution(List regions, int expectedRegions) throws IOException { - int numRS = ((ClusterConnection) TEST_UTIL.getConnection()).getCurrentNrHRS(); + int numRS = ((ConnectionImplementation) TEST_UTIL.getConnection()).getCurrentNrHRS(); Map> server2Regions = new HashMap<>(); regions.stream().forEach((loc) -> { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCISleep.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCISleep.java index 4e5665d871..2c18474a73 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCISleep.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCISleep.java @@ -111,8 +111,8 @@ public class TestCISleep extends AbstractTestCITimeout { } RegionAdminServiceCallable regionAdminServiceCallable = - new RegionAdminServiceCallable((ClusterConnection) TEST_UTIL.getConnection(), - new RpcControllerFactory(TEST_UTIL.getConfiguration()), tableName, FAM_NAM) { + new RegionAdminServiceCallable((ConnectionImplementation) TEST_UTIL.getConnection(), + new RpcControllerFactory(TEST_UTIL.getConfiguration()), tableName, FAM_NAM) { @Override public Object call(HBaseRpcController controller) throws Exception { return null; @@ -126,9 +126,9 @@ public class TestCISleep extends AbstractTestCITimeout { assertTrue(pauseTime <= (baseTime * HConstants.RETRY_BACKOFF[i] * 1.01f)); } - try ( - MasterCallable masterCallable = new MasterCallable(TEST_UTIL.getConnection(), - new RpcControllerFactory(TEST_UTIL.getConfiguration())) { + try (MasterCallable masterCallable = + new MasterCallable((ConnectionImplementation) TEST_UTIL.getConnection(), + new RpcControllerFactory(TEST_UTIL.getConfiguration())) { @Override protected Object rpcCall() throws Exception { return null; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientPushback.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientPushback.java index d6f32f5483..ae217cd820 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientPushback.java +++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientPushback.java @@ -94,7 +94,8 @@ public class TestClientPushback { public void testClientTracksServerPushback() throws Exception{ Configuration conf = UTIL.getConfiguration(); - ClusterConnection conn = (ClusterConnection) ConnectionFactory.createConnection(conf); + ConnectionImplementation conn = + (ConnectionImplementation) ConnectionFactory.createConnection(conf); BufferedMutatorImpl mutator = (BufferedMutatorImpl) conn.getBufferedMutator(tableName); HRegionServer rs = UTIL.getHBaseCluster().getRegionServer(0); @@ -182,7 +183,8 @@ public class TestClientPushback { @Test public void testMutateRowStats() throws IOException { Configuration conf = UTIL.getConfiguration(); - ClusterConnection conn = (ClusterConnection) ConnectionFactory.createConnection(conf); + ConnectionImplementation conn = + (ConnectionImplementation) ConnectionFactory.createConnection(conf); Table table = conn.getTable(tableName); HRegionServer rs = UTIL.getHBaseCluster().getRegionServer(0); Region region = rs.getRegions(tableName).get(0); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestConnectionImplementation.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestConnectionImplementation.java index 4d9f39bebe..4ac7b53b87 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestConnectionImplementation.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestConnectionImplementation.java @@ -837,7 +837,7 @@ public class TestConnectionImplementation { * from ZK by the client. */ @Test - public void testConnection() throws Exception{ + public void testConnection() throws Exception { // We create an empty config and add the ZK address. Configuration c = new Configuration(); c.set(HConstants.ZOOKEEPER_QUORUM, @@ -846,7 +846,8 @@ public class TestConnectionImplementation { TEST_UTIL.getConfiguration().get(HConstants.ZOOKEEPER_CLIENT_PORT)); // This should be enough to connect - ClusterConnection conn = (ClusterConnection) ConnectionFactory.createConnection(c); + ConnectionImplementation conn = + (ConnectionImplementation) ConnectionFactory.createConnection(c); assertTrue(conn.isMasterRunning()); conn.close(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide3.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide3.java index 1315d4afbf..ff151fe6a2 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide3.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide3.java @@ -159,7 +159,7 @@ public class TestFromClientSide3 { // connection needed for poll-wait HRegionLocation loc = locator.getRegionLocation(row, true); AdminProtos.AdminService.BlockingInterface server = - ((ClusterConnection) admin.getConnection()).getAdmin(loc.getServerName()); + ((ConnectionImplementation) admin.getConnection()).getAdmin(loc.getServerName()); byte[] regName = loc.getRegionInfo().getRegionName(); for (int i = 0; i < nFlushes; i++) { @@ -289,7 +289,7 @@ public class TestFromClientSide3 { final TableName tableName = TableName.valueOf(name.getMethodName()); Table hTable = TEST_UTIL.createTable(tableName, FAMILY, 10); Admin admin = TEST_UTIL.getAdmin(); - ClusterConnection connection = (ClusterConnection) TEST_UTIL.getConnection(); + ConnectionImplementation connection = (ConnectionImplementation) TEST_UTIL.getConnection(); // Create 3 store files. 
byte[] row = Bytes.toBytes(random.nextInt()); @@ -681,7 +681,7 @@ public class TestFromClientSide3 { @Test public void testConnectionDefaultUsesCodec() throws Exception { - ClusterConnection con = (ClusterConnection) TEST_UTIL.getConnection(); + ConnectionImplementation con = (ConnectionImplementation) TEST_UTIL.getConnection(); assertTrue(con.hasCellBlockSupport()); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHBaseAdminNoCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHBaseAdminNoCluster.java index a1026a943a..c5279e4c25 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHBaseAdminNoCluster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHBaseAdminNoCluster.java @@ -95,7 +95,8 @@ public class TestHBaseAdminNoCluster { configuration.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, count); // Get mocked connection. Getting the connection will register it so when HBaseAdmin is // constructed with same configuration, it will find this mocked connection. - ClusterConnection connection = HConnectionTestingUtility.getMockedConnection(configuration); + ConnectionImplementation connection = + HConnectionTestingUtility.getMockedConnection(configuration); // Mock so we get back the master interface. Make it so when createTable is called, we throw // the PleaseHoldException. MasterKeepAliveConnection masterAdmin = Mockito.mock(MasterKeepAliveConnection.class); @@ -292,7 +293,7 @@ public class TestHBaseAdminNoCluster { final int count = 10; configuration.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, count); - ClusterConnection connection = mock(ClusterConnection.class); + ConnectionImplementation connection = mock(ConnectionImplementation.class); when(connection.getConfiguration()).thenReturn(configuration); MasterKeepAliveConnection masterAdmin = Mockito.mock(MasterKeepAliveConnection.class, new Answer() { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMetaTableAccessorNoCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaTableAccessorNoCluster.java similarity index 85% rename from hbase-server/src/test/java/org/apache/hadoop/hbase/TestMetaTableAccessorNoCluster.java rename to hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaTableAccessorNoCluster.java index 5d36ea90f9..47358869ad 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMetaTableAccessorNoCluster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaTableAccessorNoCluster.java @@ -15,7 +15,7 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ -package org.apache.hadoop.hbase; +package org.apache.hadoop.hbase.client; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotNull; @@ -26,11 +26,19 @@ import java.io.IOException; import java.util.ArrayList; import java.util.List; import java.util.NavigableMap; -import org.apache.hadoop.hbase.client.ClusterConnection; -import org.apache.hadoop.hbase.client.HConnectionTestingUtility; -import org.apache.hadoop.hbase.client.RegionInfo; -import org.apache.hadoop.hbase.client.RegionInfoBuilder; -import org.apache.hadoop.hbase.client.Result; +import org.apache.hadoop.hbase.Abortable; +import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.CellScannable; +import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.HRegionLocation; +import org.apache.hadoop.hbase.KeyValue; +import org.apache.hadoop.hbase.MetaTableAccessor; +import org.apache.hadoop.hbase.RegionLocations; +import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.ipc.HBaseRpcController; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.testclassification.MiscTests; @@ -99,21 +107,19 @@ public class TestMetaTableAccessorNoCluster { Result r = Result.create(kvs); assertNull(MetaTableAccessor.getRegionInfo(r)); - byte [] f = HConstants.CATALOG_FAMILY; + byte[] f = HConstants.CATALOG_FAMILY; // Make a key value that doesn't have the expected qualifier. - kvs.add(new KeyValue(HConstants.EMPTY_BYTE_ARRAY, f, - HConstants.SERVER_QUALIFIER, f)); + kvs.add(new KeyValue(HConstants.EMPTY_BYTE_ARRAY, f, HConstants.SERVER_QUALIFIER, f)); r = Result.create(kvs); assertNull(MetaTableAccessor.getRegionInfo(r)); // Make a key that does not have a regioninfo value. - kvs.add(new KeyValue(HConstants.EMPTY_BYTE_ARRAY, f, - HConstants.REGIONINFO_QUALIFIER, f)); + kvs.add(new KeyValue(HConstants.EMPTY_BYTE_ARRAY, f, HConstants.REGIONINFO_QUALIFIER, f)); RegionInfo hri = MetaTableAccessor.getRegionInfo(Result.create(kvs)); assertTrue(hri == null); // OK, give it what it expects kvs.clear(); - kvs.add(new KeyValue(HConstants.EMPTY_BYTE_ARRAY, f, - HConstants.REGIONINFO_QUALIFIER, RegionInfo.toByteArray(RegionInfoBuilder.FIRST_META_REGIONINFO))); + kvs.add(new KeyValue(HConstants.EMPTY_BYTE_ARRAY, f, HConstants.REGIONINFO_QUALIFIER, + RegionInfo.toByteArray(RegionInfoBuilder.FIRST_META_REGIONINFO))); hri = MetaTableAccessor.getRegionInfo(Result.create(kvs)); assertNotNull(hri); assertTrue(RegionInfo.COMPARATOR.compare(hri, RegionInfoBuilder.FIRST_META_REGIONINFO) == 0); @@ -123,8 +129,6 @@ public class TestMetaTableAccessorNoCluster { * Test that MetaTableAccessor will ride over server throwing * "Server not running" IOEs. * @see HBASE-3446 - * @throws IOException - * @throws InterruptedException */ @Test public void testRideOverServerNotRunning() @@ -135,7 +139,7 @@ public class TestMetaTableAccessorNoCluster { // This is a servername we use in a few places below. ServerName sn = ServerName.valueOf("example.com", 1234, System.currentTimeMillis()); - ClusterConnection connection = null; + ConnectionImplementation connection = null; try { // Mock an ClientProtocol. Our mock implementation will fail a few // times when we go to open a scanner. 
@@ -155,7 +159,7 @@ public class TestMetaTableAccessorNoCluster { RegionInfo.toByteArray(RegionInfoBuilder.FIRST_META_REGIONINFO))); kvs.add(new KeyValue(rowToVerify, HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER, - Bytes.toBytes(sn.getHostAndPort()))); + Bytes.toBytes(sn.getAddress().toString()))); kvs.add(new KeyValue(rowToVerify, HConstants.CATALOG_FAMILY, HConstants.STARTCODE_QUALIFIER, Bytes.toBytes(sn.getStartcode()))); @@ -190,9 +194,8 @@ public class TestMetaTableAccessorNoCluster { // Return the RegionLocations object when locateRegion // The ugly format below comes of 'Important gotcha on spying real objects!' from // http://mockito.googlecode.com/svn/branches/1.6/javadoc/org/mockito/Mockito.html - Mockito.doReturn(rl).when - (connection).locateRegion((TableName)Mockito.any(), (byte[])Mockito.any(), - Mockito.anyBoolean(), Mockito.anyBoolean(), Mockito.anyInt()); + Mockito.doReturn(rl).when(connection).locateRegion((TableName) Mockito.any(), + (byte[]) Mockito.any(), Mockito.anyBoolean(), Mockito.anyBoolean(), Mockito.anyInt()); // Now shove our HRI implementation into the spied-upon connection. Mockito.doReturn(implementation). @@ -202,14 +205,17 @@ public class TestMetaTableAccessorNoCluster { NavigableMap hris = MetaTableAccessor.getServerUserRegions(connection, sn); assertEquals(1, hris.size()); - assertTrue(RegionInfo.COMPARATOR.compare(hris.firstEntry().getKey(), RegionInfoBuilder.FIRST_META_REGIONINFO) == 0); + assertTrue(RegionInfo.COMPARATOR.compare(hris.firstEntry().getKey(), + RegionInfoBuilder.FIRST_META_REGIONINFO) == 0); assertTrue(Bytes.equals(rowToVerify, hris.firstEntry().getValue().getRow())); // Finally verify that scan was called four times -- three times // with exception and then on 4th attempt we succeed Mockito.verify(implementation, Mockito.times(4)). 
scan((RpcController)Mockito.any(), (ScanRequest)Mockito.any()); } finally { - if (connection != null && !connection.isClosed()) connection.close(); + if (connection != null && !connection.isClosed()) { + connection.close(); + } zkw.close(); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaWithReplicas.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaWithReplicas.java index 3b14b7f0a5..830022f2e0 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaWithReplicas.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaWithReplicas.java @@ -253,7 +253,7 @@ public class TestMetaWithReplicas { util.getHBaseClusterInterface().killRegionServer(primary); util.getHBaseClusterInterface().waitForRegionServerToStop(primary, 60000); } - ((ClusterConnection)c).clearRegionCache(); + ((ConnectionImplementation)c).clearRegionCache(); } LOG.info("Running GETs"); Get get = null; @@ -276,7 +276,7 @@ public class TestMetaWithReplicas { util.getHBaseClusterInterface().startRegionServer(primary.getHostname(), 0); util.getHBaseClusterInterface().waitForActiveAndReadyMaster(); LOG.info("Master active!"); - ((ClusterConnection)c).clearRegionCache(); + ((ConnectionImplementation)c).clearRegionCache(); } conf.setBoolean(HConstants.USE_META_REPLICAS, false); LOG.info("Running GETs no replicas"); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMultiParallel.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMultiParallel.java index 7d36e99583..50c9bd8d9e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMultiParallel.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMultiParallel.java @@ -553,7 +553,7 @@ public class TestMultiParallel { }; NonceGenerator oldCnm = - ConnectionUtils.injectNonceGeneratorForTesting((ClusterConnection)connection, cnm); + ConnectionUtils.injectNonceGeneratorForTesting((ConnectionImplementation) connection, cnm); // First test sequential requests. 
try { @@ -615,7 +615,8 @@ public class TestMultiParallel { validateResult(result, QUALIFIER, Bytes.toBytes((numRequests / 2) + 1L)); table.close(); } finally { - ConnectionImplementation.injectNonceGeneratorForTesting((ClusterConnection) connection, oldCnm); + ConnectionImplementation.injectNonceGeneratorForTesting((ConnectionImplementation) connection, + oldCnm); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestReplicasClient.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestReplicasClient.java index 311f65185e..4e4735dc7e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestReplicasClient.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestReplicasClient.java @@ -229,7 +229,7 @@ public class TestReplicasClient { @Before public void before() throws IOException { - ((ClusterConnection) HTU.getAdmin().getConnection()).clearRegionCache(); + ((ConnectionImplementation) HTU.getAdmin().getConnection()).clearRegionCache(); try { openRegion(hriPrimary); } catch (Exception ignored) { @@ -251,7 +251,7 @@ public class TestReplicasClient { } catch (Exception ignored) { } - ((ClusterConnection) HTU.getAdmin().getConnection()).clearRegionCache(); + ((ConnectionImplementation) HTU.getAdmin().getConnection()).clearRegionCache(); } private HRegionServer getRS() { @@ -329,7 +329,7 @@ public class TestReplicasClient { public void testLocations() throws Exception { byte[] b1 = "testLocations".getBytes(); openRegion(hriSecondary); - ClusterConnection hc = (ClusterConnection) HTU.getAdmin().getConnection(); + ConnectionImplementation hc = (ConnectionImplementation) HTU.getAdmin().getConnection(); try { hc.clearRegionCache(); @@ -572,7 +572,7 @@ public class TestReplicasClient { LOG.info("get works and is not stale done"); //reset - ClusterConnection connection = (ClusterConnection) HTU.getConnection(); + ConnectionImplementation connection = (ConnectionImplementation) HTU.getConnection(); Counter hedgedReadOps = connection.getConnectionMetrics().hedgedReadOps; Counter hedgedReadWin = connection.getConnectionMetrics().hedgedReadWin; hedgedReadOps.dec(hedgedReadOps.getCount()); @@ -639,7 +639,7 @@ public class TestReplicasClient { Thread.sleep(1000 + REFRESH_PERIOD * 2); - AsyncProcess ap = ((ClusterConnection) HTU.getConnection()).getAsyncProcess(); + AsyncProcess ap = ((ConnectionImplementation) HTU.getConnection()).getAsyncProcess(); // Make primary slowdown SlowMeCopro.getPrimaryCdl().set(new CountDownLatch(1)); @@ -655,16 +655,14 @@ public class TestReplicasClient { gets.add(g); Object[] results = new Object[2]; - int operationTimeout = ((ClusterConnection) HTU.getConnection()).getConnectionConfiguration().getOperationTimeout(); - int readTimeout = ((ClusterConnection) HTU.getConnection()).getConnectionConfiguration().getReadRpcTimeout(); - AsyncProcessTask task = AsyncProcessTask.newBuilder() - .setPool(HTable.getDefaultExecutor(HTU.getConfiguration())) - .setTableName(table.getName()) - .setRowAccess(gets) - .setResults(results) - .setOperationTimeout(operationTimeout) - .setRpcTimeout(readTimeout) - .build(); + int operationTimeout = ((ConnectionImplementation) HTU.getConnection()) + .getConnectionConfiguration().getOperationTimeout(); + int readTimeout = ((ConnectionImplementation) HTU.getConnection()) + .getConnectionConfiguration().getReadRpcTimeout(); + AsyncProcessTask task = + AsyncProcessTask.newBuilder().setPool(HTable.getDefaultExecutor(HTU.getConfiguration())) + 
.setTableName(table.getName()).setRowAccess(gets).setResults(results) + .setOperationTimeout(operationTimeout).setRpcTimeout(readTimeout).build(); AsyncRequestFuture reqs = ap.submit(task); reqs.waitUntilDone(); // verify we got the right results back diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSeparateClientZKCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSeparateClientZKCluster.java index 026010d42a..7a5ff18219 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSeparateClientZKCluster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSeparateClientZKCluster.java @@ -204,7 +204,7 @@ public class TestSeparateClientZKCluster { public void testMetaMoveDuringClientZkClusterRestart() throws Exception { TableName tn = TableName.valueOf(name.getMethodName()); // create table - ClusterConnection conn = (ClusterConnection) TEST_UTIL.getConnection(); + ConnectionImplementation conn = (ConnectionImplementation) TEST_UTIL.getConnection(); Admin admin = conn.getAdmin(); HTable table = (HTable) conn.getTable(tn); try { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestShortCircuitConnection.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestShortCircuitConnection.java index beaa59be3e..956539a568 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestShortCircuitConnection.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestShortCircuitConnection.java @@ -75,7 +75,8 @@ public class TestShortCircuitConnection { htd.addFamily(hcd); UTIL.createTable(htd, null); HRegionServer regionServer = UTIL.getRSForFirstRegionInTable(tableName); - ClusterConnection connection = regionServer.getClusterConnection(); + ConnectionImplementation connection = + (ConnectionImplementation) regionServer.getClusterConnection(); Table tableIf = connection.getTable(tableName); assertTrue(tableIf instanceof HTable); HTable table = (HTable) tableIf; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotCloneIndependence.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotCloneIndependence.java index 0562c90347..b24763197c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotCloneIndependence.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotCloneIndependence.java @@ -297,7 +297,7 @@ public class TestSnapshotCloneIndependence { */ private void runTestRegionOperationsIndependent() throws Exception { // Verify that region information is the same pre-split - ((ClusterConnection) UTIL.getConnection()).clearRegionCache(); + ((ConnectionImplementation) UTIL.getConnection()).clearRegionCache(); List originalTableHRegions = admin.getTableRegions(originalTableName); final int originalRegionCount = originalTableHRegions.size(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestClockSkewDetection.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestClockSkewDetection.java index a0aae32095..0deea15540 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestClockSkewDetection.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestClockSkewDetection.java @@ -18,16 +18,12 @@ package org.apache.hadoop.hbase.master; import static org.junit.Assert.fail; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; import java.net.InetAddress; import 
org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.ClockOutOfSyncException; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseConfiguration; -import org.apache.hadoop.hbase.client.ClusterConnection; -import org.apache.hadoop.hbase.ipc.RpcControllerFactory; import org.apache.hadoop.hbase.testclassification.MasterTests; import org.apache.hadoop.hbase.testclassification.SmallTests; import org.junit.ClassRule; @@ -51,14 +47,7 @@ public class TestClockSkewDetection { @Test public void testClockSkewDetection() throws Exception { final Configuration conf = HBaseConfiguration.create(); - ServerManager sm = new ServerManager(new MockNoopMasterServices(conf) { - @Override - public ClusterConnection getClusterConnection() { - ClusterConnection conn = mock(ClusterConnection.class); - when(conn.getRpcControllerFactory()).thenReturn(mock(RpcControllerFactory.class)); - return conn; - } - }); + ServerManager sm = new ServerManager(new MockNoopMasterServices(conf)); LOG.debug("regionServerStartup 1"); InetAddress ia1 = InetAddress.getLocalHost(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMetaAssignmentWithStopMaster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMetaAssignmentWithStopMaster.java index 446c3f9cb0..0c2532e21a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMetaAssignmentWithStopMaster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMetaAssignmentWithStopMaster.java @@ -25,8 +25,9 @@ import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.StartMiniClusterOption; import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.client.ClusterConnection; +import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; +import org.apache.hadoop.hbase.client.RegionLocator; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.junit.BeforeClass; import org.junit.ClassRule; @@ -58,32 +59,33 @@ public class TestMetaAssignmentWithStopMaster { @Test public void testStopActiveMaster() throws Exception { - ClusterConnection conn = - (ClusterConnection) ConnectionFactory.createConnection(UTIL.getConfiguration()); - ServerName oldMetaServer = conn.locateRegions(TableName.META_TABLE_NAME).get(0).getServerName(); - ServerName oldMaster = UTIL.getMiniHBaseCluster().getMaster().getServerName(); + try (Connection conn = ConnectionFactory.createConnection(UTIL.getConfiguration()); + RegionLocator locator = conn.getRegionLocator(TableName.META_TABLE_NAME)) { + ServerName oldMetaServer = locator.getAllRegionLocations().get(0).getServerName(); + ServerName oldMaster = UTIL.getMiniHBaseCluster().getMaster().getServerName(); - UTIL.getMiniHBaseCluster().getMaster().stop("Stop master for test"); - long startTime = System.currentTimeMillis(); - while (UTIL.getMiniHBaseCluster().getMaster() == null || UTIL.getMiniHBaseCluster().getMaster() - .getServerName().equals(oldMaster)) { - LOG.info("Wait the standby master become active"); - Thread.sleep(3000); - if (System.currentTimeMillis() - startTime > WAIT_TIMEOUT) { - fail("Wait too long for standby master become active"); + UTIL.getMiniHBaseCluster().getMaster().stop("Stop master for test"); + long startTime = System.currentTimeMillis(); + while (UTIL.getMiniHBaseCluster().getMaster() == null || + UTIL.getMiniHBaseCluster().getMaster().getServerName().equals(oldMaster)) { 
+ LOG.info("Wait the standby master become active"); + Thread.sleep(3000); + if (System.currentTimeMillis() - startTime > WAIT_TIMEOUT) { + fail("Wait too long for standby master become active"); + } } - } - startTime = System.currentTimeMillis(); - while (!UTIL.getMiniHBaseCluster().getMaster().isInitialized()) { - LOG.info("Wait the new active master to be initialized"); - Thread.sleep(3000); - if (System.currentTimeMillis() - startTime > WAIT_TIMEOUT) { - fail("Wait too long for the new active master to be initialized"); + startTime = System.currentTimeMillis(); + while (!UTIL.getMiniHBaseCluster().getMaster().isInitialized()) { + LOG.info("Wait the new active master to be initialized"); + Thread.sleep(3000); + if (System.currentTimeMillis() - startTime > WAIT_TIMEOUT) { + fail("Wait too long for the new active master to be initialized"); + } } - } - ServerName newMetaServer = conn.locateRegions(TableName.META_TABLE_NAME).get(0).getServerName(); - assertTrue("The new meta server " + newMetaServer + " should be same with" + + ServerName newMetaServer = locator.getAllRegionLocations().get(0).getServerName(); + assertTrue("The new meta server " + newMetaServer + " should be same with" + " the old meta server " + oldMetaServer, newMetaServer.equals(oldMetaServer)); + } } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureTestingUtility.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureTestingUtility.java index 98c3978143..6457964cc1 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureTestingUtility.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureTestingUtility.java @@ -359,11 +359,11 @@ public class MasterProcedureTestingUtility { // Procedure Helpers // ========================================================================== public static long generateNonceGroup(final HMaster master) { - return master.getClusterConnection().getNonceGenerator().getNonceGroup(); + return master.getAsyncClusterConnection().getNonceGenerator().getNonceGroup(); } public static long generateNonce(final HMaster master) { - return master.getClusterConnection().getNonceGenerator().newNonce(); + return master.getAsyncClusterConnection().getNonceGenerator().newNonce(); } /** diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionServerBulkLoad.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionServerBulkLoad.java index c86f3e11ef..569726c699 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionServerBulkLoad.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionServerBulkLoad.java @@ -38,6 +38,7 @@ import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.KeyValueUtil; @@ -48,6 +49,7 @@ import org.apache.hadoop.hbase.TableExistsException; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.ClientServiceCallable; import org.apache.hadoop.hbase.client.ClusterConnection; +import org.apache.hadoop.hbase.client.RegionLocator; import org.apache.hadoop.hbase.client.Result; import 
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionServerBulkLoadWithOldClient.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionServerBulkLoadWithOldClient.java
index de01401d26..02c53356e8 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionServerBulkLoadWithOldClient.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionServerBulkLoadWithOldClient.java
@@ -25,11 +25,13 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HRegionLocation;
 import org.apache.hadoop.hbase.MultithreadedTestUtil.RepeatingTestThread;
 import org.apache.hadoop.hbase.MultithreadedTestUtil.TestContext;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.ClientServiceCallable;
 import org.apache.hadoop.hbase.client.ClusterConnection;
+import org.apache.hadoop.hbase.client.RegionLocator;
 import org.apache.hadoop.hbase.client.RpcRetryingCaller;
 import org.apache.hadoop.hbase.client.RpcRetryingCallerFactory;
 import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
@@ -47,8 +49,6 @@ import org.slf4j.LoggerFactory;
 
 import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
 
 import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.BulkLoadHFileRequest;
 
 /**
@@ -107,7 +107,7 @@ public class TestHRegionServerBulkLoadWithOldClient extends TestHRegionServerBul
         @Override
         protected Void rpcCall() throws Exception {
           LOG.info("Non-secure old client");
-          byte[] regionName = getLocation().getRegionInfo().getRegionName();
+          byte[] regionName = getLocation().getRegion().getRegionName();
           BulkLoadHFileRequest request = RequestConverter
             .buildBulkLoadHFileRequest(famPaths, regionName, true, null, null);
@@ -122,23 +122,11 @@ public class TestHRegionServerBulkLoadWithOldClient extends TestHRegionServerBul
       // Periodically do compaction to reduce the number of open file handles.
       if (numBulkLoads.get() % 5 == 0) {
         // 5 * 50 = 250 open file handles!
-        callable = new ClientServiceCallable<Void>(conn, tableName,
-          Bytes.toBytes("aaa"), rpcControllerFactory.newController(), HConstants.PRIORITY_UNSET) {
-          @Override
-          protected Void rpcCall() throws Exception {
-            LOG.debug("compacting " + getLocation() + " for row "
-              + Bytes.toStringBinary(getRow()));
-            AdminProtos.AdminService.BlockingInterface server =
-              conn.getAdmin(getLocation().getServerName());
-            CompactRegionRequest request =
-              RequestConverter.buildCompactRegionRequest(
-                getLocation().getRegionInfo().getRegionName(), true, null);
-            server.compactRegion(null, request);
-            numCompactions.incrementAndGet();
-            return null;
-          }
-        };
-        caller.callWithRetries(callable, Integer.MAX_VALUE);
+        try (RegionLocator locator = conn.getRegionLocator(tableName)) {
+          HRegionLocation loc = locator.getRegionLocation(Bytes.toBytes("aaa"), true);
+          conn.getAdmin().compactRegion(loc.getRegion().getRegionName());
+          numCompactions.incrementAndGet();
+        }
       }
     }
   }
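
Both bulk load tests also move from HRegionLocation.getRegionInfo() to getRegion(), trading the deprecated HRegionInfo return type for the RegionInfo interface. A tiny sketch of the rename (the helper name is illustrative):

  import org.apache.hadoop.hbase.HRegionLocation;

  public class RegionNameSketch {
    // getRegion() returns RegionInfo; getRegionInfo() returned the
    // deprecated HRegionInfo. Both expose getRegionName().
    static byte[] regionName(HRegionLocation location) {
      return location.getRegion().getRegionName();
    }
  }
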
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestWALEntrySinkFilter.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestWALEntrySinkFilter.java
index fd9ff2924b..b504b16e07 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestWALEntrySinkFilter.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestWALEntrySinkFilter.java
@@ -39,6 +39,7 @@ import org.apache.hadoop.hbase.CompareOperator;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.Stoppable;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Admin;
@@ -49,6 +50,7 @@ import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Durability;
 import org.apache.hadoop.hbase.client.Get;
+import org.apache.hadoop.hbase.client.Hbck;
 import org.apache.hadoop.hbase.client.Increment;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.RegionLocator;
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/tool/TestLoadIncrementalHFilesSplitRecovery.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/tool/TestLoadIncrementalHFilesSplitRecovery.java
index a4b99a11b7..716340a777 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/tool/TestLoadIncrementalHFilesSplitRecovery.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/tool/TestLoadIncrementalHFilesSplitRecovery.java
@@ -33,25 +33,21 @@ import java.util.Map;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.atomic.AtomicInteger;
 import java.util.stream.IntStream;
-import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionLocation;
 import org.apache.hadoop.hbase.MetaTableAccessor;
-import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableExistsException;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.ClientServiceCallable;
-import org.apache.hadoop.hbase.client.ClusterConnection;
 import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
+import org.apache.hadoop.hbase.client.HConnectionTestingUtility;
 import org.apache.hadoop.hbase.client.RegionInfo;
-import org.apache.hadoop.hbase.client.RegionInfoBuilder;
 import org.apache.hadoop.hbase.client.RegionLocator;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.ResultScanner;
@@ -76,17 +72,12 @@ import org.junit.Rule;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 import org.junit.rules.TestName;
-import org.mockito.Mockito;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import org.apache.hbase.thirdparty.com.google.common.collect.Multimap;
-import org.apache.hbase.thirdparty.com.google.protobuf.RpcController;
-import org.apache.hbase.thirdparty.com.google.protobuf.ServiceException;
 
 import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.BulkLoadHFileRequest;
 
 /**
  * Test cases for the atomic load error handling of the bulk load functionality.
@@ -294,7 +285,7 @@ public class TestLoadIncrementalHFilesSplitRecovery {
         if (i == 1) {
           Connection errConn;
           try {
-            errConn = getMockedConnection(util.getConfiguration());
+            errConn = HConnectionTestingUtility.getMockedConnection(util.getConfiguration());
             serviceCallable = this.buildClientServiceCallable(errConn, table, first, lqis, true);
           } catch (Exception e) {
             LOG.error(HBaseMarkers.FATAL, "mocking cruft, should never happen", e);
@@ -364,28 +355,6 @@ public class TestLoadIncrementalHFilesSplitRecovery {
     util.getConfiguration().setBoolean(LoadIncrementalHFiles.RETRY_ON_IO_EXCEPTION, false);
   }
 
-  private ClusterConnection getMockedConnection(final Configuration conf)
-    throws IOException, org.apache.hbase.thirdparty.com.google.protobuf.ServiceException {
-    ClusterConnection c = Mockito.mock(ClusterConnection.class);
-    Mockito.when(c.getConfiguration()).thenReturn(conf);
-    Mockito.doNothing().when(c).close();
-    // Make it so we return a particular location when asked.
-    final HRegionLocation loc = new HRegionLocation(RegionInfoBuilder.FIRST_META_REGIONINFO,
-      ServerName.valueOf("example.org", 1234, 0));
-    Mockito.when(
-      c.getRegionLocation((TableName) Mockito.any(), (byte[]) Mockito.any(), Mockito.anyBoolean()))
-      .thenReturn(loc);
-    Mockito.when(c.locateRegion((TableName) Mockito.any(), (byte[]) Mockito.any())).thenReturn(loc);
-    ClientProtos.ClientService.BlockingInterface hri =
-      Mockito.mock(ClientProtos.ClientService.BlockingInterface.class);
-    Mockito
-      .when(
-        hri.bulkLoadHFile((RpcController) Mockito.any(), (BulkLoadHFileRequest) Mockito.any()))
-      .thenThrow(new ServiceException(new IOException("injecting bulk load error")));
-    Mockito.when(c.getClient(Mockito.any())).thenReturn(hri);
-    return c;
-  }
-
   /**
    * This test exercises the path where there is a split after initial validation but before the
    * atomic bulk load call. We cannot use presplitting to test this path, so we actually inject a
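
The deleted getMockedConnection helper is not lost: the test now calls the shared HConnectionTestingUtility.getMockedConnection, so the Mockito wiring lives in one place. For reference, the essentials such a mock has to provide, condensed from the helper removed above (the utility's actual implementation may differ):

  import org.apache.hadoop.hbase.HRegionLocation;
  import org.apache.hadoop.hbase.ServerName;
  import org.apache.hadoop.hbase.client.RegionInfoBuilder;

  public class MockedConnectionSketch {
    // Every location lookup answers with one fixed region on a fake server,
    // while the client stub throws, injecting a bulk load error.
    static HRegionLocation fixedLocation() {
      return new HRegionLocation(RegionInfoBuilder.FIRST_META_REGIONINFO,
          ServerName.valueOf("example.org", 1234, 0));
    }
  }
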
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/BaseTestHBaseFsck.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/BaseTestHBaseFsck.java
index fc23d51e2a..c740bed71d 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/BaseTestHBaseFsck.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/BaseTestHBaseFsck.java
@@ -74,9 +74,6 @@ import org.junit.rules.TestName;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos;
-
 /**
  * This is the base class for HBaseFsck's ability to detect reasons for inconsistent tables.
 *
@@ -298,17 +295,12 @@ public class BaseTestHBaseFsck {
   /**
    * delete table in preparation for next test
-   *
-   * @param tablename
-   * @throws IOException
    */
   void cleanupTable(TableName tablename) throws Exception {
     if (tbl != null) {
       tbl.close();
       tbl = null;
     }
-
-    ((ClusterConnection) connection).clearRegionCache();
     deleteTable(TEST_UTIL, tablename);
   }
 
@@ -320,10 +312,8 @@
     Collection<ServerName> regionServers = status.getLiveServerMetrics().keySet();
     Map<ServerName, List<String>> mm = new HashMap<>();
     for (ServerName hsi : regionServers) {
-      AdminProtos.AdminService.BlockingInterface server = connection.getAdmin(hsi);
-      // list all online regions from this region server
-      List<RegionInfo> regions = ProtobufUtil.getOnlineRegions(server);
+      List<RegionInfo> regions = admin.getRegions(hsi);
       List<String> regionNames = new ArrayList<>(regions.size());
       for (RegionInfo hri : regions) {
         regionNames.add(hri.getRegionNameAsString());
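
Enumerating a region server's online regions now takes one Admin call instead of fetching a protobuf admin stub and decoding its response. A sketch of the new enumeration, assuming a live cluster (class and variable names are illustrative):

  import java.util.List;
  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.ServerName;
  import org.apache.hadoop.hbase.client.Admin;
  import org.apache.hadoop.hbase.client.Connection;
  import org.apache.hadoop.hbase.client.ConnectionFactory;
  import org.apache.hadoop.hbase.client.RegionInfo;

  public class OnlineRegionsSketch {
    public static void main(String[] args) throws Exception {
      Configuration conf = HBaseConfiguration.create();
      try (Connection conn = ConnectionFactory.createConnection(conf);
          Admin admin = conn.getAdmin()) {
        for (ServerName sn : admin.getClusterMetrics().getLiveServerMetrics().keySet()) {
          List<RegionInfo> regions = admin.getRegions(sn); // one RPC per server
          regions.forEach(r -> System.out.println(sn + " -> " + r.getRegionNameAsString()));
        }
      }
    }
  }
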
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedReader.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedReader.java
index 68643663fc..6bf64c96f3 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedReader.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedReader.java
@@ -27,7 +27,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HRegionLocation;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Get;
-
+import org.apache.hadoop.hbase.client.RegionLocator;
 import org.apache.hadoop.hbase.client.Consistency;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.Table;
@@ -377,8 +377,10 @@ public class MultiThreadedReader extends MultiThreadedAction
           numKeysVerified.incrementAndGet();
         }
       } else {
-        HRegionLocation hloc = connection.getRegionLocation(tableName,
-          get.getRow(), false);
+        HRegionLocation hloc;
+        try (RegionLocator locator = connection.getRegionLocator(tableName)) {
+          hloc = locator.getRegionLocation(get.getRow());
+        }
         String rowKey = Bytes.toString(get.getRow());
         LOG.info("Key = " + rowKey + ", Region location: " + hloc);
         if(isNullExpected) {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedWriterBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedWriterBase.java
index 54be0d3f88..7547b0b7d7 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedWriterBase.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedWriterBase.java
@@ -31,6 +31,7 @@ import java.util.concurrent.atomic.AtomicLong;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HRegionLocation;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.RegionLocator;
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.util.test.LoadTestDataGenerator;
 import org.slf4j.Logger;
@@ -97,9 +98,9 @@ public abstract class MultiThreadedWriterBase extends MultiThreadedAction {
   protected String getRegionDebugInfoSafe(Table table, byte[] rowKey) {
     HRegionLocation cached = null, real = null;
-    try {
-      cached = connection.getRegionLocation(tableName, rowKey, false);
-      real = connection.getRegionLocation(tableName, rowKey, true);
+    try (RegionLocator locator = connection.getRegionLocator(tableName)) {
+      cached = locator.getRegionLocation(rowKey, false);
+      real = locator.getRegionLocation(rowKey, true);
     } catch (Throwable t) {
       // Cannot obtain region information for another catch block - too bad!
     }
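
getRegionDebugInfoSafe keeps its cached-versus-fresh comparison, but now through RegionLocator's two-argument getRegionLocation(row, reload). A sketch isolating that comparison, assuming a live cluster (table and row are illustrative):

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.HRegionLocation;
  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.client.Connection;
  import org.apache.hadoop.hbase.client.ConnectionFactory;
  import org.apache.hadoop.hbase.client.RegionLocator;
  import org.apache.hadoop.hbase.util.Bytes;

  public class CachedVsRealLocationSketch {
    public static void main(String[] args) throws Exception {
      Configuration conf = HBaseConfiguration.create();
      try (Connection conn = ConnectionFactory.createConnection(conf);
          RegionLocator locator = conn.getRegionLocator(TableName.valueOf("usertable"))) {
        byte[] row = Bytes.toBytes("some-row");
        HRegionLocation cached = locator.getRegionLocation(row, false); // from client cache
        HRegionLocation real = locator.getRegionLocation(row, true);    // forced fresh lookup
        if (!cached.equals(real)) {
          System.out.println("stale cache: " + cached + " vs " + real);
        }
      }
    }
  }
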
diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/client/ThriftConnection.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/client/ThriftConnection.java
index a0f902a811..f747c3ac8e 100644
--- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/client/ThriftConnection.java
+++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/client/ThriftConnection.java
@@ -30,11 +30,13 @@ import java.util.concurrent.ExecutorService;
 import org.apache.commons.lang3.NotImplementedException;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.BufferedMutator;
 import org.apache.hadoop.hbase.client.BufferedMutatorParams;
 import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.Hbck;
 import org.apache.hadoop.hbase.client.RegionLocator;
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.client.TableBuilder;
diff --git a/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift2/TestThriftConnection.java b/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift2/TestThriftConnection.java
index 158361958f..24a56a584c 100644
--- a/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift2/TestThriftConnection.java
+++ b/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift2/TestThriftConnection.java
@@ -41,11 +41,11 @@ import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.NamespaceDescriptor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Admin;
-import org.apache.hadoop.hbase.client.ClusterConnection;
 import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
 import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
+import org.apache.hadoop.hbase.client.ConnectionUtils;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Durability;
 import org.apache.hadoop.hbase.client.Get;
@@ -143,7 +143,7 @@ public class TestThriftConnection {
   private static Connection createConnection(int port, boolean useHttp) throws IOException {
     Configuration conf = HBaseConfiguration.create(TEST_UTIL.getConfiguration());
-    conf.set(ClusterConnection.HBASE_CLIENT_CONNECTION_IMPL,
+    conf.set(ConnectionUtils.HBASE_CLIENT_CONNECTION_IMPL,
       ThriftConnection.class.getName());
     if (useHttp) {
       conf.set(Constants.HBASE_THRIFT_CLIENT_BUIDLER_CLASS,
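
The last hunk relocates the pluggable-connection key: tests now select the thrift2 client by class name through ConnectionUtils.HBASE_CLIENT_CONNECTION_IMPL rather than the removed ClusterConnection constant. A sketch of the selection, assuming a thrift2 server that ThriftConnection can reach (the class name string is the real thrift2 client; the wrapper class is illustrative):

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.client.Connection;
  import org.apache.hadoop.hbase.client.ConnectionFactory;
  import org.apache.hadoop.hbase.client.ConnectionUtils;

  public class ConnectionImplSketch {
    public static void main(String[] args) throws Exception {
      Configuration conf = HBaseConfiguration.create();
      // ConnectionFactory instantiates whatever class this key names.
      conf.set(ConnectionUtils.HBASE_CLIENT_CONNECTION_IMPL,
          "org.apache.hadoop.hbase.thrift2.client.ThriftConnection");
      try (Connection conn = ConnectionFactory.createConnection(conf)) {
        System.out.println("connection impl: " + conn.getClass().getName());
      }
    }
  }
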
-- 
2.17.1