From c9096f583e6640324efe262f30f4894bf2acb88e Mon Sep 17 00:00:00 2001 From: zhangduo Date: Wed, 6 Feb 2019 19:54:14 +0800 Subject: [PATCH] HBASE-21585 Remove ClusterConnection --- .../hbase/client/BufferedMutatorImpl.java | 3 +- .../CancellableRegionServerCallable.java | 6 +- .../hadoop/hbase/client/ClientScanner.java | 16 +- .../hbase/client/ClientServiceCallable.java | 12 +- .../hbase/client/ClusterConnection.java | 183 ------------------ .../hbase/client/ConnectionFactory.java | 2 +- .../client/ConnectionImplementation.java | 140 ++++++++++---- .../hadoop/hbase/client/ConnectionUtils.java | 12 +- .../hadoop/hbase/client/HBaseAdmin.java | 64 +++--- .../apache/hadoop/hbase/client/HBaseHbck.java | 17 +- .../apache/hadoop/hbase/client/HTable.java | 27 ++- .../org/apache/hadoop/hbase/client/Hbck.java | 4 +- .../hadoop/hbase/client/MasterCallable.java | 7 +- .../hbase/client/MultiServerCallable.java | 12 +- .../client/NoncedRegionServerCallable.java | 4 +- .../client/RegionCoprocessorRpcChannel.java | 4 +- .../hbase/client/RegionServerCallable.java | 12 +- .../hbase/client/ReversedClientScanner.java | 7 - .../hbase/client/ReversedScannerCallable.java | 8 +- .../hadoop/hbase/client/ScannerCallable.java | 11 +- .../client/ScannerCallableWithReplicas.java | 2 +- .../hadoop/hbase/client/TestAsyncProcess.java | 7 +- .../hbase/client/TestBufferedMutator.java | 3 +- ...onServerBulkLoadWithOldSecureEndpoint.java | 69 ++----- .../hadoop/hbase/DistributedHBaseCluster.java | 27 +-- .../TestMultiTableInputFormatBase.java | 4 +- .../mapreduce/TestTableInputFormatBase.java | 10 +- .../java/org/apache/hadoop/hbase/Server.java | 9 - .../backup/example/ZKTableArchiveClient.java | 11 +- .../hbase/client/AsyncClusterConnection.java | 2 +- .../apache/hadoop/hbase/master/HMaster.java | 10 +- .../assignment/AssignmentManagerUtil.java | 20 +- .../quotas/RegionServerSpaceQuotaManager.java | 2 +- ...isableTableViolationPolicyEnforcement.java | 16 +- .../hbase/regionserver/HRegionServer.java 
| 39 ++-- .../regionserver/DumpReplicationQueues.java | 4 +- .../regionserver/ReplicationSyncUp.java | 8 +- .../apache/hadoop/hbase/util/HBaseFsck.java | 39 ++-- .../hadoop/hbase/util/HBaseFsckRepair.java | 1 - .../hadoop/hbase/util/MultiHConnection.java | 15 +- .../hadoop/hbase/util/RegionSplitter.java | 15 +- .../resources/hbase-webapps/master/table.jsp | 2 +- .../org/apache/hadoop/hbase/HBaseCluster.java | 23 --- .../apache/hadoop/hbase/MiniHBaseCluster.java | 24 --- .../hbase/MockRegionServerServices.java | 10 +- .../TestZooKeeperTableArchiveClient.java | 6 +- .../client/HConnectionTestingUtility.java | 20 +- .../hadoop/hbase/client/TestAdmin1.java | 29 ++- .../hadoop/hbase/client/TestAdmin2.java | 2 +- .../hbase/client/TestAsyncTableAdminApi.java | 2 +- .../hadoop/hbase/client/TestCISleep.java | 12 +- .../hbase/client/TestClientPushback.java | 6 +- .../client/TestConnectionImplementation.java | 5 +- .../hbase/client/TestFromClientSide3.java | 6 +- .../TestMetaTableAccessorNoCluster.java | 10 +- .../hbase/client/TestMetaWithReplicas.java | 37 ++-- .../hbase/client/TestMultiParallel.java | 5 +- .../hbase/client/TestReplicasClient.java | 22 +-- .../client/TestSeparateClientZKCluster.java | 2 +- .../client/TestShortCircuitConnection.java | 2 +- .../hbase/master/MockNoopMasterServices.java | 8 +- .../hadoop/hbase/master/MockRegionServer.java | 8 +- .../hbase/master/TestActiveMasterManager.java | 9 +- .../hbase/master/TestClockSkewDetection.java | 13 +- .../hbase/master/TestMasterNoCluster.java | 13 +- .../master/assignment/MockMasterServices.java | 6 +- .../master/cleaner/TestHFileCleaner.java | 9 +- .../master/cleaner/TestHFileLinkCleaner.java | 10 +- .../hbase/master/cleaner/TestLogsCleaner.java | 8 +- .../cleaner/TestReplicationHFileCleaner.java | 9 +- .../MasterProcedureTestingUtility.java | 4 +- .../TestHRegionServerBulkLoad.java | 35 +--- ...estHRegionServerBulkLoadWithOldClient.java | 69 ++----- .../regionserver/TestHeapMemoryManager.java | 9 +- 
.../regionserver/TestSplitLogWorker.java | 9 +- .../hbase/regionserver/TestWALLockup.java | 8 +- .../TestReplicationTrackerZKImpl.java | 9 +- .../TestReplicationSourceManager.java | 10 +- .../token/TestTokenAuthentication.java | 9 +- .../hadoop/hbase/util/BaseTestHBaseFsck.java | 13 +- .../apache/hadoop/hbase/util/MockServer.java | 10 +- .../hbase/util/MultiThreadedAction.java | 6 +- .../hadoop/hbase/util/TestHBaseFsckMOB.java | 3 +- .../thrift2/client/ThriftConnection.java | 2 - .../hbase/thrift2/TestThriftConnection.java | 4 +- 85 files changed, 462 insertions(+), 920 deletions(-) delete mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterConnection.java diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BufferedMutatorImpl.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BufferedMutatorImpl.java index 9740ce2cc3..4b370b22c8 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BufferedMutatorImpl.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BufferedMutatorImpl.java @@ -95,7 +95,8 @@ public class BufferedMutatorImpl implements BufferedMutator { private final AsyncProcess ap; @VisibleForTesting - BufferedMutatorImpl(ClusterConnection conn, BufferedMutatorParams params, AsyncProcess ap) { + BufferedMutatorImpl(ConnectionImplementation conn, BufferedMutatorParams params, + AsyncProcess ap) { if (conn == null || conn.isClosed()) { throw new IllegalArgumentException("Connection is null or closed."); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CancellableRegionServerCallable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CancellableRegionServerCallable.java index 6ad9254e35..f81018ea15 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CancellableRegionServerCallable.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CancellableRegionServerCallable.java @@ -39,8 +39,10 @@ abstract 
class CancellableRegionServerCallable extends ClientServiceCallable< Cancellable { private final RetryingTimeTracker tracker; private final int rpcTimeout; - CancellableRegionServerCallable(Connection connection, TableName tableName, byte[] row, - RpcController rpcController, int rpcTimeout, RetryingTimeTracker tracker, int priority) { + + CancellableRegionServerCallable(ConnectionImplementation connection, TableName tableName, + byte[] row, RpcController rpcController, int rpcTimeout, RetryingTimeTracker tracker, + int priority) { super(connection, tableName, row, rpcController, priority); this.rpcTimeout = rpcTimeout; this.tracker = tracker; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientScanner.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientScanner.java index ee95a24498..0b5bb2cb5c 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientScanner.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientScanner.java @@ -21,32 +21,31 @@ import static org.apache.hadoop.hbase.client.ConnectionUtils.calcEstimatedSize; import static org.apache.hadoop.hbase.client.ConnectionUtils.createScanResultCache; import static org.apache.hadoop.hbase.client.ConnectionUtils.incRegionCountMetrics; -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; - import java.io.IOException; import java.io.InterruptedIOException; import java.util.ArrayDeque; import java.util.Queue; import java.util.concurrent.ExecutorService; - import org.apache.commons.lang3.mutable.MutableBoolean; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.client.ScannerCallable.MoreResults; import org.apache.hadoop.hbase.DoNotRetryIOException; -import org.apache.hadoop.hbase.exceptions.OutOfOrderScannerNextException; -import org.apache.hadoop.hbase.exceptions.ScannerResetException; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionInfo; 
-import org.apache.hadoop.hbase.ipc.RpcControllerFactory; import org.apache.hadoop.hbase.NotServingRegionException; -import org.apache.hadoop.hbase.regionserver.RegionServerStoppedException; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.UnknownScannerException; +import org.apache.hadoop.hbase.client.ScannerCallable.MoreResults; +import org.apache.hadoop.hbase.exceptions.OutOfOrderScannerNextException; +import org.apache.hadoop.hbase.exceptions.ScannerResetException; +import org.apache.hadoop.hbase.ipc.RpcControllerFactory; +import org.apache.hadoop.hbase.regionserver.RegionServerStoppedException; import org.apache.hadoop.hbase.util.Bytes; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; + import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; /** @@ -94,7 +93,6 @@ public abstract class ClientScanner extends AbstractClientScanner { * @param scan {@link Scan} to use in this scanner * @param tableName The table that we wish to scan * @param connection Connection identifying the cluster - * @throws IOException */ public ClientScanner(final Configuration conf, final Scan scan, final TableName tableName, ConnectionImplementation connection, RpcRetryingCallerFactory rpcFactory, diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientServiceCallable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientServiceCallable.java index 67ba838cef..c7006a87da 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientServiceCallable.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientServiceCallable.java @@ -31,12 +31,12 @@ import org.apache.hbase.thirdparty.com.google.protobuf.RpcController; * @param */ @InterfaceAudience.Private -public abstract class ClientServiceCallable extends - RegionServerCallable { +public abstract 
class ClientServiceCallable + extends RegionServerCallable { - public ClientServiceCallable(Connection connection, TableName tableName, byte[] row, + public ClientServiceCallable(ConnectionImplementation connection, TableName tableName, byte[] row, RpcController rpcController, int priority) { - super((ConnectionImplementation) connection, tableName, row, rpcController, priority); + super(connection, tableName, row, rpcController, priority); } @Override @@ -46,12 +46,12 @@ public abstract class ClientServiceCallable extends // Below here are simple methods that contain the stub and the rpcController. protected ClientProtos.GetResponse doGet(ClientProtos.GetRequest request) - throws org.apache.hbase.thirdparty.com.google.protobuf.ServiceException { + throws org.apache.hbase.thirdparty.com.google.protobuf.ServiceException { return getStub().get(getRpcController(), request); } protected ClientProtos.MutateResponse doMutate(ClientProtos.MutateRequest request) - throws org.apache.hbase.thirdparty.com.google.protobuf.ServiceException { + throws org.apache.hbase.thirdparty.com.google.protobuf.ServiceException { return getStub().mutate(getRpcController(), request); } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterConnection.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterConnection.java deleted file mode 100644 index 7828ef0037..0000000000 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterConnection.java +++ /dev/null @@ -1,183 +0,0 @@ -/** - * - - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase.client; - -import java.io.IOException; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.MasterNotRunningException; -import org.apache.hadoop.hbase.ServerName; -import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.ZooKeeperConnectionException; -import org.apache.hadoop.hbase.client.backoff.ClientBackoffPolicy; -import org.apache.hadoop.hbase.ipc.RpcControllerFactory; -import org.apache.yetus.audience.InterfaceAudience; - -import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService; -import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ClientService; - -/** Internal methods on Connection that should not be used by user code. */ -@InterfaceAudience.Private -// NOTE: Although this class is public, this class is meant to be used directly from internal -// classes and unit tests only. -public interface ClusterConnection extends Connection { - - /** - * Key for configuration in Configuration whose value is the class we implement making a - * new Connection instance. - */ - String HBASE_CLIENT_CONNECTION_IMPL = "hbase.client.connection.impl"; - - /** - * @return - true if the master server is running - * @deprecated this has been deprecated without a replacement - */ - @Deprecated - boolean isMasterRunning() - throws MasterNotRunningException, ZooKeeperConnectionException; - - /** - * Use this api to check if the table has been created with the specified number of - * splitkeys which was used while creating the given table. 
- * Note : If this api is used after a table's region gets splitted, the api may return - * false. - * @param tableName - * tableName - * @param splitKeys - * splitKeys used while creating table - * @throws IOException - * if a remote or network exception occurs - */ - boolean isTableAvailable(TableName tableName, byte[][] splitKeys) throws - IOException; - - /** - * A table that isTableEnabled == false and isTableDisabled == false - * is possible. This happens when a table has a lot of regions - * that must be processed. - * @param tableName table name - * @return true if the table is enabled, false otherwise - * @throws IOException if a remote or network exception occurs - */ - boolean isTableEnabled(TableName tableName) throws IOException; - - /** - * @param tableName table name - * @return true if the table is disabled, false otherwise - * @throws IOException if a remote or network exception occurs - */ - boolean isTableDisabled(TableName tableName) throws IOException; - - /** - * Retrieve TableState, represent current table state. - * @param tableName table state for - * @return state of the table - */ - TableState getTableState(TableName tableName) throws IOException; - - /** - * Returns a {@link MasterKeepAliveConnection} to the active master - */ - MasterKeepAliveConnection getMaster() throws IOException; - - /** - * Get the admin service for master. - */ - AdminService.BlockingInterface getAdminForMaster() throws IOException; - - /** - * Establishes a connection to the region server at the specified address. - * @param serverName the region server to connect to - * @return proxy for HRegionServer - * @throws IOException if a remote or network exception occurs - */ - AdminService.BlockingInterface getAdmin(final ServerName serverName) throws IOException; - - /** - * Establishes a connection to the region server at the specified address, and returns - * a region client protocol. 
- * - * @param serverName the region server to connect to - * @return ClientProtocol proxy for RegionServer - * @throws IOException if a remote or network exception occurs - * - */ - ClientService.BlockingInterface getClient(final ServerName serverName) throws IOException; - - /** - * @return Nonce generator for this ClusterConnection; may be null if disabled in configuration. - */ - NonceGenerator getNonceGenerator(); - - /** - * @return Default AsyncProcess associated with this connection. - */ - AsyncProcess getAsyncProcess(); - - /** - * Returns a new RpcRetryingCallerFactory from the given {@link Configuration}. - * This RpcRetryingCallerFactory lets the users create {@link RpcRetryingCaller}s which can be - * intercepted with the configured {@link RetryingCallerInterceptor} - * @param conf configuration - * @return RpcRetryingCallerFactory - */ - RpcRetryingCallerFactory getNewRpcRetryingCallerFactory(Configuration conf); - - /** - * @return Connection's RpcRetryingCallerFactory instance - */ - RpcRetryingCallerFactory getRpcRetryingCallerFactory(); - - /** - * @return Connection's RpcControllerFactory instance - */ - RpcControllerFactory getRpcControllerFactory(); - - /** - * @return a ConnectionConfiguration object holding parsed configuration values - */ - ConnectionConfiguration getConnectionConfiguration(); - - /** - * @return the current statistics tracker associated with this connection - */ - ServerStatisticTracker getStatisticsTracker(); - - /** - * @return the configured client backoff policy - */ - ClientBackoffPolicy getBackoffPolicy(); - - /** - * @return the MetricsConnection instance associated with this connection. - */ - MetricsConnection getConnectionMetrics(); - - /** - * @return true when this connection uses a {@link org.apache.hadoop.hbase.codec.Codec} and so - * supports cell blocks. 
- */ - boolean hasCellBlockSupport(); - - /** - * @return the number of region servers that are currently running - * @throws IOException if a remote or network exception occurs - */ - int getCurrentNrHRS() throws IOException; -} diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionFactory.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionFactory.java index d4e50edef5..bcb5d1ab50 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionFactory.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionFactory.java @@ -212,7 +212,7 @@ public class ConnectionFactory { */ public static Connection createConnection(Configuration conf, ExecutorService pool, final User user) throws IOException { - String className = conf.get(ClusterConnection.HBASE_CLIENT_CONNECTION_IMPL, + String className = conf.get(ConnectionUtils.HBASE_CLIENT_CONNECTION_IMPL, ConnectionImplementation.class.getName()); Class clazz; try { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java index c72d97dd01..307e8e0bee 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java @@ -146,7 +146,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.Updat value="AT_OPERATION_SEQUENCE_ON_CONCURRENT_ABSTRACTION", justification="Access to the conncurrent hash map is under a lock so should be fine.") @InterfaceAudience.Private -class ConnectionImplementation implements ClusterConnection, Closeable { +class ConnectionImplementation implements Connection, Closeable { public static final String RETRIES_BY_SERVER_KEY = "hbase.client.retries.by.server"; private static final Logger LOG = 
LoggerFactory.getLogger(ConnectionImplementation.class); @@ -348,9 +348,8 @@ class ConnectionImplementation implements ClusterConnection, Closeable { */ @VisibleForTesting static NonceGenerator injectNonceGeneratorForTesting( - ClusterConnection conn, NonceGenerator cnm) { - ConnectionImplementation connImpl = (ConnectionImplementation)conn; - NonceGenerator ng = connImpl.getNonceGenerator(); + ConnectionImplementation conn, NonceGenerator cnm) { + NonceGenerator ng = conn.getNonceGenerator(); LOG.warn("Nonce generator is being replaced by test code for " + cnm.getClass().getName()); nonceGenerator = cnm; @@ -450,7 +449,9 @@ class ConnectionImplementation implements ClusterConnection, Closeable { }), rpcControllerFactory); } - @Override + /** + * @return the MetricsConnection instance associated with this connection. + */ public MetricsConnection getConnectionMetrics() { return this.metrics; } @@ -594,7 +595,6 @@ class ConnectionImplementation implements ClusterConnection, Closeable { * @deprecated this has been deprecated without a replacement */ @Deprecated - @Override public boolean isMasterRunning() throws MasterNotRunningException, ZooKeeperConnectionException { // When getting the master connection, we check it's running, // so if there is no exception, it means we've been able to get a @@ -622,18 +622,39 @@ class ConnectionImplementation implements ClusterConnection, Closeable { return reload ? relocateRegion(tableName, row) : locateRegion(tableName, row); } - - @Override + /** + * A table that isTableEnabled == false and isTableDisabled == false + * is possible. This happens when a table has a lot of regions + * that must be processed. 
+ * @param tableName table name + * @return true if the table is enabled, false otherwise + * @throws IOException if a remote or network exception occurs + */ public boolean isTableEnabled(TableName tableName) throws IOException { return getTableState(tableName).inStates(TableState.State.ENABLED); } - @Override + /** + * @param tableName table name + * @return true if the table is disabled, false otherwise + * @throws IOException if a remote or network exception occurs + */ public boolean isTableDisabled(TableName tableName) throws IOException { return getTableState(tableName).inStates(TableState.State.DISABLED); } - @Override + /** + * Use this api to check if the table has been created with the specified number of + * splitkeys which was used while creating the given table. + * Note : If this api is used after a table's region gets splitted, the api may return + * false. + * @param tableName + * tableName + * @param splitKeys + * splitKeys used while creating table + * @throws IOException + * if a remote or network exception occurs + */ public boolean isTableAvailable(final TableName tableName, @Nullable final byte[][] splitKeys) throws IOException { checkClosed(); @@ -803,15 +824,14 @@ class ConnectionImplementation implements ClusterConnection, Closeable { } /** - * - * @param tableName table to get regions of - * @param row the row - * @param useCache Should we use the cache to retrieve the region information. - * @param retry do we retry - * @param replicaId the replicaId for the region - * @return region locations for this row. - * @throws IOException if IO failure occurs - */ + * @param tableName table to get regions of + * @param row the row + * @param useCache Should we use the cache to retrieve the region information. + * @param retry do we retry + * @param replicaId the replicaId for the region + * @return region locations for this row. 
+ * @throws IOException if IO failure occurs + */ RegionLocations locateRegion(final TableName tableName, final byte[] row, boolean useCache, boolean retry, int replicaId) throws IOException { checkClosed(); @@ -1042,6 +1062,10 @@ class ConnectionImplementation implements ClusterConnection, Closeable { metaCache.clearCache(serverName); } + + /** + * Allows flushing the region cache. + */ @Override public void clearRegionLocationCache() { metaCache.clearCache(); @@ -1252,12 +1276,19 @@ class ConnectionImplementation implements ClusterConnection, Closeable { } } - @Override + /** + * Get the admin service for master. + */ public AdminProtos.AdminService.BlockingInterface getAdminForMaster() throws IOException { return getAdmin(get(registry.getMasterAddress())); } - @Override + /** + * Establishes a connection to the region server at the specified address. + * @param serverName the region server to connect to + * @return proxy for HRegionServer + * @throws IOException if a remote or network exception occurs + */ public AdminProtos.AdminService.BlockingInterface getAdmin(ServerName serverName) throws IOException { checkClosed(); @@ -1273,7 +1304,13 @@ class ConnectionImplementation implements ClusterConnection, Closeable { }); } - @Override + /** + * Establishes a connection to the region server at the specified address, and returns a region + * client protocol. 
+ * @param serverName the region server to connect to + * @return ClientProtocol proxy for RegionServer + * @throws IOException if a remote or network exception occurs + */ public BlockingInterface getClient(ServerName serverName) throws IOException { checkClosed(); if (isDeadServer(serverName)) { @@ -1283,14 +1320,13 @@ class ConnectionImplementation implements ClusterConnection, Closeable { serverName, this.hostnamesCanChange); return (ClientProtos.ClientService.BlockingInterface) computeIfAbsentEx(stubs, key, () -> { BlockingRpcChannel channel = - this.rpcClient.createBlockingRpcChannel(serverName, user, rpcTimeout); + this.rpcClient.createBlockingRpcChannel(serverName, user, rpcTimeout); return ClientProtos.ClientService.newBlockingStub(channel); }); } final MasterServiceState masterServiceState = new MasterServiceState(this); - @Override public MasterKeepAliveConnection getMaster() throws IOException { return getKeepAliveMasterService(); } @@ -1896,6 +1932,10 @@ class ConnectionImplementation implements ClusterConnection, Closeable { cacheLocation(hri.getTable(), source, newHrl); } + /** + * Deletes cached locations for the specific region. + * @param location The location object for the region, to be purged from cache. + */ void deleteCachedRegionLocation(final HRegionLocation location) { metaCache.clearCache(location); } @@ -1974,17 +2014,23 @@ class ConnectionImplementation implements ClusterConnection, Closeable { metaCache.clearCache(regionInfo); } - @Override + /** + * @return Default AsyncProcess associated with this connection. 
+ */ public AsyncProcess getAsyncProcess() { return asyncProcess; } - @Override + /** + * @return the current statistics tracker associated with this connection + */ public ServerStatisticTracker getStatisticsTracker() { return this.stats; } - @Override + /** + * @return the configured client backoff policy + */ public ClientBackoffPolicy getBackoffPolicy() { return this.backoffPolicy; } @@ -2020,7 +2066,10 @@ class ConnectionImplementation implements ClusterConnection, Closeable { return this.aborted; } - @Override + /** + * @return the number of region servers that are currently running + * @throws IOException if a remote or network exception occurs + */ public int getCurrentNrHRS() throws IOException { return get(this.registry.getCurrentNrHRS()); } @@ -2063,12 +2112,18 @@ class ConnectionImplementation implements ClusterConnection, Closeable { close(); } - @Override + /** + * @return Nonce generator for this ClusterConnection; may be null if disabled in configuration. + */ public NonceGenerator getNonceGenerator() { return nonceGenerator; } - @Override + /** + * Retrieve TableState, represent current table state. + * @param tableName table state for + * @return state of the table + */ public TableState getTableState(TableName tableName) throws IOException { checkClosed(); TableState tableState = MetaTableAccessor.getTableState(this, tableName); @@ -2078,28 +2133,43 @@ class ConnectionImplementation implements ClusterConnection, Closeable { return tableState; } - @Override + /** + * Returns a new RpcRetryingCallerFactory from the given {@link Configuration}. 
+ * This RpcRetryingCallerFactory lets the users create {@link RpcRetryingCaller}s which can be + * intercepted with the configured {@link RetryingCallerInterceptor} + * @param conf configuration + * @return RpcRetryingCallerFactory + */ public RpcRetryingCallerFactory getNewRpcRetryingCallerFactory(Configuration conf) { return RpcRetryingCallerFactory .instantiate(conf, this.interceptor, this.getStatisticsTracker()); } - @Override + /** + * @return true when this connection uses a {@link org.apache.hadoop.hbase.codec.Codec} and so + * supports cell blocks. + */ public boolean hasCellBlockSupport() { return this.rpcClient.hasCellBlockSupport(); } - @Override + /** + * @return a ConnectionConfiguration object holding parsed configuration values + */ public ConnectionConfiguration getConnectionConfiguration() { return this.connectionConfig; } - @Override + /** + * @return Connection's RpcRetryingCallerFactory instance + */ public RpcRetryingCallerFactory getRpcRetryingCallerFactory() { return this.rpcCallerFactory; } - @Override + /** + * @return Connection's RpcControllerFactory instance + */ public RpcControllerFactory getRpcControllerFactory() { return this.rpcControllerFactory; } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java index 27079e89b0..34c2ec077f 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java @@ -74,6 +74,12 @@ public final class ConnectionUtils { private static final Logger LOG = LoggerFactory.getLogger(ConnectionUtils.class); + /** + * Key for configuration in Configuration whose value is the class we implement making a new + * Connection instance. 
+ */ + public static final String HBASE_CLIENT_CONNECTION_IMPL = "hbase.client.connection.impl"; + private ConnectionUtils() { } @@ -103,7 +109,7 @@ public final class ConnectionUtils { * @param cnm Replaces the nonce generator used, for testing. * @return old nonce generator. */ - public static NonceGenerator injectNonceGeneratorForTesting(ClusterConnection conn, + public static NonceGenerator injectNonceGeneratorForTesting(ConnectionImplementation conn, NonceGenerator cnm) { return ConnectionImplementation.injectNonceGeneratorForTesting(conn, cnm); } @@ -180,7 +186,7 @@ public final class ConnectionUtils { * @return an short-circuit connection. * @throws IOException if IO failure occurred */ - public static ClusterConnection createShortCircuitConnection(final Configuration conf, + public static ConnectionImplementation createShortCircuitConnection(final Configuration conf, ExecutorService pool, User user, final ServerName serverName, final AdminService.BlockingInterface admin, final ClientService.BlockingInterface client) throws IOException { @@ -196,7 +202,7 @@ public final class ConnectionUtils { */ @VisibleForTesting public static void setupMasterlessConnection(Configuration conf) { - conf.set(ClusterConnection.HBASE_CLIENT_CONNECTION_IMPL, MasterlessConnection.class.getName()); + conf.set(ConnectionUtils.HBASE_CLIENT_CONNECTION_IMPL, MasterlessConnection.class.getName()); } /** diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java index a447b0ca2e..ccb05588b2 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java @@ -452,7 +452,7 @@ public class HBaseAdmin implements Admin { /** @return Connection used by this object. 
*/ @Override - public Connection getConnection() { + public ConnectionImplementation getConnection() { return connection; } @@ -544,23 +544,24 @@ public class HBaseAdmin implements Admin { operationTimeout, rpcTimeout); } - static TableDescriptor getTableDescriptor(final TableName tableName, Connection connection, - RpcRetryingCallerFactory rpcCallerFactory, final RpcControllerFactory rpcControllerFactory, - int operationTimeout, int rpcTimeout) throws IOException { + static TableDescriptor getTableDescriptor(final TableName tableName, + ConnectionImplementation connection, RpcRetryingCallerFactory rpcCallerFactory, + final RpcControllerFactory rpcControllerFactory, int operationTimeout, int rpcTimeout) + throws IOException { if (tableName == null) return null; TableDescriptor td = - executeCallable(new MasterCallable(connection, rpcControllerFactory) { - @Override - protected TableDescriptor rpcCall() throws Exception { - GetTableDescriptorsRequest req = + executeCallable(new MasterCallable(connection, rpcControllerFactory) { + @Override + protected TableDescriptor rpcCall() throws Exception { + GetTableDescriptorsRequest req = RequestConverter.buildGetTableDescriptorsRequest(tableName); - GetTableDescriptorsResponse htds = master.getTableDescriptors(getRpcController(), req); - if (!htds.getTableSchemaList().isEmpty()) { - return ProtobufUtil.toTableDescriptor(htds.getTableSchemaList().get(0)); + GetTableDescriptorsResponse htds = master.getTableDescriptors(getRpcController(), req); + if (!htds.getTableSchemaList().isEmpty()) { + return ProtobufUtil.toTableDescriptor(htds.getTableSchemaList().get(0)); + } + return null; } - return null; - } - }, rpcCallerFactory, operationTimeout, rpcTimeout); + }, rpcCallerFactory, operationTimeout, rpcTimeout); if (td != null) { return td; } @@ -573,26 +574,27 @@ public class HBaseAdmin implements Admin { * Connection, RpcRetryingCallerFactory,RpcControllerFactory,int,int)} */ @Deprecated - static HTableDescriptor 
getHTableDescriptor(final TableName tableName, Connection connection, - RpcRetryingCallerFactory rpcCallerFactory, final RpcControllerFactory rpcControllerFactory, - int operationTimeout, int rpcTimeout) throws IOException { + static HTableDescriptor getHTableDescriptor(final TableName tableName, + ConnectionImplementation connection, RpcRetryingCallerFactory rpcCallerFactory, + final RpcControllerFactory rpcControllerFactory, int operationTimeout, int rpcTimeout) + throws IOException { if (tableName == null) { return null; } HTableDescriptor htd = - executeCallable(new MasterCallable(connection, rpcControllerFactory) { - @Override - protected HTableDescriptor rpcCall() throws Exception { - GetTableDescriptorsRequest req = - RequestConverter.buildGetTableDescriptorsRequest(tableName); - GetTableDescriptorsResponse htds = master.getTableDescriptors(getRpcController(), req); - if (!htds.getTableSchemaList().isEmpty()) { - return new ImmutableHTableDescriptor( - ProtobufUtil.toTableDescriptor(htds.getTableSchemaList().get(0))); - } - return null; + executeCallable(new MasterCallable(connection, rpcControllerFactory) { + @Override + protected HTableDescriptor rpcCall() throws Exception { + GetTableDescriptorsRequest req = + RequestConverter.buildGetTableDescriptorsRequest(tableName); + GetTableDescriptorsResponse htds = master.getTableDescriptors(getRpcController(), req); + if (!htds.getTableSchemaList().isEmpty()) { + return new ImmutableHTableDescriptor( + ProtobufUtil.toTableDescriptor(htds.getTableSchemaList().get(0))); } - }, rpcCallerFactory, operationTimeout, rpcTimeout); + return null; + } + }, rpcCallerFactory, operationTimeout, rpcTimeout); if (htd != null) { return new ImmutableHTableDescriptor(htd); } @@ -2421,8 +2423,8 @@ public class HBaseAdmin implements Admin { // Check ZK first. 
// If the connection exists, we may have a connection to ZK that does not work anymore - try (ClusterConnection connection = - (ClusterConnection) ConnectionFactory.createConnection(copyOfConf)) { + try (ConnectionImplementation connection = + (ConnectionImplementation) ConnectionFactory.createConnection(copyOfConf)) { // can throw MasterNotRunningException connection.isMasterRunning(); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseHbck.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseHbck.java index a276017b0c..e7d0e5968a 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseHbck.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseHbck.java @@ -21,9 +21,14 @@ import java.io.IOException; import java.util.List; import java.util.concurrent.Callable; import java.util.stream.Collectors; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.ipc.RpcControllerFactory; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import org.apache.hbase.thirdparty.com.google.protobuf.ServiceException; + import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter; import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos; @@ -31,16 +36,9 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableStateResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.HbckService.BlockingInterface; -import org.apache.hbase.thirdparty.com.google.protobuf.ServiceException; - -import org.apache.yetus.audience.InterfaceAudience; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - /** - * Use {@link ClusterConnection#getHbck()} to obtain an instance of {@link Hbck} instead of + * Use {@link Connection#getHbck()} to 
obtain an instance of {@link Hbck} instead of * constructing an HBaseHbck directly. * *

Connection should be an unmanaged connection obtained via @@ -55,7 +53,6 @@ import org.slf4j.LoggerFactory; * by each thread. Pooling or caching of the instance is not recommended.

* * @see ConnectionFactory - * @see ClusterConnection * @see Hbck */ @InterfaceAudience.Private diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java index c77c216441..b17a670363 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java @@ -156,7 +156,6 @@ public class HTable implements Table { * @param rpcControllerFactory The RPC controller factory * @param pool ExecutorService to be used. */ - @InterfaceAudience.Private protected HTable(final ConnectionImplementation connection, final TableBuilderBase builder, final RpcRetryingCallerFactory rpcCallerFactory, @@ -463,22 +462,18 @@ public class HTable implements Table { } public static void doBatchWithCallback(List actions, Object[] results, - Callback callback, ClusterConnection connection, ExecutorService pool, TableName tableName) - throws InterruptedIOException, RetriesExhaustedWithDetailsException { - int operationTimeout = connection.getConnectionConfiguration().getOperationTimeout(); + Callback callback, Connection connection, ExecutorService pool, TableName tableName) + throws InterruptedIOException, RetriesExhaustedWithDetailsException { + ConnectionImplementation connImpl = (ConnectionImplementation) connection; + int operationTimeout = connImpl.getConnectionConfiguration().getOperationTimeout(); int writeTimeout = connection.getConfiguration().getInt(HConstants.HBASE_RPC_WRITE_TIMEOUT_KEY, - connection.getConfiguration().getInt(HConstants.HBASE_RPC_TIMEOUT_KEY, - HConstants.DEFAULT_HBASE_RPC_TIMEOUT)); - AsyncProcessTask task = AsyncProcessTask.newBuilder(callback) - .setPool(pool) - .setTableName(tableName) - .setRowAccess(actions) - .setResults(results) - .setOperationTimeout(operationTimeout) - .setRpcTimeout(writeTimeout) - .setSubmittedRows(AsyncProcessTask.SubmittedRows.ALL) - .build(); - 
AsyncRequestFuture ars = connection.getAsyncProcess().submit(task); + connection.getConfiguration().getInt(HConstants.HBASE_RPC_TIMEOUT_KEY, + HConstants.DEFAULT_HBASE_RPC_TIMEOUT)); + AsyncProcessTask task = + AsyncProcessTask.newBuilder(callback).setPool(pool).setTableName(tableName) + .setRowAccess(actions).setResults(results).setOperationTimeout(operationTimeout) + .setRpcTimeout(writeTimeout).setSubmittedRows(AsyncProcessTask.SubmittedRows.ALL).build(); + AsyncRequestFuture ars = connImpl.getAsyncProcess().submit(task); ars.waitUntilDone(); if (ars.hasError()) { throw ars.getErrors(); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Hbck.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Hbck.java index e88805cdcc..fdecde99ce 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Hbck.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Hbck.java @@ -20,7 +20,6 @@ package org.apache.hadoop.hbase.client; import java.io.Closeable; import java.io.IOException; import java.util.List; - import org.apache.hadoop.hbase.Abortable; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.yetus.audience.InterfaceAudience; @@ -28,7 +27,7 @@ import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos; /** - * Hbck fixup tool APIs. Obtain an instance from {@link ClusterConnection#getHbck()} and call + * Hbck fixup tool APIs. Obtain an instance from {@link Connection#getHbck()} and call * {@link #close()} when done. *

WARNING: the below methods can damage the cluster. It may leave the cluster in an * indeterminate state, e.g. region not assigned, or some hdfs files left behind. After running @@ -36,7 +35,6 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos; * procedures to get regions back online. DO AT YOUR OWN RISK. For experienced users only. * * @see ConnectionFactory - * @see ClusterConnection * @since 2.0.2, 2.1.1 */ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.HBCK) diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterCallable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterCallable.java index 7ae97314c4..04da2ebe4a 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterCallable.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterCallable.java @@ -43,12 +43,13 @@ import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; */ @InterfaceAudience.Private abstract class MasterCallable implements RetryingCallable, Closeable { - protected final ClusterConnection connection; + protected final ConnectionImplementation connection; protected MasterKeepAliveConnection master; private final HBaseRpcController rpcController; - MasterCallable(final Connection connection, final RpcControllerFactory rpcConnectionFactory) { - this.connection = (ClusterConnection) connection; + MasterCallable(ConnectionImplementation connection, + final RpcControllerFactory rpcConnectionFactory) { + this.connection = connection; this.rpcController = rpcConnectionFactory.newController(); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiServerCallable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiServerCallable.java index bfc161e5c2..bf557faa23 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiServerCallable.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiServerCallable.java @@ 
-22,7 +22,6 @@ import java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.Map; - import org.apache.hadoop.hbase.CellScannable; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.DoNotRetryIOException; @@ -31,15 +30,16 @@ import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; import org.apache.yetus.audience.InterfaceAudience; + +import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; +import org.apache.hbase.thirdparty.com.google.protobuf.RpcController; + import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter; import org.apache.hadoop.hbase.shaded.protobuf.ResponseConverter; import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos; import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MultiRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto; import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.RegionAction; -import org.apache.hbase.thirdparty.com.google.protobuf.RpcController; - -import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; /** * Callable that handles the multi method call going against a single @@ -52,7 +52,7 @@ class MultiServerCallable extends CancellableRegionServerCallable private MultiAction multiAction; private boolean cellBlock; - MultiServerCallable(final ClusterConnection connection, final TableName tableName, + MultiServerCallable(final ConnectionImplementation connection, final TableName tableName, final ServerName location, final MultiAction multi, RpcController rpcController, int rpcTimeout, RetryingTimeTracker tracker, int priority) { super(connection, tableName, null, rpcController, rpcTimeout, tracker, priority); @@ -141,7 +141,7 @@ class MultiServerCallable extends CancellableRegionServerCallable private boolean isCellBlock() { // This is not exact -- the 
configuration could have changed on us after connection was set up // but it will do for now. - ClusterConnection conn = getConnection(); + ConnectionImplementation conn = getConnection(); return conn.hasCellBlockSupport(); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/NoncedRegionServerCallable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/NoncedRegionServerCallable.java index 2da8422f48..69f4f4ae61 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/NoncedRegionServerCallable.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/NoncedRegionServerCallable.java @@ -46,8 +46,8 @@ public abstract class NoncedRegionServerCallable extends ClientServiceCallabl * @param tableName Table name to which row belongs. * @param row The row we want in tableName. */ - public NoncedRegionServerCallable(Connection connection, TableName tableName, byte [] row, - HBaseRpcController rpcController, int priority) { + public NoncedRegionServerCallable(ConnectionImplementation connection, TableName tableName, + byte[] row, HBaseRpcController rpcController, int priority) { super(connection, tableName, row, rpcController, priority); this.nonce = getConnection().getNonceGenerator().newNonce(); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionCoprocessorRpcChannel.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionCoprocessorRpcChannel.java index 448302c854..80371b764a 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionCoprocessorRpcChannel.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionCoprocessorRpcChannel.java @@ -46,7 +46,7 @@ class RegionCoprocessorRpcChannel extends SyncCoprocessorRpcChannel { private static final Logger LOG = LoggerFactory.getLogger(RegionCoprocessorRpcChannel.class); private final TableName table; private final byte [] row; - private final ClusterConnection conn; + private final 
ConnectionImplementation conn; private byte[] lastRegion; private final int operationTimeout; private final RpcRetryingCallerFactory rpcCallerFactory; @@ -57,7 +57,7 @@ class RegionCoprocessorRpcChannel extends SyncCoprocessorRpcChannel { * @param table to connect to * @param row to locate region with */ - RegionCoprocessorRpcChannel(ClusterConnection conn, TableName table, byte[] row) { + RegionCoprocessorRpcChannel(ConnectionImplementation conn, TableName table, byte[] row) { this.table = table; this.row = row; this.conn = conn; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionServerCallable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionServerCallable.java index 264304e1cf..009544cfc5 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionServerCallable.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionServerCallable.java @@ -19,7 +19,6 @@ package org.apache.hadoop.hbase.client; import java.io.IOException; - import org.apache.hadoop.hbase.CellScanner; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionInfo; @@ -27,11 +26,12 @@ import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableNotEnabledException; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.ipc.HBaseRpcController; import org.apache.hadoop.hbase.protobuf.ProtobufUtil; -import org.apache.hbase.thirdparty.com.google.protobuf.RpcController; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.yetus.audience.InterfaceAudience; + +import org.apache.hbase.thirdparty.com.google.protobuf.RpcController; /** * Implementations make a RPC call against a RegionService via a protobuf Service. @@ -75,12 +75,12 @@ public abstract class RegionServerCallable implements RetryingCallable * @param tableName Table name to which row belongs. 
* @param row The row we want in tableName. */ - public RegionServerCallable(ConnectionImplementation connection, TableName tableName, byte [] row, + public RegionServerCallable(ConnectionImplementation connection, TableName tableName, byte[] row, RpcController rpcController) { this(connection, tableName, row, rpcController, HConstants.NORMAL_QOS); } - public RegionServerCallable(ConnectionImplementation connection, TableName tableName, byte [] row, + public RegionServerCallable(ConnectionImplementation connection, TableName tableName, byte[] row, RpcController rpcController, int priority) { super(); this.connection = connection; @@ -160,7 +160,7 @@ public abstract class RegionServerCallable implements RetryingCallable } /** - * @return {@link ClusterConnection} instance used by this Callable. + * @return {@link ConnectionImplementation} instance used by this Callable. */ protected ConnectionImplementation getConnection() { return this.connection; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ReversedClientScanner.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ReversedClientScanner.java index 34c24c00a7..2ed037e8a9 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ReversedClientScanner.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ReversedClientScanner.java @@ -37,13 +37,6 @@ public class ReversedClientScanner extends ClientScanner { /** * Create a new ReversibleClientScanner for the specified table Note that the passed * {@link Scan}'s start row maybe changed. 
- * @param conf - * @param scan - * @param tableName - * @param connection - * @param pool - * @param primaryOperationTimeout - * @throws IOException */ public ReversedClientScanner(Configuration conf, Scan scan, TableName tableName, ConnectionImplementation connection, RpcRetryingCallerFactory rpcFactory, diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ReversedScannerCallable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ReversedScannerCallable.java index 30e541cefe..6a325b26a9 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ReversedScannerCallable.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ReversedScannerCallable.java @@ -52,8 +52,8 @@ public class ReversedScannerCallable extends ScannerCallable { * @param rpcFactory to create an {@link com.google.protobuf.RpcController} to talk to the * regionserver */ - public ReversedScannerCallable(ClusterConnection connection, TableName tableName, Scan scan, - ScanMetrics scanMetrics, RpcControllerFactory rpcFactory) { + public ReversedScannerCallable(ConnectionImplementation connection, TableName tableName, + Scan scan, ScanMetrics scanMetrics, RpcControllerFactory rpcFactory) { super(connection, tableName, scan, scanMetrics, rpcFactory); } @@ -66,8 +66,8 @@ public class ReversedScannerCallable extends ScannerCallable { * regionserver * @param replicaId the replica id */ - public ReversedScannerCallable(ClusterConnection connection, TableName tableName, Scan scan, - ScanMetrics scanMetrics, RpcControllerFactory rpcFactory, int replicaId) { + public ReversedScannerCallable(ConnectionImplementation connection, TableName tableName, + Scan scan, ScanMetrics scanMetrics, RpcControllerFactory rpcFactory, int replicaId) { super(connection, tableName, scan, scanMetrics, rpcFactory, replicaId); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScannerCallable.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScannerCallable.java index 45b74ef938..bf7135fab4 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScannerCallable.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScannerCallable.java @@ -101,23 +101,24 @@ public class ScannerCallable extends ClientServiceCallable { * @param scanMetrics the ScanMetrics to used, if it is null, ScannerCallable won't collect * metrics * @param rpcControllerFactory factory to use when creating - * {@link com.google.protobuf.RpcController} + * {@link com.google.protobuf.RpcController} */ - public ScannerCallable(ClusterConnection connection, TableName tableName, Scan scan, + public ScannerCallable(ConnectionImplementation connection, TableName tableName, Scan scan, ScanMetrics scanMetrics, RpcControllerFactory rpcControllerFactory) { this(connection, tableName, scan, scanMetrics, rpcControllerFactory, 0); } + /** - * * @param connection * @param tableName * @param scan * @param scanMetrics * @param id the replicaId */ - public ScannerCallable(ClusterConnection connection, TableName tableName, Scan scan, + public ScannerCallable(ConnectionImplementation connection, TableName tableName, Scan scan, ScanMetrics scanMetrics, RpcControllerFactory rpcControllerFactory, int id) { - super(connection, tableName, scan.getStartRow(), rpcControllerFactory.newController(), scan.getPriority()); + super(connection, tableName, scan.getStartRow(), rpcControllerFactory.newController(), + scan.getPriority()); this.id = id; this.scan = scan; this.scanMetrics = scanMetrics; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScannerCallableWithReplicas.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScannerCallableWithReplicas.java index 27e5f87772..db956ce81c 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScannerCallableWithReplicas.java +++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScannerCallableWithReplicas.java @@ -76,7 +76,7 @@ class ScannerCallableWithReplicas implements RetryingCallable { public ScannerCallableWithReplicas(TableName tableName, ConnectionImplementation cConnection, ScannerCallable baseCallable, ExecutorService pool, int timeBeforeReplicas, Scan scan, int retries, int scannerTimeout, int caching, Configuration conf, - RpcRetryingCaller caller) { + RpcRetryingCaller caller) { this.currentScannerCallable = baseCallable; this.cConnection = cConnection; this.pool = pool; diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncProcess.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncProcess.java index 70d10cad7b..d4781d12ed 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncProcess.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncProcess.java @@ -1124,11 +1124,8 @@ public class TestAsyncProcess { 1, BufferedMutator.MIN_WRITE_BUFFER_PERIODIC_FLUSH_TIMERTICK_MS); } - private void checkPeriodicFlushParameters(ClusterConnection conn, - MyAsyncProcess ap, - long setTO, long expectTO, - long setTT, long expectTT - ) { + private void checkPeriodicFlushParameters(ConnectionImplementation conn, MyAsyncProcess ap, + long setTO, long expectTO, long setTT, long expectTT) { BufferedMutatorParams bufferParam = createBufferedMutatorParams(ap, DUMMY_TABLE); // The BufferedMutatorParams does nothing with the value diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestBufferedMutator.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestBufferedMutator.java index f0375e2959..647ea32a5e 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestBufferedMutator.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestBufferedMutator.java @@ -44,8 +44,7 @@ public class TestBufferedMutator { public TestName name = new 
TestName(); /** - * My BufferedMutator. - * Just to prove that I can insert a BM other than default. + * My BufferedMutator. Just to prove that I can insert a BM other than default. */ public static class MyBufferedMutator extends BufferedMutatorImpl { MyBufferedMutator(ConnectionImplementation conn, RpcRetryingCallerFactory rpcCallerFactory, diff --git a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionServerBulkLoadWithOldSecureEndpoint.java b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionServerBulkLoadWithOldSecureEndpoint.java index 49697b8315..755096e22f 100644 --- a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionServerBulkLoadWithOldSecureEndpoint.java +++ b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionServerBulkLoadWithOldSecureEndpoint.java @@ -18,27 +18,25 @@ package org.apache.hadoop.hbase.regionserver; import java.io.IOException; -import java.util.ArrayList; +import java.util.Collections; import java.util.List; +import java.util.Map; +import java.util.TreeMap; import java.util.concurrent.atomic.AtomicLong; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HBaseClassTestRule; -import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.MultithreadedTestUtil.RepeatingTestThread; import org.apache.hadoop.hbase.MultithreadedTestUtil.TestContext; import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.client.ClientServiceCallable; -import org.apache.hadoop.hbase.client.ClusterConnection; -import org.apache.hadoop.hbase.client.RpcRetryingCaller; -import org.apache.hadoop.hbase.client.RpcRetryingCallerFactory; -import org.apache.hadoop.hbase.client.Table; +import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.client.RegionLocator; import 
org.apache.hadoop.hbase.coprocessor.CoprocessorHost; -import org.apache.hadoop.hbase.ipc.RpcControllerFactory; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.testclassification.RegionServerTests; +import org.apache.hadoop.hbase.tool.BulkLoadHFiles; import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.hbase.util.Pair; import org.junit.BeforeClass; import org.junit.ClassRule; import org.junit.Ignore; @@ -50,10 +48,6 @@ import org.slf4j.LoggerFactory; import org.apache.hbase.thirdparty.com.google.common.collect.Lists; -import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter; -import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos; -import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest; - /** * Tests bulk loading of HFiles with old secure Endpoint client for backward compatibility. Will be * removed when old non-secure client for backward compatibility is not supported. @@ -99,57 +93,26 @@ public class TestHRegionServerBulkLoadWithOldSecureEndpoint extends TestHRegionS // create HFiles for different column families FileSystem fs = UTIL.getTestFileSystem(); byte[] val = Bytes.toBytes(String.format("%010d", iteration)); - final List> famPaths = new ArrayList<>(NUM_CFS); + Map> family2Files = new TreeMap<>(Bytes.BYTES_COMPARATOR); for (int i = 0; i < NUM_CFS; i++) { Path hfile = new Path(dir, family(i)); byte[] fam = Bytes.toBytes(family(i)); createHFile(fs, hfile, fam, QUAL, val, 1000); - famPaths.add(new Pair<>(fam, hfile.toString())); + family2Files.put(fam, Collections.singletonList(hfile)); } // bulk load HFiles - final ClusterConnection conn = (ClusterConnection) UTIL.getAdmin().getConnection(); - Table table = conn.getTable(tableName); - final String bulkToken = new SecureBulkLoadEndpointClient(table).prepareBulkLoad(tableName); - RpcControllerFactory rpcControllerFactory = new RpcControllerFactory(UTIL.getConfiguration()); - ClientServiceCallable 
callable = - new ClientServiceCallable(conn, tableName, Bytes.toBytes("aaa"), - rpcControllerFactory.newController(), HConstants.PRIORITY_UNSET) { - @Override - protected Void rpcCall() throws Exception { - LOG.debug("Going to connect to server " + getLocation() + " for row " + - Bytes.toStringBinary(getRow())); - try (Table table = conn.getTable(getTableName())) { - boolean loaded = new SecureBulkLoadEndpointClient(table).bulkLoadHFiles(famPaths, - null, bulkToken, getLocation().getRegionInfo().getStartKey()); - } - return null; - } - }; - RpcRetryingCallerFactory factory = new RpcRetryingCallerFactory(conf); - RpcRetryingCaller caller = factory. newCaller(); - caller.callWithRetries(callable, Integer.MAX_VALUE); + BulkLoadHFiles.create(UTIL.getConfiguration()).bulkLoad(tableName, family2Files); + final Connection conn = UTIL.getConnection(); // Periodically do compaction to reduce the number of open file handles. if (numBulkLoads.get() % 5 == 0) { // 5 * 50 = 250 open file handles! - callable = new ClientServiceCallable(conn, tableName, Bytes.toBytes("aaa"), - rpcControllerFactory.newController(), HConstants.PRIORITY_UNSET) { - @Override - protected Void rpcCall() throws Exception { - LOG.debug("compacting " + getLocation() + " for row " - + Bytes.toStringBinary(getRow())); - AdminProtos.AdminService.BlockingInterface server = - conn.getAdmin(getLocation().getServerName()); - CompactRegionRequest request = - RequestConverter.buildCompactRegionRequest( - getLocation().getRegionInfo().getRegionName(), true, null); - server.compactRegion(null, request); - numCompactions.incrementAndGet(); - return null; - } - }; - caller.callWithRetries(callable, Integer.MAX_VALUE); + try (RegionLocator locator = conn.getRegionLocator(tableName)) { + HRegionLocation loc = locator.getRegionLocation(Bytes.toBytes("aaa"), true); + conn.getAdmin().compactRegion(loc.getRegion().getRegionName()); + numCompactions.incrementAndGet(); + } } } } diff --git 
a/hbase-it/src/test/java/org/apache/hadoop/hbase/DistributedHBaseCluster.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/DistributedHBaseCluster.java index 30a3db9625..2a7b7cd105 100644 --- a/hbase-it/src/test/java/org/apache/hadoop/hbase/DistributedHBaseCluster.java +++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/DistributedHBaseCluster.java @@ -20,6 +20,7 @@ package org.apache.hadoop.hbase; import java.io.IOException; import java.util.ArrayList; import java.util.Comparator; +import java.util.EnumSet; import java.util.HashSet; import java.util.List; import java.util.Objects; @@ -28,7 +29,6 @@ import java.util.TreeSet; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.ClusterManager.ServiceType; import org.apache.hadoop.hbase.client.Admin; -import org.apache.hadoop.hbase.client.ClusterConnection; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.RegionInfo; @@ -37,10 +37,6 @@ import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Threads; import org.apache.yetus.audience.InterfaceAudience; -import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos; -import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos; -import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MasterService; - /** * Manages the interactions with an already deployed distributed cluster (as opposed to * a pseudo-distributed, or mini/local cluster). This is used by integration and system tests. 
@@ -99,18 +95,6 @@ public class DistributedHBaseCluster extends HBaseCluster { } } - @Override - public AdminProtos.AdminService.BlockingInterface getAdminProtocol(ServerName serverName) - throws IOException { - return ((ClusterConnection)this.connection).getAdmin(serverName); - } - - @Override - public ClientProtos.ClientService.BlockingInterface getClientProtocol(ServerName serverName) - throws IOException { - return ((ClusterConnection)this.connection).getClient(serverName); - } - @Override public void startRegionServer(String hostname, int port) throws IOException { LOG.info("Starting RS on: " + hostname); @@ -262,13 +246,6 @@ public class DistributedHBaseCluster extends HBaseCluster { throw new IOException("did timeout waiting for service to start:" + serverName); } - - @Override - public MasterService.BlockingInterface getMasterAdminService() - throws IOException { - return ((ClusterConnection)this.connection).getMaster(); - } - @Override public void startMaster(String hostname, int port) throws IOException { LOG.info("Starting Master on: " + hostname + ":" + port); @@ -297,7 +274,7 @@ public class DistributedHBaseCluster extends HBaseCluster { long start = System.currentTimeMillis(); while (System.currentTimeMillis() - start < timeout) { try { - getMasterAdminService(); + connection.getAdmin().getClusterMetrics(EnumSet.of(ClusterMetrics.Option.HBASE_VERSION)); return true; } catch (MasterNotRunningException m) { LOG.warn("Master not started yet " + m); diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultiTableInputFormatBase.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultiTableInputFormatBase.java index 906abca05c..eff26d7994 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultiTableInputFormatBase.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultiTableInputFormatBase.java @@ -36,8 +36,8 @@ import 
org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.BufferedMutator; import org.apache.hadoop.hbase.client.BufferedMutatorParams; -import org.apache.hadoop.hbase.client.ClusterConnection; import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.client.ConnectionUtils; import org.apache.hadoop.hbase.client.RegionInfoBuilder; import org.apache.hadoop.hbase.client.RegionLocator; import org.apache.hadoop.hbase.client.Result; @@ -98,7 +98,7 @@ public class TestMultiTableInputFormatBase { // canned responses. JobContext mockedJobContext = Mockito.mock(JobContext.class); Configuration c = HBaseConfiguration.create(); - c.set(ClusterConnection.HBASE_CLIENT_CONNECTION_IMPL, MRSplitsConnection.class.getName()); + c.set(ConnectionUtils.HBASE_CLIENT_CONNECTION_IMPL, MRSplitsConnection.class.getName()); Mockito.when(mockedJobContext.getConfiguration()).thenReturn(c); // Invent a bunch of scans. Have each Scan go against a different table so a good spread. 
List scans = new ArrayList<>(); diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatBase.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatBase.java index 4436ee1091..944bd1049b 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatBase.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatBase.java @@ -17,9 +17,9 @@ */ package org.apache.hadoop.hbase.mapreduce; -import static org.junit.Assert.*; -import static org.mockito.Mockito.any; -import static org.mockito.Mockito.anyBoolean; +import static org.junit.Assert.assertEquals; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyBoolean; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -40,8 +40,8 @@ import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.BufferedMutator; import org.apache.hadoop.hbase.client.BufferedMutatorParams; -import org.apache.hadoop.hbase.client.ClusterConnection; import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.client.ConnectionUtils; import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.client.RegionInfoBuilder; import org.apache.hadoop.hbase.client.RegionLocator; @@ -90,7 +90,7 @@ public class TestTableInputFormatBase { public void testNonSuccessiveSplitsAreNotMerged() throws IOException { JobContext context = mock(JobContext.class); Configuration conf = HBaseConfiguration.create(); - conf.set(ClusterConnection.HBASE_CLIENT_CONNECTION_IMPL, + conf.set(ConnectionUtils.HBASE_CLIENT_CONNECTION_IMPL, ConnectionForMergeTesting.class.getName()); conf.set(TableInputFormat.INPUT_TABLE, "testTable"); conf.setBoolean(TableInputFormatBase.MAPREDUCE_INPUT_AUTOBALANCE, true); diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/Server.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/Server.java index c33d5affc2..e0e95dfe8b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/Server.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/Server.java @@ -22,7 +22,6 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.hbase.client.AsyncClusterConnection; import org.apache.hadoop.hbase.client.AsyncConnection; -import org.apache.hadoop.hbase.client.ClusterConnection; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.zookeeper.ZKWatcher; import org.apache.yetus.audience.InterfaceAudience; @@ -54,14 +53,6 @@ public interface Server extends Abortable, Stoppable { Connection createConnection(Configuration conf) throws IOException; - /** - * Returns a reference to the servers' cluster connection. Prefer {@link #getConnection()}. - * - * Important note: this method returns a reference to Connection which is managed - * by Server itself, so callers must NOT attempt to close connection obtained. - */ - ClusterConnection getClusterConnection(); - /** * Returns a reference to the servers' async connection. *

diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/ZKTableArchiveClient.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/ZKTableArchiveClient.java index 142788e761..af0d560ddd 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/ZKTableArchiveClient.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/ZKTableArchiveClient.java @@ -18,14 +18,13 @@ package org.apache.hadoop.hbase.backup.example; import java.io.IOException; - -import org.apache.hadoop.hbase.zookeeper.ZKWatcher; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configured; -import org.apache.hadoop.hbase.client.ClusterConnection; +import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.zookeeper.ZKWatcher; import org.apache.hadoop.hbase.zookeeper.ZNodePaths; +import org.apache.yetus.audience.InterfaceAudience; import org.apache.zookeeper.KeeperException; /** @@ -36,9 +35,9 @@ public class ZKTableArchiveClient extends Configured { /** Configuration key for the archive node. 
*/ private static final String ZOOKEEPER_ZNODE_HFILE_ARCHIVE_KEY = "zookeeper.znode.hfile.archive"; - private ClusterConnection connection; + private Connection connection; - public ZKTableArchiveClient(Configuration conf, ClusterConnection connection) { + public ZKTableArchiveClient(Configuration conf, Connection connection) { super(conf); this.connection = connection; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/AsyncClusterConnection.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/AsyncClusterConnection.java index c3f8f8b87b..45dc8be12e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/AsyncClusterConnection.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/AsyncClusterConnection.java @@ -64,7 +64,7 @@ public interface AsyncClusterConnection extends AsyncConnection { List entries, int replicaId, int numRetries, long operationTimeoutNs); /** - * Return all the replicas for a region. Used for regiong replica replication. + * Return all the replicas for a region. Used for region replica replication. 
*/ CompletableFuture getRegionLocations(TableName tableName, byte[] row, boolean reload); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java index cf56c4fd16..b7be6a931c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java @@ -1165,7 +1165,7 @@ public class HMaster extends HRegionServer implements MasterServices { if (QuotaUtil.isQuotaEnabled(conf)) { // Create the quota snapshot notifier spaceQuotaSnapshotNotifier = createQuotaSnapshotNotifier(); - spaceQuotaSnapshotNotifier.initialize(getClusterConnection()); + spaceQuotaSnapshotNotifier.initialize(getConnection()); this.quotaObserverChore = new QuotaObserverChore(this, getMasterMetrics()); // Start the chore to read the region FS space reports and act on them getChoreService().scheduleChore(quotaObserverChore); @@ -1262,7 +1262,7 @@ public class HMaster extends HRegionServer implements MasterServices { */ private boolean waitForNamespaceOnline() throws InterruptedException, IOException { TableState nsTableState = - MetaTableAccessor.getTableState(getClusterConnection(), TableName.NAMESPACE_TABLE_NAME); + MetaTableAccessor.getTableState(getConnection(), TableName.NAMESPACE_TABLE_NAME); if (nsTableState == null || nsTableState.isDisabled()) { // this means we have already migrated the data and disabled or deleted the namespace table, // or this is a new depliy which does not have a namespace table from the beginning. 
@@ -1852,7 +1852,7 @@ public class HMaster extends HRegionServer implements MasterServices { List plans = this.normalizer.computePlanForTable(table); if (plans != null) { for (NormalizationPlan plan : plans) { - plan.execute(clusterConnection.getAdmin()); + plan.execute(connection.getAdmin()); if (plan.getType() == PlanType.SPLIT) { splitPlanCount++; } else if (plan.getType() == PlanType.MERGE) { @@ -3043,8 +3043,8 @@ public class HMaster extends HRegionServer implements MasterServices { // this is what we want especially if the Master is in startup phase doing call outs to // hbase:meta, etc. when cluster is down. Without ths connection close, we'd have to wait on // the rpc to timeout. - if (this.clusterConnection != null) { - this.clusterConnection.close(); + if (this.connection != null) { + this.connection.close(); } if (this.asyncClusterConnection != null) { this.asyncClusterConnection.close(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManagerUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManagerUtil.java index 97ae7eadd6..285fc626c8 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManagerUtil.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManagerUtil.java @@ -28,19 +28,16 @@ import java.util.stream.Stream; import org.apache.commons.lang3.ArrayUtils; import org.apache.hadoop.hbase.HBaseIOException; import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.client.AsyncRegionServerAdmin; import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.client.RegionReplicaUtil; import org.apache.hadoop.hbase.favored.FavoredNodesManager; -import org.apache.hadoop.hbase.ipc.HBaseRpcController; import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv; +import org.apache.hadoop.hbase.util.FutureUtils; import org.apache.hadoop.hbase.wal.WALSplitter; 
import org.apache.yetus.audience.InterfaceAudience; -import org.apache.hbase.thirdparty.com.google.protobuf.ServiceException; - -import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter; -import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse; @@ -66,22 +63,15 @@ final class AssignmentManagerUtil { static GetRegionInfoResponse getRegionInfoResponse(final MasterProcedureEnv env, final ServerName regionLocation, final RegionInfo hri, boolean includeBestSplitRow) throws IOException { - // TODO: There is no timeout on this controller. Set one! - HBaseRpcController controller = - env.getMasterServices().getClusterConnection().getRpcControllerFactory().newController(); - final AdminService.BlockingInterface admin = - env.getMasterServices().getClusterConnection().getAdmin(regionLocation); + AsyncRegionServerAdmin admin = + env.getMasterServices().getAsyncClusterConnection().getRegionServerAdmin(regionLocation); GetRegionInfoRequest request = null; if (includeBestSplitRow) { request = RequestConverter.buildGetRegionInfoRequest(hri.getRegionName(), false, true); } else { request = RequestConverter.buildGetRegionInfoRequest(hri.getRegionName()); } - try { - return admin.getRegionInfo(controller, request); - } catch (ServiceException e) { - throw ProtobufUtil.handleRemoteException(e); - } + return FutureUtils.get(admin.getRegionInfo(request)); } private static void lock(List regionNodes) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/RegionServerSpaceQuotaManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/RegionServerSpaceQuotaManager.java index 39727000ff..b9797bc8b7 100644 --- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/RegionServerSpaceQuotaManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/RegionServerSpaceQuotaManager.java @@ -90,7 +90,7 @@ public class RegionServerSpaceQuotaManager { return; } // Start the chores - this.spaceQuotaRefresher = new SpaceQuotaRefresherChore(this, rsServices.getClusterConnection()); + this.spaceQuotaRefresher = new SpaceQuotaRefresherChore(this, rsServices.getConnection()); rsServices.getChoreService().scheduleChore(spaceQuotaRefresher); this.regionSizeReporter = new RegionSizeReportingChore(rsServices); rsServices.getChoreService().scheduleChore(regionSizeReporter); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/policies/DisableTableViolationPolicyEnforcement.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/policies/DisableTableViolationPolicyEnforcement.java index 9d24c92bcc..eb2e27ac25 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/policies/DisableTableViolationPolicyEnforcement.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/policies/DisableTableViolationPolicyEnforcement.java @@ -17,17 +17,17 @@ package org.apache.hadoop.hbase.quotas.policies; import java.io.IOException; - import org.apache.hadoop.hbase.TableNotDisabledException; import org.apache.hadoop.hbase.TableNotEnabledException; import org.apache.hadoop.hbase.TableNotFoundException; -import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; +import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.Mutation; import org.apache.hadoop.hbase.quotas.SpaceLimitingException; import org.apache.hadoop.hbase.quotas.SpaceViolationPolicy; import org.apache.hadoop.hbase.quotas.SpaceViolationPolicyEnforcement; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * A {@link 
SpaceViolationPolicyEnforcement} which disables the table. The enforcement @@ -44,7 +44,9 @@ public class DisableTableViolationPolicyEnforcement extends DefaultViolationPoli if (LOG.isTraceEnabled()) { LOG.trace("Starting disable of " + getTableName()); } - getRegionServerServices().getClusterConnection().getAdmin().disableTable(getTableName()); + try (Admin admin = getRegionServerServices().getConnection().getAdmin()) { + admin.disableTable(getTableName()); + } if (LOG.isTraceEnabled()) { LOG.trace("Disable is complete for " + getTableName()); } @@ -59,7 +61,9 @@ public class DisableTableViolationPolicyEnforcement extends DefaultViolationPoli if (LOG.isTraceEnabled()) { LOG.trace("Starting enable of " + getTableName()); } - getRegionServerServices().getClusterConnection().getAdmin().enableTable(getTableName()); + try (Admin admin = getRegionServerServices().getConnection().getAdmin()) { + admin.enableTable(getTableName()); + } if (LOG.isTraceEnabled()) { LOG.trace("Enable is complete for " + getTableName()); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java index 880525dfd7..ca1c5f1f73 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java @@ -83,7 +83,6 @@ import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.YouAreDeadException; import org.apache.hadoop.hbase.ZNodeClearer; import org.apache.hadoop.hbase.client.AsyncClusterConnection; -import org.apache.hadoop.hbase.client.ClusterConnection; import org.apache.hadoop.hbase.client.ClusterConnectionFactory; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionUtils; @@ -258,14 +257,17 @@ public class HRegionServer extends HasThread implements protected HeapMemoryManager hMemManager; /** - * 
Cluster connection to be shared by services. + * Connection to be shared by services. + *

* Initialized at server startup and closed when server shuts down. + *

* Clients must never close it explicitly. - * Clients hosted by this Server should make use of this clusterConnection rather than create - * their own; if they create their own, there is no way for the hosting server to shutdown - * ongoing client RPCs. + *

+ * Clients hosted by this Server should make use of this connection rather than create their own; + * if they create their own, there is no way for the hosting server to shutdown ongoing client + * RPCs. */ - protected ClusterConnection clusterConnection; + protected Connection connection; /** * The asynchronous cluster connection to be shared by services. @@ -804,11 +806,11 @@ public class HRegionServer extends HasThread implements * Create a 'smarter' Connection, one that is capable of by-passing RPC if the request is to the * local server; i.e. a short-circuit Connection. Safe to use going to local or remote server. */ - private ClusterConnection createClusterConnection() throws IOException { + private Connection createConnection() throws IOException { // Create a cluster connection that when appropriate, can short-circuit and go directly to the // local server if the request is to the local server bypassing RPC. Can be used for both local // and remote invocations. - ClusterConnection conn = + Connection conn = ConnectionUtils.createShortCircuitConnection(unsetClientZookeeperQuorum(), null, userProvider.getCurrent(), serverName, rpcServices, rpcServices); // This is used to initialize the batch thread pool inside the connection implementation. @@ -845,8 +847,8 @@ public class HRegionServer extends HasThread implements * Setup our cluster connection if not already initialized. 
*/ protected final synchronized void setupClusterConnection() throws IOException { - if (clusterConnection == null) { - clusterConnection = createClusterConnection(); + if (connection == null) { + connection = createConnection(); asyncClusterConnection = ClusterConnectionFactory.createAsyncClusterConnection(unsetClientZookeeperQuorum(), new InetSocketAddress(this.rpcServices.isa.getAddress(), 0), userProvider.getCurrent()); @@ -1118,9 +1120,9 @@ public class HRegionServer extends HasThread implements LOG.info("stopping server " + this.serverName); } - if (this.clusterConnection != null && !clusterConnection.isClosed()) { + if (this.connection != null && !connection.isClosed()) { try { - this.clusterConnection.close(); + this.connection.close(); } catch (IOException e) { // Although the {@link Closeable} interface throws an {@link // IOException}, in reality, the implementation would never do that. @@ -2188,12 +2190,7 @@ public class HRegionServer extends HasThread implements @Override public Connection getConnection() { - return getClusterConnection(); - } - - @Override - public ClusterConnection getClusterConnection() { - return this.clusterConnection; + return this.connection; } @Override @@ -2296,7 +2293,7 @@ public class HRegionServer extends HasThread implements } } else { try { - MetaTableAccessor.updateRegionLocation(clusterConnection, + MetaTableAccessor.updateRegionLocation(connection, hris[0], serverName, openSeqNum, masterSystemTime); } catch (IOException e) { LOG.info("Failed to update meta", e); @@ -2324,7 +2321,7 @@ public class HRegionServer extends HasThread implements // Keep looping till we get an error. We want to send reports even though server is going down. // Only go down if clusterConnection is null. It is set to null almost as last thing as the // HRegionServer does down. 
- while (this.clusterConnection != null && !this.clusterConnection.isClosed()) { + while (this.connection != null && !this.connection.isClosed()) { RegionServerStatusService.BlockingInterface rss = rssStub; try { if (rss == null) { @@ -3775,7 +3772,7 @@ public class HRegionServer extends HasThread implements @Override public void unassign(byte[] regionName) throws IOException { - clusterConnection.getAdmin().unassign(regionName, false); + connection.getAdmin().unassign(regionName, false); } @Override diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/DumpReplicationQueues.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/DumpReplicationQueues.java index a960c3146a..67e945f245 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/DumpReplicationQueues.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/DumpReplicationQueues.java @@ -37,7 +37,7 @@ import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.Stoppable; import org.apache.hadoop.hbase.client.Admin; -import org.apache.hadoop.hbase.client.ClusterConnection; +import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.HBaseAdmin; import org.apache.hadoop.hbase.client.replication.TableCFs; @@ -207,7 +207,7 @@ public class DumpReplicationQueues extends Configured implements Tool { Configuration conf = getConf(); HBaseAdmin.available(conf); - ClusterConnection connection = (ClusterConnection) ConnectionFactory.createConnection(conf); + Connection connection = ConnectionFactory.createConnection(conf); Admin admin = connection.getAdmin(); ZKWatcher zkw = new ZKWatcher(conf, "DumpReplicationQueues" + System.currentTimeMillis(), diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSyncUp.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSyncUp.java index 7d1245c408..bbd7675053 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSyncUp.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSyncUp.java @@ -30,7 +30,6 @@ import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.client.AsyncClusterConnection; -import org.apache.hadoop.hbase.client.ClusterConnection; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.util.FSUtils; import org.apache.hadoop.hbase.zookeeper.ZKWatcher; @@ -153,7 +152,7 @@ public class ReplicationSyncUp extends Configured implements Tool { } @Override - public ClusterConnection getConnection() { + public Connection getConnection() { return null; } @@ -162,11 +161,6 @@ public class ReplicationSyncUp extends Configured implements Tool { return null; } - @Override - public ClusterConnection getClusterConnection() { - return null; - } - @Override public FileSystem getFileSystem() { return null; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java index dfaa16d035..3c84da013d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java @@ -89,7 +89,6 @@ import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableNotFoundException; import org.apache.hadoop.hbase.ZooKeeperConnectionException; import org.apache.hadoop.hbase.client.Admin; -import org.apache.hadoop.hbase.client.ClusterConnection; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor; import 
org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; import org.apache.hadoop.hbase.client.Connection; @@ -161,9 +160,6 @@ import org.apache.hbase.thirdparty.com.google.common.collect.Ordering; import org.apache.hbase.thirdparty.com.google.common.collect.Sets; import org.apache.hbase.thirdparty.com.google.common.collect.TreeMultimap; -import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; -import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService.BlockingInterface; - /** * HBaseFsck (hbck) is a tool for checking and repairing region consistency and * table integrity problems in a corrupted HBase. This tool was written for hbase-1.x. It does not @@ -245,7 +241,7 @@ public class HBaseFsck extends Configured implements Closeable { **********************/ private static final Logger LOG = LoggerFactory.getLogger(HBaseFsck.class.getName()); private ClusterMetrics status; - private ClusterConnection connection; + private Connection connection; private Admin admin; private Table meta; // threads to do ||izable tasks: retrieve data from regionservers, handle overlapping regions @@ -585,7 +581,7 @@ public class HBaseFsck extends Configured implements Closeable { LOG.info("Launching hbck"); - connection = (ClusterConnection)ConnectionFactory.createConnection(getConf()); + connection = ConnectionFactory.createConnection(getConf()); admin = connection.getAdmin(); meta = connection.getTable(TableName.META_TABLE_NAME); status = admin.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS, @@ -4332,10 +4328,10 @@ public class HBaseFsck extends Configured implements Closeable { private final HBaseFsck hbck; private final ServerName rsinfo; private final ErrorReporter errors; - private final ClusterConnection connection; + private final Connection connection; WorkItemRegion(HBaseFsck hbck, ServerName info, - ErrorReporter errors, ClusterConnection connection) { + ErrorReporter errors, Connection connection) { this.hbck = hbck; this.rsinfo = info; 
this.errors = errors; @@ -4346,32 +4342,29 @@ public class HBaseFsck extends Configured implements Closeable { public synchronized Void call() throws IOException { errors.progress(); try { - BlockingInterface server = connection.getAdmin(rsinfo); - // list all online regions from this region server - List regions = ProtobufUtil.getOnlineRegions(server); + List regions = connection.getAdmin().getRegions(rsinfo); regions = filterRegions(regions); if (details) { - errors.detail("RegionServer: " + rsinfo.getServerName() + - " number of regions: " + regions.size()); - for (RegionInfo rinfo: regions) { - errors.detail(" " + rinfo.getRegionNameAsString() + - " id: " + rinfo.getRegionId() + - " encoded_name: " + rinfo.getEncodedName() + - " start: " + Bytes.toStringBinary(rinfo.getStartKey()) + - " end: " + Bytes.toStringBinary(rinfo.getEndKey())); + errors.detail( + "RegionServer: " + rsinfo.getServerName() + " number of regions: " + regions.size()); + for (RegionInfo rinfo : regions) { + errors.detail(" " + rinfo.getRegionNameAsString() + " id: " + rinfo.getRegionId() + + " encoded_name: " + rinfo.getEncodedName() + " start: " + + Bytes.toStringBinary(rinfo.getStartKey()) + " end: " + + Bytes.toStringBinary(rinfo.getEndKey())); } } // check to see if the existence of this region matches the region in META - for (RegionInfo r:regions) { + for (RegionInfo r : regions) { HbckInfo hbi = hbck.getOrCreateInfo(r.getEncodedName()); hbi.addServer(r, rsinfo); } - } catch (IOException e) { // unable to connect to the region server. - errors.reportError(ERROR_CODE.RS_CONNECT_FAILURE, "RegionServer: " + rsinfo.getServerName() + - " Unable to fetch region information. " + e); + } catch (IOException e) { // unable to connect to the region server. + errors.reportError(ERROR_CODE.RS_CONNECT_FAILURE, + "RegionServer: " + rsinfo.getServerName() + " Unable to fetch region information. 
" + e); throw e; } return null; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsckRepair.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsckRepair.java index 121d06c7bf..d4a28c35ea 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsckRepair.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsckRepair.java @@ -32,7 +32,6 @@ import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.ZooKeeperConnectionException; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.AsyncClusterConnection; -import org.apache.hadoop.hbase.client.ClusterConnection; import org.apache.hadoop.hbase.client.ClusterConnectionFactory; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/MultiHConnection.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/MultiHConnection.java index 58057932bd..d095fa361d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/MultiHConnection.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/MultiHConnection.java @@ -26,19 +26,17 @@ import java.util.concurrent.LinkedBlockingQueue; import java.util.concurrent.ThreadLocalRandom; import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.TimeUnit; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.TableName; -import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import org.apache.hadoop.hbase.client.ClusterConnection; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.HTable; import org.apache.hadoop.hbase.client.Row; import org.apache.hadoop.hbase.client.coprocessor.Batch; +import 
org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * Provides ability to create multiple Connection instances and allows to process a batch of @@ -112,14 +110,11 @@ public class MultiHConnection { * @param callback to run when results are in * @throws IOException If IO failure occurs */ - @SuppressWarnings("deprecation") public void processBatchCallback(List actions, TableName tableName, Object[] results, Batch.Callback callback) throws IOException { // Currently used by RegionStateStore - ClusterConnection conn = - (ClusterConnection) connections[ThreadLocalRandom.current().nextInt(noOfConnections)]; - - HTable.doBatchWithCallback(actions, results, callback, conn, batchPool, tableName); + HTable.doBatchWithCallback(actions, results, callback, + connections[ThreadLocalRandom.current().nextInt(noOfConnections)], batchPool, tableName); } // Copied from ConnectionImplementation.getBatchPool() diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java index a779d36e50..540e7f3a74 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java @@ -39,7 +39,6 @@ import org.apache.hadoop.hbase.ClusterMetrics; import org.apache.hadoop.hbase.ClusterMetrics.Option; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.MetaTableAccessor; import org.apache.hadoop.hbase.ServerName; @@ -50,6 +49,7 @@ import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.NoServerForRegionException; +import 
org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.client.RegionLocator; import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.client.TableDescriptor; @@ -550,7 +550,7 @@ public class RegionSplitter { } // make sure this region wasn't already split - byte[] sk = regionLoc.getRegionInfo().getStartKey(); + byte[] sk = regionLoc.getRegion().getStartKey(); if (sk.length != 0) { if (Bytes.equals(split, sk)) { LOG.debug("Region already split on " @@ -712,7 +712,6 @@ public class RegionSplitter { htd = table.getDescriptor(); } try (RegionLocator regionLocator = connection.getRegionLocator(tableName)) { - // for every region that hasn't been verified as a finished split for (Pair region : regionList) { byte[] start = region.getFirst(); @@ -720,7 +719,7 @@ public class RegionSplitter { // see if the new split daughter region has come online try { - HRegionInfo dri = regionLocator.getRegionLocation(split).getRegionInfo(); + RegionInfo dri = regionLocator.getRegionLocation(split, true).getRegion(); if (dri.isOffline() || !Bytes.equals(dri.getStartKey(), split)) { logicalSplitting.add(region); continue; @@ -735,10 +734,10 @@ public class RegionSplitter { try { // when a daughter region is opened, a compaction is triggered // wait until compaction completes for both daughter regions - LinkedList check = Lists.newLinkedList(); - check.add(regionLocator.getRegionLocation(start).getRegionInfo()); - check.add(regionLocator.getRegionLocation(split).getRegionInfo()); - for (HRegionInfo hri : check.toArray(new HRegionInfo[check.size()])) { + LinkedList check = Lists.newLinkedList(); + check.add(regionLocator.getRegionLocation(start).getRegion()); + check.add(regionLocator.getRegionLocation(split).getRegion()); + for (RegionInfo hri : check.toArray(new RegionInfo[check.size()])) { byte[] sk = hri.getStartKey(); if (sk.length == 0) sk = splitAlgo.firstRow(); diff --git a/hbase-server/src/main/resources/hbase-webapps/master/table.jsp 
b/hbase-server/src/main/resources/hbase-webapps/master/table.jsp index d3eaa77f44..4e2688671b 100644 --- a/hbase-server/src/main/resources/hbase-webapps/master/table.jsp +++ b/hbase-server/src/main/resources/hbase-webapps/master/table.jsp @@ -261,7 +261,7 @@ if ( fqtn != null ) { stateMap.put(regionInfo.getEncodedName(), regionState); } } - RegionLocator r = master.getClusterConnection().getRegionLocator(table.getName()); + RegionLocator r = master.getConnection().getRegionLocator(table.getName()); try { %>

Table Attributes

diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseCluster.java index d760a7dd8a..8020553e4f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseCluster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseCluster.java @@ -19,7 +19,6 @@ package org.apache.hadoop.hbase; import java.io.Closeable; import java.io.IOException; - import org.apache.hadoop.conf.Configurable; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.client.RegionInfoBuilder; @@ -28,10 +27,6 @@ import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService; -import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ClientService; -import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MasterService; - /** * This class defines methods that can help with managing HBase clusters * from unit tests and system tests. There are 3 types of cluster deployments: @@ -97,24 +92,6 @@ public abstract class HBaseCluster implements Closeable, Configurable { return initialClusterStatus; } - /** - * Returns an {@link MasterService.BlockingInterface} to the active master - */ - public abstract MasterService.BlockingInterface getMasterAdminService() - throws IOException; - - /** - * Returns an AdminProtocol interface to the regionserver - */ - public abstract AdminService.BlockingInterface getAdminProtocol(ServerName serverName) - throws IOException; - - /** - * Returns a ClientProtocol interface to the regionserver - */ - public abstract ClientService.BlockingInterface getClientProtocol(ServerName serverName) - throws IOException; - /** * Starts a new region server on the given hostname or if this is a mini/local cluster, * starts a region server locally. 
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/MiniHBaseCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/MiniHBaseCluster.java index 473eb74abf..92cfddfad9 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/MiniHBaseCluster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/MiniHBaseCluster.java @@ -24,7 +24,6 @@ import java.util.ArrayList; import java.util.HashSet; import java.util.List; import java.util.Set; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.hbase.master.HMaster; @@ -42,9 +41,6 @@ import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService; -import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ClientService; -import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MasterService; import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerStartupResponse; /** @@ -517,15 +513,6 @@ public class MiniHBaseCluster extends HBaseCluster { return t; } - /** - * Returns the current active master, if available. - * @return the active HMaster, null if none is active. - */ - @Override - public MasterService.BlockingInterface getMasterAdminService() { - return this.hbaseCluster.getActiveMaster().getMasterRpcServices(); - } - /** * Returns the current active master, if available. * @return the active HMaster, null if none is active. 
@@ -921,15 +908,4 @@ public class MiniHBaseCluster extends HBaseCluster { } return -1; } - - @Override - public AdminService.BlockingInterface getAdminProtocol(ServerName serverName) throws IOException { - return getRegionServer(getRegionServerIndex(serverName)).getRSRpcServices(); - } - - @Override - public ClientService.BlockingInterface getClientProtocol(ServerName serverName) - throws IOException { - return getRegionServer(getRegionServerIndex(serverName)).getRSRpcServices(); - } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/MockRegionServerServices.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/MockRegionServerServices.java index 5205960709..f3e8d12c96 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/MockRegionServerServices.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/MockRegionServerServices.java @@ -32,7 +32,6 @@ import java.util.concurrent.atomic.AtomicBoolean; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.hbase.client.AsyncClusterConnection; -import org.apache.hadoop.hbase.client.ClusterConnection; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.client.locking.EntityLock; @@ -160,7 +159,7 @@ public class MockRegionServerServices implements RegionServerServices { } @Override - public ClusterConnection getConnection() { + public Connection getConnection() { return null; } @@ -264,7 +263,6 @@ public class MockRegionServerServices implements RegionServerServices { @Override public ServerNonceManager getNonceManager() { - // TODO Auto-generated method stub return null; } @@ -275,7 +273,6 @@ public class MockRegionServerServices implements RegionServerServices { @Override public boolean registerService(Service service) { - // TODO Auto-generated method stub return false; } @@ -289,11 +286,6 @@ public class MockRegionServerServices implements 
RegionServerServices { return 0; } - @Override - public ClusterConnection getClusterConnection() { - return null; - } - @Override public ThroughputController getFlushThroughputController() { return null; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/example/TestZooKeeperTableArchiveClient.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/example/TestZooKeeperTableArchiveClient.java index 16f3930e3d..618fe74b93 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/example/TestZooKeeperTableArchiveClient.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/example/TestZooKeeperTableArchiveClient.java @@ -35,9 +35,9 @@ import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.Stoppable; -import org.apache.hadoop.hbase.client.ClusterConnection; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; +import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.master.cleaner.BaseHFileCleanerDelegate; @@ -86,7 +86,7 @@ public class TestZooKeeperTableArchiveClient { private static final byte[] TABLE_NAME = Bytes.toBytes(STRING_TABLE_NAME); private static ZKTableArchiveClient archivingClient; private final List toCleanup = new ArrayList<>(); - private static ClusterConnection CONNECTION; + private static Connection CONNECTION; private static RegionServerServices rss; /** @@ -96,7 +96,7 @@ public class TestZooKeeperTableArchiveClient { public static void setupCluster() throws Exception { setupConf(UTIL.getConfiguration()); UTIL.startMiniZKCluster(); - CONNECTION = (ClusterConnection)ConnectionFactory.createConnection(UTIL.getConfiguration()); + CONNECTION = 
ConnectionFactory.createConnection(UTIL.getConfiguration()); archivingClient = new ZKTableArchiveClient(UTIL.getConfiguration(), CONNECTION); // make hfile archiving node so we can archive files ZKWatcher watcher = UTIL.getZooKeeperWatcher(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/HConnectionTestingUtility.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/HConnectionTestingUtility.java index 2a5a39570c..fe86ddec38 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/HConnectionTestingUtility.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/HConnectionTestingUtility.java @@ -57,11 +57,11 @@ public class HConnectionTestingUtility { * @throws ZooKeeperConnectionException */ public static ConnectionImplementation getMockedConnection(final Configuration conf) - throws ZooKeeperConnectionException { + throws ZooKeeperConnectionException { ConnectionImplementation connection = Mockito.mock(ConnectionImplementation.class); Mockito.when(connection.getConfiguration()).thenReturn(conf); - Mockito.when(connection.getRpcControllerFactory()).thenReturn( - Mockito.mock(RpcControllerFactory.class)); + Mockito.when(connection.getRpcControllerFactory()) + .thenReturn(Mockito.mock(RpcControllerFactory.class)); // we need a real retrying caller RpcRetryingCallerFactory callerFactory = new RpcRetryingCallerFactory(conf); Mockito.when(connection.getRpcRetryingCallerFactory()).thenReturn(callerFactory); @@ -81,11 +81,10 @@ public class HConnectionTestingUtility { * the mocked connection * @return Mock up a connection that returns a {@link Configuration} when * {@link ConnectionImplementation#getConfiguration()} is called, a 'location' when - * {@link ConnectionImplementation#getRegionLocation(TableName,byte[], boolean)} - * is called, and that returns the passed - * {@link AdminProtos.AdminService.BlockingInterface} instance when - * {@link ConnectionImplementation#getAdmin(ServerName)} is called, 
returns the passed - * {@link ClientProtos.ClientService.BlockingInterface} instance when + * {@link ConnectionImplementation#getRegionLocation(TableName, byte[], boolean)} is + * called, and that returns the passed {@link AdminProtos.AdminService.BlockingInterface} + * instance when {@link ConnectionImplementation#getAdmin(ServerName)} is called, returns + * the passed {@link ClientProtos.ClientService.BlockingInterface} instance when * {@link ConnectionImplementation#getClient(ServerName)} is called (Be sure to call * {@link Connection#close()} when done with this mocked Connection. */ @@ -138,9 +137,9 @@ * calling {@link Connection#close()} else it will stick around; this is probably not what you * want. * @param conf configuration * @return ConnectionImplementation object for conf * @throws ZooKeeperConnectionException [Dead link]: See also * {http://mockito.googlecode.com/svn/branches/1.6/javadoc/org/mockito/Mockito.html#spy(T)}
*/ public static ConnectionImplementation getSpiedConnection(final Configuration conf) throws IOException { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin1.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin1.java index dfc3a2cd71..4069faf2e7 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin1.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin1.java @@ -632,8 +632,8 @@ public class TestAdmin1 { assertFalse(this.admin.tableExists(tableName)); } - protected void verifyRoundRobinDistribution(ClusterConnection c, RegionLocator regionLocator, int - expectedRegions) throws IOException { + private void verifyRoundRobinDistribution(ConnectionImplementation c, + RegionLocator regionLocator, int expectedRegions) throws IOException { int numRS = c.getCurrentNrHRS(); List regions = regionLocator.getAllRegionLocations(); Map> server2Regions = new HashMap<>(); @@ -652,13 +652,14 @@ // which contains less regions by intention.
numRS--; } - float average = (float) expectedRegions/numRS; - int min = (int)Math.floor(average); - int max = (int)Math.ceil(average); + float average = (float) expectedRegions / numRS; + int min = (int) Math.floor(average); + int max = (int) Math.ceil(average); for (List regionList : server2Regions.values()) { - assertTrue("numRS=" + numRS + ", min=" + min + ", max=" + max + - ", size=" + regionList.size() + ", tablesOnMaster=" + tablesOnMaster, - regionList.size() == min || regionList.size() == max); + assertTrue( + "numRS=" + numRS + ", min=" + min + ", max=" + max + ", size=" + regionList.size() + + ", tablesOnMaster=" + tablesOnMaster, + regionList.size() == min || regionList.size() == max); } } @@ -739,7 +740,7 @@ public class TestAdmin1 { List regions; Iterator hris; RegionInfo hri; - ClusterConnection conn = (ClusterConnection) TEST_UTIL.getConnection(); + ConnectionImplementation conn = (ConnectionImplementation) TEST_UTIL.getConnection(); try (RegionLocator l = TEST_UTIL.getConnection().getRegionLocator(tableName)) { regions = l.getAllRegionLocations(); @@ -1257,13 +1258,9 @@ public class TestAdmin1 { byte[][] nameofRegionsToMerge = new byte[2][]; nameofRegionsToMerge[0] = regions.get(1).getFirst().getEncodedNameAsBytes(); nameofRegionsToMerge[1] = regions.get(2).getFirst().getEncodedNameAsBytes(); - MergeTableRegionsRequest request = RequestConverter - .buildMergeTableRegionsRequest( - nameofRegionsToMerge, - true, - HConstants.NO_NONCE, - HConstants.NO_NONCE); - ((ClusterConnection) TEST_UTIL.getAdmin().getConnection()).getMaster() + MergeTableRegionsRequest request = RequestConverter.buildMergeTableRegionsRequest( + nameofRegionsToMerge, true, HConstants.NO_NONCE, HConstants.NO_NONCE); + ((ConnectionImplementation) TEST_UTIL.getAdmin().getConnection()).getMaster() .mergeTableRegions(null, request); } catch (org.apache.hbase.thirdparty.com.google.protobuf.ServiceException m) { Throwable t = m.getCause(); diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin2.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin2.java index 2c14eaf48c..19dfd30fee 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin2.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin2.java @@ -783,7 +783,7 @@ public class TestAdmin2 { Assert.assertNotNull(store); Assert.assertEquals(expectedStoreFilesSize, store.getSize()); - ClusterConnection conn = ((ClusterConnection) admin.getConnection()); + ConnectionImplementation conn = (ConnectionImplementation) admin.getConnection(); HBaseRpcController controller = conn.getRpcControllerFactory().newController(); for (int i = 0; i < 10; i++) { RegionInfo ri = diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableAdminApi.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableAdminApi.java index 1750926785..1c8b4cd2f8 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableAdminApi.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableAdminApi.java @@ -274,7 +274,7 @@ public class TestAsyncTableAdminApi extends TestAsyncAdminBase { private void verifyRoundRobinDistribution(List regions, int expectedRegions) throws IOException { - int numRS = ((ClusterConnection) TEST_UTIL.getConnection()).getCurrentNrHRS(); + int numRS = ((ConnectionImplementation) TEST_UTIL.getConnection()).getCurrentNrHRS(); Map> server2Regions = new HashMap<>(); regions.stream().forEach((loc) -> { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCISleep.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCISleep.java index cd27a30c26..fd0eb7b87a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCISleep.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCISleep.java @@ -94,9 +94,9 @@ public class 
TestCISleep extends AbstractTestCITimeout { final TableName tableName = TableName.valueOf(name.getMethodName()); TEST_UTIL.createTable(tableName, FAM_NAM); ClientServiceCallable regionServerCallable = - new ClientServiceCallable(TEST_UTIL.getConnection(), tableName, FAM_NAM, - new RpcControllerFactory(TEST_UTIL.getConfiguration()).newController(), - HConstants.PRIORITY_UNSET) { + new ClientServiceCallable((ConnectionImplementation) TEST_UTIL.getConnection(), + tableName, FAM_NAM, new RpcControllerFactory(TEST_UTIL.getConfiguration()).newController(), + HConstants.PRIORITY_UNSET) { @Override protected Object rpcCall() throws Exception { return null; @@ -126,9 +126,9 @@ public class TestCISleep extends AbstractTestCITimeout { assertTrue(pauseTime <= (baseTime * HConstants.RETRY_BACKOFF[i] * 1.01f)); } - try ( - MasterCallable masterCallable = new MasterCallable(TEST_UTIL.getConnection(), - new RpcControllerFactory(TEST_UTIL.getConfiguration())) { + try (MasterCallable masterCallable = + new MasterCallable((ConnectionImplementation) TEST_UTIL.getConnection(), + new RpcControllerFactory(TEST_UTIL.getConfiguration())) { @Override protected Object rpcCall() throws Exception { return null; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientPushback.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientPushback.java index d6f32f5483..ae217cd820 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientPushback.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientPushback.java @@ -94,7 +94,8 @@ public class TestClientPushback { public void testClientTracksServerPushback() throws Exception{ Configuration conf = UTIL.getConfiguration(); - ClusterConnection conn = (ClusterConnection) ConnectionFactory.createConnection(conf); + ConnectionImplementation conn = + (ConnectionImplementation) ConnectionFactory.createConnection(conf); BufferedMutatorImpl mutator = (BufferedMutatorImpl) 
conn.getBufferedMutator(tableName); HRegionServer rs = UTIL.getHBaseCluster().getRegionServer(0); @@ -182,7 +183,8 @@ public class TestClientPushback { @Test public void testMutateRowStats() throws IOException { Configuration conf = UTIL.getConfiguration(); - ClusterConnection conn = (ClusterConnection) ConnectionFactory.createConnection(conf); + ConnectionImplementation conn = + (ConnectionImplementation) ConnectionFactory.createConnection(conf); Table table = conn.getTable(tableName); HRegionServer rs = UTIL.getHBaseCluster().getRegionServer(0); Region region = rs.getRegions(tableName).get(0); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestConnectionImplementation.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestConnectionImplementation.java index 4d9f39bebe..4ac7b53b87 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestConnectionImplementation.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestConnectionImplementation.java @@ -837,7 +837,7 @@ public class TestConnectionImplementation { * from ZK by the client. */ @Test - public void testConnection() throws Exception{ + public void testConnection() throws Exception { // We create an empty config and add the ZK address. 
Configuration c = new Configuration(); c.set(HConstants.ZOOKEEPER_QUORUM, @@ -846,7 +846,8 @@ public class TestConnectionImplementation { TEST_UTIL.getConfiguration().get(HConstants.ZOOKEEPER_CLIENT_PORT)); // This should be enough to connect - ClusterConnection conn = (ClusterConnection) ConnectionFactory.createConnection(c); + ConnectionImplementation conn = + (ConnectionImplementation) ConnectionFactory.createConnection(c); assertTrue(conn.isMasterRunning()); conn.close(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide3.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide3.java index 1315d4afbf..ff151fe6a2 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide3.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide3.java @@ -159,7 +159,7 @@ public class TestFromClientSide3 { // connection needed for poll-wait HRegionLocation loc = locator.getRegionLocation(row, true); AdminProtos.AdminService.BlockingInterface server = - ((ClusterConnection) admin.getConnection()).getAdmin(loc.getServerName()); + ((ConnectionImplementation) admin.getConnection()).getAdmin(loc.getServerName()); byte[] regName = loc.getRegionInfo().getRegionName(); for (int i = 0; i < nFlushes; i++) { @@ -289,7 +289,7 @@ public class TestFromClientSide3 { final TableName tableName = TableName.valueOf(name.getMethodName()); Table hTable = TEST_UTIL.createTable(tableName, FAMILY, 10); Admin admin = TEST_UTIL.getAdmin(); - ClusterConnection connection = (ClusterConnection) TEST_UTIL.getConnection(); + ConnectionImplementation connection = (ConnectionImplementation) TEST_UTIL.getConnection(); // Create 3 store files. 
byte[] row = Bytes.toBytes(random.nextInt()); @@ -681,7 +681,7 @@ public class TestFromClientSide3 { @Test public void testConnectionDefaultUsesCodec() throws Exception { - ClusterConnection con = (ClusterConnection) TEST_UTIL.getConnection(); + ConnectionImplementation con = (ConnectionImplementation) TEST_UTIL.getConnection(); assertTrue(con.hasCellBlockSupport()); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaTableAccessorNoCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaTableAccessorNoCluster.java index 53f5064e43..108ab7f3f4 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaTableAccessorNoCluster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaTableAccessorNoCluster.java @@ -107,15 +107,13 @@ public class TestMetaTableAccessorNoCluster { Result r = Result.create(kvs); assertNull(MetaTableAccessor.getRegionInfo(r)); - byte [] f = HConstants.CATALOG_FAMILY; + byte[] f = HConstants.CATALOG_FAMILY; // Make a key value that doesn't have the expected qualifier. - kvs.add(new KeyValue(HConstants.EMPTY_BYTE_ARRAY, f, - HConstants.SERVER_QUALIFIER, f)); + kvs.add(new KeyValue(HConstants.EMPTY_BYTE_ARRAY, f, HConstants.SERVER_QUALIFIER, f)); r = Result.create(kvs); assertNull(MetaTableAccessor.getRegionInfo(r)); // Make a key that does not have a regioninfo value. 
- kvs.add(new KeyValue(HConstants.EMPTY_BYTE_ARRAY, f, - HConstants.REGIONINFO_QUALIFIER, f)); + kvs.add(new KeyValue(HConstants.EMPTY_BYTE_ARRAY, f, HConstants.REGIONINFO_QUALIFIER, f)); RegionInfo hri = MetaTableAccessor.getRegionInfo(Result.create(kvs)); assertTrue(hri == null); // OK, give it what it expects @@ -161,7 +159,7 @@ public class TestMetaTableAccessorNoCluster { RegionInfo.toByteArray(RegionInfoBuilder.FIRST_META_REGIONINFO))); kvs.add(new KeyValue(rowToVerify, HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER, - Bytes.toBytes(sn.getHostAndPort()))); + Bytes.toBytes(sn.getAddress().toString()))); kvs.add(new KeyValue(rowToVerify, HConstants.CATALOG_FAMILY, HConstants.STARTCODE_QUALIFIER, Bytes.toBytes(sn.getStartcode()))); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaWithReplicas.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaWithReplicas.java index df3088ceba..0415102663 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaWithReplicas.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaWithReplicas.java @@ -390,11 +390,9 @@ public class TestMetaWithReplicas { @Ignore @Test // Disabled. Relies on FSCK which needs work for AMv2. 
public void testHBaseFsckWithFewerMetaReplicas() throws Exception { - ClusterConnection c = (ClusterConnection)ConnectionFactory.createConnection( - TEST_UTIL.getConfiguration()); RegionLocations rl = new RegionLocations(getMetaRegionLocations()); - HBaseFsckRepair.closeRegionSilentlyAndWait(c, - rl.getRegionLocation(1).getServerName(), rl.getRegionLocation(1).getRegionInfo()); + HBaseFsckRepair.closeRegionSilentlyAndWait(TEST_UTIL.getConnection(), + rl.getRegionLocation(1).getServerName(), rl.getRegionLocation(1).getRegionInfo()); // check that problem exists HBaseFsck hbck = doFsck(TEST_UTIL.getConfiguration(), false); assertErrors(hbck, new ERROR_CODE[]{ERROR_CODE.UNKNOWN,ERROR_CODE.NO_META_REGION}); @@ -407,11 +405,9 @@ public class TestMetaWithReplicas { @Ignore @Test // The close silently doesn't work any more since HBASE-14614. Fix. public void testHBaseFsckWithFewerMetaReplicaZnodes() throws Exception { - ClusterConnection c = (ClusterConnection)ConnectionFactory.createConnection( - TEST_UTIL.getConfiguration()); RegionLocations rl = new RegionLocations(getMetaRegionLocations()); - HBaseFsckRepair.closeRegionSilentlyAndWait(c, - rl.getRegionLocation(2).getServerName(), rl.getRegionLocation(2).getRegionInfo()); + HBaseFsckRepair.closeRegionSilentlyAndWait(TEST_UTIL.getConnection(), + rl.getRegionLocation(2).getServerName(), rl.getRegionLocation(2).getRegionInfo()); ZKWatcher zkw = TEST_UTIL.getZooKeeperWatcher(); ZKUtil.deleteNode(zkw, zkw.getZNodePaths().getZNodeForReplica(2)); // check that problem exists @@ -485,20 +481,17 @@ public class TestMetaWithReplicas { public void testShutdownOfReplicaHolder() throws Exception { // checks that the when the server holding meta replica is shut down, the meta replica // can be recovered - try (ClusterConnection conn = (ClusterConnection) - ConnectionFactory.createConnection(TEST_UTIL.getConfiguration())) { - HRegionLocation hrl = getMetaRegionLocations().get(1); - ServerName oldServer = hrl.getServerName(); - 
TEST_UTIL.getHBaseClusterInterface().killRegionServer(oldServer); - int i = 0; - do { - LOG.debug("Waiting for the replica " + hrl.getRegionInfo() + " to come up"); - Thread.sleep(10000); //wait for the detection/recovery - hrl = getMetaRegionLocations().get(1); - i++; - } while ((hrl == null || hrl.getServerName().equals(oldServer)) && i < 3); - assertTrue(i != 3); - } + HRegionLocation hrl = getMetaRegionLocations().get(1); + ServerName oldServer = hrl.getServerName(); + TEST_UTIL.getHBaseClusterInterface().killRegionServer(oldServer); + int i = 0; + do { + LOG.debug("Waiting for the replica " + hrl.getRegionInfo() + " to come up"); + Thread.sleep(10000); // wait for the detection/recovery + hrl = getMetaRegionLocations().get(1); + i++; + } while ((hrl == null || hrl.getServerName().equals(oldServer)) && i < 3); + assertTrue(i != 3); } @Ignore @Test // Disabled because fsck and this needs work for AMv2 diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMultiParallel.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMultiParallel.java index 7d36e99583..50c9bd8d9e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMultiParallel.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMultiParallel.java @@ -553,7 +553,7 @@ public class TestMultiParallel { }; NonceGenerator oldCnm = - ConnectionUtils.injectNonceGeneratorForTesting((ClusterConnection)connection, cnm); + ConnectionUtils.injectNonceGeneratorForTesting((ConnectionImplementation) connection, cnm); // First test sequential requests. 
try { @@ -615,7 +615,8 @@ public class TestMultiParallel { validateResult(result, QUALIFIER, Bytes.toBytes((numRequests / 2) + 1L)); table.close(); } finally { - ConnectionImplementation.injectNonceGeneratorForTesting((ClusterConnection) connection, oldCnm); + ConnectionImplementation.injectNonceGeneratorForTesting((ConnectionImplementation) connection, + oldCnm); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestReplicasClient.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestReplicasClient.java index c26c969478..5f489a2028 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestReplicasClient.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestReplicasClient.java @@ -571,7 +571,7 @@ public class TestReplicasClient { LOG.info("get works and is not stale done"); //reset - ClusterConnection connection = (ClusterConnection) HTU.getConnection(); + ConnectionImplementation connection = (ConnectionImplementation) HTU.getConnection(); Counter hedgedReadOps = connection.getConnectionMetrics().hedgedReadOps; Counter hedgedReadWin = connection.getConnectionMetrics().hedgedReadWin; hedgedReadOps.dec(hedgedReadOps.getCount()); @@ -638,7 +638,7 @@ public class TestReplicasClient { Thread.sleep(1000 + REFRESH_PERIOD * 2); - AsyncProcess ap = ((ClusterConnection) HTU.getConnection()).getAsyncProcess(); + AsyncProcess ap = ((ConnectionImplementation) HTU.getConnection()).getAsyncProcess(); // Make primary slowdown SlowMeCopro.getPrimaryCdl().set(new CountDownLatch(1)); @@ -654,16 +654,14 @@ public class TestReplicasClient { gets.add(g); Object[] results = new Object[2]; - int operationTimeout = ((ClusterConnection) HTU.getConnection()).getConnectionConfiguration().getOperationTimeout(); - int readTimeout = ((ClusterConnection) HTU.getConnection()).getConnectionConfiguration().getReadRpcTimeout(); - AsyncProcessTask task = AsyncProcessTask.newBuilder() - 
.setPool(HTable.getDefaultExecutor(HTU.getConfiguration())) - .setTableName(table.getName()) - .setRowAccess(gets) - .setResults(results) - .setOperationTimeout(operationTimeout) - .setRpcTimeout(readTimeout) - .build(); + int operationTimeout = ((ConnectionImplementation) HTU.getConnection()) + .getConnectionConfiguration().getOperationTimeout(); + int readTimeout = ((ConnectionImplementation) HTU.getConnection()) + .getConnectionConfiguration().getReadRpcTimeout(); + AsyncProcessTask task = + AsyncProcessTask.newBuilder().setPool(HTable.getDefaultExecutor(HTU.getConfiguration())) + .setTableName(table.getName()).setRowAccess(gets).setResults(results) + .setOperationTimeout(operationTimeout).setRpcTimeout(readTimeout).build(); AsyncRequestFuture reqs = ap.submit(task); reqs.waitUntilDone(); // verify we got the right results back diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSeparateClientZKCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSeparateClientZKCluster.java index 93c6b4d2d2..b9c16f3d3e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSeparateClientZKCluster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSeparateClientZKCluster.java @@ -206,7 +206,7 @@ public class TestSeparateClientZKCluster { // create table Connection conn = TEST_UTIL.getConnection(); Admin admin = conn.getAdmin(); - HTable table = (HTable) conn.getTable(tn); + Table table = conn.getTable(tn); try { ColumnFamilyDescriptorBuilder cfDescBuilder = ColumnFamilyDescriptorBuilder.newBuilder(family); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestShortCircuitConnection.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestShortCircuitConnection.java index beaa59be3e..f74338865c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestShortCircuitConnection.java +++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestShortCircuitConnection.java @@ -75,7 +75,7 @@ public class TestShortCircuitConnection { htd.addFamily(hcd); UTIL.createTable(htd, null); HRegionServer regionServer = UTIL.getRSForFirstRegionInTable(tableName); - ClusterConnection connection = regionServer.getClusterConnection(); + ConnectionImplementation connection = (ConnectionImplementation) regionServer.getConnection(); Table tableIf = connection.getTable(tableName); assertTrue(tableIf instanceof HTable); HTable table = (HTable) tableIf; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java index 3ebad66dc0..c7cd1c13ba 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java @@ -30,7 +30,6 @@ import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableDescriptors; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.AsyncClusterConnection; -import org.apache.hadoop.hbase.client.ClusterConnection; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.MasterSwitchType; @@ -161,7 +160,7 @@ public class MockNoopMasterServices implements MasterServices { } @Override - public ClusterConnection getConnection() { + public Connection getConnection() { return null; } @@ -352,11 +351,6 @@ public class MockNoopMasterServices implements MasterServices { return null; } - @Override - public ClusterConnection getClusterConnection() { - return null; - } - @Override public LoadBalancer getLoadBalancer() { return null; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java index 73d53c7d95..7255d2f7da 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java @@ -42,7 +42,6 @@ import org.apache.hadoop.hbase.TableDescriptors; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.ZooKeeperConnectionException; import org.apache.hadoop.hbase.client.AsyncClusterConnection; -import org.apache.hadoop.hbase.client.ClusterConnection; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.client.RegionInfoBuilder; @@ -303,7 +302,7 @@ class MockRegionServer implements AdminProtos.AdminService.BlockingInterface, } @Override - public ClusterConnection getConnection() { + public Connection getConnection() { return null; } @@ -618,11 +617,6 @@ class MockRegionServer implements AdminProtos.AdminService.BlockingInterface, return 0; } - @Override - public ClusterConnection getClusterConnection() { - return null; - } - @Override public ThroughputController getFlushThroughputController() { return null; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestActiveMasterManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestActiveMasterManager.java index 77667a7791..7f9605e696 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestActiveMasterManager.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestActiveMasterManager.java @@ -32,7 +32,6 @@ import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.client.AsyncClusterConnection; -import org.apache.hadoop.hbase.client.ClusterConnection; import org.apache.hadoop.hbase.client.Connection; import 
org.apache.hadoop.hbase.monitoring.MonitoredTask; import org.apache.hadoop.hbase.testclassification.MasterTests; @@ -313,7 +312,7 @@ public class TestActiveMasterManager { } @Override - public ClusterConnection getConnection() { + public Connection getConnection() { return null; } @@ -330,12 +329,6 @@ public class TestActiveMasterManager { return null; } - @Override - public ClusterConnection getClusterConnection() { - // TODO Auto-generated method stub - return null; - } - @Override public FileSystem getFileSystem() { return null; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestClockSkewDetection.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestClockSkewDetection.java index a0aae32095..0deea15540 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestClockSkewDetection.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestClockSkewDetection.java @@ -18,16 +18,12 @@ package org.apache.hadoop.hbase.master; import static org.junit.Assert.fail; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; import java.net.InetAddress; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.ClockOutOfSyncException; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseConfiguration; -import org.apache.hadoop.hbase.client.ClusterConnection; -import org.apache.hadoop.hbase.ipc.RpcControllerFactory; import org.apache.hadoop.hbase.testclassification.MasterTests; import org.apache.hadoop.hbase.testclassification.SmallTests; import org.junit.ClassRule; @@ -51,14 +47,7 @@ public class TestClockSkewDetection { @Test public void testClockSkewDetection() throws Exception { final Configuration conf = HBaseConfiguration.create(); - ServerManager sm = new ServerManager(new MockNoopMasterServices(conf) { - @Override - public ClusterConnection getClusterConnection() { - ClusterConnection conn = mock(ClusterConnection.class); - 
when(conn.getRpcControllerFactory()).thenReturn(mock(RpcControllerFactory.class)); - return conn; - } - }); + ServerManager sm = new ServerManager(new MockNoopMasterServices(conf)); LOG.debug("regionServerStartup 1"); InetAddress ia1 = InetAddress.getLocalHost(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterNoCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterNoCluster.java index e73ba75375..4f03108b5d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterNoCluster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterNoCluster.java @@ -35,7 +35,7 @@ import org.apache.hadoop.hbase.ServerMetricsBuilder; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.ZooKeeperConnectionException; -import org.apache.hadoop.hbase.client.ClusterConnection; +import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.HConnectionTestingUtility; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.replication.ReplicationException; @@ -184,7 +184,7 @@ public class TestMasterNoCluster { // Insert a mock for the connection, use TESTUTIL.getConfiguration rather than // the conf from the master; the conf will already have an ClusterConnection // associate so the below mocking of a connection will fail. 
- final ClusterConnection mockedConnection = HConnectionTestingUtility.getMockedConnectionAndDecorate( + final Connection mockedConnection = HConnectionTestingUtility.getMockedConnectionAndDecorate( TESTUTIL.getConfiguration(), rs0, rs0, rs0.getServerName(), HRegionInfo.FIRST_META_REGIONINFO); HMaster master = new HMaster(conf) { @@ -212,12 +212,7 @@ public class TestMasterNoCluster { } @Override - public ClusterConnection getConnection() { - return mockedConnection; - } - - @Override - public ClusterConnection getClusterConnection() { + public Connection getConnection() { return mockedConnection; } }; @@ -281,7 +276,7 @@ public class TestMasterNoCluster { } @Override - public ClusterConnection getConnection() { + public Connection getConnection() { // Insert a mock for the connection, use TESTUTIL.getConfiguration rather than // the conf from the master; the conf will already have a Connection // associate so the below mocking of a connection will fail. diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/MockMasterServices.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/MockMasterServices.java index 56467cc6d6..ef64c940c3 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/MockMasterServices.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/MockMasterServices.java @@ -31,8 +31,8 @@ import org.apache.hadoop.hbase.ServerMetricsBuilder; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableDescriptors; import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.client.ClusterConnection; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; +import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.HConnectionTestingUtility; import org.apache.hadoop.hbase.client.RegionInfoBuilder; import org.apache.hadoop.hbase.client.TableDescriptor; @@ -87,7 +87,7 @@ public class 
MockMasterServices extends MockNoopMasterServices { private MasterProcedureEnv procedureEnv; private ProcedureExecutor procedureExecutor; private ProcedureStore procedureStore; - private final ClusterConnection connection; + private final Connection connection; private final LoadBalancer balancer; private final ServerManager serverManager; @@ -284,7 +284,7 @@ public class MockMasterServices extends MockNoopMasterServices { } @Override - public ClusterConnection getConnection() { + public Connection getConnection() { return this.connection; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileCleaner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileCleaner.java index c5fad32aa5..3d6466dde0 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileCleaner.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileCleaner.java @@ -37,7 +37,6 @@ import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.client.AsyncClusterConnection; -import org.apache.hadoop.hbase.client.ClusterConnection; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.testclassification.MasterTests; import org.apache.hadoop.hbase.testclassification.MediumTests; @@ -228,7 +227,7 @@ public class TestHFileCleaner { } @Override - public ClusterConnection getConnection() { + public Connection getConnection() { return null; } @@ -260,12 +259,6 @@ public class TestHFileCleaner { return null; } - @Override - public ClusterConnection getClusterConnection() { - // TODO Auto-generated method stub - return null; - } - @Override public FileSystem getFileSystem() { return null; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileLinkCleaner.java 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileLinkCleaner.java index fd11ff8598..82c8684222 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileLinkCleaner.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileLinkCleaner.java @@ -35,7 +35,6 @@ import org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.AsyncClusterConnection; -import org.apache.hadoop.hbase.client.ClusterConnection; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.io.HFileLink; import org.apache.hadoop.hbase.testclassification.MasterTests; @@ -164,7 +163,7 @@ public class TestHFileLinkCleaner { } @Override - public ClusterConnection getConnection() { + public Connection getConnection() { return null; } @@ -193,13 +192,6 @@ public class TestHFileLinkCleaner { public ChoreService getChoreService() { return null; } - - @Override - public ClusterConnection getClusterConnection() { - // TODO Auto-generated method stub - return null; - } - @Override public FileSystem getFileSystem() { return null; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestLogsCleaner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestLogsCleaner.java index 6a5fe9c05b..7434b888f6 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestLogsCleaner.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestLogsCleaner.java @@ -51,7 +51,6 @@ import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.Waiter; import org.apache.hadoop.hbase.ZooKeeperConnectionException; import org.apache.hadoop.hbase.client.AsyncClusterConnection; -import org.apache.hadoop.hbase.client.ClusterConnection; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.master.HMaster; 
import org.apache.hadoop.hbase.replication.ReplicationException; @@ -363,7 +362,7 @@ public class TestLogsCleaner { } @Override - public ClusterConnection getConnection() { + public Connection getConnection() { return null; } @@ -393,11 +392,6 @@ public class TestLogsCleaner { return null; } - @Override - public ClusterConnection getClusterConnection() { - return null; - } - @Override public FileSystem getFileSystem() { return null; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestReplicationHFileCleaner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestReplicationHFileCleaner.java index 9791643ac0..b16d3778fa 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestReplicationHFileCleaner.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestReplicationHFileCleaner.java @@ -42,7 +42,6 @@ import org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.ZooKeeperConnectionException; import org.apache.hadoop.hbase.client.AsyncClusterConnection; -import org.apache.hadoop.hbase.client.ClusterConnection; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.master.HMaster; import org.apache.hadoop.hbase.replication.ReplicationException; @@ -252,7 +251,7 @@ public class TestReplicationHFileCleaner { } @Override - public ClusterConnection getConnection() { + public Connection getConnection() { return null; } @@ -284,12 +283,6 @@ public class TestReplicationHFileCleaner { return null; } - @Override - public ClusterConnection getClusterConnection() { - // TODO Auto-generated method stub - return null; - } - @Override public FileSystem getFileSystem() { return null; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureTestingUtility.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureTestingUtility.java index 
98c3978143..6457964cc1 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureTestingUtility.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureTestingUtility.java @@ -359,11 +359,11 @@ public class MasterProcedureTestingUtility { // Procedure Helpers // ========================================================================== public static long generateNonceGroup(final HMaster master) { - return master.getClusterConnection().getNonceGenerator().getNonceGroup(); + return master.getAsyncClusterConnection().getNonceGenerator().getNonceGroup(); } public static long generateNonce(final HMaster master) { - return master.getClusterConnection().getNonceGenerator().newNonce(); + return master.getAsyncClusterConnection().getNonceGenerator().newNonce(); } /** diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionServerBulkLoad.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionServerBulkLoad.java index fd02cf447b..af8cfb8df3 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionServerBulkLoad.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionServerBulkLoad.java @@ -39,7 +39,7 @@ import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HColumnDescriptor; -import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.KeyValueUtil; @@ -48,8 +48,8 @@ import org.apache.hadoop.hbase.MultithreadedTestUtil.TestContext; import org.apache.hadoop.hbase.StartMiniClusterOption; import org.apache.hadoop.hbase.TableExistsException; import org.apache.hadoop.hbase.TableName; -import 
org.apache.hadoop.hbase.client.ClientServiceCallable; -import org.apache.hadoop.hbase.client.ClusterConnection; +import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.client.RegionLocator; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.ResultScanner; import org.apache.hadoop.hbase.client.RpcRetryingCaller; @@ -66,7 +66,6 @@ import org.apache.hadoop.hbase.io.hfile.CacheConfig; import org.apache.hadoop.hbase.io.hfile.HFile; import org.apache.hadoop.hbase.io.hfile.HFileContext; import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder; -import org.apache.hadoop.hbase.ipc.RpcControllerFactory; import org.apache.hadoop.hbase.regionserver.compactions.CompactionLifeCycleTracker; import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest; import org.apache.hadoop.hbase.regionserver.wal.TestWALActionsListener; @@ -89,10 +88,6 @@ import org.slf4j.LoggerFactory; import org.apache.hbase.thirdparty.com.google.common.collect.Lists; -import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter; -import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos; -import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest; - /** * Tests bulk loading of HFiles and shows the atomicity or lack of atomicity of * the region server's bullkLoad functionality. @@ -214,29 +209,17 @@ public class TestHRegionServerBulkLoad { } // bulk load HFiles BulkLoadHFiles.create(UTIL.getConfiguration()).bulkLoad(tableName, family2Files); + final Connection conn = UTIL.getConnection(); // Periodically do compaction to reduce the number of open file handles. if (numBulkLoads.get() % 5 == 0) { RpcRetryingCallerFactory factory = new RpcRetryingCallerFactory(conf); RpcRetryingCaller caller = factory. newCaller(); // 5 * 50 = 250 open file handles! 
- ClientServiceCallable callable = - new ClientServiceCallable(UTIL.getConnection(), tableName, Bytes.toBytes("aaa"), - new RpcControllerFactory(UTIL.getConfiguration()).newController(), - HConstants.PRIORITY_UNSET) { - @Override - protected Void rpcCall() throws Exception { - LOG.debug( - "compacting " + getLocation() + " for row " + Bytes.toStringBinary(getRow())); - AdminProtos.AdminService.BlockingInterface server = - ((ClusterConnection) UTIL.getConnection()).getAdmin(getLocation().getServerName()); - CompactRegionRequest request = RequestConverter.buildCompactRegionRequest( - getLocation().getRegionInfo().getRegionName(), true, null); - server.compactRegion(null, request); - numCompactions.incrementAndGet(); - return null; - } - }; - caller.callWithRetries(callable, Integer.MAX_VALUE); + try (RegionLocator locator = conn.getRegionLocator(tableName)) { + HRegionLocation loc = locator.getRegionLocation(Bytes.toBytes("aaa"), true); + conn.getAdmin().compactRegion(loc.getRegion().getRegionName()); + numCompactions.incrementAndGet(); + } } } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionServerBulkLoadWithOldClient.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionServerBulkLoadWithOldClient.java index de01401d26..9b892395cc 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionServerBulkLoadWithOldClient.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionServerBulkLoadWithOldClient.java @@ -18,25 +18,24 @@ package org.apache.hadoop.hbase.regionserver; import java.io.IOException; -import java.util.ArrayList; +import java.util.Collections; import java.util.List; +import java.util.Map; +import java.util.TreeMap; import java.util.concurrent.atomic.AtomicLong; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HBaseClassTestRule; -import org.apache.hadoop.hbase.HConstants; 
+import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.MultithreadedTestUtil.RepeatingTestThread; import org.apache.hadoop.hbase.MultithreadedTestUtil.TestContext; import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.client.ClientServiceCallable; -import org.apache.hadoop.hbase.client.ClusterConnection; -import org.apache.hadoop.hbase.client.RpcRetryingCaller; -import org.apache.hadoop.hbase.client.RpcRetryingCallerFactory; -import org.apache.hadoop.hbase.ipc.RpcControllerFactory; +import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.client.RegionLocator; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.testclassification.RegionServerTests; +import org.apache.hadoop.hbase.tool.BulkLoadHFiles; import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.hbase.util.Pair; import org.junit.ClassRule; import org.junit.experimental.categories.Category; import org.junit.runner.RunWith; @@ -46,11 +45,6 @@ import org.slf4j.LoggerFactory; import org.apache.hbase.thirdparty.com.google.common.collect.Lists; -import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter; -import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos; -import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest; -import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.BulkLoadHFileRequest; - /** * Tests bulk loading of HFiles with old non-secure client for backward compatibility. Will be * removed when old non-secure client for backward compatibility is not supported. 
@@ -90,55 +84,26 @@ public class TestHRegionServerBulkLoadWithOldClient extends TestHRegionServerBul // create HFiles for different column families FileSystem fs = UTIL.getTestFileSystem(); byte[] val = Bytes.toBytes(String.format("%010d", iteration)); - final List> famPaths = new ArrayList<>(NUM_CFS); + Map> family2Files = new TreeMap<>(Bytes.BYTES_COMPARATOR); for (int i = 0; i < NUM_CFS; i++) { Path hfile = new Path(dir, family(i)); byte[] fam = Bytes.toBytes(family(i)); createHFile(fs, hfile, fam, QUAL, val, 1000); - famPaths.add(new Pair<>(fam, hfile.toString())); + family2Files.put(fam, Collections.singletonList(hfile)); } // bulk load HFiles - final ClusterConnection conn = (ClusterConnection) UTIL.getAdmin().getConnection(); - RpcControllerFactory rpcControllerFactory = new RpcControllerFactory(UTIL.getConfiguration()); - ClientServiceCallable callable = - new ClientServiceCallable(conn, tableName, - Bytes.toBytes("aaa"), rpcControllerFactory.newController(), HConstants.PRIORITY_UNSET) { - @Override - protected Void rpcCall() throws Exception { - LOG.info("Non-secure old client"); - byte[] regionName = getLocation().getRegionInfo().getRegionName(); - BulkLoadHFileRequest request = - RequestConverter - .buildBulkLoadHFileRequest(famPaths, regionName, true, null, null); - getStub().bulkLoadHFile(null, request); - return null; - } - }; - RpcRetryingCallerFactory factory = new RpcRetryingCallerFactory(conf); - RpcRetryingCaller caller = factory. newCaller(); - caller.callWithRetries(callable, Integer.MAX_VALUE); - + BulkLoadHFiles.create(UTIL.getConfiguration()).bulkLoad(tableName, family2Files); + final Connection conn = UTIL.getConnection(); // Periodically do compaction to reduce the number of open file handles. if (numBulkLoads.get() % 5 == 0) { // 5 * 50 = 250 open file handles! 
- callable = new ClientServiceCallable(conn, tableName, - Bytes.toBytes("aaa"), rpcControllerFactory.newController(), HConstants.PRIORITY_UNSET) { - @Override - protected Void rpcCall() throws Exception { - LOG.debug("compacting " + getLocation() + " for row " - + Bytes.toStringBinary(getRow())); - AdminProtos.AdminService.BlockingInterface server = - conn.getAdmin(getLocation().getServerName()); - CompactRegionRequest request = - RequestConverter.buildCompactRegionRequest( - getLocation().getRegionInfo().getRegionName(), true, null); - server.compactRegion(null, request); - numCompactions.incrementAndGet(); - return null; - } - }; - caller.callWithRetries(callable, Integer.MAX_VALUE); + // 5 * 50 = 250 open file handles! + try (RegionLocator locator = conn.getRegionLocator(tableName)) { + HRegionLocation loc = locator.getRegionLocation(Bytes.toBytes("aaa"), true); + conn.getAdmin().compactRegion(loc.getRegion().getRegionName()); + numCompactions.incrementAndGet(); + } } } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHeapMemoryManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHeapMemoryManager.java index 4a359e4bc3..7c6598d3e5 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHeapMemoryManager.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHeapMemoryManager.java @@ -37,7 +37,6 @@ import org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.Waiter; import org.apache.hadoop.hbase.client.AsyncClusterConnection; -import org.apache.hadoop.hbase.client.ClusterConnection; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.io.hfile.BlockCache; import org.apache.hadoop.hbase.io.hfile.BlockCacheKey; @@ -829,7 +828,7 @@ public class TestHeapMemoryManager { } @Override - public ClusterConnection getConnection() { + public Connection getConnection() { return null; } 
@@ -843,12 +842,6 @@ public class TestHeapMemoryManager { return null; } - @Override - public ClusterConnection getClusterConnection() { - // TODO Auto-generated method stub - return null; - } - @Override public FileSystem getFileSystem() { return null; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitLogWorker.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitLogWorker.java index 43da8460e6..b52bf1977e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitLogWorker.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitLogWorker.java @@ -43,7 +43,6 @@ import org.apache.hadoop.hbase.SplitLogCounters; import org.apache.hadoop.hbase.SplitLogTask; import org.apache.hadoop.hbase.Waiter; import org.apache.hadoop.hbase.client.AsyncClusterConnection; -import org.apache.hadoop.hbase.client.ClusterConnection; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.coordination.ZkCoordinatedStateManager; import org.apache.hadoop.hbase.executor.ExecutorService; @@ -132,7 +131,7 @@ public class TestSplitLogWorker { } @Override - public ClusterConnection getConnection() { + public Connection getConnection() { return null; } @@ -141,12 +140,6 @@ public class TestSplitLogWorker { return null; } - @Override - public ClusterConnection getClusterConnection() { - // TODO Auto-generated method stub - return null; - } - @Override public FileSystem getFileSystem() { return null; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWALLockup.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWALLockup.java index 9e9d1d6deb..82b28439bf 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWALLockup.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWALLockup.java @@ -37,7 +37,6 @@ import org.apache.hadoop.hbase.Server; import 
org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.AsyncClusterConnection; -import org.apache.hadoop.hbase.client.ClusterConnection; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.Durability; import org.apache.hadoop.hbase.client.Put; @@ -470,7 +469,7 @@ public class TestWALLockup { } @Override - public ClusterConnection getConnection() { + public Connection getConnection() { return null; } @@ -505,11 +504,6 @@ public class TestWALLockup { return null; } - @Override - public ClusterConnection getClusterConnection() { - return null; - } - @Override public FileSystem getFileSystem() { return null; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationTrackerZKImpl.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationTrackerZKImpl.java index 62ab2659d1..9d3283d3fd 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationTrackerZKImpl.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationTrackerZKImpl.java @@ -33,7 +33,6 @@ import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.client.AsyncClusterConnection; -import org.apache.hadoop.hbase.client.ClusterConnection; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.testclassification.ReplicationTests; @@ -209,7 +208,7 @@ public class TestReplicationTrackerZKImpl { } @Override - public ClusterConnection getConnection() { + public Connection getConnection() { return null; } @@ -244,12 +243,6 @@ public class TestReplicationTrackerZKImpl { return null; } - @Override - public ClusterConnection getClusterConnection() { - // TODO Auto-generated method stub - return null; - } - @Override public 
FileSystem getFileSystem() { return null; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java index 427f3197d6..3a1320cf66 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java @@ -58,7 +58,6 @@ import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.Waiter; import org.apache.hadoop.hbase.client.AsyncClusterConnection; -import org.apache.hadoop.hbase.client.ClusterConnection; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.RegionInfo; @@ -852,8 +851,9 @@ public abstract class TestReplicationSourceManager { public CoordinatedStateManager getCoordinatedStateManager() { return null; } + @Override - public ClusterConnection getConnection() { + public Connection getConnection() { return null; } @@ -887,12 +887,6 @@ public abstract class TestReplicationSourceManager { return null; } - @Override - public ClusterConnection getClusterConnection() { - // TODO Auto-generated method stub - return null; - } - @Override public FileSystem getFileSystem() { return null; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestTokenAuthentication.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestTokenAuthentication.java index 92c8e5422d..a2981fbb74 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestTokenAuthentication.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestTokenAuthentication.java @@ -45,7 +45,6 @@ import 
org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.client.AsyncClusterConnection; -import org.apache.hadoop.hbase.client.ClusterConnection; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.coprocessor.HasRegionServerServices; @@ -211,7 +210,7 @@ public class TestTokenAuthentication { } @Override - public ClusterConnection getConnection() { + public Connection getConnection() { return null; } @@ -354,12 +353,6 @@ public class TestTokenAuthentication { return null; } - @Override - public ClusterConnection getClusterConnection() { - // TODO Auto-generated method stub - return null; - } - @Override public Connection createConnection(Configuration conf) throws IOException { return null; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/BaseTestHBaseFsck.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/BaseTestHBaseFsck.java index d25ccef83e..9e29763a2d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/BaseTestHBaseFsck.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/BaseTestHBaseFsck.java @@ -46,7 +46,6 @@ import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Admin; -import org.apache.hadoop.hbase.client.ClusterConnection; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.Delete; @@ -74,9 +73,6 @@ import org.junit.rules.TestName; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; -import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos; - /** * This is the base class for HBaseFsck's ability to detect reasons for 
inconsistent tables. * @@ -98,7 +94,7 @@ public class BaseTestHBaseFsck { protected static RegionStates regionStates; protected static ExecutorService tableExecutorService; protected static ScheduledThreadPoolExecutor hbfsckExecutorService; - protected static ClusterConnection connection; + protected static Connection connection; protected static Admin admin; // for the instance, reset every test run @@ -298,9 +294,6 @@ public class BaseTestHBaseFsck { /** * delete table in preparation for next test - * - * @param tablename - * @throws IOException */ void cleanupTable(TableName tablename) throws Exception { if (tbl != null) { @@ -319,10 +312,8 @@ public class BaseTestHBaseFsck { Collection regionServers = status.getLiveServerMetrics().keySet(); Map> mm = new HashMap<>(); for (ServerName hsi : regionServers) { - AdminProtos.AdminService.BlockingInterface server = connection.getAdmin(hsi); - // list all online regions from this region server - List regions = ProtobufUtil.getOnlineRegions(server); + List regions = admin.getRegions(hsi); List regionNames = new ArrayList<>(regions.size()); for (RegionInfo hri : regions) { regionNames.add(hri.getRegionNameAsString()); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MockServer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MockServer.java index 13212d20b2..380c1c7d7d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MockServer.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MockServer.java @@ -27,7 +27,6 @@ import org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.ZooKeeperConnectionException; import org.apache.hadoop.hbase.client.AsyncClusterConnection; -import org.apache.hadoop.hbase.client.ClusterConnection; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.log.HBaseMarkers; import org.apache.hadoop.hbase.zookeeper.ZKWatcher; @@ -104,7 +103,7 @@ public class 
MockServer implements Server { } @Override - public ClusterConnection getConnection() { + public Connection getConnection() { return null; } @@ -115,7 +114,6 @@ public class MockServer implements Server { @Override public boolean isAborted() { - // TODO Auto-generated method stub return this.aborted; } @@ -124,12 +122,6 @@ public class MockServer implements Server { return null; } - @Override - public ClusterConnection getClusterConnection() { - // TODO Auto-generated method stub - return null; - } - @Override public FileSystem getFileSystem() { return null; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedAction.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedAction.java index 0a66ec06f9..f245384083 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedAction.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedAction.java @@ -34,7 +34,7 @@ import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.client.ClusterConnection; +import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.RegionLocator; import org.apache.hadoop.hbase.client.Result; @@ -56,7 +56,7 @@ public abstract class MultiThreadedAction { protected final TableName tableName; protected final Configuration conf; - protected final ClusterConnection connection; // all reader / writer threads will share this connection + protected final Connection connection; // all reader / writer threads will share this connection protected int numThreads = 1; @@ -151,7 +151,7 @@ public abstract class MultiThreadedAction { this.dataGenerator = dataGen; this.tableName = tableName; this.actionLetter = actionLetter; - this.connection = (ClusterConnection) 
ConnectionFactory.createConnection(conf); + this.connection = ConnectionFactory.createConnection(conf); } public void start(long startKey, long endKey, int numThreads) throws IOException { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckMOB.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckMOB.java index 0855559d57..f60ebb624f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckMOB.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckMOB.java @@ -28,7 +28,6 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.client.ClusterConnection; import org.apache.hadoop.hbase.coprocessor.CoprocessorHost; import org.apache.hadoop.hbase.io.hfile.TestHFile; import org.apache.hadoop.hbase.master.assignment.AssignmentManager; @@ -74,7 +73,7 @@ public class TestHBaseFsckMOB extends BaseTestHBaseFsck { TEST_UTIL.getHBaseCluster().getMaster().getAssignmentManager(); regionStates = assignmentManager.getRegionStates(); - connection = (ClusterConnection) TEST_UTIL.getConnection(); + connection = TEST_UTIL.getConnection(); admin = connection.getAdmin(); admin.setBalancerRunning(false, true); diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/client/ThriftConnection.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/client/ThriftConnection.java index 36e513c157..abaaba0242 100644 --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/client/ThriftConnection.java +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/client/ThriftConnection.java @@ -29,9 +29,7 @@ import java.util.Arrays; import java.util.HashMap; import java.util.Map; import java.util.concurrent.ExecutorService; - import javax.net.ssl.SSLException; - import 
org.apache.commons.lang3.NotImplementedException; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HConstants; diff --git a/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift2/TestThriftConnection.java b/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift2/TestThriftConnection.java index 158361958f..24a56a584c 100644 --- a/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift2/TestThriftConnection.java +++ b/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift2/TestThriftConnection.java @@ -41,11 +41,11 @@ import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.NamespaceDescriptor; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Admin; -import org.apache.hadoop.hbase.client.ClusterConnection; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; +import org.apache.hadoop.hbase.client.ConnectionUtils; import org.apache.hadoop.hbase.client.Delete; import org.apache.hadoop.hbase.client.Durability; import org.apache.hadoop.hbase.client.Get; @@ -143,7 +143,7 @@ public class TestThriftConnection { private static Connection createConnection(int port, boolean useHttp) throws IOException { Configuration conf = HBaseConfiguration.create(TEST_UTIL.getConfiguration()); - conf.set(ClusterConnection.HBASE_CLIENT_CONNECTION_IMPL, + conf.set(ConnectionUtils.HBASE_CLIENT_CONNECTION_IMPL, ThriftConnection.class.getName()); if (useHttp) { conf.set(Constants.HBASE_THRIFT_CLIENT_BUIDLER_CLASS, -- 2.17.1