commit 72f53e5dabba3b8e0bfbb51bb6514a9342dae034
Author: stack
Date:   Sat Nov 22 05:20:08 2014 -0800

    patch

diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java
index 3282838..36cc67a 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java
@@ -34,11 +34,13 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.Mutation;
 import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.RegionLocator;
 import org.apache.hadoop.hbase.client.RegionReplicaUtil;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.ResultScanner;
@@ -173,13 +175,18 @@ public class MetaTableAccessor {
    * @throws IOException
    * @SuppressWarnings("deprecation")
    */
-  private static Table getHTable(final Connection connection,
-      final TableName tableName)
+  private static Table getHTable(final Connection connection, final TableName tableName)
   throws IOException {
     // We used to pass whole CatalogTracker in here, now we just pass in Connection
     if (connection == null || connection.isClosed()) {
       throw new NullPointerException("No connection");
     }
+    // If the passed in 'connection' is 'managed' -- i.e. every second test uses
+    // an HTable or an HBaseAdmin with managed connections -- then doing
+    // connection.getTable will throw an exception saying you are NOT to use
+    // managed connections getting tables. Leaving this as it is for now. Will
+    // revisit when inclined to change all tests. User code probably makes use of
+    // managed connections too so don't change it till post hbase 1.0.
     return new HTable(tableName, connection);
   }

@@ -216,8 +223,7 @@ public class MetaTableAccessor {
    * @deprecated use {@link #getRegionLocation(Connection, byte[])} instead
    */
   @Deprecated
-  public static Pair<HRegionInfo, ServerName> getRegion(
-      Connection connection, byte [] regionName)
+  public static Pair<HRegionInfo, ServerName> getRegion(Connection connection, byte [] regionName)
   throws IOException {
     HRegionLocation location = getRegionLocation(connection, regionName);
     return location == null
@@ -886,12 +892,24 @@ public class MetaTableAccessor {
    * @throws IOException
    */
   public static int getRegionCount(final Configuration c, final TableName tableName)
-  throws IOException {
-    HTable t = new HTable(c, tableName);
-    try {
-      return t.getRegionLocations().size();
-    } finally {
-      t.close();
+  throws IOException {
+    try (Connection connection = ConnectionFactory.createConnection(c)) {
+      return getRegionCount(connection, tableName);
+    }
+  }
+
+  /**
+   * Count regions in hbase:meta for passed table.
+   * @param connection Connection object
+   * @param tableName table name to count regions for
+   * @return Count of regions in table tableName
+   * @throws IOException
+   */
+  public static int getRegionCount(final Connection connection, final TableName tableName)
+  throws IOException {
+    try (RegionLocator locator = connection.getRegionLocator(tableName)) {
+      List<HRegionLocation> locations = locator.getAllRegionLocations();
+      return locations == null? 0: locations.size();
+    }
+  }
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionAdapter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionAdapter.java
index 80fa14d..d8856ad 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionAdapter.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionAdapter.java
@@ -36,21 +36,18 @@ import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ClientService;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MasterService;

 /**
- * An internal class that adapts a {@link HConnection}.
- * HConnection is created from HConnectionManager. The default
- * implementation talks to region servers over RPC since it
- * doesn't know if the connection is used by one region server
- * itself. This adapter makes it possible to change some of the
- * default logic. Especially, when the connection is used
- * internally by some the region server.
+ * An internal class that delegates to an {@link HConnection} instance.
+ * A convenience to override when customizing method implementations.
+ *
  *
  * @see ConnectionUtils#createShortCircuitHConnection(HConnection, ServerName,
- *   AdminService.BlockingInterface, ClientService.BlockingInterface)
+ *   AdminService.BlockingInterface, ClientService.BlockingInterface) for case where we make
+ *   Connections skip RPC if request is to local server.
 */
@InterfaceAudience.Private
@SuppressWarnings("deprecation")
//NOTE: DO NOT make this class public. It was made package-private on purpose.
-class ConnectionAdapter implements ClusterConnection {
+abstract class ConnectionAdapter implements ClusterConnection {

   private final ClusterConnection wrappedConnection;
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionFactory.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionFactory.java
index 374ce28..b489af2 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionFactory.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionFactory.java
@@ -35,7 +35,7 @@ import org.apache.hadoop.hbase.security.UserProvider;
 * A non-instantiable class that manages creation of {@link Connection}s.
 * Managing the lifecycle of the {@link Connection}s to the cluster is the responsibility of
 * the caller.
- * From this {@link Connection} {@link Table} implementations are retrieved
+ * From a {@link Connection}, {@link Table} implementations are retrieved
 * with {@link Connection#getTable(TableName)}. Example:
 *
  * Connection connection = ConnectionFactory.createConnection(config);
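
For reference, a caller-side sketch of the pattern this patch moves toward -- hypothetical code, not part of the patch, with a table named "myTable" assumed to exist:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class RegionCountSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    // The caller owns the Connection lifecycle; try-with-resources closes it.
    try (Connection connection = ConnectionFactory.createConnection(conf)) {
      // New overload from this patch: regions are counted through a
      // RegionLocator taken off the Connection, not a hand-built HTable.
      int count = MetaTableAccessor.getRegionCount(connection, TableName.valueOf("myTable"));
      System.out.println("regions=" + count);
    }
  }
}
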
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java
index bfdf5d2..efeb688 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java
@@ -180,7 +180,7 @@ import com.google.protobuf.RpcController;
 import com.google.protobuf.ServiceException;
 
 /**
- * An internal, A non-instantiable class that manages creation of {@link HConnection}s.
+ * An internal, non-instantiable class that manages creation of {@link HConnection}s.
  */
 @SuppressWarnings("serial")
 @InterfaceAudience.Private
@@ -774,16 +774,7 @@ class ConnectionManager {
      * @throws IOException
      */
     private Registry setupRegistry() throws IOException {
-      String registryClass = this.conf.get("hbase.client.registry.impl",
-        ZooKeeperRegistry.class.getName());
-      Registry registry = null;
-      try {
-        registry = (Registry)Class.forName(registryClass).newInstance();
-      } catch (Throwable t) {
-        throw new IOException(t);
-      }
-      registry.init(this);
-      return registry;
+      return RegistryFactory.getRegistry(this);
     }
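
RegistryFactory reads the same "hbase.client.registry.impl" key this inlined block used, so the default behavior (ZooKeeperRegistry) is unchanged. A hedged sketch of overriding it; the class name below is hypothetical, and since the Registry interface is package-private a custom implementation has to live in org.apache.hadoop.hbase.client:

      Configuration conf = HBaseConfiguration.create();
      // Unset, this falls back to ZooKeeperRegistry.class.getName().
      conf.set("hbase.client.registry.impl",
        "org.apache.hadoop.hbase.client.MyStubRegistry"); // hypothetical test double
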
 
     /**
@@ -1010,8 +1001,8 @@ class ConnectionManager {
     @Override
    public List<HRegionLocation> locateRegions(final TableName tableName,
        final boolean useCache, final boolean offlined) throws IOException {
-      NavigableMap<HRegionInfo, ServerName> regions = MetaScanner.allTableRegions(conf, this,
-          tableName);
+      NavigableMap<HRegionInfo, ServerName> regions =
+        MetaScanner.allTableRegions(conf, this, tableName);
      final List<HRegionLocation> locations = new ArrayList<HRegionLocation>();
       for (HRegionInfo regionInfo : regions.keySet()) {
         RegionLocations list = locateRegion(tableName, regionInfo.getStartKey(), useCache, true);
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java
index e26ae48..32e1678 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java
@@ -104,14 +104,14 @@ public class ConnectionUtils {
 
   /**
    * Adapt a HConnection so that it can bypass the RPC layer (serialization,
-   * deserialization, networking, etc..) when it talks to a local server.
+   * deserialization, networking, etc.) -- i.e. short-circuit -- when talking to a local server.
    * @param conn the connection to adapt
    * @param serverName the local server name
    * @param admin the admin interface of the local server
    * @param client the client interface of the local server
    * @return an adapted/decorated HConnection
    */
-  public static HConnection createShortCircuitHConnection(final HConnection conn,
+  public static ClusterConnection createShortCircuitHConnection(final Connection conn,
       final ServerName serverName, final AdminService.BlockingInterface admin,
       final ClientService.BlockingInterface client) {
     return new ConnectionAdapter(conn) {
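
A rough picture of the intended call site, as a sketch only; "rpcServices" stands in for whatever server-side object implements both blocking interfaces, which is an assumption here:

    // Inside a region server: wrap its own connection so that requests
    // addressed to serverName skip RPC serialization and networking.
    ClusterConnection shortCircuit = ConnectionUtils.createShortCircuitHConnection(
      connection, serverName, rpcServices, rpcServices);
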
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
index c3a94e3..2c21838 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
@@ -80,24 +80,24 @@ import com.google.protobuf.Service;
 import com.google.protobuf.ServiceException;
 
 /**
- *
- * HTable is no longer a client API. It is marked InterfaceAudience.Private indicating that
- * this is an HBase-internal class as defined in
- * https://hadoop.apache.org/docs/current/hadoop-project-dist/hadoop-common/InterfaceClassification.html
- * There are no guarantees for backwards source / binary compatibility and methods or class can
- * change or go away without deprecation. Use {@link Connection#getTable(TableName)}
- * to obtain an instance of {@link Table} instead of constructing an HTable directly.
- * 

An implementation of {@link Table}. Used to communicate with a single HBase table. + * An implementation of {@link Table}. Used to communicate with a single HBase table. * Lightweight. Get as needed and just close when done. * Instances of this class SHOULD NOT be constructed directly. * Obtain an instance via {@link Connection}. See {@link ConnectionFactory} * class comment for an example of how. * - *

This class is NOT thread safe for reads nor write. + *

This class is NOT thread safe for reads nor writes. * In the case of writes (Put, Delete), the underlying write buffer can * be corrupted if multiple threads contend over a single HTable instance. * In the case of reads, some fields used by a Scan are shared among all threads. * + *

HTable is no longer a client API. Use {@link Table} instead. It is marked + * InterfaceAudience.Private indicating that this is an HBase-internal class as defined in + * Hadoop + * Interface Classification + * There are no guarantees for backwards source / binary compatibility and methods or class can + * change or go away without deprecation. + * * @see Table * @see Admin * @see Connection @@ -163,8 +163,6 @@ public class HTable implements HTableInterface, RegionLocator { this(conf, TableName.valueOf(tableName)); } - - /** * Creates an object to access a HBase table. * @param conf Configuration object to use. @@ -291,6 +289,8 @@ public class HTable implements HTableInterface, RegionLocator { /** * Creates an object to access a HBase table. + * Used by HBase internally. DO NOT USE. See {@link ConnectionFactory} class comment for how to + * get a {@link Table} instance (use {@link Table} instead of {@link HTable}). * @param tableName Name of the table. * @param connection HConnection to be used. * @param pool ExecutorService to be used. @@ -1794,20 +1794,6 @@ public class HTable implements HTableInterface, RegionLocator { } /** - * Run basic test. - * @param args Pass table name and row and will get the content. - * @throws IOException - */ - public static void main(String[] args) throws IOException { - Table t = new HTable(HBaseConfiguration.create(), args[0]); - try { - System.out.println(t.get(new Get(Bytes.toBytes(args[1])))); - } finally { - t.close(); - } - } - - /** * {@inheritDoc} */ @Override diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MetaScanner.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MetaScanner.java index 5312dfb..e171f4a 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MetaScanner.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MetaScanner.java @@ -41,6 +41,8 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.ExceptionUtil; +import com.google.common.annotations.VisibleForTesting; + /** * Scanner class that contains the hbase:meta table scanning logic. * Provided visitors will be called for each row. @@ -59,13 +61,16 @@ public class MetaScanner { /** * Scans the meta table and calls a visitor on each RowResult and uses a empty * start row value as table name. + * + *

Visible for testing. Use {@link + * #metaScan(Configuration, Connection, MetaScannerVisitor, TableName)} instead. * * @param configuration conf * @param visitor A custom visitor * @throws IOException e */ - public static void metaScan(Configuration configuration, - MetaScannerVisitor visitor) + @VisibleForTesting // Do not use. Used by tests only and hbck. + public static void metaScan(Configuration configuration, MetaScannerVisitor visitor) throws IOException { metaScan(configuration, visitor, null, null, Integer.MAX_VALUE); } @@ -91,6 +96,9 @@ public class MetaScanner { * Scans the meta table and calls a visitor on each RowResult. Uses a table * name and a row name to locate meta regions. And it only scans at most * rowLimit of rows. + * + *

Visible for testing. Use {@link + * #metaScan(Configuration, Connection, MetaScannerVisitor, TableName)} instead. * * @param configuration HBase configuration. * @param visitor Visitor object. * will be set to default value Integer.MAX_VALUE. * @throws IOException e */ + @VisibleForTesting // Do not use. Used by Master but by a method that is used in testing. public static void metaScan(Configuration configuration, MetaScannerVisitor visitor, TableName userTableName, byte[] row, int rowLimit) throws IOException { - metaScan(configuration, null, visitor, userTableName, row, rowLimit, - TableName.META_TABLE_NAME); + metaScan(configuration, null, visitor, userTableName, row, rowLimit, TableName.META_TABLE_NAME); } /** @@ -133,7 +141,7 @@ throws IOException { boolean closeConnection = false; - if (connection == null){ + if (connection == null) { connection = ConnectionFactory.createConnection(configuration); closeConnection = true; } @@ -141,25 +149,24 @@ int rowUpperLimit = rowLimit > 0 ? rowLimit: Integer.MAX_VALUE; // Calculate startrow for scan. byte[] startRow; - ResultScanner scanner = null; - HTable metaTable = null; - try { - metaTable = new HTable(TableName.META_TABLE_NAME, connection, null); + // If the passed in 'connection' is 'managed' -- i.e. every second test uses + // an HTable or an HBaseAdmin with managed connections -- then doing + // connection.getTable will throw an exception saying you are NOT to use + // managed connections getting tables. Leaving this as it is for now. Will + // revisit when inclined to change all tests. User code probably makes use of + // managed connections too so don't change it till post hbase 1.0. + try (Table metaTable = new HTable(TableName.META_TABLE_NAME, connection, null)) { if (row != null) { // Scan starting at a particular row in a particular table - byte[] searchRow = HRegionInfo.createRegionName(tableName, row, HConstants.NINES, false); - - Result startRowResult = metaTable.getRowOrBefore(searchRow, HConstants.CATALOG_FAMILY); - + Result startRowResult = getClosestRowOrBefore(metaTable, tableName, row); if (startRowResult == null) { - throw new TableNotFoundException("Cannot find row in "+ TableName .META_TABLE_NAME.getNameAsString()+" for table: " + tableName + ", row=" + Bytes.toStringBinary(searchRow)); + throw new TableNotFoundException("Cannot find row in " + metaTable.getName() + + " for table: " + tableName + ", row=" + Bytes.toStringBinary(row)); } HRegionInfo regionInfo = getHRegionInfo(startRowResult); if (regionInfo == null) { throw new IOException("HRegionInfo was null or empty in Meta for " + - tableName + ", row=" + Bytes.toStringBinary(searchRow)); + tableName + ", row=" + Bytes.toStringBinary(row)); } byte[] rowBefore = regionInfo.getStartKey(); startRow = HRegionInfo.createRegionName(tableName, rowBefore, HConstants.ZEROES, false); @@ -184,25 +191,18 @@ Bytes.toStringBinary(startRow) + " for max=" + rowUpperLimit + " with caching=" + rows); } // Run the scan - scanner = metaTable.getScanner(scan); - Result result; - int processedRows = 0; - while ((result = scanner.next()) != null) { - if (visitor != null) { - if (!visitor.processRow(result)) break; + try (ResultScanner resultScanner = metaTable.getScanner(scan)) { + Result result; + int processedRows = 0; + while ((result = resultScanner.next()) != null) { + if (visitor != null) { + if (!visitor.processRow(result)) break; + } + 
processedRows++; + if (processedRows >= rowUpperLimit) break; } - processedRows++; - if (processedRows >= rowUpperLimit) break; } } finally { - if (scanner != null) { - try { - scanner.close(); - } catch (Throwable t) { - ExceptionUtil.rethrowIfInterrupt(t); - LOG.debug("Got exception in closing the result scanner", t); - } - } if (visitor != null) { try { visitor.close(); @@ -211,21 +211,27 @@ public class MetaScanner { LOG.debug("Got exception in closing the meta scanner visitor", t); } } - if (metaTable != null) { - try { - metaTable.close(); - } catch (Throwable t) { - ExceptionUtil.rethrowIfInterrupt(t); - LOG.debug("Got exception in closing meta table", t); - } - } if (closeConnection) { - connection.close(); + if (connection != null) connection.close(); } } } /** + * @return Get closest metatable region row to passed row + * @throws IOException + */ + private static Result getClosestRowOrBefore(final Table metaTable, final TableName userTableName, + final byte [] row) + throws IOException { + byte[] searchRow = HRegionInfo.createRegionName(userTableName, row, HConstants.NINES, false); + Scan scan = Scan.createGetClosestRowOrBeforeReverseScan(searchRow); + try (ResultScanner resultScanner = metaTable.getScanner(scan)) { + return resultScanner.next(); + } + } + + /** * Returns HRegionInfo object from the column * HConstants.CATALOG_FAMILY:HConstants.REGIONINFO_QUALIFIER of the catalog * table Result. @@ -246,6 +252,7 @@ public class MetaScanner { * @return List of all user-space regions. * @throws IOException */ + @VisibleForTesting // And for hbck. public static List listAllRegions(Configuration conf, final boolean offlined) throws IOException { final List regions = new ArrayList(); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionLocator.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionLocator.java index 8168fe1..842b735 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionLocator.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionLocator.java @@ -98,4 +98,4 @@ public interface RegionLocator extends Closeable { * Gets the fully qualified table name instance of this table. */ TableName getName(); -} +} \ No newline at end of file diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Registry.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Registry.java index c6ed801..412e4fa 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Registry.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Registry.java @@ -20,11 +20,14 @@ package org.apache.hadoop.hbase.client; import java.io.IOException; import org.apache.hadoop.hbase.RegionLocations; +import org.apache.hadoop.hbase.classification.InterfaceAudience; /** * Cluster registry. - * Implemenations hold cluster information such as this cluster's id, location of hbase:meta, etc. + * Implementations hold cluster information such as this cluster's id, location of hbase:meta, etc. + * Internal use only. 
*/ +@InterfaceAudience.Private interface Registry { /** * @param connection @@ -47,4 +50,4 @@ interface Registry { * @throws IOException */ int getCurrentNrHRS() throws IOException; -} +} \ No newline at end of file diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegistryFactory.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegistryFactory.java new file mode 100644 index 0000000..dc2cb7c --- /dev/null +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegistryFactory.java @@ -0,0 +1,46 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.client; + +import java.io.IOException; + +import org.apache.hadoop.hbase.classification.InterfaceAudience; + +/** + * Get instance of configured Registry. + */ +@InterfaceAudience.Private +class RegistryFactory { + /** + * @return The cluster registry implementation to use. + * @throws IOException + */ + static Registry getRegistry(final Connection connection) + throws IOException { + String registryClass = connection.getConfiguration().get("hbase.client.registry.impl", + ZooKeeperRegistry.class.getName()); + Registry registry = null; + try { + registry = (Registry)Class.forName(registryClass).newInstance(); + } catch (Throwable t) { + throw new IOException(t); + } + registry.init(connection); + return registry; + } +} \ No newline at end of file diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java index 2aea19f..e4323bf 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java @@ -52,8 +52,8 @@ import org.apache.hadoop.hbase.util.Bytes; * To scan everything for each row, instantiate a Scan object. *

* To modify scanner caching for just this scan, use {@link #setCaching(int) setCaching}. - * If caching is NOT set, we will use the caching value of the hosting {@link HTable}. See - * {@link HTable#setScannerCaching(int)}. In addition to row caching, it is possible to specify a + * If caching is NOT set, we will use the caching value of the hosting {@link Table}. + * In addition to row caching, it is possible to specify a * maximum result size, using {@link #setMaxResultSize(long)}. When both are used, * single server requests are limited by either number of rows or maximum result size, whichever * limit comes first. @@ -478,7 +478,8 @@ public class Scan extends Query { /** * Set the number of rows for caching that will be passed to scanners. - * If not set, the default setting from {@link HTable#getScannerCaching()} will apply. + * If not set, the Configuration setting {@link HConstants#HBASE_CLIENT_SCANNER_CACHING} will + * apply. * Higher caching values will enable faster scanners but will use more memory. * @param caching the number of rows for caching */ @@ -894,4 +895,21 @@ public class Scan extends Query { return (Scan) super.setIsolationLevel(level); } -} + /** + * Utility that creates a Scan that will do a small scan in reverse from passed row + * looking for next closest row. + * @param row + * @param family + * @return An instance of Scan primed with passed row and family to + * scan in reverse for one row only. + */ + static Scan createGetClosestRowOrBeforeReverseScan(byte[] row) { + // Below does not work if you add in family; need to add the family qualifier that is highest + // possible family qualifier. Do we have such a notion? Would have to be magic. + Scan scan = new Scan(row); + scan.setSmall(true); + scan.setReversed(true); + scan.setCaching(1); + return scan; + } +} \ No newline at end of file diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/coprocessor/AggregationClient.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/coprocessor/AggregationClient.java index 85ce4e2..7b7cd16 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/coprocessor/AggregationClient.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/coprocessor/AggregationClient.java @@ -19,6 +19,7 @@ package org.apache.hadoop.hbase.client.coprocessor; +import java.io.Closeable; import java.io.IOException; import java.nio.ByteBuffer; import java.util.ArrayList; @@ -36,7 +37,8 @@ import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.classification.InterfaceAudience; -import org.apache.hadoop.hbase.client.HTable; +import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.ResultScanner; import org.apache.hadoop.hbase.client.Scan; @@ -72,19 +74,32 @@ import com.google.protobuf.Message; *

  • For methods to find maximum, minimum, sum, rowcount, it returns the * parameter type. For average and std, it returns a double value. For row * count, it returns a long value. + *

    Call {@link #close()} when done. */ @InterfaceAudience.Private -public class AggregationClient { - +public class AggregationClient implements Closeable { + // TODO: This class is not used. Move to examples? private static final Log log = LogFactory.getLog(AggregationClient.class); - Configuration conf; + private final Connection connection; /** * Constructor with Conf object * @param cfg */ public AggregationClient(Configuration cfg) { - this.conf = cfg; + try { + // Create a connection on construction. Will use it making each of the calls below. + this.connection = ConnectionFactory.createConnection(cfg); + } catch (IOException e) { + throw new RuntimeException(e); + } + } + + @Override + public void close() throws IOException { + if (this.connection != null && !this.connection.isClosed()) { + this.connection.close(); + } } /** @@ -101,15 +116,9 @@ public class AggregationClient { */ public R max( final TableName tableName, final ColumnInterpreter ci, final Scan scan) - throws Throwable { - Table table = null; - try { - table = new HTable(conf, tableName); + throws Throwable { + try (Table table = connection.getTable(tableName)) { return max(table, ci, scan); - } finally { - if (table != null) { - table.close(); - } } } @@ -196,15 +205,9 @@ public class AggregationClient { */ public R min( final TableName tableName, final ColumnInterpreter ci, final Scan scan) - throws Throwable { - Table table = null; - try { - table = new HTable(conf, tableName); + throws Throwable { + try (Table table = connection.getTable(tableName)) { return min(table, ci, scan); - } finally { - if (table != null) { - table.close(); - } } } @@ -276,15 +279,9 @@ public class AggregationClient { */ public long rowCount( final TableName tableName, final ColumnInterpreter ci, final Scan scan) - throws Throwable { - Table table = null; - try { - table = new HTable(conf, tableName); - return rowCount(table, ci, scan); - } finally { - if (table != null) { - table.close(); - } + throws Throwable { + try (Table table = connection.getTable(tableName)) { + return rowCount(table, ci, scan); } } @@ -350,15 +347,9 @@ public class AggregationClient { */ public S sum( final TableName tableName, final ColumnInterpreter ci, final Scan scan) - throws Throwable { - Table table = null; - try { - table = new HTable(conf, tableName); - return sum(table, ci, scan); - } finally { - if (table != null) { - table.close(); - } + throws Throwable { + try (Table table = connection.getTable(tableName)) { + return sum(table, ci, scan); } } @@ -424,14 +415,8 @@ public class AggregationClient { private Pair getAvgArgs( final TableName tableName, final ColumnInterpreter ci, final Scan scan) throws Throwable { - Table table = null; - try { - table = new HTable(conf, tableName); - return getAvgArgs(table, ci, scan); - } finally { - if (table != null) { - table.close(); - } + try (Table table = connection.getTable(tableName)) { + return getAvgArgs(table, ci, scan); } } @@ -615,14 +600,8 @@ public class AggregationClient { public double std(final TableName tableName, ColumnInterpreter ci, Scan scan) throws Throwable { - Table table = null; - try { - table = new HTable(conf, tableName); - return std(table, ci, scan); - } finally { - if (table != null) { - table.close(); - } + try (Table table = connection.getTable(tableName)) { + return std(table, ci, scan); } } @@ -728,14 +707,8 @@ public class AggregationClient { public R median(final TableName tableName, ColumnInterpreter ci, Scan scan) throws Throwable { - Table table = null; - try { - table = new 
HTable(conf, tableName); - return median(table, ci, scan); - } finally { - if (table != null) { - table.close(); - } + try (Table table = connection.getTable(tableName)) { + return median(table, ci, scan); } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/coprocessor/package-info.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/coprocessor/package-info.java index edb3c22..8af120f 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/coprocessor/package-info.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/coprocessor/package-info.java @@ -50,9 +50,9 @@ must: method should return a reference to the Endpoint's protocol buffer Service instance. Clients may then call the defined service methods on coprocessor instances via -the {@link org.apache.hadoop.hbase.client.HTable#coprocessorService(byte[])}, -{@link org.apache.hadoop.hbase.client.HTable#coprocessorService(Class, byte[], byte[], org.apache.hadoop.hbase.client.coprocessor.Batch.Call)}, and -{@link org.apache.hadoop.hbase.client.HTable#coprocessorService(Class, byte[], byte[], org.apache.hadoop.hbase.client.coprocessor.Batch.Call, org.apache.hadoop.hbase.client.coprocessor.Batch.Callback)} +the {@link org.apache.hadoop.hbase.client.Table#coprocessorService(byte[])}, +{@link org.apache.hadoop.hbase.client.Table#coprocessorService(Class, byte[], byte[], org.apache.hadoop.hbase.client.coprocessor.Batch.Call)}, and +{@link org.apache.hadoop.hbase.client.Table#coprocessorService(Class, byte[], byte[], org.apache.hadoop.hbase.client.coprocessor.Batch.Call, org.apache.hadoop.hbase.client.coprocessor.Batch.Callback)} methods.
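
Tying off the AggregationClient changes above: the class now owns a Connection and implements Closeable, so callers should close it when done. A hedged usage sketch, assuming the AggregateImplementation coprocessor is loaded on table "t" and borrowing LongColumnInterpreter from this package; it would sit in a method declared to throw IOException:

    try (AggregationClient aggregationClient = new AggregationClient(conf)) {
      Scan scan = new Scan();
      scan.addColumn(Bytes.toBytes("f"), Bytes.toBytes("q"));
      // rowCount declares Throwable, so the caller has to catch or rethrow it.
      long rows = aggregationClient.rowCount(TableName.valueOf("t"),
        new LongColumnInterpreter(), scan);
      System.out.println("rows=" + rows);
    } catch (Throwable t) {
      throw new IOException(t);
    }
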

    @@ -65,21 +65,21 @@ to identify which regions should be used for the method invocations. Clients can call coprocessor Service methods against either:
    • a single region - calling - {@link org.apache.hadoop.hbase.client.HTable#coprocessorService(byte[])} + {@link org.apache.hadoop.hbase.client.Table#coprocessorService(byte[])} with a single row key. This returns a {@link org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel} instance which communicates with the region containing the given row key (even if the row does not exist) as the RPC endpoint. Clients can then use the {@code CoprocessorRpcChannel} instance in creating a new Service stub to call RPC methods on the region's coprocessor.
• a range of regions - calling - {@link org.apache.hadoop.hbase.client.HTable#coprocessorService(Class, byte[], byte[], org.apache.hadoop.hbase.client.coprocessor.Batch.Call)} - or {@link org.apache.hadoop.hbase.client.HTable#coprocessorService(Class, byte[], byte[], org.apache.hadoop.hbase.client.coprocessor.Batch.Call, org.apache.hadoop.hbase.client.coprocessor.Batch.Callback)} + {@link org.apache.hadoop.hbase.client.Table#coprocessorService(Class, byte[], byte[], org.apache.hadoop.hbase.client.coprocessor.Batch.Call)} + or {@link org.apache.hadoop.hbase.client.Table#coprocessorService(Class, byte[], byte[], org.apache.hadoop.hbase.client.coprocessor.Batch.Call, org.apache.hadoop.hbase.client.coprocessor.Batch.Callback)} with a starting row key and an ending row key. All regions in the table from the region containing the start row key to the region containing the end row key (inclusive), will be used as the RPC endpoints.

    -

    Note that the row keys passed as parameters to the HTable +

    Note that the row keys passed as parameters to the Table methods are not passed directly to the coprocessor Service implementations. They are only used to identify the regions for endpoints of the remote calls.

    @@ -160,7 +160,8 @@ use:
    -HTable table = new HTable(conf, "mytable");
    +Connection connection = ConnectionFactory.createConnection(conf);
    +Table table = connection.getTable(TableName.valueOf("mytable"));
     final ExampleProtos.CountRequest request = ExampleProtos.CountRequest.getDefaultInstance();
    Map<byte[], Long> results = table.coprocessorService(
         ExampleProtos.RowCountService.class, // the protocol interface we're invoking
    @@ -186,7 +187,7 @@ of mytable, keyed by the region name.
     By implementing {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call}
     as an anonymous class, we can invoke RowCountService methods
     directly against the {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call(Object)}
    -method's argument.  Calling {@link org.apache.hadoop.hbase.client.HTable#coprocessorService(Class, byte[], byte[], org.apache.hadoop.hbase.client.coprocessor.Batch.Call)}
    +method's argument.  Calling {@link org.apache.hadoop.hbase.client.Table#coprocessorService(Class, byte[], byte[], org.apache.hadoop.hbase.client.coprocessor.Batch.Call)}
     will take care of invoking Batch.Call.call() against our anonymous class
     with the RowCountService instance for each table region.
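
Pieced together, the updated example reads roughly as below. A sketch: the stub method name and response fields follow the hbase-examples RowCountService protos, and error handling is elided:

Connection connection = ConnectionFactory.createConnection(conf);
Table table = connection.getTable(TableName.valueOf("mytable"));
final ExampleProtos.CountRequest request = ExampleProtos.CountRequest.getDefaultInstance();
Map<byte[], Long> results = table.coprocessorService(
    ExampleProtos.RowCountService.class,
    null, null,  // null start/end row keys cover all regions
    new Batch.Call<ExampleProtos.RowCountService, Long>() {
      public Long call(ExampleProtos.RowCountService counter) throws IOException {
        ServerRpcController controller = new ServerRpcController();
        BlockingRpcCallback<ExampleProtos.CountResponse> callback =
            new BlockingRpcCallback<ExampleProtos.CountResponse>();
        counter.getRowCount(controller, request, callback);
        return callback.get().getCount();
      }
    });
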
     

    @@ -199,7 +200,8 @@ like to combine row count and key-value count for each region:
    -HTable table = new HTable(conf, "mytable");
    +Connection connection = ConnectionFactory.createConnection(conf);
    +Table table = connection.getTable(TableName.valueOf("mytable"));
     // combine row count and kv count for region
     final ExampleProtos.CountRequest request = ExampleProtos.CountRequest.getDefaultInstance();
    Map<byte[], Pair<Long, Long>> results = table.coprocessorService(
    diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/package-info.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/package-info.java
    index e808904..10261cd 100644
    --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/package-info.java
    +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/package-info.java
    @@ -28,23 +28,26 @@ Provides HBase Client
     
      

    Overview

    To administer HBase, create and drop tables, list and alter tables, - use {@link org.apache.hadoop.hbase.client.HBaseAdmin}. Once created, table access is via an instance - of {@link org.apache.hadoop.hbase.client.HTable}. You add content to a table a row at a time. To insert, - create an instance of a {@link org.apache.hadoop.hbase.client.Put} object. Specify value, target column - and optionally a timestamp. Commit your update using {@link org.apache.hadoop.hbase.client.HTable#put(Put)}. - To fetch your inserted value, use {@link org.apache.hadoop.hbase.client.Get}. The Get can be specified to be broad -- get all - on a particular row -- or narrow; i.e. return only a single cell value. After creating an instance of - Get, invoke {@link org.apache.hadoop.hbase.client.HTable#get(Get)}. Use - {@link org.apache.hadoop.hbase.client.Scan} to set up a scanner -- a Cursor- like access. After - creating and configuring your Scan instance, call {@link org.apache.hadoop.hbase.client.HTable#getScanner(Scan)} and then - invoke next on the returned object. Both {@link org.apache.hadoop.hbase.client.HTable#get(Get)} and - {@link org.apache.hadoop.hbase.client.HTable#getScanner(Scan)} return a + use {@link org.apache.hadoop.hbase.client.Admin}. Once created, table access is via an instance + of {@link org.apache.hadoop.hbase.client.Table}. You add content to a table a row at a time. To + insert, create an instance of a {@link org.apache.hadoop.hbase.client.Put} object. Specify value, + target column and optionally a timestamp. Commit your update using + {@link org.apache.hadoop.hbase.client.Table#put(Put)}. + To fetch your inserted value, use {@link org.apache.hadoop.hbase.client.Get}. The Get can be + specified to be broad -- get all on a particular row -- or narrow; i.e. return only a single cell + value. After creating an instance of + Get, invoke {@link org.apache.hadoop.hbase.client.Table#get(Get)}. + +

Use {@link org.apache.hadoop.hbase.client.Scan} to set up a scanner -- a Cursor-like access. + After creating and configuring your Scan instance, call + {@link org.apache.hadoop.hbase.client.Table#getScanner(Scan)} and then + invoke next on the returned object. Both {@link org.apache.hadoop.hbase.client.Table#get(Get)} + and {@link org.apache.hadoop.hbase.client.Table#getScanner(Scan)} return a {@link org.apache.hadoop.hbase.client.Result}. -A Result is a List of {@link org.apache.hadoop.hbase.KeyValue}s. It has facility for packaging the return -in different formats. - Use {@link org.apache.hadoop.hbase.client.Delete} to remove content. + +

    Use {@link org.apache.hadoop.hbase.client.Delete} to remove content. You can remove individual cells or entire families, etc. Pass it to - {@link org.apache.hadoop.hbase.client.HTable#delete(Delete)} to execute. + {@link org.apache.hadoop.hbase.client.Table#delete(Delete)} to execute.

    Puts, Gets and Deletes take out a lock on the target row for the duration of their operation. Concurrent modifications to a single row are serialized. Gets and scans run concurrently without @@ -68,8 +71,11 @@ in different formats. import java.io.IOException; import org.apache.hadoop.hbase.HBaseConfiguration; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.Get; -import org.apache.hadoop.hbase.client.HTable; +import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.ResultScanner; @@ -87,80 +93,97 @@ public class MyLittleHBaseClient { // be found on the CLASSPATH Configuration config = HBaseConfiguration.create(); - // This instantiates an HTable object that connects you to - // the "myLittleHBaseTable" table. - HTable table = new HTable(config, "myLittleHBaseTable"); - - // To add to a row, use Put. A Put constructor takes the name of the row - // you want to insert into as a byte array. In HBase, the Bytes class has - // utility for converting all kinds of java types to byte arrays. In the - // below, we are converting the String "myLittleRow" into a byte array to - // use as a row key for our update. Once you have a Put instance, you can - // adorn it by setting the names of columns you want to update on the row, - // the timestamp to use in your update, etc.If no timestamp, the server - // applies current time to the edits. - Put p = new Put(Bytes.toBytes("myLittleRow")); - - // To set the value you'd like to update in the row 'myLittleRow', specify - // the column family, column qualifier, and value of the table cell you'd - // like to update. The column family must already exist in your table - // schema. The qualifier can be anything. All must be specified as byte - // arrays as hbase is all about byte arrays. Lets pretend the table - // 'myLittleHBaseTable' was created with a family 'myLittleFamily'. - p.add(Bytes.toBytes("myLittleFamily"), Bytes.toBytes("someQualifier"), - Bytes.toBytes("Some Value")); - - // Once you've adorned your Put instance with all the updates you want to - // make, to commit it do the following (The HTable#put method takes the - // Put instance you've been building and pushes the changes you made into - // hbase) - table.put(p); - - // Now, to retrieve the data we just wrote. The values that come back are - // Result instances. Generally, a Result is an object that will package up - // the hbase return into the form you find most palatable. - Get g = new Get(Bytes.toBytes("myLittleRow")); - Result r = table.get(g); - byte [] value = r.getValue(Bytes.toBytes("myLittleFamily"), - Bytes.toBytes("someQualifier")); - // If we convert the value bytes, we should get back 'Some Value', the - // value we inserted at this location. - String valueStr = Bytes.toString(value); - System.out.println("GET: " + valueStr); - - // Sometimes, you won't know the row you're looking for. In this case, you - // use a Scanner. This will give you cursor-like interface to the contents - // of the table. To set up a Scanner, do like you did above making a Put - // and a Get, create a Scan. Adorn it with column names, etc. - Scan s = new Scan(); - s.addColumn(Bytes.toBytes("myLittleFamily"), Bytes.toBytes("someQualifier")); - ResultScanner scanner = table.getScanner(s); + // Next you need a Connection to the cluster. Create one. 
When done with it, + // close it (Should start a try/finally after this creation so it gets closed + // for sure but leaving this out for readability's sake). + Connection connection = ConnectionFactory.createConnection(config); try { - // Scanners return Result instances. - // Now, for the actual iteration. One way is to use a while loop like so: - for (Result rr = scanner.next(); rr != null; rr = scanner.next()) { - // print out the row we found and the columns we were looking for - System.out.println("Found row: " + rr); - } - - // The other approach is to use a foreach loop. Scanners are iterable! - // for (Result rr : scanner) { - // System.out.println("Found row: " + rr); - // } - } finally { - // Make sure you close your scanners when you are done! - // Thats why we have it inside a try/finally clause - scanner.close(); - } + + // This instantiates a Table object that connects you to + // the "myLittleHBaseTable" table (TableName.valueOf turns String into TableName instance). + // When done with it, close it (Should start a try/finally after this creation so it gets + // closed for sure but leaving this out for readability's sake). + Table table = connection.getTable(TableName.valueOf("myLittleHBaseTable")); + try { + + // To add to a row, use Put. A Put constructor takes the name of the row + // you want to insert into as a byte array. In HBase, the Bytes class has + // utility for converting all kinds of java types to byte arrays. In the + // below, we are converting the String "myLittleRow" into a byte array to + // use as a row key for our update. Once you have a Put instance, you can + // adorn it by setting the names of columns you want to update on the row, + // the timestamp to use in your update, etc. If no timestamp, the server + // applies current time to the edits. + Put p = new Put(Bytes.toBytes("myLittleRow")); + + // To set the value you'd like to update in the row 'myLittleRow', specify + // the column family, column qualifier, and value of the table cell you'd + // like to update. The column family must already exist in your table + // schema. The qualifier can be anything. All must be specified as byte + // arrays as hbase is all about byte arrays. Let's pretend the table + // 'myLittleHBaseTable' was created with a family 'myLittleFamily'. + p.add(Bytes.toBytes("myLittleFamily"), Bytes.toBytes("someQualifier"), + Bytes.toBytes("Some Value")); + + // Once you've adorned your Put instance with all the updates you want to + // make, to commit it do the following (The HTable#put method takes the + // Put instance you've been building and pushes the changes you made into + // hbase) + table.put(p); + + // Now, to retrieve the data we just wrote. The values that come back are + // Result instances. Generally, a Result is an object that will package up + // the hbase return into the form you find most palatable. + Get g = new Get(Bytes.toBytes("myLittleRow")); + Result r = table.get(g); + byte [] value = r.getValue(Bytes.toBytes("myLittleFamily"), + Bytes.toBytes("someQualifier")); + // If we convert the value bytes, we should get back 'Some Value', the + // value we inserted at this location. + String valueStr = Bytes.toString(value); + System.out.println("GET: " + valueStr); + + // Sometimes, you won't know the row you're looking for. In this case, you + // use a Scanner. This will give you a cursor-like interface to the contents + // of the table. To set up a Scanner, do like you did above making a Put + // and a Get, create a Scan. Adorn it with column names, etc. 
+ Scan s = new Scan(); + s.addColumn(Bytes.toBytes("myLittleFamily"), Bytes.toBytes("someQualifier")); + ResultScanner scanner = table.getScanner(s); + try { + // Scanners return Result instances. + // Now, for the actual iteration. One way is to use a while loop like so: + for (Result rr = scanner.next(); rr != null; rr = scanner.next()) { + // print out the row we found and the columns we were looking for + System.out.println("Found row: " + rr); + } + + // The other approach is to use a foreach loop. Scanners are iterable! + // for (Result rr : scanner) { + // System.out.println("Found row: " + rr); + // } + } finally { + // Make sure you close your scanners when you are done! + // That's why we have it inside a try/finally clause + scanner.close(); + } + + // Close your table and cluster connection. + } finally { + if (table != null) table.close(); + } + } finally { + connection.close(); + } } }
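
The explicit try/finally nesting above can be collapsed with Java 7 try-with-resources. A sketch of the same lifecycle, using only the classes the example already imports:

try (Connection connection = ConnectionFactory.createConnection(config);
     Table table = connection.getTable(TableName.valueOf("myLittleHBaseTable"));
     ResultScanner scanner = table.getScanner(new Scan())) {
  for (Result rr : scanner) {
    System.out.println("Found row: " + rr);
  }
}
// Connection, Table and ResultScanner all close automatically, in reverse order.
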

    There are many other methods for putting data into and getting data out of - HBase, but these examples should get you started. See the HTable javadoc for + HBase, but these examples should get you started. See the Table javadoc for more methods. Additionally, there are methods for managing tables in the - HBaseAdmin class.

    + Admin class.

    If your client is NOT Java, then you should consider the Thrift or REST libraries.

    @@ -168,20 +191,14 @@ public class MyLittleHBaseClient {

    Related Documentation

    -

    There are many other methods for putting data into and getting data out of - HBase, but these examples should get you started. See the HTable javadoc for - more methods. Additionally, there are methods for managing tables in the - HBaseAdmin class.

    -

See also the section in the HBase Reference Guide where it discusses HBase Client. It - has section on how to access HBase from inside your multithreaded environtment + has a section on how to access HBase from inside your multithreaded environment, how to control resources consumed client-side, etc.
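
One lifecycle change in the quota diffs below is worth a sketch up front: QuotaRetriever now opens and owns its own Connection, so it must be closed to release it. A hedged usage example, assuming the existing static open(Configuration) factory:

try (QuotaRetriever quotaScanner = QuotaRetriever.open(conf)) {
  for (QuotaSettings settings : quotaScanner) {
    System.out.println(settings);
  }
}
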

    diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaRetriever.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaRetriever.java index f13ce28..68c8e0a 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaRetriever.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaRetriever.java @@ -30,10 +30,12 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.classification.InterfaceStability; -import org.apache.hadoop.hbase.client.HTable; +import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.ResultScanner; import org.apache.hadoop.hbase.client.Scan; +import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas; import org.apache.hadoop.util.StringUtils; @@ -47,23 +49,40 @@ public class QuotaRetriever implements Closeable, Iterable { private final Queue cache = new LinkedList(); private ResultScanner scanner; - private HTable table; + /** + * Connection to use. + * Could pass one in and have this class use it but this class wants to be standalone. + */ + private Connection connection; + private Table table; private QuotaRetriever() { } void init(final Configuration conf, final Scan scan) throws IOException { - table = new HTable(conf, QuotaTableUtil.QUOTA_TABLE_NAME); + this.connection = ConnectionFactory.createConnection(conf); + this.table = this.connection.getTable(QuotaTableUtil.QUOTA_TABLE_NAME); try { scanner = table.getScanner(scan); } catch (IOException e) { - table.close(); + try { + close(); + } catch (IOException ioe) { + LOG.warn("Failed getting scanner and then failed close on cleanup", e); + } throw e; } } public void close() throws IOException { - table.close(); + if (this.table != null) { + this.table.close(); + this.table = null; + } + if (this.connection != null) { + this.connection.close(); + this.connection = null; + } } public QuotaSettings next() throws IOException { @@ -163,4 +182,4 @@ public class QuotaRetriever implements Closeable, Iterable { scanner.init(conf, scan); return scanner; } -} +} \ No newline at end of file diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaTableUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaTableUtil.java index 6153876..0ad81ae 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaTableUtil.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaTableUtil.java @@ -27,15 +27,15 @@ import java.util.regex.Pattern; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.NamespaceDescriptor; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.classification.InterfaceStability; +import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.Get; -import org.apache.hadoop.hbase.client.HTable; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.Scan; +import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.filter.CompareFilter; import org.apache.hadoop.hbase.filter.Filter; import 
org.apache.hadoop.hbase.filter.FilterList; @@ -78,41 +78,42 @@ public class QuotaTableUtil { /* ========================================================================= * Quota "settings" helpers */ - public static Quotas getTableQuota(final Configuration conf, final TableName table) + public static Quotas getTableQuota(final Connection connection, final TableName table) throws IOException { - return getQuotas(conf, getTableRowKey(table)); + return getQuotas(connection, getTableRowKey(table)); } - public static Quotas getNamespaceQuota(final Configuration conf, final String namespace) + public static Quotas getNamespaceQuota(final Connection connection, final String namespace) throws IOException { - return getQuotas(conf, getNamespaceRowKey(namespace)); + return getQuotas(connection, getNamespaceRowKey(namespace)); } - public static Quotas getUserQuota(final Configuration conf, final String user) + public static Quotas getUserQuota(final Connection connection, final String user) throws IOException { - return getQuotas(conf, getUserRowKey(user)); + return getQuotas(connection, getUserRowKey(user)); } - public static Quotas getUserQuota(final Configuration conf, final String user, + public static Quotas getUserQuota(final Connection connection, final String user, final TableName table) throws IOException { - return getQuotas(conf, getUserRowKey(user), getSettingsQualifierForUserTable(table)); + return getQuotas(connection, getUserRowKey(user), getSettingsQualifierForUserTable(table)); } - public static Quotas getUserQuota(final Configuration conf, final String user, + public static Quotas getUserQuota(final Connection connection, final String user, final String namespace) throws IOException { - return getQuotas(conf, getUserRowKey(user), getSettingsQualifierForUserNamespace(namespace)); + return getQuotas(connection, getUserRowKey(user), + getSettingsQualifierForUserNamespace(namespace)); } - private static Quotas getQuotas(final Configuration conf, final byte[] rowKey) + private static Quotas getQuotas(final Connection connection, final byte[] rowKey) throws IOException { - return getQuotas(conf, rowKey, QUOTA_QUALIFIER_SETTINGS); + return getQuotas(connection, rowKey, QUOTA_QUALIFIER_SETTINGS); } - private static Quotas getQuotas(final Configuration conf, final byte[] rowKey, + private static Quotas getQuotas(final Connection connection, final byte[] rowKey, final byte[] qualifier) throws IOException { Get get = new Get(rowKey); get.addColumn(QUOTA_FAMILY_INFO, qualifier); - Result result = doGet(conf, get); + Result result = doGet(connection, get); if (result.isEmpty()) { return null; } @@ -321,23 +322,17 @@ public class QuotaTableUtil { /* ========================================================================= * HTable helpers */ - protected static Result doGet(final Configuration conf, final Get get) + protected static Result doGet(final Connection connection, final Get get) throws IOException { - HTable table = new HTable(conf, QUOTA_TABLE_NAME); - try { + try (Table table = connection.getTable(QUOTA_TABLE_NAME)) { return table.get(get); - } finally { - table.close(); } } - protected static Result[] doGet(final Configuration conf, final List gets) + protected static Result[] doGet(final Connection connection, final List gets) throws IOException { - HTable table = new HTable(conf, QUOTA_TABLE_NAME); - try { + try (Table table = connection.getTable(QUOTA_TABLE_NAME)) { return table.get(gets); - } finally { - table.close(); } } diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlClient.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlClient.java index 922bf67..ae43c17 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlClient.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlClient.java @@ -32,8 +32,8 @@ import org.apache.hadoop.hbase.ZooKeeperConnectionException; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.classification.InterfaceStability; import org.apache.hadoop.hbase.client.Admin; -import org.apache.hadoop.hbase.client.HBaseAdmin; -import org.apache.hadoop.hbase.client.HTable; +import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel; import org.apache.hadoop.hbase.protobuf.ProtobufUtil; @@ -50,11 +50,7 @@ public class AccessControlClient { public static final TableName ACL_TABLE_NAME = TableName.valueOf(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR, "acl"); - private static HTable getAclTable(Configuration conf) throws IOException { - return new HTable(conf, ACL_TABLE_NAME); - } - - private static BlockingInterface getAccessControlServiceStub(HTable ht) + private static BlockingInterface getAccessControlServiceStub(Table ht) throws IOException { CoprocessorRpcChannel service = ht.coprocessorService(HConstants.EMPTY_START_ROW); BlockingInterface protocol = @@ -75,14 +71,12 @@ public class AccessControlClient { public static void grant(Configuration conf, final TableName tableName, final String userName, final byte[] family, final byte[] qual, final Permission.Action... actions) throws Throwable { - HTable ht = null; - try { - ht = getAclTable(conf); - ProtobufUtil.grant(getAccessControlServiceStub(ht), userName, tableName, family, qual, + // TODO: Make it so caller passes in a Connection rather than have us do this expensive + // setup each time. This class only used in test and shell at moment though. + try (Connection connection = ConnectionFactory.createConnection(conf)) { + try (Table table = connection.getTable(ACL_TABLE_NAME)) { + ProtobufUtil.grant(getAccessControlServiceStub(table), userName, tableName, family, qual, actions); - } finally { - if (ht != null) { - ht.close(); } } } @@ -97,26 +91,22 @@ public class AccessControlClient { */ public static void grant(Configuration conf, final String namespace, final String userName, final Permission.Action... actions) throws Throwable { - HTable ht = null; - try { - ht = getAclTable(conf); - ProtobufUtil.grant(getAccessControlServiceStub(ht), userName, namespace, actions); - } finally { - if (ht != null) { - ht.close(); + // TODO: Make it so caller passes in a Connection rather than have us do this expensive + // setup each time. This class only used in test and shell at moment though. 
+ try (Connection connection = ConnectionFactory.createConnection(conf)) { + try (Table table = connection.getTable(ACL_TABLE_NAME)) { + ProtobufUtil.grant(getAccessControlServiceStub(table), userName, namespace, actions); } } } public static boolean isAccessControllerRunning(Configuration conf) throws MasterNotRunningException, ZooKeeperConnectionException, IOException { - HBaseAdmin ha = null; - try { - ha = new HBaseAdmin(conf); - return ha.isTableAvailable(ACL_TABLE_NAME); - } finally { - if (ha != null) { - ha.close(); + // TODO: Make it so caller passes in a Connection rather than have us do this expensive + // setup each time. This class only used in test and shell at moment though. + try (Connection connection = ConnectionFactory.createConnection(conf)) { + try (Admin admin = connection.getAdmin()) { + return admin.isTableAvailable(ACL_TABLE_NAME); } } } @@ -134,14 +124,12 @@ public class AccessControlClient { public static void revoke(Configuration conf, final TableName tableName, final String username, final byte[] family, final byte[] qualifier, final Permission.Action... actions) throws Throwable { - HTable ht = null; - try { - ht = getAclTable(conf); - ProtobufUtil.revoke(getAccessControlServiceStub(ht), username, tableName, family, qualifier, - actions); - } finally { - if (ht != null) { - ht.close(); + // TODO: Make it so caller passes in a Connection rather than have us do this expensive + // setup each time. This class only used in test and shell at moment though. + try (Connection connection = ConnectionFactory.createConnection(conf)) { + try (Table table = connection.getTable(ACL_TABLE_NAME)) { + ProtobufUtil.revoke(getAccessControlServiceStub(table), username, tableName, family, + qualifier, actions); } } } @@ -156,13 +144,11 @@ public class AccessControlClient { */ public static void revoke(Configuration conf, final String namespace, final String userName, final Permission.Action... actions) throws Throwable { - HTable ht = null; - try { - ht = getAclTable(conf); - ProtobufUtil.revoke(getAccessControlServiceStub(ht), userName, namespace, actions); - } finally { - if (ht != null) { - ht.close(); + // TODO: Make it so caller passes in a Connection rather than have us do this expensive + // setup each time. This class only used in test and shell at moment though. 
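The TODO suggests the natural follow-up; a hypothetical Connection-accepting overload, not part of this patch, would reduce to:

// Hypothetical overload sketched from the body that follows; this patch keeps the
// Configuration signatures and defers the Connection variant to the TODOs.
public static void revoke(Connection connection, final String namespace,
    final String userName, final Permission.Action... actions) throws Throwable {
  try (Table table = connection.getTable(ACL_TABLE_NAME)) {
    ProtobufUtil.revoke(getAccessControlServiceStub(table), userName, namespace, actions);
  }
}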
+ try (Connection connection = ConnectionFactory.createConnection(conf)) { + try (Table table = connection.getTable(ACL_TABLE_NAME)) { + ProtobufUtil.revoke(getAccessControlServiceStub(table), userName, namespace, actions); } } } @@ -177,36 +163,29 @@ public class AccessControlClient { public static List getUserPermissions(Configuration conf, String tableRegex) throws Throwable { List permList = new ArrayList(); - Table ht = null; - Admin ha = null; - try { - ha = new HBaseAdmin(conf); - ht = new HTable(conf, ACL_TABLE_NAME); - CoprocessorRpcChannel service = ht.coprocessorService(HConstants.EMPTY_START_ROW); - BlockingInterface protocol = AccessControlProtos.AccessControlService - .newBlockingStub(service); - HTableDescriptor[] htds = null; - - if (tableRegex == null || tableRegex.isEmpty()) { - permList = ProtobufUtil.getUserPermissions(protocol); - } else if (tableRegex.charAt(0) == '@') { - String namespace = tableRegex.substring(1); - permList = ProtobufUtil.getUserPermissions(protocol, Bytes.toBytes(namespace)); - } else { - htds = ha.listTables(Pattern.compile(tableRegex)); - for (HTableDescriptor hd : htds) { - permList.addAll(ProtobufUtil.getUserPermissions(protocol, hd.getTableName())); + // TODO: Make it so caller passes in a Connection rather than have us do this expensive + // setup each time. This class only used in test and shell at moment though. + try (Connection connection = ConnectionFactory.createConnection(conf)) { + try (Table table = connection.getTable(ACL_TABLE_NAME)) { + try (Admin admin = connection.getAdmin()) { + CoprocessorRpcChannel service = table.coprocessorService(HConstants.EMPTY_START_ROW); + BlockingInterface protocol = + AccessControlProtos.AccessControlService.newBlockingStub(service); + HTableDescriptor[] htds = null; + if (tableRegex == null || tableRegex.isEmpty()) { + permList = ProtobufUtil.getUserPermissions(protocol); + } else if (tableRegex.charAt(0) == '@') { + String namespace = tableRegex.substring(1); + permList = ProtobufUtil.getUserPermissions(protocol, Bytes.toBytes(namespace)); + } else { + htds = admin.listTables(Pattern.compile(tableRegex)); + for (HTableDescriptor hd : htds) { + permList.addAll(ProtobufUtil.getUserPermissions(protocol, hd.getTableName())); + } + } } } - } finally { - if (ht != null) { - ht.close(); - } - if (ha != null) { - ha.close(); - } } return permList; } - -} +} \ No newline at end of file diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityClient.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityClient.java index 5ca5231..2818c24 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityClient.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityClient.java @@ -26,7 +26,8 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.classification.InterfaceStability; -import org.apache.hadoop.hbase.client.HTable; +import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.client.coprocessor.Batch; import org.apache.hadoop.hbase.ipc.BlockingRpcCallback; @@ -73,40 +74,39 @@ public class VisibilityClient { */ public static VisibilityLabelsResponse addLabels(Configuration conf, final String[] labels) throws 
Throwable { - Table ht = null; - try { - ht = new HTable(conf, LABELS_TABLE_NAME); - Batch.Call callable = - new Batch.Call() { - ServerRpcController controller = new ServerRpcController(); - BlockingRpcCallback rpcCallback = - new BlockingRpcCallback(); + // TODO: Make it so caller passes in a Connection rather than have us do this expensive + // setup each time. This class only used in test and shell at moment though. + try (Connection connection = ConnectionFactory.createConnection(conf)) { + try (Table table = connection.getTable(LABELS_TABLE_NAME)) { + Batch.Call callable = + new Batch.Call() { + ServerRpcController controller = new ServerRpcController(); + BlockingRpcCallback rpcCallback = + new BlockingRpcCallback(); - public VisibilityLabelsResponse call(VisibilityLabelsService service) throws IOException { - VisibilityLabelsRequest.Builder builder = VisibilityLabelsRequest.newBuilder(); - for (String label : labels) { - if (label.length() > 0) { - VisibilityLabel.Builder newBuilder = VisibilityLabel.newBuilder(); - newBuilder.setLabel(ByteStringer.wrap(Bytes.toBytes(label))); - builder.addVisLabel(newBuilder.build()); + public VisibilityLabelsResponse call(VisibilityLabelsService service) + throws IOException { + VisibilityLabelsRequest.Builder builder = VisibilityLabelsRequest.newBuilder(); + for (String label : labels) { + if (label.length() > 0) { + VisibilityLabel.Builder newBuilder = VisibilityLabel.newBuilder(); + newBuilder.setLabel(ByteStringer.wrap(Bytes.toBytes(label))); + builder.addVisLabel(newBuilder.build()); + } } + service.addLabels(controller, builder.build(), rpcCallback); + VisibilityLabelsResponse response = rpcCallback.get(); + if (controller.failedOnException()) { + throw controller.getFailedOn(); + } + return response; } - service.addLabels(controller, builder.build(), rpcCallback); - VisibilityLabelsResponse response = rpcCallback.get(); - if (controller.failedOnException()) { - throw controller.getFailedOn(); - } - return response; - } - }; - Map result = ht.coprocessorService( - VisibilityLabelsService.class, HConstants.EMPTY_BYTE_ARRAY, HConstants.EMPTY_BYTE_ARRAY, - callable); - return result.values().iterator().next(); // There will be exactly one region for labels - // table and so one entry in result Map. - } finally { - if (ht != null) { - ht.close(); + }; + Map result = + table.coprocessorService(VisibilityLabelsService.class, HConstants.EMPTY_BYTE_ARRAY, + HConstants.EMPTY_BYTE_ARRAY, callable); + return result.values().iterator().next(); // There will be exactly one region for labels + // table and so one entry in result Map. } } } @@ -131,33 +131,32 @@ public class VisibilityClient { * @throws Throwable */ public static GetAuthsResponse getAuths(Configuration conf, final String user) throws Throwable { - Table ht = null; - try { - ht = new HTable(conf, LABELS_TABLE_NAME); - Batch.Call callable = - new Batch.Call() { - ServerRpcController controller = new ServerRpcController(); - BlockingRpcCallback rpcCallback = - new BlockingRpcCallback(); + // TODO: Make it so caller passes in a Connection rather than have us do this expensive + // setup each time. This class only used in test and shell at moment though. 
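A quick caller-side sketch for addLabels and getAuths; the label and user names are hypothetical, and the response accessor is assumed from the generated VisibilityLabelsProtos:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.protobuf.generated.VisibilityLabelsProtos.GetAuthsResponse;
import org.apache.hadoop.hbase.security.visibility.VisibilityClient;

public class LabelsSketch {
  public static void main(String[] args) throws Throwable {
    Configuration conf = HBaseConfiguration.create();
    // Both calls open and close their own Connection internally, per the TODOs here.
    VisibilityClient.addLabels(conf, new String[] { "secret", "topsecret" });
    GetAuthsResponse auths = VisibilityClient.getAuths(conf, "bob");
    System.out.println("bob holds " + auths.getAuthCount() + " auths");
  }
}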
+ try (Connection connection = ConnectionFactory.createConnection(conf)) { + try (Table table = connection.getTable(LABELS_TABLE_NAME)) { + Batch.Call callable = + new Batch.Call() { + ServerRpcController controller = new ServerRpcController(); + BlockingRpcCallback rpcCallback = + new BlockingRpcCallback(); - public GetAuthsResponse call(VisibilityLabelsService service) throws IOException { - GetAuthsRequest.Builder getAuthReqBuilder = GetAuthsRequest.newBuilder(); - getAuthReqBuilder.setUser(ByteStringer.wrap(Bytes.toBytes(user))); - service.getAuths(controller, getAuthReqBuilder.build(), rpcCallback); - GetAuthsResponse response = rpcCallback.get(); - if (controller.failedOnException()) { - throw controller.getFailedOn(); + public GetAuthsResponse call(VisibilityLabelsService service) throws IOException { + GetAuthsRequest.Builder getAuthReqBuilder = GetAuthsRequest.newBuilder(); + getAuthReqBuilder.setUser(ByteStringer.wrap(Bytes.toBytes(user))); + service.getAuths(controller, getAuthReqBuilder.build(), rpcCallback); + GetAuthsResponse response = rpcCallback.get(); + if (controller.failedOnException()) { + throw controller.getFailedOn(); + } + return response; } - return response; - } - }; - Map result = ht.coprocessorService(VisibilityLabelsService.class, - HConstants.EMPTY_BYTE_ARRAY, HConstants.EMPTY_BYTE_ARRAY, callable); - return result.values().iterator().next(); // There will be exactly one region for labels - // table and so one entry in result Map. - } finally { - if (ht != null) { - ht.close(); + }; + Map result = + table.coprocessorService(VisibilityLabelsService.class, + HConstants.EMPTY_BYTE_ARRAY, HConstants.EMPTY_BYTE_ARRAY, callable); + return result.values().iterator().next(); // There will be exactly one region for labels + // table and so one entry in result Map. } } } @@ -177,44 +176,42 @@ public class VisibilityClient { private static VisibilityLabelsResponse setOrClearAuths(Configuration conf, final String[] auths, final String user, final boolean setOrClear) throws IOException, ServiceException, Throwable { - Table ht = null; - try { - ht = new HTable(conf, LABELS_TABLE_NAME); - Batch.Call callable = - new Batch.Call() { - ServerRpcController controller = new ServerRpcController(); - BlockingRpcCallback rpcCallback = - new BlockingRpcCallback(); + // TODO: Make it so caller passes in a Connection rather than have us do this expensive + // setup each time. This class only used in test and shell at moment though. 
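All three methods in this class share one coprocessor-invocation skeleton; stripped of request-building detail it is, with S and Resp standing in for a generated service interface and its response type:

// Skeleton of the recurring Batch.Call shape; S and Resp are placeholders for a
// generated coprocessor service (e.g. VisibilityLabelsService) and its response.
Batch.Call<S, Resp> callable = new Batch.Call<S, Resp>() {
  ServerRpcController controller = new ServerRpcController();
  BlockingRpcCallback<Resp> rpcCallback = new BlockingRpcCallback<Resp>();

  public Resp call(S service) throws IOException {
    service.someRpc(controller, buildRequest(), rpcCallback); // hypothetical RPC
    Resp response = rpcCallback.get();
    if (controller.failedOnException()) {
      throw controller.getFailedOn();
    }
    return response;
  }
};
Map<byte[], Resp> result = table.coprocessorService(S.class,
    HConstants.EMPTY_BYTE_ARRAY, HConstants.EMPTY_BYTE_ARRAY, callable);
// Labels live in a single region, so result holds exactly one entry.
Resp response = result.values().iterator().next();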
+ try (Connection connection = ConnectionFactory.createConnection(conf)) { + try (Table table = connection.getTable(LABELS_TABLE_NAME)) { + Batch.Call callable = + new Batch.Call() { + ServerRpcController controller = new ServerRpcController(); + BlockingRpcCallback rpcCallback = + new BlockingRpcCallback(); - public VisibilityLabelsResponse call(VisibilityLabelsService service) throws IOException { - SetAuthsRequest.Builder setAuthReqBuilder = SetAuthsRequest.newBuilder(); - setAuthReqBuilder.setUser(ByteStringer.wrap(Bytes.toBytes(user))); - for (String auth : auths) { - if (auth.length() > 0) { - setAuthReqBuilder.addAuth(ByteStringer.wrap(Bytes.toBytes(auth))); + public VisibilityLabelsResponse call(VisibilityLabelsService service) throws IOException { + SetAuthsRequest.Builder setAuthReqBuilder = SetAuthsRequest.newBuilder(); + setAuthReqBuilder.setUser(ByteStringer.wrap(Bytes.toBytes(user))); + for (String auth : auths) { + if (auth.length() > 0) { + setAuthReqBuilder.addAuth(ByteStringer.wrap(Bytes.toBytes(auth))); + } } + if (setOrClear) { + service.setAuths(controller, setAuthReqBuilder.build(), rpcCallback); + } else { + service.clearAuths(controller, setAuthReqBuilder.build(), rpcCallback); + } + VisibilityLabelsResponse response = rpcCallback.get(); + if (controller.failedOnException()) { + throw controller.getFailedOn(); + } + return response; } - if (setOrClear) { - service.setAuths(controller, setAuthReqBuilder.build(), rpcCallback); - } else { - service.clearAuths(controller, setAuthReqBuilder.build(), rpcCallback); - } - VisibilityLabelsResponse response = rpcCallback.get(); - if (controller.failedOnException()) { - throw controller.getFailedOn(); - } - return response; - } - }; - Map result = ht.coprocessorService( - VisibilityLabelsService.class, HConstants.EMPTY_BYTE_ARRAY, HConstants.EMPTY_BYTE_ARRAY, - callable); - return result.values().iterator().next(); // There will be exactly one region for labels - // table and so one entry in result Map. - } finally { - if (ht != null) { - ht.close(); + }; + Map result = table.coprocessorService( + VisibilityLabelsService.class, HConstants.EMPTY_BYTE_ARRAY, HConstants.EMPTY_BYTE_ARRAY, + callable); + return result.values().iterator().next(); // There will be exactly one region for labels + // table and so one entry in result Map. } } } -} +} \ No newline at end of file diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/Server.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/Server.java index aca6b27..6b79f80 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/Server.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/Server.java @@ -18,9 +18,9 @@ */ package org.apache.hadoop.hbase; -import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.client.HConnection; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.client.ClusterConnection; import org.apache.hadoop.hbase.zookeeper.MetaTableLocator; import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; @@ -41,16 +41,12 @@ public interface Server extends Abortable, Stoppable { ZooKeeperWatcher getZooKeeper(); /** - * Returns reference to wrapped short-circuit (i.e. local, bypassing RPC layer entirely) - * HConnection to this server, which may be used for miscellaneous needs. + * Returns a reference to the servers' cluster connection. 
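A minimal server-side sketch of the contract spelled out in the note just below; tableName is hypothetical:

// Borrow the server-managed connection; callers must never close it.
ClusterConnection connection = server.getConnection();
if (!MetaTableAccessor.tableExists(connection, tableName)) {
  throw new TableNotFoundException(tableName);
}
// No connection.close() here: the Server owns the lifecycle.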
* - * Important note: this method returns reference to connection which is managed + * Important note: this method returns a reference to Connection which is managed * by Server itself, so callers must NOT attempt to close connection obtained. - * - * See {@link org.apache.hadoop.hbase.client.ConnectionUtils#createShortCircuitHConnection} - * for details on short-circuit connections. */ - HConnection getShortCircuitConnection(); + ClusterConnection getConnection(); /** * Returns instance of {@link org.apache.hadoop.hbase.zookeeper.MetaTableLocator} @@ -69,4 +65,4 @@ public interface Server extends Abortable, Stoppable { * Get CoordinatedStateManager instance for this server. */ CoordinatedStateManager getCoordinatedStateManager(); -} +} \ No newline at end of file diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/ZKTableArchiveClient.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/ZKTableArchiveClient.java index bba7a7d..6f06476 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/ZKTableArchiveClient.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/ZKTableArchiveClient.java @@ -22,7 +22,7 @@ import java.io.IOException; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configured; -import org.apache.hadoop.hbase.client.HConnection; +import org.apache.hadoop.hbase.client.ClusterConnection; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.zookeeper.ZKUtil; import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; @@ -36,9 +36,9 @@ public class ZKTableArchiveClient extends Configured { /** Configuration key for the archive node. */ private static final String ZOOKEEPER_ZNODE_HFILE_ARCHIVE_KEY = "zookeeper.znode.hfile.archive"; - private HConnection connection; + private ClusterConnection connection; - public ZKTableArchiveClient(Configuration conf, HConnection connection) { + public ZKTableArchiveClient(Configuration conf, ClusterConnection connection) { super(conf); this.connection = connection; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/HTableWrapper.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/HTableWrapper.java index 660733d..a47bd7d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/HTableWrapper.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/HTableWrapper.java @@ -18,10 +18,12 @@ */ package org.apache.hadoop.hbase.client; -import com.google.protobuf.Descriptors.MethodDescriptor; -import com.google.protobuf.Message; -import com.google.protobuf.Service; -import com.google.protobuf.ServiceException; +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.concurrent.ExecutorService; + import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.TableName; @@ -32,11 +34,10 @@ import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp; import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel; import org.apache.hadoop.io.MultipleIOException; -import java.io.IOException; -import java.util.ArrayList; -import java.util.List; -import java.util.Map; -import java.util.concurrent.ExecutorService; +import com.google.protobuf.Descriptors.MethodDescriptor; +import com.google.protobuf.Message; +import com.google.protobuf.Service; +import 
com.google.protobuf.ServiceException; /** * A wrapper for HTable. Can be used to restrict privilege. @@ -55,13 +56,13 @@ import java.util.concurrent.ExecutorService; public class HTableWrapper implements HTableInterface { private TableName tableName; - private HTable table; + private final Table table; private ClusterConnection connection; private final List openTables; /** * @param openTables External list of tables used for tracking wrappers. - * @throws IOException + * @throws IOException */ public static HTableInterface createWrapper(List openTables, TableName tableName, Environment env, ExecutorService pool) throws IOException { @@ -73,7 +74,7 @@ public class HTableWrapper implements HTableInterface { ClusterConnection connection, ExecutorService pool) throws IOException { this.tableName = tableName; - this.table = new HTable(tableName, connection, pool); + this.table = connection.getTable(tableName, pool); this.connection = connection; this.openTables = openTables; this.openTables.add(this); @@ -82,7 +83,7 @@ public class HTableWrapper implements HTableInterface { public void internalClose() throws IOException { List exceptions = new ArrayList(2); try { - table.close(); + table.close(); } catch (IOException e) { exceptions.add(e); } @@ -114,7 +115,12 @@ public class HTableWrapper implements HTableInterface { @Deprecated public Result getRowOrBefore(byte[] row, byte[] family) throws IOException { - return table.getRowOrBefore(row, family); + Scan scan = Scan.createGetClosestRowOrBeforeReverseScan(row); + Result startRowResult = null; + try (ResultScanner resultScanner = this.table.getScanner(scan)) { + startRowResult = resultScanner.next(); + } + return startRowResult; } public Result get(Get get) throws IOException { @@ -130,8 +136,15 @@ public class HTableWrapper implements HTableInterface { } @Deprecated - public Boolean[] exists(List gets) throws IOException{ - return table.exists(gets); + public Boolean[] exists(List gets) throws IOException { + // Do convertion. + boolean [] exists = table.existsAll(gets); + if (exists == null) return null; + Boolean [] results = new Boolean [exists.length]; + for (int i = 0; i < exists.length; i++) { + results[i] = exists[i]? Boolean.TRUE: Boolean.FALSE; + } + return results; } public void put(Put put) throws IOException { @@ -254,7 +267,7 @@ public class HTableWrapper implements HTableInterface { /** * {@inheritDoc} * @deprecated If any exception is thrown by one of the actions, there is no way to - * retrieve the partially executed results. Use + * retrieve the partially executed results. Use * {@link #batchCallback(List, Object[], org.apache.hadoop.hbase.client.coprocessor.Batch.Callback)} * instead. */ @@ -296,12 +309,12 @@ public class HTableWrapper implements HTableInterface { @Override public void setAutoFlush(boolean autoFlush) { - table.setAutoFlush(autoFlush, autoFlush); + table.setAutoFlushTo(autoFlush); } @Override public void setAutoFlush(boolean autoFlush, boolean clearBufferOnFail) { - table.setAutoFlush(autoFlush, clearBufferOnFail); + throw new UnsupportedOperationException(); } @Override @@ -322,7 +335,8 @@ public class HTableWrapper implements HTableInterface { @Override public long incrementColumnValue(byte[] row, byte[] family, byte[] qualifier, long amount, boolean writeToWAL) throws IOException { - return table.incrementColumnValue(row, family, qualifier, amount, writeToWAL); + return table.incrementColumnValue(row, family, qualifier, amount, + writeToWAL? 
Durability.USE_DEFAULT: Durability.SKIP_WAL); } @Override diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/HRegionPartitioner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/HRegionPartitioner.java index 11acea0..be131e8 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/HRegionPartitioner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/HRegionPartitioner.java @@ -22,11 +22,12 @@ import java.io.IOException; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hbase.classification.InterfaceAudience; -import org.apache.hadoop.hbase.classification.InterfaceStability; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.client.HTable; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; +import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.RegionLocator; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.hadoop.hbase.util.Bytes; @@ -47,26 +48,28 @@ import org.apache.hadoop.mapred.Partitioner; public class HRegionPartitioner implements Partitioner { private static final Log LOG = LogFactory.getLog(HRegionPartitioner.class); - private RegionLocator table; + // Connection and locator are not cleaned up; they just die when partitioner is done. + private Connection connection; + private RegionLocator locator; private byte[][] startKeys; public void configure(JobConf job) { try { - this.table = new HTable(HBaseConfiguration.create(job), - TableName.valueOf(job.get(TableOutputFormat.OUTPUT_TABLE))); + this.connection = ConnectionFactory.createConnection(HBaseConfiguration.create(job)); + TableName tableName = TableName.valueOf(job.get(TableOutputFormat.OUTPUT_TABLE)); + this.locator = this.connection.getRegionLocator(tableName); } catch (IOException e) { LOG.error(e); } try { - this.startKeys = this.table.getStartKeys(); + this.startKeys = this.locator.getStartKeys(); } catch (IOException e) { LOG.error(e); } } - public int getPartition(ImmutableBytesWritable key, - V2 value, int numPartitions) { + public int getPartition(ImmutableBytesWritable key, V2 value, int numPartitions) { byte[] region = null; // Only one region return 0 if (this.startKeys.length == 1){ @@ -75,7 +78,7 @@ implements Partitioner { try { // Not sure if this is cached after a split so we could have problems // here if a region splits while mapping - region = table.getRegionLocation(key.get()).getRegionInfo().getStartKey(); + region = locator.getRegionLocation(key.get()).getRegionInfo().getStartKey(); } catch (IOException e) { LOG.error(e); } @@ -92,4 +95,4 @@ implements Partitioner { // if above fails to find start key that match we need to return something return 0; } -} +} \ No newline at end of file diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/TableMapReduceUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/TableMapReduceUtil.java index 0f03159..1afb9d6 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/TableMapReduceUtil.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/TableMapReduceUtil.java @@ -26,6 +26,7 @@ import org.apache.hadoop.hbase.classification.InterfaceStability; import org.apache.hadoop.fs.Path; import 
org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.MetaTableAccessor; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.hadoop.hbase.mapreduce.MutationSerialization; @@ -211,7 +212,8 @@ public class TableMapReduceUtil { MutationSerialization.class.getName(), ResultSerialization.class.getName()); if (partitioner == HRegionPartitioner.class) { job.setPartitionerClass(HRegionPartitioner.class); - int regions = MetaTableAccessor.getRegionCount(HBaseConfiguration.create(job), table); + int regions = + MetaTableAccessor.getRegionCount(HBaseConfiguration.create(job), TableName.valueOf(table)); if (job.getNumReduceTasks() > regions) { job.setNumReduceTasks(regions); } @@ -275,9 +277,11 @@ public class TableMapReduceUtil { * @param job The current job configuration to adjust. * @throws IOException When retrieving the table details fails. */ + // Used by tests. public static void limitNumReduceTasks(String table, JobConf job) throws IOException { - int regions = MetaTableAccessor.getRegionCount(HBaseConfiguration.create(job), table); + int regions = + MetaTableAccessor.getRegionCount(HBaseConfiguration.create(job), TableName.valueOf(table)); if (job.getNumReduceTasks() > regions) job.setNumReduceTasks(regions); } @@ -290,9 +294,11 @@ public class TableMapReduceUtil { * @param job The current job configuration to adjust. * @throws IOException When retrieving the table details fails. */ + // Used by tests. public static void limitNumMapTasks(String table, JobConf job) throws IOException { - int regions = MetaTableAccessor.getRegionCount(HBaseConfiguration.create(job), table); + int regions = + MetaTableAccessor.getRegionCount(HBaseConfiguration.create(job), TableName.valueOf(table)); if (job.getNumMapTasks() > regions) job.setNumMapTasks(regions); } @@ -307,7 +313,8 @@ public class TableMapReduceUtil { */ public static void setNumReduceTasks(String table, JobConf job) throws IOException { - job.setNumReduceTasks(MetaTableAccessor.getRegionCount(HBaseConfiguration.create(job), table)); + job.setNumReduceTasks(MetaTableAccessor.getRegionCount(HBaseConfiguration.create(job), + TableName.valueOf(table))); } /** @@ -320,7 +327,8 @@ public class TableMapReduceUtil { */ public static void setNumMapTasks(String table, JobConf job) throws IOException { - job.setNumMapTasks(MetaTableAccessor.getRegionCount(HBaseConfiguration.create(job), table)); + job.setNumMapTasks(MetaTableAccessor.getRegionCount(HBaseConfiguration.create(job), + TableName.valueOf(table))); } /** diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/TableOutputFormat.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/TableOutputFormat.java index dab39a8..563b1f8 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/TableOutputFormat.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/TableOutputFormat.java @@ -20,23 +20,19 @@ package org.apache.hadoop.hbase.mapred; import java.io.IOException; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hbase.classification.InterfaceAudience; -import org.apache.hadoop.hbase.classification.InterfaceStability; +import org.apache.hadoop.fs.FileAlreadyExistsException; import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.TableName; +import 
org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; -import org.apache.hadoop.hbase.client.HTable; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; -import org.apache.hadoop.fs.FileAlreadyExistsException; +import org.apache.hadoop.mapred.FileOutputFormat; import org.apache.hadoop.mapred.InvalidJobConfException; import org.apache.hadoop.mapred.JobConf; -import org.apache.hadoop.mapred.FileOutputFormat; import org.apache.hadoop.mapred.RecordWriter; import org.apache.hadoop.mapred.Reporter; import org.apache.hadoop.util.Progressable; @@ -50,55 +46,51 @@ public class TableOutputFormat extends FileOutputFormat { - private final Connection conn; - private final Table table; + private Table m_table; /** * Instantiate a TableRecordWriter with the HBase HClient for writing. Assumes control over the * lifecycle of {@code conn}. */ - public TableRecordWriter(Connection conn, TableName tableName) throws IOException { - this.conn = conn; - this.table = conn.getTable(tableName); - ((HTable) this.table).setAutoFlush(false, true); + public TableRecordWriter(final Table table) throws IOException { + this.m_table = table; } public void close(Reporter reporter) throws IOException { - table.close(); - conn.close(); + this.m_table.close(); } public void write(ImmutableBytesWritable key, Put value) throws IOException { - table.put(new Put(value)); + m_table.put(new Put(value)); } } @Override - public RecordWriter getRecordWriter(FileSystem ignored, JobConf job, - String name, Progressable progress) throws IOException { + public RecordWriter getRecordWriter(FileSystem ignored, JobConf job, String name, + Progressable progress) + throws IOException { + // expecting exactly one path TableName tableName = TableName.valueOf(job.get(OUTPUT_TABLE)); - Connection conn = null; - try { - conn = ConnectionFactory.createConnection(HBaseConfiguration.create(job)); - } catch(IOException e) { - LOG.error(e); - throw e; - } - return new TableRecordWriter(conn, tableName); + Table table = null; + // Connection is not closed. Dies with JVM. No possibility for cleanup. + Connection connection = ConnectionFactory.createConnection(job); + table = connection.getTable(tableName); + // Clear write buffer on fail is true by default so no need to reset it. 
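The setAutoFlushTo(false) on the next line enables client-side write buffering; as a standalone sketch of that behavior (family, qualifier, and value bytes are hypothetical):

// Buffered-write pattern the record writer relies on: defer RPCs, flush on close.
Table table = connection.getTable(tableName);
table.setAutoFlushTo(false);                       // buffer puts client-side
table.put(new Put(Bytes.toBytes("row1"))
    .add(Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("v")));
table.close();                                     // flushes remaining buffered puts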
+ table.setAutoFlushTo(false); + return new TableRecordWriter(table); } @Override public void checkOutputSpecs(FileSystem ignored, JobConf job) - throws FileAlreadyExistsException, InvalidJobConfException, IOException { + throws FileAlreadyExistsException, InvalidJobConfException, IOException { String tableName = job.get(OUTPUT_TABLE); - if(tableName == null) { + if (tableName == null) { throw new IOException("Must specify table name"); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/DefaultVisibilityExpressionResolver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/DefaultVisibilityExpressionResolver.java index 150bb25..deb59c4 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/DefaultVisibilityExpressionResolver.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/DefaultVisibilityExpressionResolver.java @@ -28,11 +28,12 @@ import java.util.Map; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.TableNotFoundException; import org.apache.hadoop.hbase.Tag; -import org.apache.hadoop.hbase.client.HTable; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.ResultScanner; import org.apache.hadoop.hbase.client.Scan; @@ -66,44 +67,55 @@ public class DefaultVisibilityExpressionResolver implements VisibilityExpression @Override public void init() { // Reading all the labels and ordinal. - // This scan should be done by user with global_admin previliges.. Ensure that it works + // This scan should be done by user with global_admin privileges.. Ensure that it works Table labelsTable = null; + Connection connection = null; try { - labelsTable = new HTable(conf, LABELS_TABLE_NAME); - } catch (TableNotFoundException e) { - // Just return with out doing any thing. When the VC is not used we wont be having 'labels' - // table in the cluster. - return; - } catch (IOException e) { - LOG.error("Error opening 'labels' table", e); - return; - } - Scan scan = new Scan(); - scan.setAuthorizations(new Authorizations(VisibilityUtils.SYSTEM_LABEL)); - scan.addColumn(LABELS_TABLE_FAMILY, LABEL_QUALIFIER); - ResultScanner scanner = null; - try { - scanner = labelsTable.getScanner(scan); - Result next = null; - while ((next = scanner.next()) != null) { - byte[] row = next.getRow(); - byte[] value = next.getValue(LABELS_TABLE_FAMILY, LABEL_QUALIFIER); - labels.put(Bytes.toString(value), Bytes.toInt(row)); + connection = ConnectionFactory.createConnection(conf); + try { + labelsTable = connection.getTable(LABELS_TABLE_NAME); + } catch (TableNotFoundException e) { + // Just return with out doing any thing. When the VC is not used we wont be having 'labels' + // table in the cluster. 
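Stepping back, once init() succeeds the cached label-to-ordinal map backs expression conversion; a hypothetical driver, assuming the Configurable setter inherited from the VisibilityExpressionResolver interface:

// Hypothetical driver; init() scans the 'labels' table once, caching label -> ordinal.
DefaultVisibilityExpressionResolver resolver = new DefaultVisibilityExpressionResolver();
resolver.setConf(conf);
resolver.init();
List<Tag> tags = resolver.createVisibilityExpTags("secret&topsecret");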
+ return; + } catch (IOException e) { + LOG.error("Error opening 'labels' table", e); + return; } - } catch (IOException e) { - LOG.error("Error reading 'labels' table", e); - } finally { + Scan scan = new Scan(); + scan.setAuthorizations(new Authorizations(VisibilityUtils.SYSTEM_LABEL)); + scan.addColumn(LABELS_TABLE_FAMILY, LABEL_QUALIFIER); + ResultScanner scanner = null; try { - if (scanner != null) { - scanner.close(); + scanner = labelsTable.getScanner(scan); + Result next = null; + while ((next = scanner.next()) != null) { + byte[] row = next.getRow(); + byte[] value = next.getValue(LABELS_TABLE_FAMILY, LABEL_QUALIFIER); + labels.put(Bytes.toString(value), Bytes.toInt(row)); } + } catch (IOException e) { + LOG.error("Error scanning 'labels' table", e); } finally { + if (scanner != null) scanner.close(); + } + } catch (IOException ioe) { + LOG.error("Failed reading 'labels' tags", ioe); + return; + } finally { + if (labelsTable != null) { try { labelsTable.close(); - } catch (IOException e) { - LOG.warn("Error on closing 'labels' table", e); + } catch (IOException ioe) { + LOG.warn("Error closing 'labels' table", ioe); } } + if (connection != null) + try { + connection.close(); + } catch (IOException ioe) { + LOG.warn("Failed close of temporary connection", ioe); + } } } @@ -117,4 +129,4 @@ public class DefaultVisibilityExpressionResolver implements VisibilityExpression }; return VisibilityUtils.createVisibilityExpTags(visExpression, true, false, null, provider); } -} +} \ No newline at end of file diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HRegionPartitioner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HRegionPartitioner.java index f88d959..24ca8e6 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HRegionPartitioner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HRegionPartitioner.java @@ -28,9 +28,11 @@ import org.apache.hadoop.conf.Configurable; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.client.HTable; +import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.RegionLocator; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; +import org.apache.hadoop.hbase.mapred.TableOutputFormat; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.mapreduce.Partitioner; @@ -55,7 +57,9 @@ implements Configurable { private static final Log LOG = LogFactory.getLog(HRegionPartitioner.class); private Configuration conf = null; - private RegionLocator table; + // Connection and locator are not cleaned up; they just die when partitioner is done. 
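Both partitioner variants share the same placement math once startKeys is loaded; condensed, with numPartitions supplied by the framework (this mirrors the getPartition bodies in these files, not new behavior):

// Condensed partition logic shared by the mapred and mapreduce variants.
byte[] region = locator.getRegionLocation(key.get()).getRegionInfo().getStartKey();
for (int i = 0; i < startKeys.length; i++) {
  if (Bytes.compareTo(region, startKeys[i]) == 0) {
    if (i >= numPartitions - 1) {
      // more regions than reducers: fold the overflow back into range
      return (Integer.toString(i).hashCode() & Integer.MAX_VALUE) % numPartitions;
    }
    return i;
  }
}
return 0; // no start key matched; fall back to partition 0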
+ private Connection connection; + private RegionLocator locator; private byte[][] startKeys; /** @@ -82,7 +86,7 @@ implements Configurable { try { // Not sure if this is cached after a split so we could have problems // here if a region splits while mapping - region = table.getRegionLocation(key.get()).getRegionInfo().getStartKey(); + region = this.locator.getRegionLocation(key.get()).getRegionInfo().getStartKey(); } catch (IOException e) { LOG.error(e); } @@ -123,14 +127,14 @@ implements Configurable { public void setConf(Configuration configuration) { this.conf = HBaseConfiguration.create(configuration); try { - TableName tableName = TableName.valueOf(configuration - .get(TableOutputFormat.OUTPUT_TABLE)); - this.table = new HTable(this.conf, tableName); + this.connection = ConnectionFactory.createConnection(HBaseConfiguration.create(conf)); + TableName tableName = TableName.valueOf(conf.get(TableOutputFormat.OUTPUT_TABLE)); + this.locator = this.connection.getRegionLocator(tableName); } catch (IOException e) { LOG.error(e); } try { - this.startKeys = this.table.getStartKeys(); + this.startKeys = this.locator.getStartKeys(); } catch (IOException e) { LOG.error(e); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/ImportTsv.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/ImportTsv.java index f586523..b54e3ea 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/ImportTsv.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/ImportTsv.java @@ -41,9 +41,12 @@ import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableNotFoundException; import org.apache.hadoop.hbase.client.Admin; +import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.HBaseAdmin; import org.apache.hadoop.hbase.client.HTable; import org.apache.hadoop.hbase.client.Put; +import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.hadoop.hbase.util.Base64; import org.apache.hadoop.hbase.util.Bytes; @@ -401,86 +404,90 @@ public class ImportTsv extends Configured implements Tool { */ public static Job createSubmittableJob(Configuration conf, String[] args) throws IOException, ClassNotFoundException { + Job job = null; + try (Connection connection = ConnectionFactory.createConnection(conf)) { + try (Admin admin = connection.getAdmin()) { + // Support non-XML supported characters + // by re-encoding the passed separator as a Base64 string. + String actualSeparator = conf.get(SEPARATOR_CONF_KEY); + if (actualSeparator != null) { + conf.set(SEPARATOR_CONF_KEY, + Base64.encodeBytes(actualSeparator.getBytes())); + } - HBaseAdmin admin = new HBaseAdmin(conf); - // Support non-XML supported characters - // by re-encoding the passed separator as a Base64 string. - String actualSeparator = conf.get(SEPARATOR_CONF_KEY); - if (actualSeparator != null) { - conf.set(SEPARATOR_CONF_KEY, - Base64.encodeBytes(actualSeparator.getBytes())); - } + // See if a non-default Mapper was set + String mapperClassName = conf.get(MAPPER_CONF_KEY); + Class mapperClass = + mapperClassName != null ? 
Class.forName(mapperClassName) : DEFAULT_MAPPER; + + TableName tableName = TableName.valueOf(args[0]); + Path inputDir = new Path(args[1]); + String jobName = conf.get(JOB_NAME_CONF_KEY,NAME + "_" + tableName.getNameAsString()); + job = Job.getInstance(conf, jobName); + job.setJarByClass(mapperClass); + FileInputFormat.setInputPaths(job, inputDir); + job.setInputFormatClass(TextInputFormat.class); + job.setMapperClass(mapperClass); + String hfileOutPath = conf.get(BULK_OUTPUT_CONF_KEY); + String columns[] = conf.getStrings(COLUMNS_CONF_KEY); + if(StringUtils.isNotEmpty(conf.get(CREDENTIALS_LOCATION))) { + String fileLoc = conf.get(CREDENTIALS_LOCATION); + Credentials cred = Credentials.readTokenStorageFile(new File(fileLoc), conf); + job.getCredentials().addAll(cred); + } - // See if a non-default Mapper was set - String mapperClassName = conf.get(MAPPER_CONF_KEY); - Class mapperClass = mapperClassName != null ? - Class.forName(mapperClassName) : DEFAULT_MAPPER; - - TableName tableName = TableName.valueOf(args[0]); - Path inputDir = new Path(args[1]); - String jobName = conf.get(JOB_NAME_CONF_KEY,NAME + "_" + tableName.getNameAsString()); - Job job = Job.getInstance(conf, jobName); - job.setJarByClass(mapperClass); - FileInputFormat.setInputPaths(job, inputDir); - job.setInputFormatClass(TextInputFormat.class); - job.setMapperClass(mapperClass); - String hfileOutPath = conf.get(BULK_OUTPUT_CONF_KEY); - String columns[] = conf.getStrings(COLUMNS_CONF_KEY); - if(StringUtils.isNotEmpty(conf.get(CREDENTIALS_LOCATION))) { - String fileLoc = conf.get(CREDENTIALS_LOCATION); - Credentials cred = Credentials.readTokenStorageFile(new File(fileLoc), conf); - job.getCredentials().addAll(cred); - } + if (hfileOutPath != null) { + if (!admin.tableExists(tableName)) { + String errorMsg = format("Table '%s' does not exist.", tableName); + if ("yes".equalsIgnoreCase(conf.get(CREATE_TABLE_CONF_KEY, "yes"))) { + LOG.warn(errorMsg); + // TODO: this is backwards. Instead of depending on the existence of a table, + // create a sane splits file for HFileOutputFormat based on data sampling. + createTable(admin, tableName, columns); + } else { + LOG.error(errorMsg); + throw new TableNotFoundException(errorMsg); + } + } + try (HTable table = (HTable)connection.getTable(tableName)) { + job.setReducerClass(PutSortReducer.class); + Path outputDir = new Path(hfileOutPath); + FileOutputFormat.setOutputPath(job, outputDir); + job.setMapOutputKeyClass(ImmutableBytesWritable.class); + if (mapperClass.equals(TsvImporterTextMapper.class)) { + job.setMapOutputValueClass(Text.class); + job.setReducerClass(TextSortReducer.class); + } else { + job.setMapOutputValueClass(Put.class); + job.setCombinerClass(PutCombiner.class); + } + HFileOutputFormat.configureIncrementalLoad(job, table); + } + } else { + if (!admin.tableExists(tableName)) { + String errorMsg = format("Table '%s' does not exist.", tableName); + LOG.error(errorMsg); + throw new TableNotFoundException(errorMsg); + } + if (mapperClass.equals(TsvImporterTextMapper.class)) { + usage(TsvImporterTextMapper.class.toString() + + " should not be used for non bulkloading case. use " + + TsvImporterMapper.class.toString() + + " or custom mapper whose value type is Put."); + System.exit(-1); + } + // No reducers. Just write straight to table. Call initTableReducerJob + // to set up the TableOutputFormat. 
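Once built, the job factory above is driven like so; a hedged sketch with hypothetical table and input path, using the importtsv.columns key (COLUMNS_CONF_KEY) referenced in this hunk:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.mapreduce.ImportTsv;
import org.apache.hadoop.mapreduce.Job;

public class ImportTsvDriver {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    conf.set("importtsv.columns", "HBASE_ROW_KEY,d:c1");  // COLUMNS_CONF_KEY
    // Omit importtsv.bulk.output to take the no-reducer, write-straight-to-table path.
    Job job = ImportTsv.createSubmittableJob(conf, new String[] { "t1", "/input/tsv" });
    System.exit(job.waitForCompletion(true) ? 0 : 1);
  }
}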
+ TableMapReduceUtil.initTableReducerJob(tableName.getNameAsString(), null, + job); + job.setNumReduceTasks(0); + } - if (hfileOutPath != null) { - if (!admin.tableExists(tableName)) { - String errorMsg = format("Table '%s' does not exist.", tableName); - if ("yes".equalsIgnoreCase(conf.get(CREATE_TABLE_CONF_KEY, "yes"))) { - LOG.warn(errorMsg); - // TODO: this is backwards. Instead of depending on the existence of a table, - // create a sane splits file for HFileOutputFormat based on data sampling. - createTable(admin, tableName, columns); - } else { - LOG.error(errorMsg); - throw new TableNotFoundException(errorMsg); - } + TableMapReduceUtil.addDependencyJars(job); + TableMapReduceUtil.addDependencyJars(job.getConfiguration(), + com.google.common.base.Function.class /* Guava used by TsvParser */); } - HTable table = new HTable(conf, tableName); - job.setReducerClass(PutSortReducer.class); - Path outputDir = new Path(hfileOutPath); - FileOutputFormat.setOutputPath(job, outputDir); - job.setMapOutputKeyClass(ImmutableBytesWritable.class); - if (mapperClass.equals(TsvImporterTextMapper.class)) { - job.setMapOutputValueClass(Text.class); - job.setReducerClass(TextSortReducer.class); - } else { - job.setMapOutputValueClass(Put.class); - job.setCombinerClass(PutCombiner.class); - } - HFileOutputFormat.configureIncrementalLoad(job, table); - } else { - if (!admin.tableExists(tableName)) { - String errorMsg = format("Table '%s' does not exist.", tableName); - LOG.error(errorMsg); - throw new TableNotFoundException(errorMsg); - } - if (mapperClass.equals(TsvImporterTextMapper.class)) { - usage(TsvImporterTextMapper.class.toString() - + " should not be used for non bulkloading case. use " - + TsvImporterMapper.class.toString() - + " or custom mapper whose value type is Put."); - System.exit(-1); - } - // No reducers. Just write straight to table. Call initTableReducerJob - // to set up the TableOutputFormat. 
- TableMapReduceUtil.initTableReducerJob(tableName.getNameAsString(), null, - job); - job.setNumReduceTasks(0); } - - TableMapReduceUtil.addDependencyJars(job); - TableMapReduceUtil.addDependencyJars(job.getConfiguration(), - com.google.common.base.Function.class /* Guava used by TsvParser */); return job; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java index f69be50..ebe7d36 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java @@ -34,8 +34,6 @@ import java.util.zip.ZipFile; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hbase.classification.InterfaceAudience; -import org.apache.hadoop.hbase.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; @@ -43,10 +41,11 @@ import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.MetaTableAccessor; import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; -import org.apache.hadoop.hbase.io.hfile.CacheConfig; import org.apache.hadoop.hbase.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos; import org.apache.hadoop.hbase.security.User; @@ -662,7 +661,7 @@ public class TableMapReduceUtil { job.setOutputValueClass(Writable.class); if (partitioner == HRegionPartitioner.class) { job.setPartitionerClass(HRegionPartitioner.class); - int regions = MetaTableAccessor.getRegionCount(conf, table); + int regions = MetaTableAccessor.getRegionCount(conf, TableName.valueOf(table)); if (job.getNumReduceTasks() > regions) { job.setNumReduceTasks(regions); } @@ -687,7 +686,8 @@ public class TableMapReduceUtil { */ public static void limitNumReduceTasks(String table, Job job) throws IOException { - int regions = MetaTableAccessor.getRegionCount(job.getConfiguration(), table); + int regions = + MetaTableAccessor.getRegionCount(job.getConfiguration(), TableName.valueOf(table)); if (job.getNumReduceTasks() > regions) job.setNumReduceTasks(regions); } @@ -702,7 +702,8 @@ public class TableMapReduceUtil { */ public static void setNumReduceTasks(String table, Job job) throws IOException { - job.setNumReduceTasks(MetaTableAccessor.getRegionCount(job.getConfiguration(), table)); + job.setNumReduceTasks(MetaTableAccessor.getRegionCount(job.getConfiguration(), + TableName.valueOf(table))); } /** diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableOutputFormat.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableOutputFormat.java index c7c6639..de29f37 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableOutputFormat.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableOutputFormat.java @@ -47,8 +47,6 @@ import org.apache.hadoop.mapreduce.TaskAttemptContext; * Convert Map/Reduce output and write it to an HBase table. 
The KEY is ignored * while the output value must be either a {@link Put} or a * {@link Delete} instance. - * - * <KEY>
    is the type of the key. Ignored in this class. */ @InterfaceAudience.Public @InterfaceStability.Stable diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java index 7f41aa1..36b322f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java @@ -339,7 +339,7 @@ public class AssignmentManager { if (TableName.META_TABLE_NAME.equals(tableName)) { hris = new MetaTableLocator().getMetaRegions(server.getZooKeeper()); } else { - hris = MetaTableAccessor.getTableRegions(server.getShortCircuitConnection(), tableName, true); + hris = MetaTableAccessor.getTableRegions(server.getConnection(), tableName, true); } Integer pending = 0; @@ -565,7 +565,7 @@ public class AssignmentManager { ((FavoredNodeLoadBalancer)this.balancer).getFavoredNodes(region)); } FavoredNodeAssignmentHelper.updateMetaWithFavoredNodesInfo(regionToFavoredNodes, - this.server.getShortCircuitConnection()); + this.server.getConnection()); } /** @@ -1564,7 +1564,7 @@ public class AssignmentManager { TableState.State.ENABLING); // Region assignment from META - List results = MetaTableAccessor.fullScanOfMeta(server.getShortCircuitConnection()); + List results = MetaTableAccessor.fullScanOfMeta(server.getConnection()); // Get any new but slow to checkin region server that joined the cluster Set onlineServers = serverManager.getOnlineServers().keySet(); // Set of offline servers to be returned diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java index 886991c..25c405c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java @@ -29,18 +29,19 @@ import java.util.concurrent.atomic.AtomicInteger; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Chore; -import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.MetaTableAccessor; import org.apache.hadoop.hbase.Server; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.backup.HFileArchiver; -import org.apache.hadoop.hbase.MetaTableAccessor; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.MetaScanner; import org.apache.hadoop.hbase.client.MetaScanner.MetaScannerVisitor; import org.apache.hadoop.hbase.client.Result; @@ -62,6 +63,7 @@ public class CatalogJanitor extends Chore { private final MasterServices services; private AtomicBoolean enabled = new AtomicBoolean(true); private AtomicBoolean alreadyRunning = new AtomicBoolean(false); + private final Connection connection; CatalogJanitor(final Server server, final MasterServices services) { super("CatalogJanitor-" + server.getServerName().toShortString(), @@ -69,6 +71,7 @@ public class CatalogJanitor extends Chore { server); this.server = server; 
this.services = services; + this.connection = server.getConnection(); } @Override @@ -163,7 +166,7 @@ public class CatalogJanitor extends Chore { // Run full scan of hbase:meta catalog table passing in our custom visitor with // the start row - MetaScanner.metaScan(server.getConfiguration(), null, visitor, tableName); + MetaScanner.metaScan(server.getConfiguration(), this.connection, visitor, tableName); return new Triple, Map>( count.get(), mergedRegions, splitParents); @@ -198,7 +201,7 @@ public class CatalogJanitor extends Chore { + " from fs because merged region no longer holds references"); HFileArchiver.archiveRegion(this.services.getConfiguration(), fs, regionA); HFileArchiver.archiveRegion(this.services.getConfiguration(), fs, regionB); - MetaTableAccessor.deleteMergeQualifiers(server.getShortCircuitConnection(), + MetaTableAccessor.deleteMergeQualifiers(server.getConnection(), mergedRegion); return true; } @@ -331,7 +334,7 @@ public class CatalogJanitor extends Chore { FileSystem fs = this.services.getMasterFileSystem().getFileSystem(); if (LOG.isTraceEnabled()) LOG.trace("Archiving parent region: " + parent); HFileArchiver.archiveRegion(this.services.getConfiguration(), fs, parent); - MetaTableAccessor.deleteRegion(this.server.getShortCircuitConnection(), parent); + MetaTableAccessor.deleteRegion(this.connection, parent); result = true; } return result; @@ -404,7 +407,7 @@ public class CatalogJanitor extends Chore { // Get merge regions if it is a merged region and already has merge // qualifier Pair mergeRegions = MetaTableAccessor - .getRegionsFromMergeQualifier(this.services.getShortCircuitConnection(), + .getRegionsFromMergeQualifier(this.services.getConnection(), region.getRegionName()); if (mergeRegions == null || (mergeRegions.getFirst() == null && mergeRegions.getSecond() == null)) { @@ -420,4 +423,4 @@ public class CatalogJanitor extends Chore { return cleanMergeRegion(region, mergeRegions.getFirst(), mergeRegions.getSecond()); } -} +} \ No newline at end of file diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java index 3437f34..739ac76 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java @@ -538,12 +538,7 @@ public class HMaster extends HRegionServer implements MasterServices, Server { ZKClusterId.setClusterId(this.zooKeeper, fileSystemManager.getClusterId()); this.serverManager = createServerManager(this, this); - synchronized (this) { - if (shortCircuitConnection == null) { - shortCircuitConnection = createShortCircuitConnection(); - metaTableLocator = new MetaTableLocator(); - } - } + setupClusterConnection(); // Invalidate all write locks held previously this.tableLockManager.reapWriteLocks(); @@ -721,7 +716,7 @@ public class HMaster extends HRegionServer implements MasterServices, Server { metaState.getState(), metaState.getServerName(), null); if (!metaState.isOpened() || !metaTableLocator.verifyMetaRegionLocation( - this.getShortCircuitConnection(), this.getZooKeeper(), timeout)) { + this.getConnection(), this.getZooKeeper(), timeout)) { ServerName currentMetaServer = metaState.getServerName(); if (serverManager.isServerOnline(currentMetaServer)) { LOG.info("Meta was in transition on " + currentMetaServer); @@ -1492,6 +1487,7 @@ public class HMaster extends HRegionServer implements MasterServices, Server { * is found, but not currently deployed, the 
second element of the pair * may be null. */ + @VisibleForTesting // Used by TestMaster. Pair getTableRegionForRow( final TableName tableName, final byte [] rowKey) throws IOException { @@ -1542,7 +1538,7 @@ public class HMaster extends HRegionServer implements MasterServices, Server { if (isCatalogTable(tableName)) { throw new IOException("Can't modify catalog tables"); } - if (!MetaTableAccessor.tableExists(getShortCircuitConnection(), tableName)) { + if (!MetaTableAccessor.tableExists(getConnection(), tableName)) { throw new TableNotFoundException(tableName); } if (!getAssignmentManager().getTableStateManager(). diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java index 2efcf63..c3012f5 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java @@ -1132,7 +1132,7 @@ public class MasterRpcServices extends RSRpcServices try { master.checkInitialized(); Pair pair = - MetaTableAccessor.getRegion(master.getShortCircuitConnection(), regionName); + MetaTableAccessor.getRegion(master.getConnection(), regionName); if (pair == null) throw new UnknownRegionException(Bytes.toStringBinary(regionName)); HRegionInfo hri = pair.getFirst(); if (master.cpHost != null) { @@ -1263,7 +1263,7 @@ public class MasterRpcServices extends RSRpcServices + " actual: " + type); } Pair pair = - MetaTableAccessor.getRegion(master.getShortCircuitConnection(), regionName); + MetaTableAccessor.getRegion(master.getConnection(), regionName); if (pair == null) throw new UnknownRegionException(Bytes.toString(regionName)); HRegionInfo hri = pair.getFirst(); if (master.cpHost != null) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStateStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStateStore.java index ae4af4a..8f7d0f3 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStateStore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStateStore.java @@ -240,11 +240,11 @@ public class RegionStateStore { void splitRegion(HRegionInfo p, HRegionInfo a, HRegionInfo b, ServerName sn) throws IOException { - MetaTableAccessor.splitRegion(server.getShortCircuitConnection(), p, a, b, sn); + MetaTableAccessor.splitRegion(server.getConnection(), p, a, b, sn); } void mergeRegions(HRegionInfo p, HRegionInfo a, HRegionInfo b, ServerName sn) throws IOException { - MetaTableAccessor.mergeRegions(server.getShortCircuitConnection(), p, a, b, sn); + MetaTableAccessor.mergeRegions(server.getConnection(), p, a, b, sn); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java index b96aaee..796536a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java @@ -934,7 +934,7 @@ public class RegionStates { try { Pair p = - MetaTableAccessor.getRegion(server.getShortCircuitConnection(), regionName); + MetaTableAccessor.getRegion(server.getConnection(), regionName); HRegionInfo hri = p == null ? 
null : p.getFirst(); if (hri != null) { createRegionState(hri); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableNamespaceManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableNamespaceManager.java index 7132555..31d3fab 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableNamespaceManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableNamespaceManager.java @@ -41,7 +41,6 @@ import org.apache.hadoop.hbase.ZKNamespaceManager; import org.apache.hadoop.hbase.MetaTableAccessor; import org.apache.hadoop.hbase.client.Delete; import org.apache.hadoop.hbase.client.Get; -import org.apache.hadoop.hbase.client.HTable; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.ResultScanner; @@ -69,7 +68,7 @@ public class TableNamespaceManager { private Configuration conf; private MasterServices masterServices; - private HTable nsTable; + private Table nsTable; private ZKNamespaceManager zkNamespaceManager; private boolean initialized; @@ -82,7 +81,7 @@ public class TableNamespaceManager { } public void start() throws IOException { - if (!MetaTableAccessor.tableExists(masterServices.getShortCircuitConnection(), + if (!MetaTableAccessor.tableExists(masterServices.getConnection(), TableName.NAMESPACE_TABLE_NAME)) { LOG.info("Namespace table not found. Creating..."); createNamespaceTable(masterServices); @@ -253,16 +252,14 @@ public class TableNamespaceManager { public synchronized boolean isTableAvailableAndInitialized() throws IOException { // Did we already get a table? If so, still make sure it's available if (initialized) { - if (nsTable.getConnection().isClosed()) { - nsTable = new HTable(conf, TableName.NAMESPACE_TABLE_NAME); - } + this.nsTable = this.masterServices.getConnection().getTable(TableName.NAMESPACE_TABLE_NAME); return true; } // Now check if the table is assigned, if not then fail fast if (isTableAssigned() && isTableEnabled()) { try { - nsTable = new HTable(conf, TableName.NAMESPACE_TABLE_NAME); + nsTable = this.masterServices.getConnection().getTable(TableName.NAMESPACE_TABLE_NAME); zkNamespaceManager = new ZKNamespaceManager(masterServices.getZooKeeper()); zkNamespaceManager.start(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/FavoredNodeAssignmentHelper.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/FavoredNodeAssignmentHelper.java index 01c1f89..c884806 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/FavoredNodeAssignmentHelper.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/FavoredNodeAssignmentHelper.java @@ -25,21 +25,21 @@ import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Map; +import java.util.Map.Entry; import java.util.Random; import java.util.Set; -import java.util.Map.Entry; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionInfo; -import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.MetaTableAccessor; +import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.classification.InterfaceAudience; 
import org.apache.hadoop.hbase.client.Connection; -import org.apache.hadoop.hbase.client.HTable; +import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.master.RackManager; @@ -121,12 +121,14 @@ public class FavoredNodeAssignmentHelper { } } // Write the region assignments to the meta table. - Table metaTable = null; - try { - metaTable = new HTable(conf, TableName.META_TABLE_NAME); - metaTable.put(puts); - } finally { - if (metaTable != null) metaTable.close(); + // TODO: See how the overrides above take a Connection rather than a Configuration, only the + // Connection is a short circuit connection. That is not going to be good in all cases, when + // master and meta are not colocated. Fix when this favored nodes feature is actually used + // someday. + try (Connection connection = ConnectionFactory.createConnection(conf)) { + try (Table metaTable = connection.getTable(TableName.META_TABLE_NAME)) { + metaTable.put(puts); + } } LOG.info("Added " + puts.size() + " regions in META"); } @@ -304,7 +306,6 @@ public class FavoredNodeAssignmentHelper { * primary/secondary/tertiary RegionServers * @param primaryRSMap * @return the map of regions to the servers the region-files should be hosted on - * @throws IOException */ public Map placeSecondaryAndTertiaryWithRestrictions( Map primaryRSMap) { @@ -603,4 +604,4 @@ } return strBuf.toString(); } -} +} \ No newline at end of file diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/FavoredNodeLoadBalancer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/FavoredNodeLoadBalancer.java index 694e902..111de62 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/FavoredNodeLoadBalancer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/FavoredNodeLoadBalancer.java @@ -75,7 +75,7 @@ public class FavoredNodeLoadBalancer extends BaseLoadBalancer { List plans = new ArrayList(); //perform a scan of the meta to get the latest updates (if any) SnapshotOfRegionAssignmentFromMeta snaphotOfRegionAssignment = - new SnapshotOfRegionAssignmentFromMeta(super.services.getShortCircuitConnection()); + new SnapshotOfRegionAssignmentFromMeta(super.services.getConnection()); try { snaphotOfRegionAssignment.initialize(); } catch (IOException ie) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/CreateTableHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/CreateTableHandler.java index 359315e..adf1004 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/CreateTableHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/CreateTableHandler.java @@ -119,7 +119,7 @@ public class CreateTableHandler extends EventHandler { boolean success = false; try { TableName tableName = this.hTableDescriptor.getTableName(); - if (MetaTableAccessor.tableExists(this.server.getShortCircuitConnection(), tableName)) { + if (MetaTableAccessor.tableExists(this.server.getConnection(), tableName)) { throw new TableExistsException(tableName); } success = true; @@ -289,6 +289,6 @@ */ protected void addRegionsToMeta(final List regionInfos) throws IOException { - MetaTableAccessor.addRegionsToMeta(this.server.getShortCircuitConnection(), regionInfos); + 
MetaTableAccessor.addRegionsToMeta(this.server.getConnection(), regionInfos); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DeleteTableHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DeleteTableHandler.java index b36eb95..905f899 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DeleteTableHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DeleteTableHandler.java @@ -135,7 +135,7 @@ public class DeleteTableHandler extends TableEventHandler { try { // 1. Remove regions from META LOG.debug("Deleting regions from META"); - MetaTableAccessor.deleteRegions(this.server.getShortCircuitConnection(), regions); + MetaTableAccessor.deleteRegions(this.server.getConnection(), regions); // ----------------------------------------------------------------------- // NOTE: At this point we still have data on disk, but nothing in hbase:meta diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DisableTableHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DisableTableHandler.java index 455a6ce..ee97616 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DisableTableHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DisableTableHandler.java @@ -80,7 +80,7 @@ public class DisableTableHandler extends EventHandler { boolean success = false; try { // Check if table exists - if (!MetaTableAccessor.tableExists(this.server.getShortCircuitConnection(), tableName)) { + if (!MetaTableAccessor.tableExists(this.server.getConnection(), tableName)) { throw new TableNotFoundException(tableName); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/EnableTableHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/EnableTableHandler.java index 3d48124..280e3e4 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/EnableTableHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/EnableTableHandler.java @@ -91,7 +91,7 @@ public class EnableTableHandler extends EventHandler { boolean success = false; try { // Check if table exists - if (!MetaTableAccessor.tableExists(this.server.getShortCircuitConnection(), tableName)) { + if (!MetaTableAccessor.tableExists(this.server.getConnection(), tableName)) { // retainAssignment is true only during recovery. 
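The handlers above (create/delete/disable/enable) now share one existence-check idiom: MetaTableAccessor.tableExists(Connection, TableName) against the server's long-lived connection. A minimal standalone sketch of the same check, assuming a caller that owns its own Connection (the table name "t1" and the main() harness are illustrative, not part of this patch):

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class TableExistsCheck {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    // The handlers reuse the server's shared connection; a standalone caller
    // owns the Connection lifecycle itself, hence the try-with-resources.
    try (Connection connection = ConnectionFactory.createConnection(conf)) {
      TableName tableName = TableName.valueOf("t1");
      boolean exists = MetaTableAccessor.tableExists(connection, tableName);
      System.out.println(tableName + (exists ? " exists" : " not found") + " in hbase:meta");
    }
  }
}
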
In normal case it is false if (!this.skipTableStateCheck) { throw new TableNotFoundException(tableName); @@ -177,7 +177,7 @@ public class EnableTableHandler extends EventHandler { server.getZooKeeper()); } else { tableRegionsAndLocations = MetaTableAccessor.getTableRegionsAndLocations( - server.getShortCircuitConnection(), tableName, true); + server.getConnection(), tableName, true); } int countOfRegionsInTable = tableRegionsAndLocations.size(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/MetaServerShutdownHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/MetaServerShutdownHandler.java index 73208bc..23e41d2 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/MetaServerShutdownHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/MetaServerShutdownHandler.java @@ -148,7 +148,7 @@ public class MetaServerShutdownHandler extends ServerShutdownHandler { throws InterruptedException, IOException, KeeperException { long timeout = this.server.getConfiguration(). getLong("hbase.catalog.verification.timeout", 1000); - if (!server.getMetaTableLocator().verifyMetaRegionLocation(server.getShortCircuitConnection(), + if (!server.getMetaTableLocator().verifyMetaRegionLocation(server.getConnection(), this.server.getZooKeeper(), timeout)) { this.services.getAssignmentManager().assignMeta(); } else if (serverName.equals(server.getMetaTableLocator().getMetaRegionLocation( diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/ModifyTableHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/ModifyTableHandler.java index a778c26..b35de6a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/ModifyTableHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/ModifyTableHandler.java @@ -25,15 +25,14 @@ import java.util.Set; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.TableDescriptor; -import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HTableDescriptor; -import org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.MetaTableAccessor; -import org.apache.hadoop.hbase.client.HTable; +import org.apache.hadoop.hbase.Server; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.ResultScanner; import org.apache.hadoop.hbase.client.Scan; @@ -44,7 +43,6 @@ import org.apache.hadoop.hbase.master.HMaster; import org.apache.hadoop.hbase.master.MasterCoprocessorHost; import org.apache.hadoop.hbase.master.MasterFileSystem; import org.apache.hadoop.hbase.master.MasterServices; -import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos; import org.apache.hadoop.hbase.util.Bytes; @InterfaceAudience.Private @@ -101,19 +99,14 @@ public class ModifyTableHandler extends TableEventHandler { Set tableRows = new HashSet(); Scan scan = MetaTableAccessor.getScanForTableName(table); scan.addColumn(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER); - Table htable = null; - try { - htable = new HTable(masterServices.getConfiguration(), TableName.META_TABLE_NAME); 
- ResultScanner resScanner = htable.getScanner(scan); + Connection connection = this.masterServices.getConnection(); + try (Table metaTable = connection.getTable(TableName.META_TABLE_NAME)) { + ResultScanner resScanner = metaTable.getScanner(scan); for (Result result : resScanner) { tableRows.add(result.getRow()); } MetaTableAccessor.removeRegionReplicasFromMeta(tableRows, newReplicaCount, - oldReplicaCount - newReplicaCount, masterServices.getShortCircuitConnection()); - } finally { - if (htable != null) { - htable.close(); - } + oldReplicaCount - newReplicaCount, masterServices.getConnection()); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/TableEventHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/TableEventHandler.java index 66c45a4..0c67154 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/TableEventHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/TableEventHandler.java @@ -28,19 +28,21 @@ import java.util.TreeMap; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.CoordinatedStateException; -import org.apache.hadoop.hbase.TableDescriptor; -import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.InvalidFamilyOperationException; +import org.apache.hadoop.hbase.MetaTableAccessor; import org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.TableDescriptor; import org.apache.hadoop.hbase.TableExistsException; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableNotDisabledException; -import org.apache.hadoop.hbase.MetaTableAccessor; -import org.apache.hadoop.hbase.client.HTable; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.client.RegionLocator; import org.apache.hadoop.hbase.client.TableState; import org.apache.hadoop.hbase.executor.EventHandler; import org.apache.hadoop.hbase.executor.EventType; @@ -48,10 +50,10 @@ import org.apache.hadoop.hbase.master.BulkReOpen; import org.apache.hadoop.hbase.master.MasterServices; import org.apache.hadoop.hbase.master.TableLockManager.TableLock; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.zookeeper.MetaTableLocator; import com.google.common.collect.Lists; import com.google.common.collect.Maps; -import org.apache.hadoop.hbase.zookeeper.MetaTableLocator; /** * Base class for performing operations against tables. @@ -130,7 +132,7 @@ public abstract class TableEventHandler extends EventHandler { if (TableName.META_TABLE_NAME.equals(tableName)) { hris = new MetaTableLocator().getMetaRegions(server.getZooKeeper()); } else { - hris = MetaTableAccessor.getTableRegions(server.getShortCircuitConnection(), tableName); + hris = MetaTableAccessor.getTableRegions(server.getConnection(), tableName); } handleTableOperation(hris); if (eventType.isOnlineSchemaChangeSupported() && this.masterServices. 
@@ -175,32 +177,32 @@ public abstract class TableEventHandler extends EventHandler { public boolean reOpenAllRegions(List regions) throws IOException { boolean done = false; LOG.info("Bucketing regions by region server..."); - HTable table = new HTable(masterServices.getConfiguration(), tableName); - TreeMap> serverToRegions = Maps - .newTreeMap(); - NavigableMap hriHserverMapping; - try { - hriHserverMapping = table.getRegionLocations(); - } finally { - table.close(); + List regionLocations = null; + Connection connection = this.masterServices.getConnection(); + try (RegionLocator locator = connection.getRegionLocator(tableName)) { + regionLocations = locator.getAllRegionLocations(); } - + // Convert List to Map. + NavigableMap hri2Sn = new TreeMap(); + for (HRegionLocation location: regionLocations) { + hri2Sn.put(location.getRegionInfo(), location.getServerName()); + } + TreeMap> serverToRegions = Maps.newTreeMap(); List reRegions = new ArrayList(); for (HRegionInfo hri : regions) { - ServerName rsLocation = hriHserverMapping.get(hri); - + ServerName sn = hri2Sn.get(hri); // Skip the offlined split parent region // See HBASE-4578 for more information. - if (null == rsLocation) { + if (null == sn) { LOG.info("Skip " + hri); continue; } - if (!serverToRegions.containsKey(rsLocation)) { + if (!serverToRegions.containsKey(sn)) { LinkedList hriList = Lists.newLinkedList(); - serverToRegions.put(rsLocation, hriList); + serverToRegions.put(sn, hriList); } reRegions.add(hri); - serverToRegions.get(rsLocation).add(hri); + serverToRegions.get(sn).add(hri); } LOG.info("Reopening " + reRegions.size() + " regions on " diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/TruncateTableHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/TruncateTableHandler.java index c5790e1..893fd37 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/TruncateTableHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/TruncateTableHandler.java @@ -125,7 +125,7 @@ public class TruncateTableHandler extends DeleteTableHandler { } // 4. Add regions to META - MetaTableAccessor.addRegionsToMeta(masterServices.getShortCircuitConnection(), + MetaTableAccessor.addRegionsToMeta(masterServices.getConnection(), regionInfos); // 5. 
Trigger immediate assignment of the regions in round-robin fashion diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/CloneSnapshotHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/CloneSnapshotHandler.java index 16d305e..c9fc93b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/CloneSnapshotHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/CloneSnapshotHandler.java @@ -141,7 +141,7 @@ public class CloneSnapshotHandler extends CreateTableHandler implements Snapshot protected void addRegionsToMeta(final List regionInfos) throws IOException { super.addRegionsToMeta(regionInfos); - metaChanges.updateMetaParentRegions(this.server.getShortCircuitConnection(), regionInfos); + metaChanges.updateMetaParentRegions(this.server.getConnection(), regionInfos); } @Override diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/MasterSnapshotVerifier.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/MasterSnapshotVerifier.java index af4eb09..efafaf9 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/MasterSnapshotVerifier.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/MasterSnapshotVerifier.java @@ -155,7 +155,7 @@ public final class MasterSnapshotVerifier { if (TableName.META_TABLE_NAME.equals(tableName)) { regions = new MetaTableLocator().getMetaRegions(services.getZooKeeper()); } else { - regions = MetaTableAccessor.getTableRegions(services.getShortCircuitConnection(), tableName); + regions = MetaTableAccessor.getTableRegions(services.getConnection(), tableName); } // Remove the non-default regions RegionReplicaUtil.removeNonDefaultRegions(regions); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/RestoreSnapshotHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/RestoreSnapshotHandler.java index ff074e8..57895e9 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/RestoreSnapshotHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/RestoreSnapshotHandler.java @@ -109,7 +109,7 @@ public class RestoreSnapshotHandler extends TableEventHandler implements Snapsho @Override protected void handleTableOperation(List hris) throws IOException { MasterFileSystem fileSystemManager = masterServices.getMasterFileSystem(); - Connection conn = masterServices.getShortCircuitConnection(); + Connection conn = masterServices.getConnection(); FileSystem fs = fileSystemManager.getFileSystem(); Path rootDir = fileSystemManager.getRootDir(); TableName tableName = hTableDescriptor.getTableName(); @@ -163,7 +163,7 @@ public class RestoreSnapshotHandler extends TableEventHandler implements Snapsho if (metaChanges.hasRegionsToRestore()) { MetaTableAccessor.overwriteRegions(conn, metaChanges.getRegionsToRestore()); } - metaChanges.updateMetaParentRegions(this.server.getShortCircuitConnection(), hris); + metaChanges.updateMetaParentRegions(this.server.getConnection(), hris); // At this point the restore is complete. Next step is enabling the table. 
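The reOpenAllRegions() rewrite above replaces the removed HTable#getRegionLocations() with RegionLocator#getAllRegionLocations(), then builds its own region-to-server map. The same conversion in isolation, as a sketch assuming a caller-owned Connection (the table name "t1" is illustrative):

import java.io.IOException;
import java.util.List;
import java.util.NavigableMap;
import java.util.TreeMap;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;

public class RegionLocationsDump {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    TableName tableName = TableName.valueOf("t1");
    try (Connection connection = ConnectionFactory.createConnection(conf);
        RegionLocator locator = connection.getRegionLocator(tableName)) {
      // Fetch all locations once, then index them by region, as the handler now does.
      List<HRegionLocation> locations = locator.getAllRegionLocations();
      NavigableMap<HRegionInfo, ServerName> regionToServer =
          new TreeMap<HRegionInfo, ServerName>();
      for (HRegionLocation location : locations) {
        regionToServer.put(location.getRegionInfo(), location.getServerName());
      }
      System.out.println(tableName + ": " + regionToServer.size() + " regions");
    }
  }
}
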
LOG.info("Restore snapshot=" + ClientSnapshotDescriptionUtils.toString(snapshot) + diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java index 65c7670..d2e11a2 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java @@ -721,7 +721,7 @@ public class SnapshotManager extends MasterProcedureManager implements Stoppable SnapshotReferenceUtil.verifySnapshot(master.getConfiguration(), fs, manifest); // Execute the restore/clone operation - if (MetaTableAccessor.tableExists(master.getShortCircuitConnection(), tableName)) { + if (MetaTableAccessor.tableExists(master.getConnection(), tableName)) { if (master.getTableStateManager().isTableState( TableName.valueOf(snapshot.getTable()), TableState.State.ENABLED)) { throw new UnsupportedOperationException("Table '" + diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java index 94275c8..5fd4aaa 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java @@ -174,7 +174,7 @@ public abstract class TakeSnapshotHandler extends EventHandler implements Snapsh server.getZooKeeper()); } else { regionsAndLocations = MetaTableAccessor.getTableRegionsAndLocations( - server.getShortCircuitConnection(), snapshotTable, false); + server.getConnection(), snapshotTable, false); } // run the snapshot diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/flush/MasterFlushTableProcedureManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/flush/MasterFlushTableProcedureManager.java index bc248b9..3b0f1e3 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/flush/MasterFlushTableProcedureManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/flush/MasterFlushTableProcedureManager.java @@ -132,7 +132,7 @@ public class MasterFlushTableProcedureManager extends MasterProcedureManager { master.getZooKeeper()); } else { regionsAndLocations = MetaTableAccessor.getTableRegionsAndLocations( - master.getShortCircuitConnection(), tableName, false); + master.getConnection(), tableName, false); } } catch (InterruptedException e1) { String msg = "Failed to get regions for '" + desc.getInstance() + "'"; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/MasterQuotaManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/MasterQuotaManager.java index 885862c..6a57156 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/MasterQuotaManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/MasterQuotaManager.java @@ -23,31 +23,21 @@ import java.util.HashSet; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hbase.classification.InterfaceAudience; -import org.apache.hadoop.hbase.classification.InterfaceStability; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.Coprocessor; import org.apache.hadoop.hbase.DoNotRetryIOException; import org.apache.hadoop.hbase.HRegionInfo; -import 
org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.MetaTableAccessor; -import org.apache.hadoop.hbase.NamespaceDescriptor; import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.coprocessor.BaseMasterObserver; -import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment; -import org.apache.hadoop.hbase.coprocessor.ObserverContext; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; import org.apache.hadoop.hbase.master.MasterServices; import org.apache.hadoop.hbase.master.handler.CreateTableHandler; +import org.apache.hadoop.hbase.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse; import org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas; import org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Throttle; -import org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota; import org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequest; -import org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleType; -import org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.QuotaScope; -import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeUnit; -import org.apache.hadoop.hbase.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota; /** * Master Quota Manager. @@ -80,7 +70,7 @@ public class MasterQuotaManager { } // Create the quota table if missing - if (!MetaTableAccessor.tableExists(masterServices.getShortCircuitConnection(), + if (!MetaTableAccessor.tableExists(masterServices.getConnection(), QuotaUtil.QUOTA_TABLE_NAME)) { LOG.info("Quota table not found. 
Creating..."); createQuotaTable(); @@ -101,10 +91,6 @@ public class MasterQuotaManager { return enabled; } - private Configuration getConfiguration() { - return masterServices.getConfiguration(); - } - /* ========================================================================== * Admin operations to manage the quota table */ @@ -152,15 +138,15 @@ public class MasterQuotaManager { setQuota(req, new SetQuotaOperations() { @Override public Quotas fetch() throws IOException { - return QuotaUtil.getUserQuota(getConfiguration(), userName); + return QuotaUtil.getUserQuota(masterServices.getConnection(), userName); } @Override public void update(final Quotas quotas) throws IOException { - QuotaUtil.addUserQuota(getConfiguration(), userName, quotas); + QuotaUtil.addUserQuota(masterServices.getConnection(), userName, quotas); } @Override public void delete() throws IOException { - QuotaUtil.deleteUserQuota(masterServices.getConfiguration(), userName); + QuotaUtil.deleteUserQuota(masterServices.getConnection(), userName); } @Override public void preApply(final Quotas quotas) throws IOException { @@ -178,15 +164,15 @@ public class MasterQuotaManager { setQuota(req, new SetQuotaOperations() { @Override public Quotas fetch() throws IOException { - return QuotaUtil.getUserQuota(getConfiguration(), userName, table); + return QuotaUtil.getUserQuota(masterServices.getConnection(), userName, table); } @Override public void update(final Quotas quotas) throws IOException { - QuotaUtil.addUserQuota(getConfiguration(), userName, table, quotas); + QuotaUtil.addUserQuota(masterServices.getConnection(), userName, table, quotas); } @Override public void delete() throws IOException { - QuotaUtil.deleteUserQuota(masterServices.getConfiguration(), userName, table); + QuotaUtil.deleteUserQuota(masterServices.getConnection(), userName, table); } @Override public void preApply(final Quotas quotas) throws IOException { @@ -204,15 +190,15 @@ public class MasterQuotaManager { setQuota(req, new SetQuotaOperations() { @Override public Quotas fetch() throws IOException { - return QuotaUtil.getUserQuota(getConfiguration(), userName, namespace); + return QuotaUtil.getUserQuota(masterServices.getConnection(), userName, namespace); } @Override public void update(final Quotas quotas) throws IOException { - QuotaUtil.addUserQuota(getConfiguration(), userName, namespace, quotas); + QuotaUtil.addUserQuota(masterServices.getConnection(), userName, namespace, quotas); } @Override public void delete() throws IOException { - QuotaUtil.deleteUserQuota(masterServices.getConfiguration(), userName, namespace); + QuotaUtil.deleteUserQuota(masterServices.getConnection(), userName, namespace); } @Override public void preApply(final Quotas quotas) throws IOException { @@ -230,15 +216,15 @@ public class MasterQuotaManager { setQuota(req, new SetQuotaOperations() { @Override public Quotas fetch() throws IOException { - return QuotaUtil.getTableQuota(getConfiguration(), table); + return QuotaUtil.getTableQuota(masterServices.getConnection(), table); } @Override public void update(final Quotas quotas) throws IOException { - QuotaUtil.addTableQuota(getConfiguration(), table, quotas); + QuotaUtil.addTableQuota(masterServices.getConnection(), table, quotas); } @Override public void delete() throws IOException { - QuotaUtil.deleteTableQuota(getConfiguration(), table); + QuotaUtil.deleteTableQuota(masterServices.getConnection(), table); } @Override public void preApply(final Quotas quotas) throws IOException { @@ -256,15 +242,15 @@ public class 
MasterQuotaManager { setQuota(req, new SetQuotaOperations() { @Override public Quotas fetch() throws IOException { - return QuotaUtil.getNamespaceQuota(getConfiguration(), namespace); + return QuotaUtil.getNamespaceQuota(masterServices.getConnection(), namespace); } @Override public void update(final Quotas quotas) throws IOException { - QuotaUtil.addNamespaceQuota(getConfiguration(), namespace, quotas); + QuotaUtil.addNamespaceQuota(masterServices.getConnection(), namespace, quotas); } @Override public void delete() throws IOException { - QuotaUtil.deleteNamespaceQuota(getConfiguration(), namespace); + QuotaUtil.deleteNamespaceQuota(masterServices.getConnection(), namespace); } @Override public void preApply(final Quotas quotas) throws IOException { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaCache.java index c44a737..8cd402d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaCache.java @@ -23,19 +23,16 @@ import java.util.ArrayList; import java.util.List; import java.util.Map; import java.util.Set; -import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.ConcurrentSkipListSet; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hbase.classification.InterfaceAudience; -import org.apache.hadoop.hbase.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Chore; import org.apache.hadoop.hbase.Stoppable; import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.client.HTable; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; import org.apache.hadoop.hbase.client.Get; import org.apache.hadoop.hbase.regionserver.RegionServerServices; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; @@ -240,7 +237,7 @@ public class QuotaCache implements Stoppable { @Override public Map fetchEntries(final List gets) throws IOException { - return QuotaUtil.fetchNamespaceQuotas(QuotaCache.this.getConfiguration(), gets); + return QuotaUtil.fetchNamespaceQuotas(rsServices.getConnection(), gets); } }); } @@ -255,7 +252,7 @@ public class QuotaCache implements Stoppable { @Override public Map fetchEntries(final List gets) throws IOException { - return QuotaUtil.fetchTableQuotas(QuotaCache.this.getConfiguration(), gets); + return QuotaUtil.fetchTableQuotas(rsServices.getConnection(), gets); } }); } @@ -272,7 +269,7 @@ public class QuotaCache implements Stoppable { @Override public Map fetchEntries(final List gets) throws IOException { - return QuotaUtil.fetchUserQuotas(QuotaCache.this.getConfiguration(), gets); + return QuotaUtil.fetchUserQuotas(rsServices.getConnection(), gets); } }); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaUtil.java index 5db30eb..bff648d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaUtil.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaUtil.java @@ -19,15 +19,12 @@ package org.apache.hadoop.hbase.quotas; import java.io.IOException; - import java.util.HashMap; import java.util.List; import java.util.Map; import 
org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hbase.classification.InterfaceAudience; -import org.apache.hadoop.hbase.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.HColumnDescriptor; @@ -35,18 +32,19 @@ import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.KeyValueUtil; import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.client.HTable; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; +import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.Delete; import org.apache.hadoop.hbase.client.Get; import org.apache.hadoop.hbase.client.Mutation; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Result; -import org.apache.hadoop.hbase.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas; import org.apache.hadoop.hbase.regionserver.BloomType; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; -import org.apache.hadoop.security.UserGroupInformation; /** * Helper class to interact with the quota table @@ -85,90 +83,89 @@ public class QuotaUtil extends QuotaTableUtil { /* ========================================================================= * Quota "settings" helpers */ - public static void addTableQuota(final Configuration conf, final TableName table, + public static void addTableQuota(final Connection connection, final TableName table, final Quotas data) throws IOException { - addQuotas(conf, getTableRowKey(table), data); + addQuotas(connection, getTableRowKey(table), data); } - public static void deleteTableQuota(final Configuration conf, final TableName table) + public static void deleteTableQuota(final Connection connection, final TableName table) throws IOException { - deleteQuotas(conf, getTableRowKey(table)); + deleteQuotas(connection, getTableRowKey(table)); } - public static void addNamespaceQuota(final Configuration conf, final String namespace, + public static void addNamespaceQuota(final Connection connection, final String namespace, final Quotas data) throws IOException { - addQuotas(conf, getNamespaceRowKey(namespace), data); + addQuotas(connection, getNamespaceRowKey(namespace), data); } - public static void deleteNamespaceQuota(final Configuration conf, final String namespace) + public static void deleteNamespaceQuota(final Connection connection, final String namespace) throws IOException { - deleteQuotas(conf, getNamespaceRowKey(namespace)); + deleteQuotas(connection, getNamespaceRowKey(namespace)); } - public static void addUserQuota(final Configuration conf, final String user, + public static void addUserQuota(final Connection connection, final String user, final Quotas data) throws IOException { - addQuotas(conf, getUserRowKey(user), data); + addQuotas(connection, getUserRowKey(user), data); } - public static void addUserQuota(final Configuration conf, final String user, + public static void addUserQuota(final Connection connection, final String user, final TableName table, final Quotas data) throws IOException { - addQuotas(conf, getUserRowKey(user), - getSettingsQualifierForUserTable(table), data); + addQuotas(connection, getUserRowKey(user), 
getSettingsQualifierForUserTable(table), data); } - public static void addUserQuota(final Configuration conf, final String user, + public static void addUserQuota(final Connection connection, final String user, final String namespace, final Quotas data) throws IOException { - addQuotas(conf, getUserRowKey(user), + addQuotas(connection, getUserRowKey(user), getSettingsQualifierForUserNamespace(namespace), data); } - public static void deleteUserQuota(final Configuration conf, final String user) + public static void deleteUserQuota(final Connection connection, final String user) throws IOException { - deleteQuotas(conf, getUserRowKey(user)); + deleteQuotas(connection, getUserRowKey(user)); } - public static void deleteUserQuota(final Configuration conf, final String user, + public static void deleteUserQuota(final Connection connection, final String user, final TableName table) throws IOException { - deleteQuotas(conf, getUserRowKey(user), + deleteQuotas(connection, getUserRowKey(user), getSettingsQualifierForUserTable(table)); } - public static void deleteUserQuota(final Configuration conf, final String user, + public static void deleteUserQuota(final Connection connection, final String user, final String namespace) throws IOException { - deleteQuotas(conf, getUserRowKey(user), + deleteQuotas(connection, getUserRowKey(user), getSettingsQualifierForUserNamespace(namespace)); } - private static void addQuotas(final Configuration conf, final byte[] rowKey, + private static void addQuotas(final Connection connection, final byte[] rowKey, final Quotas data) throws IOException { - addQuotas(conf, rowKey, QUOTA_QUALIFIER_SETTINGS, data); + addQuotas(connection, rowKey, QUOTA_QUALIFIER_SETTINGS, data); } - private static void addQuotas(final Configuration conf, final byte[] rowKey, + private static void addQuotas(final Connection connection, final byte[] rowKey, final byte[] qualifier, final Quotas data) throws IOException { Put put = new Put(rowKey); put.add(QUOTA_FAMILY_INFO, qualifier, quotasToData(data)); - doPut(conf, put); + doPut(connection, put); } - private static void deleteQuotas(final Configuration conf, final byte[] rowKey) + private static void deleteQuotas(final Connection connection, final byte[] rowKey) throws IOException { - deleteQuotas(conf, rowKey, null); + deleteQuotas(connection, rowKey, null); } - private static void deleteQuotas(final Configuration conf, final byte[] rowKey, + private static void deleteQuotas(final Connection connection, final byte[] rowKey, final byte[] qualifier) throws IOException { Delete delete = new Delete(rowKey); if (qualifier != null) { delete.deleteColumns(QUOTA_FAMILY_INFO, qualifier); } - doDelete(conf, delete); + doDelete(connection, delete); } - public static Map fetchUserQuotas(final Configuration conf, + public static Map fetchUserQuotas(final Connection connection, final List gets) throws IOException { long nowTs = EnvironmentEdgeManager.currentTime(); - Result[] results = doGet(conf, gets); + Result[] results = doGet(connection, gets); Map userQuotas = new HashMap(results.length); for (int i = 0; i < results.length; ++i) { @@ -207,9 +204,9 @@ public class QuotaUtil extends QuotaTableUtil { return userQuotas; } - public static Map fetchTableQuotas(final Configuration conf, + public static Map fetchTableQuotas(final Connection connection, final List gets) throws IOException { - return fetchGlobalQuotas("table", conf, gets, new KeyFromRow() { + return fetchGlobalQuotas("table", connection, gets, new KeyFromRow() { @Override public TableName 
getKeyFromRow(final byte[] row) { assert isTableRowKey(row); @@ -218,9 +215,9 @@ public class QuotaUtil extends QuotaTableUtil { }); } - public static Map fetchNamespaceQuotas(final Configuration conf, + public static Map fetchNamespaceQuotas(final Connection connection, final List gets) throws IOException { - return fetchGlobalQuotas("namespace", conf, gets, new KeyFromRow() { + return fetchGlobalQuotas("namespace", connection, gets, new KeyFromRow() { @Override public String getKeyFromRow(final byte[] row) { assert isNamespaceRowKey(row); @@ -230,9 +227,10 @@ public class QuotaUtil extends QuotaTableUtil { } public static Map fetchGlobalQuotas(final String type, - final Configuration conf, final List gets, final KeyFromRow kfr) throws IOException { + final Connection connection, final List gets, final KeyFromRow kfr) + throws IOException { long nowTs = EnvironmentEdgeManager.currentTime(); - Result[] results = doGet(conf, gets); + Result[] results = doGet(connection, gets); Map globalQuotas = new HashMap(results.length); for (int i = 0; i < results.length; ++i) { @@ -266,23 +264,17 @@ public class QuotaUtil extends QuotaTableUtil { /* ========================================================================= * HTable helpers */ - private static void doPut(final Configuration conf, final Put put) - throws IOException { - HTable table = new HTable(conf, QuotaUtil.QUOTA_TABLE_NAME); - try { + private static void doPut(final Connection connection, final Put put) + throws IOException { + try (Table table = connection.getTable(QuotaUtil.QUOTA_TABLE_NAME)) { table.put(put); - } finally { - table.close(); } } - private static void doDelete(final Configuration conf, final Delete delete) - throws IOException { - HTable table = new HTable(conf, QuotaUtil.QUOTA_TABLE_NAME); - try { + private static void doDelete(final Connection connection, final Delete delete) + throws IOException { + try (Table table = connection.getTable(QuotaUtil.QUOTA_TABLE_NAME)) { table.delete(delete); - } finally { - table.close(); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java index ed04a86..1366e70 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java @@ -75,9 +75,9 @@ import org.apache.hadoop.hbase.YouAreDeadException; import org.apache.hadoop.hbase.ZNodeClearer; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.client.ConnectionUtils; -import org.apache.hadoop.hbase.client.HConnection; -import org.apache.hadoop.hbase.client.HConnectionManager; import org.apache.hadoop.hbase.conf.ConfigurationManager; +import org.apache.hadoop.hbase.client.ClusterConnection; +import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.coordination.BaseCoordinatedStateManager; import org.apache.hadoop.hbase.coordination.SplitLogWorkerCoordination; import org.apache.hadoop.hbase.coprocessor.CoprocessorHost; @@ -206,12 +206,12 @@ public class HRegionServer extends HasThread implements protected HeapMemoryManager hMemManager; - /* - * Short-circuit (ie. bypassing RPC layer) HConnection to this Server - * to be used internally for miscellaneous needs. Initialized at the server startup - * and closed when server shuts down. Clients must never close it explicitly. 
+ /** + * Cluster connection to be shared by services. + * Initialized at server startup and closed when server shuts down. + * Clients must never close it explicitly. */ - protected HConnection shortCircuitConnection; + protected ClusterConnection clusterConnection; /* * Long-living meta table locator, which is created when the server is started and stopped @@ -604,15 +604,20 @@ } /** - * Create wrapped short-circuit connection to this server. - * In its own method so can intercept and mock it over in tests. + * Create a 'smarter' HConnection, one that is capable of by-passing RPC if the request is to + * the local server. Safe to use going to local or remote server. + * Create this instance in a method so it can be intercepted and mocked in tests. * @throws IOException */ - protected HConnection createShortCircuitConnection() throws IOException { + @VisibleForTesting + protected ClusterConnection createClusterConnection() throws IOException { + // Create a cluster connection that, when appropriate, can short-circuit and go directly to the + // local server if the request is to the local server, bypassing RPC. Can be used for both local + // and remote invocations. return ConnectionUtils.createShortCircuitHConnection( - HConnectionManager.getConnection(conf), serverName, rpcServices, rpcServices); + ConnectionFactory.createConnection(conf), serverName, rpcServices, rpcServices); } - + /** * Run test on configured codecs to make sure supporting libs are in place. * @param c @@ -635,6 +640,17 @@ } /** + * Set up our cluster connection if not already initialized. + * @throws IOException + */ + protected synchronized void setupClusterConnection() throws IOException { + if (clusterConnection == null) { + clusterConnection = createClusterConnection(); + metaTableLocator = new MetaTableLocator(); + } + } + + /** * All initialization needed before we go register with Master. * * @throws IOException @@ -642,12 +658,7 @@ */ private void preRegistrationInitialization(){ try { - synchronized (this) { - if (shortCircuitConnection == null) { - shortCircuitConnection = createShortCircuitConnection(); - metaTableLocator = new MetaTableLocator(); - } - } + setupClusterConnection(); // Health checker thread. if (isHealthCheckerConfigured()) { @@ -945,13 +956,13 @@ // so callers waiting for meta without timeout can stop if (this.metaTableLocator != null) this.metaTableLocator.stop(); - if (this.shortCircuitConnection != null && !shortCircuitConnection.isClosed()) { + if (this.clusterConnection != null && !clusterConnection.isClosed()) { try { - this.shortCircuitConnection.close(); + this.clusterConnection.close(); } catch (IOException e) { // Although the {@link Closeable} interface throws an {@link // IOException}, in reality, the implementation would never do that. 
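The shape of the change above: the once short-circuit-only field becomes a general shared ClusterConnection, created lazily under a lock by setupClusterConnection() (now common to HMaster and HRegionServer) and closed only on the shutdown path. A stripped-down sketch of that lifecycle idiom (the holder class is hypothetical, and the ConnectionUtils short-circuit wrapping the server applies is omitted):

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class SharedConnectionHolder {
  private final Configuration conf;
  private Connection connection; // shared, process-lifetime connection

  public SharedConnectionHolder(Configuration conf) {
    this.conf = conf;
  }

  // Lazy, idempotent setup, mirroring setupClusterConnection() above.
  public synchronized Connection getConnection() throws IOException {
    if (connection == null) {
      connection = ConnectionFactory.createConnection(conf);
    }
    return connection;
  }

  // Shutdown-path close, mirroring the server's stop sequence: callers must
  // never close the shared connection themselves.
  public synchronized void stop() {
    if (connection != null && !connection.isClosed()) {
      try {
        connection.close();
      } catch (IOException e) {
        // Log and continue, as the server does; shutdown must proceed.
      }
      connection = null;
    }
  }
}
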
- LOG.error("Attempt to close server's short circuit HConnection failed.", e); + LOG.warn("Attempt to close server's short circuit HConnection failed.", e); } } @@ -1736,8 +1747,8 @@ public class HRegionServer extends HasThread implements } @Override - public HConnection getShortCircuitConnection() { - return this.shortCircuitConnection; + public ClusterConnection getConnection() { + return this.clusterConnection; } @Override @@ -1828,7 +1839,7 @@ public class HRegionServer extends HasThread implements } } else { try { - MetaTableAccessor.updateRegionLocation(shortCircuitConnection, + MetaTableAccessor.updateRegionLocation(clusterConnection, hris[0], serverName, openSeqNum); } catch (IOException e) { LOG.info("Failed to update meta", e); @@ -3046,7 +3057,7 @@ public class HRegionServer extends HasThread implements } return result; } - + public CoprocessorServiceResponse execRegionServerService(final RpcController controller, final CoprocessorServiceRequest serviceRequest) throws ServiceException { try { @@ -3093,7 +3104,7 @@ public class HRegionServer extends HasThread implements throw new ServiceException(ie); } } - + /** * @return The cache config instance used by the regionserver. */ @@ -3107,7 +3118,7 @@ public class HRegionServer extends HasThread implements protected ConfigurationManager getConfigurationManager() { return configurationManager; } - + /** * Reload the configuration from disk. */ @@ -3115,6 +3126,6 @@ public class HRegionServer extends HasThread implements LOG.info("Reloading the configuration from disk."); // Reload the configuration from disk. conf.reloadConfiguration(); - configurationManager.notifyAllObservers(conf); + configurationManager.notifyAllObservers(conf); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionMergeTransaction.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionMergeTransaction.java index 377d548..d478bfe 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionMergeTransaction.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionMergeTransaction.java @@ -658,7 +658,7 @@ public class RegionMergeTransaction { // Get merge regions if it is a merged region and already has merge // qualifier Pair mergeRegions = MetaTableAccessor - .getRegionsFromMergeQualifier(services.getShortCircuitConnection(), regionName); + .getRegionsFromMergeQualifier(services.getConnection(), regionName); if (mergeRegions != null && (mergeRegions.getFirst() != null || mergeRegions.getSecond() != null)) { // It has merge qualifier diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSyncUp.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSyncUp.java index 0408231..997692f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSyncUp.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSyncUp.java @@ -24,12 +24,12 @@ import org.apache.hadoop.conf.Configured; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Abortable; +import org.apache.hadoop.hbase.CoordinatedStateManager; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.ServerName; -import org.apache.hadoop.hbase.CoordinatedStateManager; -import 
org.apache.hadoop.hbase.client.HConnection; +import org.apache.hadoop.hbase.client.ClusterConnection; import org.apache.hadoop.hbase.util.FSUtils; import org.apache.hadoop.hbase.zookeeper.MetaTableLocator; import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; @@ -150,11 +150,6 @@ public class ReplicationSyncUp extends Configured implements Tool { } @Override - public HConnection getShortCircuitConnection() { - return null; - } - - @Override public MetaTableLocator getMetaTableLocator() { return null; } @@ -181,5 +176,10 @@ public class ReplicationSyncUp extends Configured implements Tool { public boolean isStopped() { return false; } + + @Override + public ClusterConnection getConnection() { + return null; + } } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlLists.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlLists.java index 5ca1164..4af28b2 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlLists.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlLists.java @@ -33,7 +33,6 @@ import java.util.TreeSet; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; @@ -44,9 +43,11 @@ import org.apache.hadoop.hbase.NamespaceDescriptor; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.Tag; import org.apache.hadoop.hbase.TagType; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.Delete; import org.apache.hadoop.hbase.client.Get; -import org.apache.hadoop.hbase.client.HTable; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.ResultScanner; @@ -170,12 +171,11 @@ public class AccessControlLists { Bytes.toString(key)+": "+Bytes.toStringBinary(value) ); } - Table acls = null; - try { - acls = new HTable(conf, ACL_TABLE_NAME); - acls.put(p); - } finally { - if (acls != null) acls.close(); + // TODO: Pass in a Connection rather than create one each time. + try (Connection connection = ConnectionFactory.createConnection(conf)) { + try (Table table = connection.getTable(ACL_TABLE_NAME)) { + table.put(p); + } } } @@ -200,13 +200,12 @@ public class AccessControlLists { if (LOG.isDebugEnabled()) { LOG.debug("Removing permission "+ userPerm.toString()); } - d.deleteColumns(ACL_LIST_FAMILY, key); - Table acls = null; - try { - acls = new HTable(conf, ACL_TABLE_NAME); - acls.delete(d); - } finally { - if (acls != null) acls.close(); + d.addColumns(ACL_LIST_FAMILY, key); + // TODO: Pass in a Connection rather than create one each time. + try (Connection connection = ConnectionFactory.createConnection(conf)) { + try (Table table = connection.getTable(ACL_TABLE_NAME)) { + table.delete(d); + } } } @@ -220,13 +219,11 @@ public class AccessControlLists { if (LOG.isDebugEnabled()) { LOG.debug("Removing permissions of removed table "+ tableName); } - - Table acls = null; - try { - acls = new HTable(conf, ACL_TABLE_NAME); - acls.delete(d); - } finally { - if (acls != null) acls.close(); + // TODO: Pass in a Connection rather than create one each time. 
+ try (Connection connection = ConnectionFactory.createConnection(conf)) { + try (Table table = connection.getTable(ACL_TABLE_NAME)) { + table.delete(d); + } } } @@ -241,12 +238,10 @@ public class AccessControlLists { LOG.debug("Removing permissions of removed namespace "+ namespace); } - Table acls = null; - try { - acls = new HTable(conf, ACL_TABLE_NAME); - acls.delete(d); - } finally { - if (acls != null) acls.close(); + try (Connection connection = ConnectionFactory.createConnection(conf)) { + try (Table table = connection.getTable(ACL_TABLE_NAME)) { + table.delete(d); + } } } @@ -260,41 +255,38 @@ public class AccessControlLists { LOG.debug("Removing permissions of removed column " + Bytes.toString(column) + " from table "+ tableName); } - - Table acls = null; - try { - acls = new HTable(conf, ACL_TABLE_NAME); - - Scan scan = new Scan(); - scan.addFamily(ACL_LIST_FAMILY); - - String columnName = Bytes.toString(column); - scan.setFilter(new QualifierFilter(CompareOp.EQUAL, new RegexStringComparator( - String.format("(%s%s%s)|(%s%s)$", - ACL_KEY_DELIMITER, columnName, ACL_KEY_DELIMITER, - ACL_KEY_DELIMITER, columnName)))); - - Set qualifierSet = new TreeSet(Bytes.BYTES_COMPARATOR); - ResultScanner scanner = acls.getScanner(scan); - try { - for (Result res : scanner) { - for (byte[] q : res.getFamilyMap(ACL_LIST_FAMILY).navigableKeySet()) { - qualifierSet.add(q); + // TODO: Pass in a Connection rather than create one each time. + try (Connection connection = ConnectionFactory.createConnection(conf)) { + try (Table table = connection.getTable(ACL_TABLE_NAME)) { + Scan scan = new Scan(); + scan.addFamily(ACL_LIST_FAMILY); + + String columnName = Bytes.toString(column); + scan.setFilter(new QualifierFilter(CompareOp.EQUAL, new RegexStringComparator( + String.format("(%s%s%s)|(%s%s)$", + ACL_KEY_DELIMITER, columnName, ACL_KEY_DELIMITER, + ACL_KEY_DELIMITER, columnName)))); + + Set qualifierSet = new TreeSet(Bytes.BYTES_COMPARATOR); + ResultScanner scanner = table.getScanner(scan); + try { + for (Result res : scanner) { + for (byte[] q : res.getFamilyMap(ACL_LIST_FAMILY).navigableKeySet()) { + qualifierSet.add(q); + } } + } finally { + scanner.close(); } - } finally { - scanner.close(); - } - if (qualifierSet.size() > 0) { - Delete d = new Delete(tableName.getName()); - for (byte[] qualifier : qualifierSet) { - d.deleteColumns(ACL_LIST_FAMILY, qualifier); + if (qualifierSet.size() > 0) { + Delete d = new Delete(tableName.getName()); + for (byte[] qualifier : qualifierSet) { + d.addColumns(ACL_LIST_FAMILY, qualifier); + } + table.delete(d); } - acls.delete(d); } - } finally { - if (acls != null) acls.close(); } } @@ -422,19 +414,20 @@ public class AccessControlLists { Scan scan = new Scan(); scan.addFamily(ACL_LIST_FAMILY); - Table acls = null; ResultScanner scanner = null; - try { - acls = new HTable(conf, ACL_TABLE_NAME); - scanner = acls.getScanner(scan); - for (Result row : scanner) { - ListMultimap resultPerms = - parsePermissions(row.getRow(), row); - allPerms.put(row.getRow(), resultPerms); + // TODO: Pass in a Connection rather than create one each time. 
+ try (Connection connection = ConnectionFactory.createConnection(conf)) { + try (Table table = connection.getTable(ACL_TABLE_NAME)) { + scanner = table.getScanner(scan); + try { + for (Result row : scanner) { + ListMultimap resultPerms = parsePermissions(row.getRow(), row); + allPerms.put(row.getRow(), resultPerms); + } + } finally { + if (scanner != null) scanner.close(); + } } - } finally { - if (scanner != null) scanner.close(); - if (acls != null) acls.close(); } return allPerms; @@ -465,20 +458,19 @@ public class AccessControlLists { // for normal user tables, we just read the table row from _acl_ ListMultimap perms = ArrayListMultimap.create(); - Table acls = null; - try { - acls = new HTable(conf, ACL_TABLE_NAME); - Get get = new Get(entryName); - get.addFamily(ACL_LIST_FAMILY); - Result row = acls.get(get); - if (!row.isEmpty()) { - perms = parsePermissions(entryName, row); - } else { - LOG.info("No permissions found in " + ACL_TABLE_NAME + " for acl entry " - + Bytes.toString(entryName)); + // TODO: Pass in a Connection rather than create one each time. + try (Connection connection = ConnectionFactory.createConnection(conf)) { + try (Table table = connection.getTable(ACL_TABLE_NAME)) { + Get get = new Get(entryName); + get.addFamily(ACL_LIST_FAMILY); + Result row = table.get(get); + if (!row.isEmpty()) { + perms = parsePermissions(entryName, row); + } else { + LOG.info("No permissions found in " + ACL_TABLE_NAME + " for acl entry " + + Bytes.toString(entryName)); + } } - } finally { - if (acls != null) acls.close(); } return perms; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java index 1c6a341..1218368 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java @@ -1087,7 +1087,7 @@ public class AccessController extends BaseMasterAndRegionObserver public void postStartMaster(ObserverContext ctx) throws IOException { if (!MetaTableAccessor.tableExists(ctx.getEnvironment().getMasterServices() - .getShortCircuitConnection(), AccessControlLists.ACL_TABLE_NAME)) { + .getConnection(), AccessControlLists.ACL_TABLE_NAME)) { // initialize the ACL storage table AccessControlLists.createACLTable(ctx.getEnvironment().getMasterServices()); } else { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/TokenUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/TokenUtil.java index 3a37049..e3c4f53 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/TokenUtil.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/TokenUtil.java @@ -23,13 +23,15 @@ import java.lang.reflect.UndeclaredThrowableException; import java.security.PrivilegedExceptionAction; import com.google.protobuf.ServiceException; + import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.client.HTable; +import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel; 
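Aside, not part of the patch: the TokenUtil hunk these imports open swaps direct HTable construction for a Table borrowed from a Connection, but keeps the coprocessor-endpoint call shape. A sketch of that shape, assuming the 0.98/1.0-era AuthenticationProtos stubs; TokenFetchSketch and fetch are hypothetical, and the explicit throw of ProtobufUtil.getRemoteException(se) is a substitute for the patch's fall-through to a dummy return:

import java.io.IOException;

import com.google.protobuf.ServiceException;

import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.AuthenticationProtos;
import org.apache.hadoop.hbase.security.token.AuthenticationTokenIdentifier;
import org.apache.hadoop.security.token.Token;

public class TokenFetchSketch {
  // Hypothetical helper: get an RPC channel anchored at a row, wrap it in
  // the generated blocking stub, and translate ServiceException into an
  // IOException instead of swallowing it.
  static Token<AuthenticationTokenIdentifier> fetch(Table meta) throws IOException {
    CoprocessorRpcChannel channel = meta.coprocessorService(HConstants.EMPTY_START_ROW);
    AuthenticationProtos.AuthenticationService.BlockingInterface service =
        AuthenticationProtos.AuthenticationService.newBlockingStub(channel);
    try {
      AuthenticationProtos.GetAuthenticationTokenResponse response =
          service.getAuthenticationToken(null,
              AuthenticationProtos.GetAuthenticationTokenRequest.getDefaultInstance());
      return ProtobufUtil.toToken(response.getToken());
    } catch (ServiceException se) {
      throw ProtobufUtil.getRemoteException(se);
    }
  }
}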
import org.apache.hadoop.hbase.protobuf.ProtobufUtil; @@ -45,6 +47,7 @@ import org.apache.hadoop.security.token.Token; */ @InterfaceAudience.Private public class TokenUtil { + // This class is referenced indirectly by User out in common; instances are created by reflection. private static Log LOG = LogFactory.getLog(TokenUtil.class); /** @@ -54,21 +57,19 @@ public class TokenUtil { */ public static Token obtainToken( Configuration conf) throws IOException { - Table meta = null; - try { - meta = new HTable(conf, TableName.META_TABLE_NAME); - CoprocessorRpcChannel rpcChannel = meta.coprocessorService(HConstants.EMPTY_START_ROW); - AuthenticationProtos.AuthenticationService.BlockingInterface service = - AuthenticationProtos.AuthenticationService.newBlockingStub(rpcChannel); - AuthenticationProtos.GetAuthenticationTokenResponse response = service.getAuthenticationToken(null, - AuthenticationProtos.GetAuthenticationTokenRequest.getDefaultInstance()); + // TODO: Pass in a Connection to use. Will this even work? + try (Connection connection = ConnectionFactory.createConnection(conf)) { + try (Table meta = connection.getTable(TableName.META_TABLE_NAME)) { + CoprocessorRpcChannel rpcChannel = meta.coprocessorService(HConstants.EMPTY_START_ROW); + AuthenticationProtos.AuthenticationService.BlockingInterface service = + AuthenticationProtos.AuthenticationService.newBlockingStub(rpcChannel); + AuthenticationProtos.GetAuthenticationTokenResponse response = + service.getAuthenticationToken(null, + AuthenticationProtos.GetAuthenticationTokenRequest.getDefaultInstance()); - return ProtobufUtil.toToken(response.getToken()); - } catch (ServiceException se) { - ProtobufUtil.toIOException(se); - } finally { - if (meta != null) { - meta.close(); + return ProtobufUtil.toToken(response.getToken()); + } catch (ServiceException se) { + ProtobufUtil.toIOException(se); } } // dummy return for ServiceException catch block diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityController.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityController.java index aaad8ba..c76f562 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityController.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityController.java @@ -168,7 +168,7 @@ public class VisibilityController extends BaseMasterAndRegionObserver implements public void postStartMaster(ObserverContext ctx) throws IOException { // Need to create the new system table for labels here MasterServices master = ctx.getEnvironment().getMasterServices(); - if (!MetaTableAccessor.tableExists(master.getShortCircuitConnection(), LABELS_TABLE_NAME)) { + if (!MetaTableAccessor.tableExists(master.getConnection(), LABELS_TABLE_NAME)) { HTableDescriptor labelsTable = new HTableDescriptor(LABELS_TABLE_NAME); HColumnDescriptor labelsColumn = new HColumnDescriptor(LABELS_TABLE_FAMILY); labelsColumn.setBloomFilterType(BloomType.NONE); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java index 539ba70..23673b6 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java @@ -19,6 +19,7 @@ package org.apache.hadoop.hbase.tool; +import java.io.Closeable; import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; @@ -39,15 +40,17 @@
import org.apache.hadoop.hbase.DoNotRetryIOException; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableNotEnabledException; import org.apache.hadoop.hbase.TableNotFoundException; import org.apache.hadoop.hbase.client.Admin; +import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.Get; -import org.apache.hadoop.hbase.client.HBaseAdmin; -import org.apache.hadoop.hbase.client.HTable; +import org.apache.hadoop.hbase.client.RegionLocator; import org.apache.hadoop.hbase.client.ResultScanner; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.client.Table; @@ -228,51 +231,59 @@ public final class Canary implements Tool { } } - // start to prepare the stuffs + // Start to prepare the monitor Monitor monitor = null; Thread monitorThread = null; long startTime = 0; long currentTimeLength = 0; + // Get a connection to use below. + // try-with-resources jdk7 construct. See + // http://docs.oracle.com/javase/tutorial/essential/exceptions/tryResourceClose.html + try (Connection connection = ConnectionFactory.createConnection(this.conf)) { + do { + // Run the monitor. + try { + monitor = this.newMonitor(connection, index, args); + monitorThread = new Thread(monitor); + startTime = System.currentTimeMillis(); + monitorThread.start(); + while (!monitor.isDone()) { + // wait for 1 sec + Thread.sleep(1000); + // exit if any error occurs + if (this.failOnError && monitor.hasError()) { + monitorThread.interrupt(); + if (monitor.initialized) { + System.exit(monitor.errorCode); + } else { + System.exit(INIT_ERROR_EXIT_CODE); + } + } + currentTimeLength = System.currentTimeMillis() - startTime; + if (currentTimeLength > this.timeout) { + LOG.error("The monitor is running too long (" + currentTimeLength + + ") after timeout limit:" + this.timeout + + " and will be killed!!"); + if (monitor.initialized) { + System.exit(TIMEOUT_ERROR_EXIT_CODE); + } else { + System.exit(INIT_ERROR_EXIT_CODE); + } + break; + } + } - do { - // do monitor !!
- monitor = this.newMonitor(index, args); - monitorThread = new Thread(monitor); - startTime = System.currentTimeMillis(); - monitorThread.start(); - while (!monitor.isDone()) { - // wait for 1 sec - Thread.sleep(1000); - // exit if any error occurs - if (this.failOnError && monitor.hasError()) { - monitorThread.interrupt(); - if (monitor.initialized) { + if (this.failOnError && monitor.hasError()) { + monitorThread.interrupt(); System.exit(monitor.errorCode); - } else { - System.exit(INIT_ERROR_EXIT_CODE); - } - } - currentTimeLength = System.currentTimeMillis() - startTime; - if (currentTimeLength > this.timeout) { - LOG.error("The monitor is running too long (" + currentTimeLength - + ") after timeout limit:" + this.timeout - + " will be killed itself !!"); - if (monitor.initialized) { - System.exit(TIMEOUT_ERROR_EXIT_CODE); - } else { - System.exit(INIT_ERROR_EXIT_CODE); } - break; + } finally { + if (monitor != null) monitor.close(); } - } - - if (this.failOnError && monitor.hasError()) { - monitorThread.interrupt(); - System.exit(monitor.errorCode); - } - Thread.sleep(interval); - } while (interval > 0); + Thread.sleep(interval); + } while (interval > 0); + } // try-with-resources close return(monitor.errorCode); } @@ -296,13 +307,13 @@ public final class Canary implements Tool { } /** - * a Factory method for {@link Monitor}. - * Can be overrided by user. + * A Factory method for {@link Monitor}. + * Can be overridden by user. * @param index a start index for monitor target * @param args args passed from user * @return a Monitor instance */ - public Monitor newMonitor(int index, String[] args) { + public Monitor newMonitor(final Connection connection, int index, String[] args) { Monitor monitor = null; String[] monitorTargets = null; @@ -314,20 +325,20 @@ public final class Canary implements Tool { if(this.regionServerMode) { monitor = new RegionServerMonitor( - this.conf, + connection, monitorTargets, this.useRegExp, (ExtendedSink)this.sink); } else { - monitor = new RegionMonitor(this.conf, monitorTargets, this.useRegExp, this.sink); + monitor = new RegionMonitor(connection, monitorTargets, this.useRegExp, this.sink); } return monitor; } // a Monitor super-class can be extended by users - public static abstract class Monitor implements Runnable { + public static abstract class Monitor implements Runnable, Closeable { - protected Configuration config; + protected Connection connection; protected Admin admin; protected String[] targets; protected boolean useRegExp; @@ -345,12 +356,16 @@ public final class Canary implements Tool { return errorCode != 0; } - protected Monitor(Configuration config, String[] monitorTargets, + @Override + public void close() throws IOException { + if (this.admin != null) this.admin.close(); + } + + protected Monitor(Connection connection, String[] monitorTargets, boolean useRegExp, Sink sink) { - if (null == config) - throw new IllegalArgumentException("config shall not be null"); + if (null == connection) throw new IllegalArgumentException("connection shall not be null"); - this.config = config; + this.connection = connection; this.targets = monitorTargets; this.useRegExp = useRegExp; this.sink = sink; @@ -361,7 +376,7 @@ public final class Canary implements Tool { protected boolean initAdmin() { if (null == this.admin) { try { - this.admin = new HBaseAdmin(config); + this.admin = this.connection.getAdmin(); } catch (Exception e) { LOG.error("Initial HBaseAdmin failed...", e); this.errorCode = INIT_ERROR_EXIT_CODE; @@ -377,9 +392,9 @@ public final class 
Canary implements Tool { // a monitor for region mode private static class RegionMonitor extends Monitor { - public RegionMonitor(Configuration config, String[] monitorTargets, + public RegionMonitor(Connection connection, String[] monitorTargets, boolean useRegExp, Sink sink) { - super(config, monitorTargets, useRegExp, sink); + super(connection, monitorTargets, useRegExp, sink); } @Override @@ -481,7 +496,7 @@ public final class Canary implements Tool { Table table = null; try { - table = new HTable(admin.getConfiguration(), tableDesc.getTableName()); + table = admin.getConnection().getTable(tableDesc.getTableName()); } catch (TableNotFoundException e) { return; } @@ -556,9 +571,9 @@ public final class Canary implements Tool { //a monitor for regionserver mode private static class RegionServerMonitor extends Monitor { - public RegionServerMonitor(Configuration config, String[] monitorTargets, + public RegionServerMonitor(Connection connection, String[] monitorTargets, boolean useRegExp, ExtendedSink sink) { - super(config, monitorTargets, useRegExp, sink); + super(connection, monitorTargets, useRegExp, sink); } private ExtendedSink getSink() { @@ -622,7 +637,7 @@ public final class Canary implements Tool { region = entry.getValue().get(0); try { tableName = region.getTable(); - table = new HTable(this.admin.getConfiguration(), tableName); + table = admin.getConnection().getTable(tableName); startKey = region.getStartKey(); // Can't do a get on empty start row so do a Scan of first element if any instead. if(startKey.length > 0) { @@ -675,18 +690,19 @@ public final class Canary implements Tool { private Map> getAllRegionServerByName() { Map> rsAndRMap = new HashMap>(); - HTable table = null; + Table table = null; + RegionLocator regionLocator = null; try { HTableDescriptor[] tableDescs = this.admin.listTables(); List regions = null; for (HTableDescriptor tableDesc : tableDescs) { - table = new HTable(this.admin.getConfiguration(), tableDesc.getTableName()); + table = this.admin.getConnection().getTable(tableDesc.getTableName()); + regionLocator = this.admin.getConnection().getRegionLocator(tableDesc.getTableName()); - for (Map.Entry entry : table - .getRegionLocations().entrySet()) { - ServerName rs = entry.getValue(); + for (HRegionLocation location: regionLocator.getAllRegionLocations()) { + ServerName rs = location.getServerName(); String rsName = rs.getHostname(); - HRegionInfo r = entry.getKey(); + HRegionInfo r = location.getRegionInfo(); if (rsAndRMap.containsKey(rsName)) { regions = rsAndRMap.get(rsName); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java index bffda57..fd890b3 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java @@ -21,11 +21,11 @@ package org.apache.hadoop.hbase.util; import java.io.IOException; import java.math.BigInteger; import java.util.Arrays; +import java.util.Collection; import java.util.Collections; import java.util.Comparator; import java.util.LinkedList; import java.util.List; -import java.util.Map; import java.util.Set; import java.util.TreeMap; @@ -39,23 +39,28 @@ import org.apache.commons.lang.ArrayUtils; import org.apache.commons.lang.StringUtils; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hbase.classification.InterfaceAudience; import 
org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.ClusterStatus; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.MetaTableAccessor; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.MetaTableAccessor; -import org.apache.hadoop.hbase.client.HBaseAdmin; -import org.apache.hadoop.hbase.client.HTable; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.client.Admin; +import org.apache.hadoop.hbase.client.ClusterConnection; +import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.NoServerForRegionException; +import org.apache.hadoop.hbase.client.RegionLocator; +import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.regionserver.HRegionFileSystem; import com.google.common.base.Preconditions; @@ -332,11 +337,11 @@ public class RegionSplitter { if (2 != cmd.getArgList().size() || !oneOperOnly || cmd.hasOption("h")) { new HelpFormatter().printHelp("RegionSplitter \n"+ - "SPLITALGORITHM is a java class name of a class implementing " + - "SplitAlgorithm, or one of the special strings HexStringSplit " + - "or UniformSplit, which are built-in split algorithms. " + - "HexStringSplit treats keys as hexadecimal ASCII, and " + - "UniformSplit treats keys as arbitrary bytes.", opt); + "SPLITALGORITHM is a java class name of a class implementing " + + "SplitAlgorithm, or one of the special strings HexStringSplit " + + "or UniformSplit, which are built-in split algorithms. " + + "HexStringSplit treats keys as hexadecimal ASCII, and " + + "UniformSplit treats keys as arbitrary bytes.", opt); return; } TableName tableName = TableName.valueOf(cmd.getArgs()[0]); @@ -364,8 +369,8 @@ public class RegionSplitter { } static void createPresplitTable(TableName tableName, SplitAlgorithm splitAlgo, - String[] columnFamilies, Configuration conf) throws IOException, - InterruptedException { + String[] columnFamilies, Configuration conf) + throws IOException, InterruptedException { final int splitCount = conf.getInt("split.count", 0); Preconditions.checkArgument(splitCount > 1, "Split count must be > 1"); @@ -378,237 +383,260 @@ public class RegionSplitter { for (String cf : columnFamilies) { desc.addFamily(new HColumnDescriptor(Bytes.toBytes(cf))); } - HBaseAdmin admin = new HBaseAdmin(conf); - try { - Preconditions.checkArgument(!admin.tableExists(tableName), - "Table already exists: " + tableName); - admin.createTable(desc, splitAlgo.split(splitCount)); - } finally { - admin.close(); - } - LOG.debug("Table created! 
Waiting for regions to show online in META..."); - if (!conf.getBoolean("split.verify", true)) { - // NOTE: createTable is synchronous on the table, but not on the regions - int onlineRegions = 0; - while (onlineRegions < splitCount) { - onlineRegions = MetaTableAccessor.getRegionCount(conf, tableName); - LOG.debug(onlineRegions + " of " + splitCount + " regions online..."); - if (onlineRegions < splitCount) { - Thread.sleep(10 * 1000); // sleep + try (Connection connection = ConnectionFactory.createConnection(conf)) { + Admin admin = connection.getAdmin(); + try { + Preconditions.checkArgument(!admin.tableExists(tableName), + "Table already exists: " + tableName); + admin.createTable(desc, splitAlgo.split(splitCount)); + } finally { + admin.close(); + } + LOG.debug("Table created! Waiting for regions to show online in META..."); + if (!conf.getBoolean("split.verify", true)) { + // NOTE: createTable is synchronous on the table, but not on the regions + int onlineRegions = 0; + while (onlineRegions < splitCount) { + onlineRegions = MetaTableAccessor.getRegionCount(connection, tableName); + LOG.debug(onlineRegions + " of " + splitCount + " regions online..."); + if (onlineRegions < splitCount) { + Thread.sleep(10 * 1000); // sleep + } } } + LOG.debug("Finished creating table with " + splitCount + " regions"); } - - LOG.debug("Finished creating table with " + splitCount + " regions"); } - static void rollingSplit(TableName tableName, SplitAlgorithm splitAlgo, - Configuration conf) throws IOException, InterruptedException { - final int minOS = conf.getInt("split.outstanding", 2); - - HTable table = new HTable(conf, tableName); - - // max outstanding splits. default == 50% of servers - final int MAX_OUTSTANDING = - Math.max(table.getConnection().getCurrentNrHRS() / 2, minOS); - - Path hbDir = FSUtils.getRootDir(conf); - Path tableDir = FSUtils.getTableDir(hbDir, table.getName()); - Path splitFile = new Path(tableDir, "_balancedSplit"); - FileSystem fs = FileSystem.get(conf); - - // get a list of daughter regions to create - LinkedList> tmpRegionSet = getSplits(table, splitAlgo); - LinkedList> outstanding = Lists.newLinkedList(); - int splitCount = 0; - final int origCount = tmpRegionSet.size(); - - // all splits must compact & we have 1 compact thread, so 2 split - // requests to the same RS can stall the outstanding split queue. - // To fix, group the regions into an RS pool and round-robin through it - LOG.debug("Bucketing regions by regionserver..."); - TreeMap>> daughterRegions = - Maps.newTreeMap(); - for (Pair dr : tmpRegionSet) { - String rsLocation = table.getRegionLocation(dr.getSecond()). - getHostnamePort(); - if (!daughterRegions.containsKey(rsLocation)) { - LinkedList> entry = Lists.newLinkedList(); - daughterRegions.put(rsLocation, entry); - } - daughterRegions.get(rsLocation).add(dr); + /** + * Alternative getCurrentNrHRS which is no longer available. + * @param connection + * @return Rough count of regionservers out on cluster. + * @throws IOException + */ + private static int getRegionServerCount(final Connection connection) throws IOException { + try (Admin admin = connection.getAdmin()) { + ClusterStatus status = admin.getClusterStatus(); + Collection servers = status.getServers(); + return servers == null || servers.isEmpty()? 0: servers.size(); } - LOG.debug("Done with bucketing. 
Split time!"); - long startTime = System.currentTimeMillis(); - - // open the split file and modify it as splits finish - FSDataInputStream tmpIn = fs.open(splitFile); - byte[] rawData = new byte[tmpIn.available()]; - tmpIn.readFully(rawData); - tmpIn.close(); - FSDataOutputStream splitOut = fs.create(splitFile); - splitOut.write(rawData); + } + private static byte [] readFile(final FileSystem fs, final Path path) throws IOException { + FSDataInputStream tmpIn = fs.open(path); try { - // *** split code *** - while (!daughterRegions.isEmpty()) { - LOG.debug(daughterRegions.size() + " RS have regions to splt."); - - // Get RegionServer : region count mapping - final TreeMap rsSizes = Maps.newTreeMap(); - Map regionsInfo = table.getRegionLocations(); - for (ServerName rs : regionsInfo.values()) { - if (rsSizes.containsKey(rs)) { - rsSizes.put(rs, rsSizes.get(rs) + 1); - } else { - rsSizes.put(rs, 1); - } - } + byte [] rawData = new byte[tmpIn.available()]; + tmpIn.readFully(rawData); + return rawData; + } finally { + tmpIn.close(); + } + } - // sort the RS by the number of regions they have - List serversLeft = Lists.newArrayList(daughterRegions .keySet()); - Collections.sort(serversLeft, new Comparator() { - public int compare(String o1, String o2) { - return rsSizes.get(o1).compareTo(rsSizes.get(o2)); + static void rollingSplit(TableName tableName, SplitAlgorithm splitAlgo, Configuration conf) + throws IOException, InterruptedException { + final int minOS = conf.getInt("split.outstanding", 2); + try (Connection connection = ConnectionFactory.createConnection(conf)) { + // Max outstanding splits. default == 50% of servers + final int MAX_OUTSTANDING = Math.max(getRegionServerCount(connection) / 2, minOS); + + Path hbDir = FSUtils.getRootDir(conf); + Path tableDir = FSUtils.getTableDir(hbDir, tableName); + Path splitFile = new Path(tableDir, "_balancedSplit"); + FileSystem fs = FileSystem.get(conf); + + // Get a list of daughter regions to create + LinkedList> tmpRegionSet = null; + try (Table table = connection.getTable(tableName)) { + tmpRegionSet = getSplits(connection, tableName, splitAlgo); + } + LinkedList> outstanding = Lists.newLinkedList(); + int splitCount = 0; + final int origCount = tmpRegionSet.size(); + + // all splits must compact & we have 1 compact thread, so 2 split + // requests to the same RS can stall the outstanding split queue. + // To fix, group the regions into an RS pool and round-robin through it + LOG.debug("Bucketing regions by regionserver..."); + TreeMap>> daughterRegions = + Maps.newTreeMap(); + // Get a regionLocator. Need it in below. + try (RegionLocator regionLocator = connection.getRegionLocator(tableName)) { + for (Pair dr : tmpRegionSet) { + String rsLocation = regionLocator.getRegionLocation(dr.getSecond()).getHostnamePort(); + if (!daughterRegions.containsKey(rsLocation)) { + LinkedList> entry = Lists.newLinkedList(); + daughterRegions.put(rsLocation, entry); } - }); - - // round-robin through the RS list. Choose the lightest-loaded servers - // first to keep the master from load-balancing regions as we split. 
- for (String rsLoc : serversLeft) { - Pair dr = null; - - // find a region in the RS list that hasn't been moved - LOG.debug("Finding a region on " + rsLoc); - LinkedList> regionList = daughterRegions - .get(rsLoc); - while (!regionList.isEmpty()) { - dr = regionList.pop(); - - // get current region info - byte[] split = dr.getSecond(); - HRegionLocation regionLoc = table.getRegionLocation(split); - - // if this region moved locations - String newRs = regionLoc.getHostnamePort(); - if (newRs.compareTo(rsLoc) != 0) { - LOG.debug("Region with " + splitAlgo.rowToStr(split) - + " moved to " + newRs + ". Relocating..."); - // relocate it, don't use it right now - if (!daughterRegions.containsKey(newRs)) { - LinkedList> entry = Lists.newLinkedList(); - daughterRegions.put(newRs, entry); - } - daughterRegions.get(newRs).add(dr); - dr = null; - continue; - } + daughterRegions.get(rsLocation).add(dr); + } + LOG.debug("Done with bucketing. Split time!"); + long startTime = System.currentTimeMillis(); - // make sure this region wasn't already split - byte[] sk = regionLoc.getRegionInfo().getStartKey(); - if (sk.length != 0) { - if (Bytes.equals(split, sk)) { - LOG.debug("Region already split on " - + splitAlgo.rowToStr(split) + ". Skipping this region..."); - ++splitCount; - dr = null; - continue; - } - byte[] start = dr.getFirst(); - Preconditions.checkArgument(Bytes.equals(start, sk), splitAlgo - .rowToStr(start) + " != " + splitAlgo.rowToStr(sk)); - } + // Open the split file and modify it as splits finish + byte[] rawData = readFile(fs, splitFile); - // passed all checks! found a good region - break; - } - if (regionList.isEmpty()) { - daughterRegions.remove(rsLoc); - } - if (dr == null) - continue; + FSDataOutputStream splitOut = fs.create(splitFile); + try { + splitOut.write(rawData); - // we have a good region, time to split! 
- byte[] split = dr.getSecond(); - LOG.debug("Splitting at " + splitAlgo.rowToStr(split)); - HBaseAdmin admin = new HBaseAdmin(table.getConfiguration()); try { - admin.split(table.getTableName(), split); - } finally { - admin.close(); - } + // *** split code *** + while (!daughterRegions.isEmpty()) { + LOG.debug(daughterRegions.size() + " RS have regions to splt."); + + // Get ServerName to region count mapping + final TreeMap rsSizes = Maps.newTreeMap(); + List hrls = regionLocator.getAllRegionLocations(); + for (HRegionLocation hrl: hrls) { + ServerName sn = hrl.getServerName(); + if (rsSizes.containsKey(sn)) { + rsSizes.put(sn, rsSizes.get(sn) + 1); + } else { + rsSizes.put(sn, 1); + } + } - LinkedList> finished = Lists.newLinkedList(); - LinkedList> local_finished = Lists.newLinkedList(); - if (conf.getBoolean("split.verify", true)) { - // we need to verify and rate-limit our splits - outstanding.addLast(dr); - // with too many outstanding splits, wait for some to finish - while (outstanding.size() >= MAX_OUTSTANDING) { - LOG.debug("Wait for outstanding splits " + outstanding.size()); - local_finished = splitScan(outstanding, table, splitAlgo); - if (local_finished.isEmpty()) { - Thread.sleep(30 * 1000); - } else { - finished.addAll(local_finished); - outstanding.removeAll(local_finished); - LOG.debug(local_finished.size() + " outstanding splits finished"); + // Sort the ServerNames by the number of regions they have + List serversLeft = Lists.newArrayList(daughterRegions .keySet()); + Collections.sort(serversLeft, new Comparator() { + public int compare(String o1, String o2) { + return rsSizes.get(o1).compareTo(rsSizes.get(o2)); + } + }); + + // Round-robin through the ServerName list. Choose the lightest-loaded servers + // first to keep the master from load-balancing regions as we split. + for (String rsLoc : serversLeft) { + Pair dr = null; + + // Find a region in the ServerName list that hasn't been moved + LOG.debug("Finding a region on " + rsLoc); + LinkedList> regionList = daughterRegions.get(rsLoc); + while (!regionList.isEmpty()) { + dr = regionList.pop(); + + // get current region info + byte[] split = dr.getSecond(); + HRegionLocation regionLoc = regionLocator.getRegionLocation(split); + + // if this region moved locations + String newRs = regionLoc.getHostnamePort(); + if (newRs.compareTo(rsLoc) != 0) { + LOG.debug("Region with " + splitAlgo.rowToStr(split) + + " moved to " + newRs + ". Relocating..."); + // relocate it, don't use it right now + if (!daughterRegions.containsKey(newRs)) { + LinkedList> entry = Lists.newLinkedList(); + daughterRegions.put(newRs, entry); + } + daughterRegions.get(newRs).add(dr); + dr = null; + continue; + } + + // make sure this region wasn't already split + byte[] sk = regionLoc.getRegionInfo().getStartKey(); + if (sk.length != 0) { + if (Bytes.equals(split, sk)) { + LOG.debug("Region already split on " + + splitAlgo.rowToStr(split) + ". Skipping this region..."); + ++splitCount; + dr = null; + continue; + } + byte[] start = dr.getFirst(); + Preconditions.checkArgument(Bytes.equals(start, sk), splitAlgo + .rowToStr(start) + " != " + splitAlgo.rowToStr(sk)); + } + + // passed all checks! found a good region + break; + } + if (regionList.isEmpty()) { + daughterRegions.remove(rsLoc); + } + if (dr == null) + continue; + + // we have a good region, time to split! 
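Aside, not part of the patch: the "time to split" step that follows issues an asynchronous split through a short-lived Admin borrowed from the shared Connection. A minimal sketch; SplitRequestSketch and splitAt are hypothetical, and the InterruptedException in the signature is defensive rather than confirmed against the Admin interface:

import java.io.IOException;

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;

public class SplitRequestSketch {
  // Hypothetical helper: request a split at an explicit point. The Admin is
  // borrowed from the shared Connection and closed immediately; the split
  // itself is asynchronous, so callers must poll (as splitScan does) until
  // the daughters are online and their reference files are compacted away.
  static void splitAt(Connection connection, TableName tableName, byte[] splitPoint)
      throws IOException, InterruptedException {
    try (Admin admin = connection.getAdmin()) {
      admin.split(tableName, splitPoint);
    }
  }
}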
+ byte[] split = dr.getSecond(); + LOG.debug("Splitting at " + splitAlgo.rowToStr(split)); + try (Admin admin = connection.getAdmin()) { + admin.split(tableName, split); + } + + LinkedList> finished = Lists.newLinkedList(); + LinkedList> local_finished = Lists.newLinkedList(); + if (conf.getBoolean("split.verify", true)) { + // we need to verify and rate-limit our splits + outstanding.addLast(dr); + // with too many outstanding splits, wait for some to finish + while (outstanding.size() >= MAX_OUTSTANDING) { + LOG.debug("Wait for outstanding splits " + outstanding.size()); + local_finished = splitScan(outstanding, connection, tableName, splitAlgo); + if (local_finished.isEmpty()) { + Thread.sleep(30 * 1000); + } else { + finished.addAll(local_finished); + outstanding.removeAll(local_finished); + LOG.debug(local_finished.size() + " outstanding splits finished"); + } + } + } else { + finished.add(dr); + } + + // mark each finished region as successfully split. + for (Pair region : finished) { + splitOut.writeChars("- " + splitAlgo.rowToStr(region.getFirst()) + + " " + splitAlgo.rowToStr(region.getSecond()) + "\n"); + splitCount++; + if (splitCount % 10 == 0) { + long tDiff = (System.currentTimeMillis() - startTime) + / splitCount; + LOG.debug("STATUS UPDATE: " + splitCount + " / " + origCount + + ". Avg Time / Split = " + + org.apache.hadoop.util.StringUtils.formatTime(tDiff)); + } + } } } - } else { - finished.add(dr); - } - - // mark each finished region as successfully split. - for (Pair region : finished) { - splitOut.writeChars("- " + splitAlgo.rowToStr(region.getFirst()) - + " " + splitAlgo.rowToStr(region.getSecond()) + "\n"); - splitCount++; - if (splitCount % 10 == 0) { - long tDiff = (System.currentTimeMillis() - startTime) - / splitCount; - LOG.debug("STATUS UPDATE: " + splitCount + " / " + origCount - + ". 
Avg Time / Split = " - + org.apache.hadoop.util.StringUtils.formatTime(tDiff)); + if (conf.getBoolean("split.verify", true)) { + while (!outstanding.isEmpty()) { + LOG.debug("Finally Wait for outstanding splits " + outstanding.size()); + LinkedList> finished = splitScan(outstanding, + connection, tableName, splitAlgo); + if (finished.isEmpty()) { + Thread.sleep(30 * 1000); + } else { + outstanding.removeAll(finished); + for (Pair region : finished) { + splitOut.writeChars("- " + splitAlgo.rowToStr(region.getFirst()) + + " " + splitAlgo.rowToStr(region.getSecond()) + "\n"); + splitCount++; + } + LOG.debug("Finally " + finished.size() + " outstanding splits finished"); + } + } } - } - } - } - if (conf.getBoolean("split.verify", true)) { - while (!outstanding.isEmpty()) { - LOG.debug("Finally Wait for outstanding splits " + outstanding.size()); - LinkedList> finished = splitScan(outstanding, - table, splitAlgo); - if (finished.isEmpty()) { - Thread.sleep(30 * 1000); - } else { - outstanding.removeAll(finished); - for (Pair region : finished) { - splitOut.writeChars("- " + splitAlgo.rowToStr(region.getFirst()) - + " " + splitAlgo.rowToStr(region.getSecond()) + "\n"); - splitCount++; + LOG.debug("All regions have been successfully split!"); + } finally { + long tDiff = System.currentTimeMillis() - startTime; + LOG.debug("TOTAL TIME = " + + org.apache.hadoop.util.StringUtils.formatTime(tDiff)); + LOG.debug("Splits = " + splitCount); + if (0 < splitCount) { + LOG.debug("Avg Time / Split = " + + org.apache.hadoop.util.StringUtils.formatTime(tDiff / splitCount)); } - LOG.debug("Finally " + finished.size() + " outstanding splits finished"); } + fs.delete(splitFile, false); + } finally { + splitOut.close(); } } - LOG.debug("All regions have been successfully split!"); - } finally { - long tDiff = System.currentTimeMillis() - startTime; - LOG.debug("TOTAL TIME = " - + org.apache.hadoop.util.StringUtils.formatTime(tDiff)); - LOG.debug("Splits = " + splitCount); - if (0 < splitCount) { - LOG.debug("Avg Time / Split = " - + org.apache.hadoop.util.StringUtils.formatTime(tDiff / splitCount)); - } - - splitOut.close(); - if (table != null){ - table.close(); - } } - fs.delete(splitFile, false); } /** @@ -647,108 +675,134 @@ public class RegionSplitter { } static LinkedList> splitScan( - LinkedList> regionList, HTable table, + LinkedList> regionList, + final Connection connection, + final TableName tableName, SplitAlgorithm splitAlgo) throws IOException, InterruptedException { LinkedList> finished = Lists.newLinkedList(); LinkedList> logicalSplitting = Lists.newLinkedList(); LinkedList> physicalSplitting = Lists.newLinkedList(); - // get table info - Path rootDir = FSUtils.getRootDir(table.getConfiguration()); - Path tableDir = FSUtils.getTableDir(rootDir, table.getName()); - FileSystem fs = tableDir.getFileSystem(table.getConfiguration()); - HTableDescriptor htd = table.getTableDescriptor(); - - // clear the cache to forcibly refresh region information - table.clearRegionCache(); - - // for every region that hasn't been verified as a finished split - for (Pair region : regionList) { - byte[] start = region.getFirst(); - byte[] split = region.getSecond(); - - // see if the new split daughter region has come online - try { - HRegionInfo dri = table.getRegionLocation(split).getRegionInfo(); - if (dri.isOffline() || !Bytes.equals(dri.getStartKey(), split)) { + // Get table info + Pair tableDirAndSplitFile = + getTableDirAndSplitFile(connection.getConfiguration(), tableName); + Path tableDir = 
tableDirAndSplitFile.getFirst(); + FileSystem fs = tableDir.getFileSystem(connection.getConfiguration()); + // Clear the cache to forcibly refresh region information + ((ClusterConnection)connection).clearRegionCache(); + HTableDescriptor htd = null; + try (Table table = connection.getTable(tableName)) { + htd = table.getTableDescriptor(); + } + try (RegionLocator regionLocator = connection.getRegionLocator(tableName)) { + + // for every region that hasn't been verified as a finished split + for (Pair region : regionList) { + byte[] start = region.getFirst(); + byte[] split = region.getSecond(); + + // see if the new split daughter region has come online + try { + HRegionInfo dri = regionLocator.getRegionLocation(split).getRegionInfo(); + if (dri.isOffline() || !Bytes.equals(dri.getStartKey(), split)) { + logicalSplitting.add(region); + continue; + } + } catch (NoServerForRegionException nsfre) { + // NSFRE will occur if the old hbase:meta entry has no server assigned + LOG.info(nsfre); logicalSplitting.add(region); continue; } - } catch (NoServerForRegionException nsfre) { - // NSFRE will occur if the old hbase:meta entry has no server assigned - LOG.info(nsfre); - logicalSplitting.add(region); - continue; - } - try { - // when a daughter region is opened, a compaction is triggered - // wait until compaction completes for both daughter regions - LinkedList check = Lists.newLinkedList(); - check.add(table.getRegionLocation(start).getRegionInfo()); - check.add(table.getRegionLocation(split).getRegionInfo()); - for (HRegionInfo hri : check.toArray(new HRegionInfo[check.size()])) { - byte[] sk = hri.getStartKey(); - if (sk.length == 0) - sk = splitAlgo.firstRow(); - String startKey = splitAlgo.rowToStr(sk); - - HRegionFileSystem regionFs = HRegionFileSystem.openRegionFromFileSystem( - table.getConfiguration(), fs, tableDir, hri, true); - - // check every Column Family for that region - boolean refFound = false; - for (HColumnDescriptor c : htd.getFamilies()) { - if ((refFound = regionFs.hasReferences(htd.getTableName().getNameAsString()))) { - break; + try { + // when a daughter region is opened, a compaction is triggered + // wait until compaction completes for both daughter regions + LinkedList check = Lists.newLinkedList(); + check.add(regionLocator.getRegionLocation(start).getRegionInfo()); + check.add(regionLocator.getRegionLocation(split).getRegionInfo()); + for (HRegionInfo hri : check.toArray(new HRegionInfo[check.size()])) { + byte[] sk = hri.getStartKey(); + if (sk.length == 0) + sk = splitAlgo.firstRow(); + + HRegionFileSystem regionFs = HRegionFileSystem.openRegionFromFileSystem( + connection.getConfiguration(), fs, tableDir, hri, true); + + // Check every Column Family for that region -- check does not have references. 
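Aside, not part of the patch: the family-by-family check that follows works because a daughter region keeps reference files pointing into its parent until its first compaction completes. A hedged sketch of that test; ReferenceCheckSketch and hasAnyReferences are hypothetical names:

import java.io.IOException;

import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;

public class ReferenceCheckSketch {
  // Hypothetical helper: "no references left in any family" means the
  // post-split compaction has finished and the daughter stands on its own.
  static boolean hasAnyReferences(HTableDescriptor htd, HRegionFileSystem regionFs)
      throws IOException {
    for (HColumnDescriptor family : htd.getFamilies()) {
      if (regionFs.hasReferences(family.getNameAsString())) {
        return true;
      }
    }
    return false;
  }
}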
+ boolean refFound = false; + for (HColumnDescriptor c : htd.getFamilies()) { + if ((refFound = regionFs.hasReferences(c.getNameAsString()))) { + break; + } } - } - // compaction is completed when all reference files are gone - if (!refFound) { - check.remove(hri); + // compaction is completed when all reference files are gone + if (!refFound) { + check.remove(hri); + } } - } - if (check.isEmpty()) { - finished.add(region); - } else { + if (check.isEmpty()) { + finished.add(region); + } else { + physicalSplitting.add(region); + } + } catch (NoServerForRegionException nsfre) { + LOG.debug("No Server Exception thrown for: " + splitAlgo.rowToStr(start)); physicalSplitting.add(region); + ((ClusterConnection)connection).clearRegionCache(); } - } catch (NoServerForRegionException nsfre) { - LOG.debug("No Server Exception thrown for: " + splitAlgo.rowToStr(start)); - physicalSplitting.add(region); - table.clearRegionCache(); } - } - LOG.debug("Split Scan: " + finished.size() + " finished / " - + logicalSplitting.size() + " split wait / " - + physicalSplitting.size() + " reference wait"); + LOG.debug("Split Scan: " + finished.size() + " finished / " + + logicalSplitting.size() + " split wait / " + + physicalSplitting.size() + " reference wait"); - return finished; + return finished; + } } - static LinkedList> getSplits(HTable table, - SplitAlgorithm splitAlgo) throws IOException { - Path hbDir = FSUtils.getRootDir(table.getConfiguration()); - Path tableDir = FSUtils.getTableDir(hbDir, table.getName()); + /** + * @param conf + * @param tableName + * @return A Pair where first item is table dir and second is the split file. + * @throws IOException + */ + private static Pair getTableDirAndSplitFile(final Configuration conf, + final TableName tableName) + throws IOException { + Path hbDir = FSUtils.getRootDir(conf); + Path tableDir = FSUtils.getTableDir(hbDir, tableName); Path splitFile = new Path(tableDir, "_balancedSplit"); - FileSystem fs = tableDir.getFileSystem(table.getConfiguration()); + return new Pair(tableDir, splitFile); + } - // using strings because (new byte[]{0}).equals(new byte[]{0}) == false + static LinkedList> getSplits(final Connection connection, + TableName tableName, SplitAlgorithm splitAlgo) + throws IOException { + Pair tableDirAndSplitFile = + getTableDirAndSplitFile(connection.getConfiguration(), tableName); + Path tableDir = tableDirAndSplitFile.getFirst(); + Path splitFile = tableDirAndSplitFile.getSecond(); + + FileSystem fs = tableDir.getFileSystem(connection.getConfiguration()); + + // Using strings because (new byte[]{0}).equals(new byte[]{0}) == false Set> daughterRegions = Sets.newHashSet(); - // does a split file exist? + // Does a split file exist? if (!fs.exists(splitFile)) { // NO = fresh start. calculate splits to make - LOG.debug("No _balancedSplit file. Calculating splits..."); + LOG.debug("No " + splitFile.getName() + " file. 
Calculating splits "); - // query meta for all regions in the table + // Query meta for all regions in the table Set> rows = Sets.newHashSet(); - Pair tmp = table.getStartEndKeys(); - Preconditions.checkArgument( - tmp.getFirst().length == tmp.getSecond().length, + Pair tmp = null; + try (RegionLocator regionLocator = connection.getRegionLocator(tableName)) { + tmp = regionLocator.getStartEndKeys(); + } + Preconditions.checkArgument(tmp.getFirst().length == tmp.getSecond().length, "Start and End rows should be equivalent"); for (int i = 0; i < tmp.getFirst().length; ++i) { byte[] start = tmp.getFirst()[i], end = tmp.getSecond()[i]; @@ -758,8 +812,7 @@ public class RegionSplitter { end = splitAlgo.lastRow(); rows.add(Pair.newPair(start, end)); } - LOG.debug("Table " + Bytes.toString(table.getTableName()) + " has " - + rows.size() + " regions that will be split."); + LOG.debug("Table " + tableName + " has " + rows.size() + " regions that will be split."); // prepare the split file Path tmpFile = new Path(tableDir, "_balancedSplit_prepare"); @@ -780,8 +833,8 @@ public class RegionSplitter { fs.rename(tmpFile, splitFile); } else { LOG.debug("_balancedSplit file found. Replay log to restore state..."); - FSUtils.getInstance(fs, table.getConfiguration()) - .recoverFileLease(fs, splitFile, table.getConfiguration(), null); + FSUtils.getInstance(fs, connection.getConfiguration()) + .recoverFileLease(fs, splitFile, connection.getConfiguration(), null); // parse split file and process remaining splits FSDataInputStream tmpIn = fs.open(splitFile); @@ -1062,4 +1115,4 @@ public class RegionSplitter { + "," + rowToStr(lastRow()) + "]"; } } -} +} \ No newline at end of file diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java index dbed9d7..ef7b6ff 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java @@ -307,7 +307,7 @@ public class WALSplitter { return true; } if(csm != null) { - HConnection scc = csm.getServer().getShortCircuitConnection(); + HConnection scc = csm.getServer().getConnection(); TableName[] tables = scc.listTableNames(); for (TableName table : tables) { if (scc.getTableState(table) diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java index aa319b7..c3ebf19 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java @@ -152,11 +152,11 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { private boolean passedZkCluster = false; private MiniDFSCluster dfsCluster = null; - private HBaseCluster hbaseCluster = null; + private volatile HBaseCluster hbaseCluster = null; private MiniMRCluster mrCluster = null; /** If there is a mini cluster running for this testing utility instance. */ - private boolean miniClusterRunning; + private volatile boolean miniClusterRunning; private String hadoopLogDir; @@ -168,6 +168,11 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { private Path dataTestDirOnTestFS = null; /** + * Shared cluster connection. + */ + private volatile Connection connection; + + /** * System property key to get test directory value. * Name is as it is because mini dfs has hard-codings to put test data here. 
* It should NOT be used directly in HBase, as it's a property used in @@ -965,6 +970,10 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { */ public void shutdownMiniCluster() throws Exception { LOG.info("Shutting down minicluster"); + if (this.connection != null && !this.connection.isClosed()) { + this.connection.close(); + this.connection = null; + } shutdownMiniHBaseCluster(); if (!this.passedZkCluster){ shutdownMiniZKCluster(); @@ -1083,7 +1092,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { * @return An HTable instance for the created table. * @throws IOException */ - public HTable createTable(TableName tableName, String family) + public Table createTable(TableName tableName, String family) throws IOException{ return createTable(tableName, new String[]{family}); } @@ -1107,7 +1116,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { * @return An HTable instance for the created table. * @throws IOException */ - public HTable createTable(TableName tableName, String[] families) + public Table createTable(TableName tableName, String[] families) throws IOException { List fams = new ArrayList(families.length); for (String family : families) { @@ -1146,13 +1155,13 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { * Create a table. * @param tableName * @param families - * @return An HTable instance for the created table. + * @return An HTable instance for the created table. * @throws IOException */ public HTable createTable(TableName tableName, byte[][] families) throws IOException { - return createTable(tableName, families, - new Configuration(getConfiguration())); + return createTable(tableName, families, new Configuration(getConfiguration())); } public HTable createTable(byte[] tableName, byte[][] families, @@ -1203,7 +1212,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { getHBaseAdmin().createTable(htd); // HBaseAdmin only waits for regions to appear in hbase:meta; we should wait until they are assigned waitUntilAllRegionsAssigned(htd.getTableName()); - return new HTable(c, htd.getTableName()); + return (HTable)getConnection().getTable(htd.getTableName()); } /** @@ -2566,6 +2575,22 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { } /** + * Get a Connection to the cluster. + * Not thread-safe (This class needs a lot of work to make it thread-safe). + * @return A Connection that can be shared. Don't close. Will be closed on shutdown of cluster. + * @throws IOException + */ + public Connection getConnection() throws IOException { + if (this.connection == null) { + if (getMiniHBaseCluster() == null) { + throw new IllegalStateException("You cannot have a Connection if cluster is not up"); + } + this.connection = ConnectionFactory.createConnection(this.conf); + } + return this.connection; + } + + /** * Returns an Admin instance. * This instance is shared between HBaseTestingUtility instance users.
* Closing it has no effect, it will be closed automatically when the @@ -2577,21 +2602,22 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { public synchronized HBaseAdmin getHBaseAdmin() throws IOException { if (hbaseAdmin == null){ - hbaseAdmin = new HBaseAdminForTests(getConfiguration()); + this.hbaseAdmin = new HBaseAdminForTests(getConnection()); } return hbaseAdmin; } private HBaseAdminForTests hbaseAdmin = null; private static class HBaseAdminForTests extends HBaseAdmin { - public HBaseAdminForTests(Configuration c) throws MasterNotRunningException, + public HBaseAdminForTests(Connection connection) throws MasterNotRunningException, ZooKeeperConnectionException, IOException { - super(c); + super(connection); } @Override public synchronized void close() throws IOException { - LOG.warn("close() called on HBaseAdmin instance returned from HBaseTestingUtility.getHBaseAdmin()"); + LOG.warn("close() called on HBaseAdmin instance returned from " + + "HBaseTestingUtility.getHBaseAdmin()"); } private synchronized void close0() throws IOException { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/MetaMockingUtil.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/MetaMockingUtil.java index 226d0c0..4a26a23 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/MetaMockingUtil.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/MetaMockingUtil.java @@ -47,7 +47,7 @@ public class MetaMockingUtil { * Returns a Result object constructed from the given region information simulating * a catalog table result. * @param region the HRegionInfo object or null - * @param ServerName to use making startcode and server hostname:port in meta or null + * @param sn the ServerName to use when making the startcode and server hostname:port in meta, or null * @return A mocked up Result that fakes a Get on a row in the hbase:meta table. * @throws IOException */ @@ -60,7 +60,7 @@ public class MetaMockingUtil { * Returns a Result object constructed from the given region information simulating * a catalog table result. * @param region the HRegionInfo object or null - * @param ServerName to use making startcode and server hostname:port in meta or null + * @param sn the ServerName to use when making the startcode and server hostname:port in meta, or null * @param splita daughter region or null * @param splitb daughter region or null * @return A mocked up Result that fakes a Get on a row in the hbase:meta table.
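Aside, not part of the patch: a hedged usage sketch of the new shared-connection accessor in HBaseTestingUtility. Callers borrow a Table from util.getConnection() and close only the Table; the Connection itself is closed by shutdownMiniCluster(). The table, family, and row names here are illustrative:

import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class SharedConnectionSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtility util = new HBaseTestingUtility();
    util.startMiniCluster();
    try {
      util.createTable(TableName.valueOf("t1"), "f1");
      // Borrow a Table from the shared Connection; close only the Table.
      try (Table table = util.getConnection().getTable(TableName.valueOf("t1"))) {
        Put put = new Put(Bytes.toBytes("row1"));
        put.add(Bytes.toBytes("f1"), Bytes.toBytes("q"), Bytes.toBytes("v"));
        table.put(put);
      }
    } finally {
      // Also closes the shared Connection, per the shutdownMiniCluster hunk.
      util.shutdownMiniCluster();
    }
  }
}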
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/MockRegionServerServices.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/MockRegionServerServices.java index d1ac1fd..11f3a7a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/MockRegionServerServices.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/MockRegionServerServices.java @@ -27,7 +27,7 @@ import java.util.concurrent.ConcurrentSkipListMap; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.hbase.client.HConnection; +import org.apache.hadoop.hbase.client.ClusterConnection; import org.apache.hadoop.hbase.executor.ExecutorService; import org.apache.hadoop.hbase.fs.HFileSystem; import org.apache.hadoop.hbase.ipc.RpcServerInterface; @@ -138,7 +138,7 @@ class MockRegionServerServices implements RegionServerServices { } @Override - public HConnection getShortCircuitConnection() { + public ClusterConnection getConnection() { return null; } @@ -266,4 +266,4 @@ class MockRegionServerServices implements RegionServerServices { // TODO Auto-generated method stub return false; } -} +} \ No newline at end of file diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestAcidGuarantees.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestAcidGuarantees.java index 3bcb0bb..e9da497 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestAcidGuarantees.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestAcidGuarantees.java @@ -274,7 +274,7 @@ public class TestAcidGuarantees implements Tool { } // Add a flusher ctx.addThread(new RepeatingTestThread(ctx) { - HBaseAdmin admin = new HBaseAdmin(util.getConfiguration()); + HBaseAdmin admin = util.getHBaseAdmin(); public void doAnAction() throws Exception { try { admin.flush(TABLE_NAME); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestIOFencing.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestIOFencing.java index 76bc94b..a9493c7 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestIOFencing.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestIOFencing.java @@ -28,12 +28,12 @@ import java.util.concurrent.atomic.AtomicLong; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.apache.commons.logging.impl.Log4JLogger; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.HBaseAdmin; -import org.apache.hadoop.hbase.client.HTable; +import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.protobuf.generated.WALProtos.CompactionDescriptor; import org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy; @@ -50,11 +50,6 @@ import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.testclassification.MiscTests; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread; -import org.apache.hadoop.hdfs.DFSClient; -import org.apache.hadoop.hdfs.server.datanode.DataNode; -import org.apache.hadoop.hdfs.server.namenode.FSNamesystem; -import org.apache.hadoop.hdfs.server.namenode.LeaseManager; -import org.apache.log4j.Level; import org.junit.Test; import org.junit.experimental.categories.Category; @@ -259,13 +254,13 @@ public class TestIOFencing { 
LOG.info("Starting mini cluster"); TEST_UTIL.startMiniCluster(1); CompactionBlockerRegion compactingRegion = null; - HBaseAdmin admin = null; + Admin admin = null; try { LOG.info("Creating admin"); - admin = new HBaseAdmin(c); + admin = TEST_UTIL.getConnection().getAdmin(); LOG.info("Creating table"); TEST_UTIL.createTable(TABLE_NAME, FAMILY); - HTable table = new HTable(c, TABLE_NAME); + Table table = TEST_UTIL.getConnection().getTable(TABLE_NAME); LOG.info("Loading test table"); // Find the region List testRegions = TEST_UTIL.getMiniHBaseCluster().findRegionsForTable(TABLE_NAME); @@ -299,7 +294,7 @@ public class TestIOFencing { assertTrue(compactingRegion.countStoreFiles() > 1); final byte REGION_NAME[] = compactingRegion.getRegionName(); LOG.info("Asking for compaction"); - admin.majorCompact(TABLE_NAME.getName()); + ((HBaseAdmin)admin).majorCompact(TABLE_NAME.getName()); LOG.info("Waiting for compaction to be about to start"); compactingRegion.waitForCompactionToBlock(); LOG.info("Starting a new server"); @@ -339,7 +334,7 @@ public class TestIOFencing { // If we survive the split keep going... // Now we make sure that the region isn't totally confused. Load up more rows. TEST_UTIL.loadNumericRows(table, FAMILY, FIRST_BATCH_COUNT, FIRST_BATCH_COUNT + SECOND_BATCH_COUNT); - admin.majorCompact(TABLE_NAME.getName()); + ((HBaseAdmin)admin).majorCompact(TABLE_NAME.getName()); startWaitTime = System.currentTimeMillis(); while (newRegion.compactCount == 0) { Thread.sleep(1000); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/example/TestZooKeeperTableArchiveClient.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/example/TestZooKeeperTableArchiveClient.java index 953121f..1757804 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/example/TestZooKeeperTableArchiveClient.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/example/TestZooKeeperTableArchiveClient.java @@ -35,14 +35,16 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.testclassification.MediumTests; -import org.apache.hadoop.hbase.testclassification.MiscTests; import org.apache.hadoop.hbase.Stoppable; +import org.apache.hadoop.hbase.client.ClusterConnection; +import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.master.cleaner.BaseHFileCleanerDelegate; import org.apache.hadoop.hbase.master.cleaner.HFileCleaner; import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.regionserver.Store; +import org.apache.hadoop.hbase.testclassification.MediumTests; +import org.apache.hadoop.hbase.testclassification.MiscTests; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.FSUtils; import org.apache.hadoop.hbase.util.HFileArchiveUtil; @@ -73,6 +75,7 @@ public class TestZooKeeperTableArchiveClient { private static final byte[] TABLE_NAME = Bytes.toBytes(STRING_TABLE_NAME); private static ZKTableArchiveClient archivingClient; private final List toCleanup = new ArrayList(); + private static ClusterConnection CONNECTION; /** * Setup the config for the cluster @@ -81,8 +84,8 @@ public class TestZooKeeperTableArchiveClient { public static void setupCluster() throws Exception { setupConf(UTIL.getConfiguration()); UTIL.startMiniZKCluster(); - archivingClient = new 
ZKTableArchiveClient(UTIL.getConfiguration(), UTIL.getHBaseAdmin() - .getConnection()); + CONNECTION = (ClusterConnection)ConnectionFactory.createConnection(UTIL.getConfiguration()); + archivingClient = new ZKTableArchiveClient(UTIL.getConfiguration(), CONNECTION); // make hfile archiving node so we can archive files ZooKeeperWatcher watcher = UTIL.getZooKeeperWatcher(); String archivingZNode = ZKTableArchiveClient.getArchiveZNode(UTIL.getConfiguration(), watcher); @@ -115,6 +118,7 @@ public class TestZooKeeperTableArchiveClient { @AfterClass public static void cleanupTest() throws Exception { try { + CONNECTION.close(); UTIL.shutdownMiniZKCluster(); } catch (Exception e) { LOG.warn("problem shutting down cluster", e); @@ -415,4 +419,4 @@ public class TestZooKeeperTableArchiveClient { // stop the cleaner stop.stop(""); } -} +} \ No newline at end of file diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/HConnectionTestingUtility.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/HConnectionTestingUtility.java index f3bf9c4..86c8e7a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/HConnectionTestingUtility.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/HConnectionTestingUtility.java @@ -33,7 +33,7 @@ import org.apache.hadoop.hbase.ipc.RpcControllerFactory; import org.mockito.Mockito; /** - * {@link HConnection} testing utility. + * {@link ClusterConnection} testing utility. */ public class HConnectionTestingUtility { /* @@ -44,7 +44,7 @@ public class HConnectionTestingUtility { /** * Get a Mocked {@link HConnection} that goes with the passed conf * configuration instance. Minimally the mock will return - * conf when {@link HConnection#getConfiguration()} is invoked. + * conf when {@link ClusterConnection#getConfiguration()} is invoked. * Be sure to shutdown the connection when done by calling * {@link HConnectionManager#deleteConnection(Configuration)} else it * will stick around; this is probably not what you want. @@ -69,7 +69,7 @@ public class HConnectionTestingUtility { /** * Calls {@link #getMockedConnection(Configuration)} and then mocks a few - * more of the popular {@link HConnection} methods so they do 'normal' + * more of the popular {@link ClusterConnection} methods so they do 'normal' * operation (see return doc below for list). 
Be sure to shutdown the * connection when done by calling * {@link HConnectionManager#deleteConnection(Configuration)} else it @@ -85,12 +85,13 @@ public class HConnectionTestingUtility { * @param hri HRegionInfo to include in the location returned when * getRegionLocator is called on the mocked connection * @return Mock up a connection that returns a {@link Configuration} when - * {@link HConnection#getConfiguration()} is called, a 'location' when - * {@link HConnection#getRegionLocation(org.apache.hadoop.hbase.TableName, byte[], boolean)} is called, + * {@link ClusterConnection#getConfiguration()} is called, a 'location' when + * {@link ClusterConnection#getRegionLocation(org.apache.hadoop.hbase.TableName, byte[], boolean)} + * is called, * and that returns the passed {@link AdminProtos.AdminService.BlockingInterface} instance when - * {@link HConnection#getAdmin(ServerName)} is called, returns the passed + * {@link ClusterConnection#getAdmin(ServerName)} is called, returns the passed * {@link ClientProtos.ClientService.BlockingInterface} instance when - * {@link HConnection#getClient(ServerName)} is called (Be sure to call + * {@link ClusterConnection#getClient(ServerName)} is called (Be sure to call * {@link HConnectionManager#deleteConnection(Configuration)} * when done with this mocked Connection. * @throws IOException @@ -134,11 +135,13 @@ public class HConnectionTestingUtility { Mockito.when(c.getNewRpcRetryingCallerFactory(conf)).thenReturn( RpcRetryingCallerFactory.instantiate(conf, RetryingCallerInterceptorFactory.NO_OP_INTERCEPTOR)); + HTableInterface t = Mockito.mock(HTableInterface.class); + Mockito.when(c.getTable((TableName)Mockito.any())).thenReturn(t); return c; } /** - * Get a Mockito spied-upon {@link HConnection} that goes with the passed + * Get a Mockito spied-upon {@link ClusterConnection} that goes with the passed * conf configuration instance. 
* Be sure to shutdown the connection when done by calling * {@link HConnectionManager#deleteConnection(Configuration)} else it @@ -149,7 +152,7 @@ public class HConnectionTestingUtility { * @see @link * {http://mockito.googlecode.com/svn/branches/1.6/javadoc/org/mockito/Mockito.html#spy(T)} */ - public static HConnection getSpiedConnection(final Configuration conf) + public static ClusterConnection getSpiedConnection(final Configuration conf) throws IOException { HConnectionKey connectionKey = new HConnectionKey(conf); synchronized (ConnectionManager.CONNECTION_INSTANCES) { @@ -185,4 +188,4 @@ public class HConnectionTestingUtility { return ConnectionManager.CONNECTION_INSTANCES.size(); } } -} +} \ No newline at end of file diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide3.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide3.java index fd32eef..0e97684 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide3.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide3.java @@ -111,13 +111,13 @@ public class TestFromClientSide3 { } private void performMultiplePutAndFlush(HBaseAdmin admin, HTable table, - byte[] row, byte[] family, int nFlushes, int nPuts) throws Exception { + byte[] row, byte[] family, int nFlushes, int nPuts) + throws Exception { // connection needed for poll-wait - HConnection conn = HConnectionManager.getConnection(TEST_UTIL - .getConfiguration()); HRegionLocation loc = table.getRegionLocation(row, true); - AdminProtos.AdminService.BlockingInterface server = conn.getAdmin(loc.getServerName()); + AdminProtos.AdminService.BlockingInterface server = + admin.getConnection().getAdmin(loc.getServerName()); byte[] regName = loc.getRegionInfo().getRegionName(); for (int i = 0; i < nFlushes; i++) { @@ -151,12 +151,10 @@ public class TestFromClientSide3 { TEST_UTIL.getConfiguration().setInt("hbase.hstore.compaction.min", 3); String tableName = "testAdvancedConfigOverride"; - TableName TABLE = - TableName.valueOf(tableName); + TableName TABLE = TableName.valueOf(tableName); HTable hTable = TEST_UTIL.createTable(TABLE, FAMILY, 10); - HBaseAdmin admin = new HBaseAdmin(TEST_UTIL.getConfiguration()); - HConnection connection = HConnectionManager.getConnection(TEST_UTIL - .getConfiguration()); + HBaseAdmin admin = TEST_UTIL.getHBaseAdmin(); + ClusterConnection connection = (ClusterConnection)TEST_UTIL.getConnection(); // Create 3 store files. 
byte[] row = Bytes.toBytes(random.nextInt()); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverInterface.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverInterface.java index c747f0e..882aece 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverInterface.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverInterface.java @@ -563,7 +563,7 @@ public class TestRegionObserverInterface { String testName = TestRegionObserverInterface.class.getName()+".bulkLoadHFileTest"; TableName tableName = TableName.valueOf(TEST_TABLE.getNameAsString() + ".bulkLoadHFileTest"); Configuration conf = util.getConfiguration(); - Table table = util.createTable(tableName, new byte[][] {A, B, C}); + HTable table = util.createTable(tableName, new byte[][] {A, B, C}); try { verifyMethodResult(SimpleRegionObserver.class, new String[] {"hadPreBulkLoadHFile", "hadPostBulkLoadHFile"}, @@ -577,8 +577,8 @@ public class TestRegionObserverInterface { createHFile(util.getConfiguration(), fs, new Path(familyDir,Bytes.toString(A)), A, A); - //Bulk load - new LoadIncrementalHFiles(conf).doBulkLoad(dir, new HTable(conf, tableName)); + // Bulk load + new LoadIncrementalHFiles(conf).doBulkLoad(dir, table); verifyMethodResult(SimpleRegionObserver.class, new String[] {"hadPreBulkLoadHFile", "hadPostBulkLoadHFile"}, diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionServerObserver.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionServerObserver.java index 5139373..0c30bb2 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionServerObserver.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionServerObserver.java @@ -154,7 +154,7 @@ public class TestRegionServerObserver { mergedRegion = rmt.stepsBeforePONR(rs, rs, false); rmt.prepareMutationsForMerge(mergedRegion.getRegionInfo(), regionA.getRegionInfo(), regionB.getRegionInfo(), rs.getServerName(), metaEntries); - MetaTableAccessor.mutateMetaTable(rs.getShortCircuitConnection(), metaEntries); + MetaTableAccessor.mutateMetaTable(rs.getConnection(), metaEntries); } @Override diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestChangingEncoding.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestChangingEncoding.java index 0be8931..e002b8b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestChangingEncoding.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestChangingEncoding.java @@ -35,6 +35,7 @@ import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.Durability; import org.apache.hadoop.hbase.client.Get; import org.apache.hadoop.hbase.client.HBaseAdmin; @@ -47,9 +48,7 @@ import org.apache.hadoop.hbase.testclassification.IOTests; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Threads; -import org.junit.After; import org.junit.AfterClass; -import org.junit.Before; import org.junit.BeforeClass; import org.junit.Test; import org.junit.experimental.categories.Category; @@ -71,7 +70,6 @@ public class 
TestChangingEncoding { private static final int TIMEOUT_MS = 600000; - private HBaseAdmin admin; private HColumnDescriptor hcd; private TableName tableName; @@ -93,7 +91,9 @@ public class TestChangingEncoding { HTableDescriptor htd = new HTableDescriptor(tableName); hcd = new HColumnDescriptor(CF); htd.addFamily(hcd); - admin.createTable(htd); + try (Admin admin = TEST_UTIL.getConnection().getAdmin()) { + admin.createTable(htd); + } numBatchesWritten = 0; } @@ -112,16 +112,6 @@ public class TestChangingEncoding { TEST_UTIL.shutdownMiniCluster(); } - @Before - public void setUp() throws Exception { - admin = new HBaseAdmin(conf); - } - - @After - public void tearDown() throws IOException { - admin.close(); - } - private static byte[] getRowKey(int batchId, int i) { return Bytes.toBytes("batch" + batchId + "_row" + i); } @@ -184,12 +174,14 @@ public class TestChangingEncoding { LOG.debug("Setting CF encoding to " + encoding + " (ordinal=" + encoding.ordinal() + "), onlineChange=" + onlineChange); hcd.setDataBlockEncoding(encoding); - if (!onlineChange) { - admin.disableTable(tableName); - } - admin.modifyColumn(tableName, hcd); - if (!onlineChange) { - admin.enableTable(tableName); + try (Admin admin = TEST_UTIL.getConnection().getAdmin()) { + if (!onlineChange) { + admin.disableTable(tableName); + } + admin.modifyColumn(tableName, hcd); + if (!onlineChange) { + admin.enableTable(tableName); + } } // This is a unit test, not integration test. So let's // wait for regions out of transition. Otherwise, for online @@ -227,6 +219,7 @@ public class TestChangingEncoding { private void compactAndWait() throws IOException, InterruptedException { LOG.debug("Compacting table " + tableName); HRegionServer rs = TEST_UTIL.getMiniHBaseCluster().getRegionServer(0); + HBaseAdmin admin = TEST_UTIL.getHBaseAdmin(); admin.majorCompact(tableName); // Waiting for the compaction to start, at least .5s. diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapred/TestTableMapReduceUtil.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapred/TestTableMapReduceUtil.java index 6e9f8d8..628bb96 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapred/TestTableMapReduceUtil.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapred/TestTableMapReduceUtil.java @@ -136,7 +136,6 @@ public class TestTableMapReduceUtil { * does not exceed the number of regions for the given table. 
*/ @Test - @SuppressWarnings("deprecation") public void shouldNumberOfReduceTaskNotExceedNumberOfRegionsForGivenTable() throws IOException { Assert.assertNotNull(presidentsTable); @@ -155,7 +154,6 @@ public class TestTableMapReduceUtil { } @Test - @SuppressWarnings("deprecation") public void shouldNumberOfMapTaskNotExceedNumberOfRegionsForGivenTable() throws IOException { Configuration cfg = UTIL.getConfiguration(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java index 5d8cd19..82d224b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java @@ -23,8 +23,8 @@ import java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.Map; -import java.util.Set; import java.util.Random; +import java.util.Set; import java.util.TreeMap; import java.util.concurrent.ConcurrentSkipListMap; @@ -37,8 +37,8 @@ import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.ZooKeeperConnectionException; +import org.apache.hadoop.hbase.client.ClusterConnection; import org.apache.hadoop.hbase.client.Get; -import org.apache.hadoop.hbase.client.HConnection; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.executor.ExecutorService; @@ -113,7 +113,7 @@ import com.google.protobuf.ServiceException; * Use this when you can't bend Mockito to your liking (e.g. return null result * when 'scanning' until the master times out and then return a coherent meta row * result thereafter). Have some facility for faking gets and scans. See - * {@link #setGetResult(byte[], byte[], Result)} for how to fill the backing data + * setGetResult(byte[], byte[], Result) for how to fill the backing data * store that the get pulls from.
*/ class MockRegionServer @@ -283,7 +283,7 @@ ClientProtos.ClientService.BlockingInterface, RegionServerServices { } @Override - public HConnection getShortCircuitConnection() { + public ClusterConnection getConnection() { return null; } @@ -614,4 +614,4 @@ ClientProtos.ClientService.BlockingInterface, RegionServerServices { throws ServiceException { return null; } -} +} \ No newline at end of file diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestActiveMasterManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestActiveMasterManager.java index 649d1a5..34c890e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestActiveMasterManager.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestActiveMasterManager.java @@ -32,7 +32,7 @@ import org.apache.hadoop.hbase.CoordinatedStateManager; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.ServerName; -import org.apache.hadoop.hbase.client.HConnection; +import org.apache.hadoop.hbase.client.ClusterConnection; import org.apache.hadoop.hbase.monitoring.MonitoredTask; import org.apache.hadoop.hbase.testclassification.MasterTests; import org.apache.hadoop.hbase.testclassification.MediumTests; @@ -304,7 +304,7 @@ public class TestActiveMasterManager { } @Override - public HConnection getShortCircuitConnection() { + public ClusterConnection getConnection() { return null; } @@ -321,5 +321,4 @@ public class TestActiveMasterManager { return activeMasterManager; } } -} - +} \ No newline at end of file diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java index 9bf3d10..58afc12 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java @@ -38,25 +38,28 @@ import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hbase.TableDescriptor; -import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.CoordinatedStateManager; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.MetaMockingUtil; import org.apache.hadoop.hbase.NamespaceDescriptor; import org.apache.hadoop.hbase.NotAllMetaRegionsOnlineException; import org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.TableDescriptor; import org.apache.hadoop.hbase.TableDescriptors; -import org.apache.hadoop.hbase.MetaMockingUtil; -import org.apache.hadoop.hbase.client.HConnection; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.ClusterConnection; import org.apache.hadoop.hbase.client.HConnectionManager; import org.apache.hadoop.hbase.client.HConnectionTestingUtility; import org.apache.hadoop.hbase.client.Result; -import org.apache.hadoop.hbase.CoordinatedStateManager; import org.apache.hadoop.hbase.client.TableState; +import org.apache.hadoop.hbase.coordination.BaseCoordinatedStateManager; +import org.apache.hadoop.hbase.coordination.SplitLogManagerCoordination; +import 
org.apache.hadoop.hbase.coordination.SplitLogManagerCoordination.SplitLogManagerDetails; import org.apache.hadoop.hbase.executor.ExecutorService; import org.apache.hadoop.hbase.io.Reference; import org.apache.hadoop.hbase.master.CatalogJanitor.SplitParentFirstComparator; @@ -99,7 +102,7 @@ public class TestCatalogJanitor { * Be sure to call stop on the way out else could leave some mess around. */ class MockServer implements Server { - private final HConnection connection; + private final ClusterConnection connection; private final Configuration c; MockServer(final HBaseTestingUtility htu) @@ -145,7 +148,7 @@ public class TestCatalogJanitor { } @Override - public HConnection getShortCircuitConnection() { + public ClusterConnection getConnection() { return this.connection; } @@ -171,7 +174,12 @@ public class TestCatalogJanitor { @Override public CoordinatedStateManager getCoordinatedStateManager() { - return null; + BaseCoordinatedStateManager m = Mockito.mock(BaseCoordinatedStateManager.class); + SplitLogManagerCoordination c = Mockito.mock(SplitLogManagerCoordination.class); + Mockito.when(m.getSplitLogManagerCoordination()).thenReturn(c); + SplitLogManagerDetails d = Mockito.mock(SplitLogManagerDetails.class); + Mockito.when(c.getDetails()).thenReturn(d); + return m; } @Override @@ -266,7 +274,7 @@ public class TestCatalogJanitor { } @Override - public HConnection getShortCircuitConnection() { + public ClusterConnection getConnection() { return null; } @@ -885,6 +893,7 @@ public class TestCatalogJanitor { MasterServices services = new MockMasterServices(server); // create the janitor + CatalogJanitor janitor = new CatalogJanitor(server, services); // Create regions. diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestClockSkewDetection.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestClockSkewDetection.java index 72403bf..ced0c4c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestClockSkewDetection.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestClockSkewDetection.java @@ -25,9 +25,12 @@ import java.net.InetAddress; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.*; +import org.apache.hadoop.hbase.ClockOutOfSyncException; import org.apache.hadoop.hbase.CoordinatedStateManager; -import org.apache.hadoop.hbase.client.HConnection; +import org.apache.hadoop.hbase.HBaseConfiguration; +import org.apache.hadoop.hbase.Server; +import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.client.ClusterConnection; import org.apache.hadoop.hbase.testclassification.MasterTests; import org.apache.hadoop.hbase.testclassification.SmallTests; import org.apache.hadoop.hbase.zookeeper.MetaTableLocator; @@ -45,7 +48,7 @@ public class TestClockSkewDetection { final Configuration conf = HBaseConfiguration.create(); ServerManager sm = new ServerManager(new Server() { @Override - public HConnection getShortCircuitConnection() { + public ClusterConnection getConnection() { return null; } @@ -89,7 +92,8 @@ public class TestClockSkewDetection { @Override public void stop(String why) { - }}, null, false); + } + }, null, false); LOG.debug("regionServerStartup 1"); InetAddress ia1 = InetAddress.getLocalHost(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMaster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMaster.java index 73a1663..e2c3e0d 100644 
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMaster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMaster.java @@ -87,7 +87,7 @@ public class TestMaster { ht.close(); List> tableRegions = MetaTableAccessor.getTableRegionsAndLocations( - m.getShortCircuitConnection(), TABLENAME); + m.getConnection(), TABLENAME); LOG.info("Regions after load: " + Joiner.on(',').join(tableRegions)); assertEquals(1, tableRegions.size()); assertArrayEquals(HConstants.EMPTY_START_ROW, @@ -104,7 +104,7 @@ public class TestMaster { Thread.sleep(100); } LOG.info("Making sure we can call getTableRegions while opening"); - tableRegions = MetaTableAccessor.getTableRegionsAndLocations(m.getShortCircuitConnection(), + tableRegions = MetaTableAccessor.getTableRegionsAndLocations(m.getConnection(), TABLENAME, false); LOG.info("Regions: " + Joiner.on(',').join(tableRegions)); @@ -115,7 +115,7 @@ public class TestMaster { m.getTableRegionForRow(TABLENAME, Bytes.toBytes("cde")); LOG.info("Result is: " + pair); Pair tableRegionFromName = - MetaTableAccessor.getRegion(m.getShortCircuitConnection(), + MetaTableAccessor.getRegion(m.getConnection(), pair.getFirst().getRegionName()); assertEquals(tableRegionFromName.getFirst(), pair.getFirst()); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterFailover.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterFailover.java index a2dbcb6..f211754 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterFailover.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterFailover.java @@ -42,6 +42,7 @@ import org.apache.hadoop.hbase.MiniHBaseCluster; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.RegionLocator; +import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.master.RegionState.State; import org.apache.hadoop.hbase.protobuf.RequestConverter; import org.apache.hadoop.hbase.regionserver.HRegion; @@ -218,8 +219,8 @@ public class TestMasterFailover { assertTrue(master.isInitialized()); // Create a table with a region online - RegionLocator onlineTable = TEST_UTIL.createTable(TableName.valueOf("onlineTable"), "family"); - + Table onlineTable = TEST_UTIL.createTable(TableName.valueOf("onlineTable"), "family"); + onlineTable.close(); // Create a table in META, so it has a region offline HTableDescriptor offlineTable = new HTableDescriptor( TableName.valueOf(Bytes.toBytes("offlineTable"))); @@ -232,16 +233,18 @@ public class TestMasterFailover { HRegionInfo hriOffline = new HRegionInfo(offlineTable.getTableName(), null, null); createRegion(hriOffline, rootdir, conf, offlineTable); - MetaTableAccessor.addRegionToMeta(master.getShortCircuitConnection(), hriOffline); + MetaTableAccessor.addRegionToMeta(master.getConnection(), hriOffline); log("Regions in hbase:meta and namespace have been created"); // at this point we only expect 3 regions to be assigned out // (catalogs and namespace, + 1 online region) assertEquals(3, cluster.countServedRegions()); - HRegionInfo hriOnline = onlineTable.getRegionLocation( - HConstants.EMPTY_START_ROW).getRegionInfo(); - + HRegionInfo hriOnline = null; + try (RegionLocator locator = + TEST_UTIL.getConnection().getRegionLocator(TableName.valueOf("onlineTable"))) { + hriOnline = locator.getRegionLocation(HConstants.EMPTY_START_ROW).getRegionInfo(); + } RegionStates regionStates = 
master.getAssignmentManager().getRegionStates(); RegionStateStore stateStore = master.getAssignmentManager().getRegionStateStore(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterNoCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterNoCluster.java index c2f44e2..fc7f136 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterNoCluster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterNoCluster.java @@ -44,7 +44,7 @@ import org.apache.hadoop.hbase.ServerLoad; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.ZooKeeperConnectionException; -import org.apache.hadoop.hbase.client.HConnection; +import org.apache.hadoop.hbase.client.ClusterConnection; import org.apache.hadoop.hbase.client.HConnectionTestingUtility; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.monitoring.MonitoredTask; @@ -206,7 +206,7 @@ public class TestMasterNoCluster { } @Override - public HConnection getShortCircuitConnection() { + public ClusterConnection getConnection() { // Insert a mock for the connection, use TESTUTIL.getConfiguration rather than // the conf from the master; the conf will already have an HConnection // associated so the below mocking of a connection will fail. @@ -282,7 +282,7 @@ public class TestMasterNoCluster { } @Override - public HConnection getShortCircuitConnection() { + public ClusterConnection getConnection() { // Insert a mock for the connection, use TESTUTIL.getConfiguration rather than // the conf from the master; the conf will already have an HConnection // associated so the below mocking of a connection will fail. diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterOperationsForRegionReplicas.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterOperationsForRegionReplicas.java index 4e6fe59..289741e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterOperationsForRegionReplicas.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterOperationsForRegionReplicas.java @@ -43,7 +43,9 @@ import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.MetaTableAccessor; import org.apache.hadoop.hbase.MetaTableAccessor.Visitor; +import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.Delete; import org.apache.hadoop.hbase.client.HBaseAdmin; import org.apache.hadoop.hbase.client.HTable; @@ -62,7 +64,8 @@ import org.junit.experimental.categories.Category; public class TestMasterOperationsForRegionReplicas { final static Log LOG = LogFactory.getLog(TestRegionPlacement.class); private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); - private static HBaseAdmin admin; + private static Connection CONNECTION = null; + private static Admin ADMIN; private static int numSlaves = 2; private static Configuration conf; @@ -71,14 +74,17 @@ public class TestMasterOperationsForRegionReplicas { conf = TEST_UTIL.getConfiguration(); conf.setBoolean("hbase.tests.use.shortcircuit.reads", false); TEST_UTIL.startMiniCluster(numSlaves); - admin = new HBaseAdmin(conf); - while(admin.getClusterStatus().getServers().size() < numSlaves) { + CONNECTION =
ConnectionFactory.createConnection(TEST_UTIL.getConfiguration()); + ADMIN = CONNECTION.getAdmin(); + while(ADMIN.getClusterStatus().getServers().size() < numSlaves) { Thread.sleep(100); } } @AfterClass public static void tearDownAfterClass() throws Exception { + if (ADMIN != null) ADMIN.close(); + if (CONNECTION != null && !CONNECTION.isClosed()) CONNECTION.close(); TEST_UTIL.shutdownMiniCluster(); } @@ -91,15 +97,15 @@ public class TestMasterOperationsForRegionReplicas { HTableDescriptor desc = new HTableDescriptor(table); desc.setRegionReplication(numReplica); desc.addFamily(new HColumnDescriptor("family")); - admin.createTable(desc, Bytes.toBytes("A"), Bytes.toBytes("Z"), numRegions); + ADMIN.createTable(desc, Bytes.toBytes("A"), Bytes.toBytes("Z"), numRegions); - validateNumberOfRowsInMeta(table, numRegions, admin.getConnection()); + validateNumberOfRowsInMeta(table, numRegions, ADMIN.getConnection()); List hris = MetaTableAccessor.getTableRegions( - admin.getConnection(), table); + ADMIN.getConnection(), table); assert(hris.size() == numRegions * numReplica); } finally { - admin.disableTable(table); - admin.deleteTable(table); + ADMIN.disableTable(table); + ADMIN.deleteTable(table); } } @@ -112,11 +118,11 @@ public class TestMasterOperationsForRegionReplicas { HTableDescriptor desc = new HTableDescriptor(table); desc.setRegionReplication(numReplica); desc.addFamily(new HColumnDescriptor("family")); - admin.createTable(desc, Bytes.toBytes("A"), Bytes.toBytes("Z"), numRegions); + ADMIN.createTable(desc, Bytes.toBytes("A"), Bytes.toBytes("Z"), numRegions); TEST_UTIL.waitTableEnabled(table); - validateNumberOfRowsInMeta(table, numRegions, admin.getConnection()); + validateNumberOfRowsInMeta(table, numRegions, ADMIN.getConnection()); - List hris = MetaTableAccessor.getTableRegions(admin.getConnection(), table); + List hris = MetaTableAccessor.getTableRegions(ADMIN.getConnection(), table); assert(hris.size() == numRegions * numReplica); // check that the master created expected number of RegionState objects for (int i = 0; i < numRegions; i++) { @@ -128,7 +134,7 @@ public class TestMasterOperationsForRegionReplicas { } } - List metaRows = MetaTableAccessor.fullScanOfMeta(admin.getConnection()); + List metaRows = MetaTableAccessor.fullScanOfMeta(ADMIN.getConnection()); int numRows = 0; for (Result result : metaRows) { RegionLocations locations = MetaTableAccessor.getRegionLocations(result); @@ -145,7 +151,7 @@ public class TestMasterOperationsForRegionReplicas { // The same verification of the meta as above but with the SnapshotOfRegionAssignmentFromMeta // class validateFromSnapshotFromMeta(TEST_UTIL, table, numRegions, numReplica, - admin.getConnection()); + ADMIN.getConnection()); // Now kill the master, restart it and see if the assignments are kept ServerName master = TEST_UTIL.getHBaseClusterInterface().getClusterStatus().getMaster(); @@ -162,7 +168,7 @@ public class TestMasterOperationsForRegionReplicas { } } validateFromSnapshotFromMeta(TEST_UTIL, table, numRegions, numReplica, - admin.getConnection()); + ADMIN.getConnection()); // Now shut the whole cluster down, and verify the assignments are kept so that the // availability constraints are met. 
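For reference, the unmanaged-connection lifecycle these tests converge on (one Connection built up front, Admin and Table handles served from it, everything closed in reverse order), sketched against the public client API. The class and table names here are illustrative, not part of this patch:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Table;

    public class ConnectionLifecycleSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // One heavyweight, caller-owned Connection per process or test class;
        // Admin and Table are lightweight handles onto it.
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin();
             Table table = connection.getTable(TableName.valueOf("exampleTable"))) {
          System.out.println("exists: " + admin.tableExists(table.getName()));
        } // Closes table, then admin, then connection, in that order.
      }
    }

The static-field variant in TestMasterOperationsForRegionReplicas above is this same lifecycle with the try-with-resources unrolled into @BeforeClass and @AfterClass.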
@@ -170,46 +176,42 @@ public class TestMasterOperationsForRegionReplicas { TEST_UTIL.shutdownMiniHBaseCluster(); TEST_UTIL.startMiniHBaseCluster(1, numSlaves); TEST_UTIL.waitTableEnabled(table); - admin.close(); - admin = new HBaseAdmin(conf); validateFromSnapshotFromMeta(TEST_UTIL, table, numRegions, numReplica, - admin.getConnection()); + ADMIN.getConnection()); // Now shut the whole cluster down, and verify regions are assigned even if there is only // one server running TEST_UTIL.shutdownMiniHBaseCluster(); TEST_UTIL.startMiniHBaseCluster(1, 1); TEST_UTIL.waitTableEnabled(table); - admin.close(); - admin = new HBaseAdmin(conf); - validateSingleRegionServerAssignment(admin.getConnection(), numRegions, numReplica); + validateSingleRegionServerAssignment(ADMIN.getConnection(), numRegions, numReplica); for (int i = 1; i < numSlaves; i++) { //restore the cluster TEST_UTIL.getMiniHBaseCluster().startRegionServer(); } //check on alter table - admin.disableTable(table); - assert(admin.isTableDisabled(table)); + ADMIN.disableTable(table); + assert(ADMIN.isTableDisabled(table)); //increase the replica desc.setRegionReplication(numReplica + 1); - admin.modifyTable(table, desc); - admin.enableTable(table); - assert(admin.isTableEnabled(table)); + ADMIN.modifyTable(table, desc); + ADMIN.enableTable(table); + assert(ADMIN.isTableEnabled(table)); List regions = TEST_UTIL.getMiniHBaseCluster().getMaster() .getAssignmentManager().getRegionStates().getRegionsOfTable(table); assert(regions.size() == numRegions * (numReplica + 1)); //decrease the replica(earlier, table was modified to have a replica count of numReplica + 1) - admin.disableTable(table); + ADMIN.disableTable(table); desc.setRegionReplication(numReplica); - admin.modifyTable(table, desc); - admin.enableTable(table); - assert(admin.isTableEnabled(table)); + ADMIN.modifyTable(table, desc); + ADMIN.enableTable(table); + assert(ADMIN.isTableEnabled(table)); regions = TEST_UTIL.getMiniHBaseCluster().getMaster() .getAssignmentManager().getRegionStates().getRegionsOfTable(table); assert(regions.size() == numRegions * numReplica); //also make sure the meta table has the replica locations removed - hris = MetaTableAccessor.getTableRegions(admin.getConnection(), table); + hris = MetaTableAccessor.getTableRegions(ADMIN.getConnection(), table); assert(hris.size() == numRegions * numReplica); //just check that the number of default replica regions in the meta table are the same //as the number of regions the table was created with, and the count of the @@ -225,8 +227,8 @@ public class TestMasterOperationsForRegionReplicas { Collection counts = new HashSet(defaultReplicas.values()); assert(counts.size() == 1 && counts.contains(new Integer(numReplica))); } finally { - admin.disableTable(table); - admin.deleteTable(table); + ADMIN.disableTable(table); + ADMIN.deleteTable(table); } } @@ -241,17 +243,17 @@ public class TestMasterOperationsForRegionReplicas { HTableDescriptor desc = new HTableDescriptor(table); desc.setRegionReplication(numReplica); desc.addFamily(new HColumnDescriptor("family")); - admin.createTable(desc, Bytes.toBytes("A"), Bytes.toBytes("Z"), numRegions); + ADMIN.createTable(desc, Bytes.toBytes("A"), Bytes.toBytes("Z"), numRegions); TEST_UTIL.waitTableEnabled(table); Set tableRows = new HashSet(); - List hris = MetaTableAccessor.getTableRegions(admin.getConnection(), table); + List hris = MetaTableAccessor.getTableRegions(ADMIN.getConnection(), table); for (HRegionInfo hri : hris) { tableRows.add(hri.getRegionName()); } - 
admin.disableTable(table); + ADMIN.disableTable(table); // now delete one replica info from all the rows // this is to make the meta appear to be only partially updated - Table metaTable = new HTable(TableName.META_TABLE_NAME, admin.getConnection()); + Table metaTable = new HTable(TableName.META_TABLE_NAME, ADMIN.getConnection()); for (byte[] row : tableRows) { Delete deleteOneReplicaLocation = new Delete(row); deleteOneReplicaLocation.deleteColumns(HConstants.CATALOG_FAMILY, @@ -265,14 +267,14 @@ public class TestMasterOperationsForRegionReplicas { metaTable.close(); // even if the meta table is partly updated, when we re-enable the table, we should // get back the desired number of replicas for the regions - admin.enableTable(table); - assert(admin.isTableEnabled(table)); + ADMIN.enableTable(table); + assert(ADMIN.isTableEnabled(table)); List regions = TEST_UTIL.getMiniHBaseCluster().getMaster() .getAssignmentManager().getRegionStates().getRegionsOfTable(table); assert(regions.size() == numRegions * numReplica); } finally { - admin.disableTable(table); - admin.deleteTable(table); + ADMIN.disableTable(table); + ADMIN.deleteTable(table); } } @@ -286,7 +288,7 @@ public class TestMasterOperationsForRegionReplicas { private void validateNumberOfRowsInMeta(final TableName table, int numRegions, Connection connection) throws IOException { - assert(admin.tableExists(table)); + assert(ADMIN.tableExists(table)); final AtomicInteger count = new AtomicInteger(); Visitor visitor = new Visitor() { @Override diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRestartCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRestartCluster.java index 74afe7e..5b6f985 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRestartCluster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRestartCluster.java @@ -131,7 +131,7 @@ public class TestRestartCluster { // We don't have to use SnapshotOfRegionAssignmentFromMeta. 
// We use it here because AM used to use it to load all user region placements SnapshotOfRegionAssignmentFromMeta snapshot = new SnapshotOfRegionAssignmentFromMeta( - master.getShortCircuitConnection()); + master.getConnection()); snapshot.initialize(); Map regionToRegionServerMap = snapshot.getRegionToRegionServerMap(); @@ -197,7 +197,7 @@ public class TestRestartCluster { Threads.sleep(100); } - snapshot =new SnapshotOfRegionAssignmentFromMeta(master.getShortCircuitConnection()); + snapshot =new SnapshotOfRegionAssignmentFromMeta(master.getConnection()); snapshot.initialize(); Map newRegionToRegionServerMap = snapshot.getRegionToRegionServerMap(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestSplitLogManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestSplitLogManager.java index 76a6db9..53de0a4 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestSplitLogManager.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestSplitLogManager.java @@ -58,7 +58,7 @@ import org.apache.hadoop.hbase.SplitLogCounters; import org.apache.hadoop.hbase.SplitLogTask; import org.apache.hadoop.hbase.Stoppable; import org.apache.hadoop.hbase.Waiter; -import org.apache.hadoop.hbase.client.HConnection; +import org.apache.hadoop.hbase.client.ClusterConnection; import org.apache.hadoop.hbase.coordination.ZKSplitLogManagerCoordination; import org.apache.hadoop.hbase.master.SplitLogManager.Task; import org.apache.hadoop.hbase.master.SplitLogManager.TaskBatch; @@ -154,7 +154,7 @@ public class TestSplitLogManager { } @Override - public HConnection getShortCircuitConnection() { + public ClusterConnection getConnection() { return null; } @@ -162,7 +162,6 @@ public class TestSplitLogManager { public MetaTableLocator getMetaTableLocator() { return null; } - } static Stoppable stopper = new Stoppable() { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileCleaner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileCleaner.java index a0b479f..b045c72 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileCleaner.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileCleaner.java @@ -34,7 +34,7 @@ import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.ServerName; -import org.apache.hadoop.hbase.client.HConnection; +import org.apache.hadoop.hbase.client.ClusterConnection; import org.apache.hadoop.hbase.testclassification.MasterTests; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.util.EnvironmentEdge; @@ -217,7 +217,7 @@ public class TestHFileCleaner { } @Override - public HConnection getShortCircuitConnection() { + public ClusterConnection getConnection() { return null; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileLinkCleaner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileLinkCleaner.java index f4fff62..a004134 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileLinkCleaner.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileLinkCleaner.java @@ -33,7 +33,8 @@ import org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.HRegionInfo; import 
org.apache.hadoop.hbase.CoordinatedStateManager; -import org.apache.hadoop.hbase.client.HConnection; +import org.apache.hadoop.hbase.client.ClusterConnection; +import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.io.HFileLink; import org.apache.hadoop.hbase.testclassification.MasterTests; import org.apache.hadoop.hbase.testclassification.SmallTests; @@ -153,7 +154,7 @@ public class TestHFileLinkCleaner { } @Override - public HConnection getShortCircuitConnection() { + public ClusterConnection getConnection() { return null; } @@ -183,4 +184,4 @@ public class TestHFileLinkCleaner { return false; } } -} +} \ No newline at end of file diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestLogsCleaner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestLogsCleaner.java index 2400584..4e8ec09 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestLogsCleaner.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestLogsCleaner.java @@ -26,9 +26,13 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hbase.*; import org.apache.hadoop.hbase.CoordinatedStateManager; -import org.apache.hadoop.hbase.client.HConnection; +import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.Server; +import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.Waiter; +import org.apache.hadoop.hbase.client.ClusterConnection; import org.apache.hadoop.hbase.replication.ReplicationFactory; import org.apache.hadoop.hbase.replication.ReplicationQueues; import org.apache.hadoop.hbase.replication.regionserver.Replication; @@ -160,7 +164,7 @@ public class TestLogsCleaner { } @Override - public HConnection getShortCircuitConnection() { + public ClusterConnection getConnection() { return null; } @@ -190,6 +194,4 @@ public class TestLogsCleaner { return false; } } - -} - +} \ No newline at end of file diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaTableUtil.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaTableUtil.java index 84d9155..34239c0 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaTableUtil.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaTableUtil.java @@ -18,6 +18,9 @@ package org.apache.hadoop.hbase.quotas; +import static org.junit.Assert.assertEquals; + +import java.io.IOException; import java.util.concurrent.TimeUnit; import org.apache.commons.logging.Log; @@ -25,20 +28,20 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.client.HBaseAdmin; +import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas; import org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Throttle; import org.apache.hadoop.hbase.testclassification.MasterTests; import org.apache.hadoop.hbase.testclassification.MediumTests; - +import org.junit.After; import org.junit.AfterClass; +import org.junit.Before; import org.junit.BeforeClass; import 
org.junit.Test; import org.junit.experimental.categories.Category; -import static org.junit.Assert.assertEquals; - /** * Test the quota table helpers (e.g. CRUD operations) */ @@ -47,6 +50,7 @@ public class TestQuotaTableUtil { final Log LOG = LogFactory.getLog(getClass()); private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); + private Connection connection; @BeforeClass public static void setUpBeforeClass() throws Exception { @@ -66,6 +70,16 @@ public class TestQuotaTableUtil { TEST_UTIL.shutdownMiniCluster(); } + @Before + public void before() throws IOException { + this.connection = ConnectionFactory.createConnection(TEST_UTIL.getConfiguration()); + } + + @After + public void after() throws IOException { + this.connection.close(); + } + @Test public void testTableQuotaUtil() throws Exception { final TableName table = TableName.valueOf("testTableQuotaUtilTable"); @@ -79,13 +93,13 @@ public class TestQuotaTableUtil { .build(); // Add user quota and verify it - QuotaUtil.addTableQuota(TEST_UTIL.getConfiguration(), table, quota); - Quotas resQuota = QuotaUtil.getTableQuota(TEST_UTIL.getConfiguration(), table); + QuotaUtil.addTableQuota(this.connection, table, quota); + Quotas resQuota = QuotaUtil.getTableQuota(this.connection, table); assertEquals(quota, resQuota); // Remove user quota and verify it - QuotaUtil.deleteTableQuota(TEST_UTIL.getConfiguration(), table); - resQuota = QuotaUtil.getTableQuota(TEST_UTIL.getConfiguration(), table); + QuotaUtil.deleteTableQuota(this.connection, table); + resQuota = QuotaUtil.getTableQuota(this.connection, table); assertEquals(null, resQuota); } @@ -102,13 +116,13 @@ public class TestQuotaTableUtil { .build(); // Add user quota and verify it - QuotaUtil.addNamespaceQuota(TEST_UTIL.getConfiguration(), namespace, quota); - Quotas resQuota = QuotaUtil.getNamespaceQuota(TEST_UTIL.getConfiguration(), namespace); + QuotaUtil.addNamespaceQuota(this.connection, namespace, quota); + Quotas resQuota = QuotaUtil.getNamespaceQuota(this.connection, namespace); assertEquals(quota, resQuota); // Remove user quota and verify it - QuotaUtil.deleteNamespaceQuota(TEST_UTIL.getConfiguration(), namespace); - resQuota = QuotaUtil.getNamespaceQuota(TEST_UTIL.getConfiguration(), namespace); + QuotaUtil.deleteNamespaceQuota(this.connection, namespace); + resQuota = QuotaUtil.getNamespaceQuota(this.connection, namespace); assertEquals(null, resQuota); } @@ -139,33 +153,33 @@ public class TestQuotaTableUtil { .build(); // Add user global quota - QuotaUtil.addUserQuota(TEST_UTIL.getConfiguration(), user, quota); - Quotas resQuota = QuotaUtil.getUserQuota(TEST_UTIL.getConfiguration(), user); + QuotaUtil.addUserQuota(this.connection, user, quota); + Quotas resQuota = QuotaUtil.getUserQuota(this.connection, user); assertEquals(quota, resQuota); // Add user quota for table - QuotaUtil.addUserQuota(TEST_UTIL.getConfiguration(), user, table, quotaTable); - Quotas resQuotaTable = QuotaUtil.getUserQuota(TEST_UTIL.getConfiguration(), user, table); + QuotaUtil.addUserQuota(this.connection, user, table, quotaTable); + Quotas resQuotaTable = QuotaUtil.getUserQuota(this.connection, user, table); assertEquals(quotaTable, resQuotaTable); // Add user quota for namespace - QuotaUtil.addUserQuota(TEST_UTIL.getConfiguration(), user, namespace, quotaNamespace); - Quotas resQuotaNS = QuotaUtil.getUserQuota(TEST_UTIL.getConfiguration(), user, namespace); + QuotaUtil.addUserQuota(this.connection, user, namespace, quotaNamespace); + Quotas resQuotaNS = 
QuotaUtil.getUserQuota(this.connection, user, namespace); assertEquals(quotaNamespace, resQuotaNS); // Delete user global quota - QuotaUtil.deleteUserQuota(TEST_UTIL.getConfiguration(), user); - resQuota = QuotaUtil.getUserQuota(TEST_UTIL.getConfiguration(), user); + QuotaUtil.deleteUserQuota(this.connection, user); + resQuota = QuotaUtil.getUserQuota(this.connection, user); assertEquals(null, resQuota); // Delete user quota for table - QuotaUtil.deleteUserQuota(TEST_UTIL.getConfiguration(), user, table); - resQuotaTable = QuotaUtil.getUserQuota(TEST_UTIL.getConfiguration(), user, table); + QuotaUtil.deleteUserQuota(this.connection, user, table); + resQuotaTable = QuotaUtil.getUserQuota(this.connection, user, table); assertEquals(null, resQuotaTable); // Delete user quota for namespace - QuotaUtil.deleteUserQuota(TEST_UTIL.getConfiguration(), user, namespace); - resQuotaNS = QuotaUtil.getUserQuota(TEST_UTIL.getConfiguration(), user, namespace); + QuotaUtil.deleteUserQuota(this.connection, user, namespace); + resQuotaNS = QuotaUtil.getUserQuota(this.connection, user, namespace); assertEquals(null, resQuotaNS); } } \ No newline at end of file diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionOnCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionOnCluster.java index 44d3b45..ce2869b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionOnCluster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionOnCluster.java @@ -57,6 +57,7 @@ public class TestHRegionOnCluster { public void testDataCorrectnessReplayingRecoveredEdits() throws Exception { final int NUM_MASTERS = 1; final int NUM_RS = 3; + Admin hbaseAdmin = null; TEST_UTIL.startMiniCluster(NUM_MASTERS, NUM_RS); try { @@ -68,7 +69,7 @@ public class TestHRegionOnCluster { // Create table HTableDescriptor desc = new HTableDescriptor(TABLENAME); desc.addFamily(new HColumnDescriptor(FAMILY)); - Admin hbaseAdmin = TEST_UTIL.getHBaseAdmin(); + hbaseAdmin = master.getConnection().getAdmin(); hbaseAdmin.createTable(desc); assertTrue(hbaseAdmin.isTableAvailable(TABLENAME)); @@ -130,6 +131,7 @@ public class TestHRegionOnCluster { putDataAndVerify(table, "r4", FAMILY, "v4", 4); } finally { + if (hbaseAdmin != null) hbaseAdmin.close(); TEST_UTIL.shutdownMiniCluster(); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHeapMemoryManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHeapMemoryManager.java index fcfe063..91de97c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHeapMemoryManager.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHeapMemoryManager.java @@ -32,8 +32,8 @@ import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.client.ClusterConnection; import org.apache.hadoop.hbase.io.hfile.BlockCache; -import org.apache.hadoop.hbase.client.HConnection; import org.apache.hadoop.hbase.io.hfile.BlockCacheKey; import org.apache.hadoop.hbase.io.hfile.CacheStats; import org.apache.hadoop.hbase.io.hfile.Cacheable; @@ -477,7 +477,7 @@ public class TestHeapMemoryManager { } @Override - public HConnection getShortCircuitConnection() { + public ClusterConnection getConnection() { return null; } diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransactionOnCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransactionOnCluster.java index 1a14571..80599ea 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransactionOnCluster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransactionOnCluster.java @@ -209,12 +209,12 @@ public class TestRegionMergeTransactionOnCluster { table.close(); List> tableRegions = MetaTableAccessor - .getTableRegionsAndLocations(master.getShortCircuitConnection(), tableName); + .getTableRegionsAndLocations(master.getConnection(), tableName); HRegionInfo mergedRegionInfo = tableRegions.get(0).getFirst(); HTableDescriptor tableDescritor = master.getTableDescriptors().get( tableName); Result mergedRegionResult = MetaTableAccessor.getRegionResult( - master.getShortCircuitConnection(), mergedRegionInfo.getRegionName()); + master.getConnection(), mergedRegionInfo.getRegionName()); // contains merge reference in META assertTrue(mergedRegionResult.getValue(HConstants.CATALOG_FAMILY, @@ -257,7 +257,7 @@ public class TestRegionMergeTransactionOnCluster { assertFalse(fs.exists(regionBdir)); mergedRegionResult = MetaTableAccessor.getRegionResult( - master.getShortCircuitConnection(), mergedRegionInfo.getRegionName()); + master.getConnection(), mergedRegionInfo.getRegionName()); assertFalse(mergedRegionResult.getValue(HConstants.CATALOG_FAMILY, HConstants.MERGEA_QUALIFIER) != null); assertFalse(mergedRegionResult.getValue(HConstants.CATALOG_FAMILY, @@ -327,13 +327,13 @@ public class TestRegionMergeTransactionOnCluster { createTableAndLoadData(master, tableName, 5, 2); List> initialRegionToServers = MetaTableAccessor.getTableRegionsAndLocations( - master.getShortCircuitConnection(), tableName); + master.getConnection(), tableName); // Merge 1st and 2nd region PairOfSameType mergedRegions = mergeRegionsAndVerifyRegionNum(master, tableName, 0, 2, 5 * 2 - 2); List> currentRegionToServers = MetaTableAccessor.getTableRegionsAndLocations( - master.getShortCircuitConnection(), tableName); + master.getConnection(), tableName); List initialRegions = new ArrayList(); for (Pair p : initialRegionToServers) { initialRegions.add(p.getFirst()); @@ -373,7 +373,7 @@ public class TestRegionMergeTransactionOnCluster { int regionAnum, int regionBnum) throws Exception { List> tableRegions = MetaTableAccessor .getTableRegionsAndLocations( - master.getShortCircuitConnection(), tablename); + master.getConnection(), tablename); HRegionInfo regionA = tableRegions.get(regionAnum).getFirst(); HRegionInfo regionB = tableRegions.get(regionBnum).getFirst(); TEST_UTIL.getHBaseAdmin().mergeRegions( @@ -389,7 +389,7 @@ public class TestRegionMergeTransactionOnCluster { long timeout = System.currentTimeMillis() + waitTime; while (System.currentTimeMillis() < timeout) { tableRegionsInMeta = MetaTableAccessor.getTableRegionsAndLocations( - master.getShortCircuitConnection(), tablename); + master.getConnection(), tablename); tableRegionsInMaster = master.getAssignmentManager().getRegionStates() .getRegionsOfTable(tablename); if (tableRegionsInMeta.size() == expectedRegionNum @@ -400,7 +400,7 @@ public class TestRegionMergeTransactionOnCluster { } tableRegionsInMeta = MetaTableAccessor.getTableRegionsAndLocations( - master.getShortCircuitConnection(), tablename); + master.getConnection(), tablename); LOG.info("Regions after merge:" + 
Joiner.on(',').join(tableRegionsInMeta)); assertEquals(expectedRegionNum, tableRegionsInMeta.size()); } @@ -430,14 +430,14 @@ public class TestRegionMergeTransactionOnCluster { List> tableRegions; while (System.currentTimeMillis() < timeout) { tableRegions = MetaTableAccessor.getTableRegionsAndLocations( - master.getShortCircuitConnection(), tablename); + master.getConnection(), tablename); if (tableRegions.size() == numRegions * replication) break; Thread.sleep(250); } tableRegions = MetaTableAccessor.getTableRegionsAndLocations( - master.getShortCircuitConnection(), tablename); + master.getConnection(), tablename); LOG.info("Regions after load: " + Joiner.on(',').join(tableRegions)); assertEquals(numRegions * replication, tableRegions.size()); return table; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitLogWorker.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitLogWorker.java index 1ae58f1..44d7464 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitLogWorker.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitLogWorker.java @@ -18,10 +18,11 @@ */ package org.apache.hadoop.hbase.regionserver; +import static org.hamcrest.CoreMatchers.is; +import static org.hamcrest.CoreMatchers.not; import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; import static org.junit.Assert.assertThat; -import static org.hamcrest.CoreMatchers.*; +import static org.junit.Assert.assertTrue; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -36,17 +37,17 @@ import org.apache.hadoop.hbase.CoordinatedStateManagerFactory; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.testclassification.MediumTests; -import org.apache.hadoop.hbase.testclassification.RegionServerTests; import org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.SplitLogCounters; import org.apache.hadoop.hbase.SplitLogTask; import org.apache.hadoop.hbase.Waiter; -import org.apache.hadoop.hbase.client.HConnection; +import org.apache.hadoop.hbase.client.ClusterConnection; import org.apache.hadoop.hbase.executor.ExecutorService; import org.apache.hadoop.hbase.executor.ExecutorType; import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.RecoveryMode; +import org.apache.hadoop.hbase.testclassification.MediumTests; +import org.apache.hadoop.hbase.testclassification.RegionServerTests; import org.apache.hadoop.hbase.util.CancelableProgressable; import org.apache.hadoop.hbase.zookeeper.MetaTableLocator; import org.apache.hadoop.hbase.zookeeper.ZKSplitLog; @@ -128,7 +129,7 @@ public class TestSplitLogWorker { } @Override - public HConnection getShortCircuitConnection() { + public ClusterConnection getConnection() { return null; } @@ -136,7 +137,6 @@ public class TestSplitLogWorker { public MetaTableLocator getMetaTableLocator() { return null; } - } private void waitForCounter(AtomicLong ctr, long oldval, long newval, long timems) diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java index 29073ed..12b177a 100644 --- 
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java
index 29073ed..12b177a 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java
@@ -634,7 +634,7 @@ public class TestSplitTransactionOnCluster {
     admin.setBalancerRunning(false, true);
     // Turn off the meta scanner so it doesn't remove the parent on us.
     cluster.getMaster().setCatalogJanitorEnabled(false);
-    boolean tableExists = MetaTableAccessor.tableExists(regionServer.getShortCircuitConnection(),
+    boolean tableExists = MetaTableAccessor.tableExists(regionServer.getConnection(),
       tableName);
     assertEquals("The specified table should be present.", true, tableExists);
     final HRegion region = findSplittableRegion(regions);
@@ -646,7 +646,7 @@ public class TestSplitTransactionOnCluster {
       } catch (IOException e) {
       }
-      tableExists = MetaTableAccessor.tableExists(regionServer.getShortCircuitConnection(),
+      tableExists = MetaTableAccessor.tableExists(regionServer.getConnection(),
         tableName);
       assertEquals("The specified table should be present.", true, tableExists);
     } finally {
@@ -680,7 +680,7 @@ public class TestSplitTransactionOnCluster {
     admin.setBalancerRunning(false, true);
     // Turn off the meta scanner so it doesn't remove the parent on us.
     cluster.getMaster().setCatalogJanitorEnabled(false);
-    boolean tableExists = MetaTableAccessor.tableExists(regionServer.getShortCircuitConnection(),
+    boolean tableExists = MetaTableAccessor.tableExists(regionServer.getConnection(),
       tableName);
     assertEquals("The specified table should be present.", true, tableExists);
     final HRegion region = findSplittableRegion(oldRegions);
@@ -703,7 +703,7 @@ public class TestSplitTransactionOnCluster {
       Thread.sleep(1000);
     } while ((newRegions.contains(oldRegions.get(0)) || newRegions.contains(oldRegions.get(1)))
         || newRegions.size() != 4);
-    tableExists = MetaTableAccessor.tableExists(regionServer.getShortCircuitConnection(),
+    tableExists = MetaTableAccessor.tableExists(regionServer.getConnection(),
       tableName);
     assertEquals("The specified table should be present.", true, tableExists);
     // exists works on stale and we see the put after the flush
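On the region server side the existence check follows the same pattern; a minimal sketch (not part of the patch; 'regionServer' is e.g. cluster.getRegionServer(0) as in the test above):

    // Check hbase:meta for the table through the region server's ClusterConnection.
    boolean tableExists = MetaTableAccessor.tableExists(regionServer.getConnection(), tableName);
    assertEquals("The specified table should be present.", true, tableExists);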
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestPerTableCFReplication.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestPerTableCFReplication.java
index ab5f136..169feba 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestPerTableCFReplication.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestPerTableCFReplication.java
@@ -32,11 +32,17 @@ import java.util.Map;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.*;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Get;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
-import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.Table;
@@ -267,115 +273,127 @@ public class TestPerTableCFReplication {
   @Test(timeout=300000)
   public void testPerTableCFReplication() throws Exception {
     LOG.info("testPerTableCFReplication");
-    ReplicationAdmin admin1 = new ReplicationAdmin(conf1);
-
-    new HBaseAdmin(conf1).createTable(tabA);
-    new HBaseAdmin(conf1).createTable(tabB);
-    new HBaseAdmin(conf1).createTable(tabC);
-    new HBaseAdmin(conf2).createTable(tabA);
-    new HBaseAdmin(conf2).createTable(tabB);
-    new HBaseAdmin(conf2).createTable(tabC);
-    new HBaseAdmin(conf3).createTable(tabA);
-    new HBaseAdmin(conf3).createTable(tabB);
-    new HBaseAdmin(conf3).createTable(tabC);
-
-    Table htab1A = new HTable(conf1, tabAName);
-    Table htab2A = new HTable(conf2, tabAName);
-    Table htab3A = new HTable(conf3, tabAName);
-
-    Table htab1B = new HTable(conf1, tabBName);
-    Table htab2B = new HTable(conf2, tabBName);
-    Table htab3B = new HTable(conf3, tabBName);
-
-    Table htab1C = new HTable(conf1, tabCName);
-    Table htab2C = new HTable(conf2, tabCName);
-    Table htab3C = new HTable(conf3, tabCName);
-
-    // A. add cluster2/cluster3 as peers to cluster1
-    admin1.addPeer("2", utility2.getClusterKey(), "TC;TB:f1,f3");
-    admin1.addPeer("3", utility3.getClusterKey(), "TA;TB:f1,f2");
-
-    // A1. tableA can only replicated to cluster3
-    putAndWaitWithFamily(row1, f1Name, htab1A, htab3A);
-    ensureRowNotReplicated(row1, f1Name, htab2A);
-    deleteAndWaitWithFamily(row1, f1Name, htab1A, htab3A);
-
-    putAndWaitWithFamily(row1, f2Name, htab1A, htab3A);
-    ensureRowNotReplicated(row1, f2Name, htab2A);
-    deleteAndWaitWithFamily(row1, f2Name, htab1A, htab3A);
-
-    putAndWaitWithFamily(row1, f3Name, htab1A, htab3A);
-    ensureRowNotReplicated(row1, f3Name, htab2A);
-    deleteAndWaitWithFamily(row1, f3Name, htab1A, htab3A);
-
-    // A2. cf 'f1' of tableB can replicated to both cluster2 and cluster3
-    putAndWaitWithFamily(row1, f1Name, htab1B, htab2B, htab3B);
-    deleteAndWaitWithFamily(row1, f1Name, htab1B, htab2B, htab3B);
-
-    // cf 'f2' of tableB can only replicated to cluster3
-    putAndWaitWithFamily(row1, f2Name, htab1B, htab3B);
-    ensureRowNotReplicated(row1, f2Name, htab2B);
-    deleteAndWaitWithFamily(row1, f2Name, htab1B, htab3B);
-
-    // cf 'f3' of tableB can only replicated to cluster2
-    putAndWaitWithFamily(row1, f3Name, htab1B, htab2B);
-    ensureRowNotReplicated(row1, f3Name, htab3B);
-    deleteAndWaitWithFamily(row1, f3Name, htab1B, htab2B);
-
-    // A3. tableC can only replicated to cluster2
-    putAndWaitWithFamily(row1, f1Name, htab1C, htab2C);
-    ensureRowNotReplicated(row1, f1Name, htab3C);
-    deleteAndWaitWithFamily(row1, f1Name, htab1C, htab2C);
-
-    putAndWaitWithFamily(row1, f2Name, htab1C, htab2C);
-    ensureRowNotReplicated(row1, f2Name, htab3C);
-    deleteAndWaitWithFamily(row1, f2Name, htab1C, htab2C);
-
-    putAndWaitWithFamily(row1, f3Name, htab1C, htab2C);
-    ensureRowNotReplicated(row1, f3Name, htab3C);
-    deleteAndWaitWithFamily(row1, f3Name, htab1C, htab2C);
-
-    // B. change peers' replicable table-cf config
-    admin1.setPeerTableCFs("2", "TA:f1,f2; TC:f2,f3");
-    admin1.setPeerTableCFs("3", "TB; TC:f3");
-
-    // B1. cf 'f1' of tableA can only replicated to cluster2
-    putAndWaitWithFamily(row2, f1Name, htab1A, htab2A);
-    ensureRowNotReplicated(row2, f1Name, htab3A);
-    deleteAndWaitWithFamily(row2, f1Name, htab1A, htab2A);
-    // cf 'f2' of tableA can only replicated to cluster2
-    putAndWaitWithFamily(row2, f2Name, htab1A, htab2A);
-    ensureRowNotReplicated(row2, f2Name, htab3A);
-    deleteAndWaitWithFamily(row2, f2Name, htab1A, htab2A);
-    // cf 'f3' of tableA isn't replicable to either cluster2 or cluster3
-    putAndWaitWithFamily(row2, f3Name, htab1A);
-    ensureRowNotReplicated(row2, f3Name, htab2A, htab3A);
-    deleteAndWaitWithFamily(row2, f3Name, htab1A);
-
-    // B2. tableB can only replicated to cluster3
-    putAndWaitWithFamily(row2, f1Name, htab1B, htab3B);
-    ensureRowNotReplicated(row2, f1Name, htab2B);
-    deleteAndWaitWithFamily(row2, f1Name, htab1B, htab3B);
-
-    putAndWaitWithFamily(row2, f2Name, htab1B, htab3B);
-    ensureRowNotReplicated(row2, f2Name, htab2B);
-    deleteAndWaitWithFamily(row2, f2Name, htab1B, htab3B);
-
-    putAndWaitWithFamily(row2, f3Name, htab1B, htab3B);
-    ensureRowNotReplicated(row2, f3Name, htab2B);
-    deleteAndWaitWithFamily(row2, f3Name, htab1B, htab3B);
-
-    // B3. cf 'f1' of tableC non-replicable to either cluster
-    putAndWaitWithFamily(row2, f1Name, htab1C);
-    ensureRowNotReplicated(row2, f1Name, htab2C, htab3C);
-    deleteAndWaitWithFamily(row2, f1Name, htab1C);
-    // cf 'f2' of tableC can only replicated to cluster2
-    putAndWaitWithFamily(row2, f2Name, htab1C, htab2C);
-    ensureRowNotReplicated(row2, f2Name, htab3C);
-    deleteAndWaitWithFamily(row2, f2Name, htab1C, htab2C);
-    // cf 'f3' of tableC can replicated to cluster2 and cluster3
-    putAndWaitWithFamily(row2, f3Name, htab1C, htab2C, htab3C);
-    deleteAndWaitWithFamily(row2, f3Name, htab1C, htab2C, htab3C);
+    ReplicationAdmin replicationAdmin = new ReplicationAdmin(conf1);
+    Connection connection1 = ConnectionFactory.createConnection(conf1);
+    Connection connection2 = ConnectionFactory.createConnection(conf2);
+    Connection connection3 = ConnectionFactory.createConnection(conf3);
+    try {
+      Admin admin1 = connection1.getAdmin();
+      Admin admin2 = connection2.getAdmin();
+      Admin admin3 = connection3.getAdmin();
+
+      admin1.createTable(tabA);
+      admin1.createTable(tabB);
+      admin1.createTable(tabC);
+      admin2.createTable(tabA);
+      admin2.createTable(tabB);
+      admin2.createTable(tabC);
+      admin3.createTable(tabA);
+      admin3.createTable(tabB);
+      admin3.createTable(tabC);
+
+      Table htab1A = connection1.getTable(tabAName);
+      Table htab2A = connection2.getTable(tabAName);
+      Table htab3A = connection3.getTable(tabAName);
+
+      Table htab1B = connection1.getTable(tabBName);
+      Table htab2B = connection2.getTable(tabBName);
+      Table htab3B = connection3.getTable(tabBName);
+
+      Table htab1C = connection1.getTable(tabCName);
+      Table htab2C = connection2.getTable(tabCName);
+      Table htab3C = connection3.getTable(tabCName);
+
+      // A. add cluster2/cluster3 as peers to cluster1
+      replicationAdmin.addPeer("2", utility2.getClusterKey(), "TC;TB:f1,f3");
+      replicationAdmin.addPeer("3", utility3.getClusterKey(), "TA;TB:f1,f2");
+
+      // A1. tableA can only be replicated to cluster3
+      putAndWaitWithFamily(row1, f1Name, htab1A, htab3A);
+      ensureRowNotReplicated(row1, f1Name, htab2A);
+      deleteAndWaitWithFamily(row1, f1Name, htab1A, htab3A);
+
+      putAndWaitWithFamily(row1, f2Name, htab1A, htab3A);
+      ensureRowNotReplicated(row1, f2Name, htab2A);
+      deleteAndWaitWithFamily(row1, f2Name, htab1A, htab3A);
+
+      putAndWaitWithFamily(row1, f3Name, htab1A, htab3A);
+      ensureRowNotReplicated(row1, f3Name, htab2A);
+      deleteAndWaitWithFamily(row1, f3Name, htab1A, htab3A);
+
+      // A2. cf 'f1' of tableB can be replicated to both cluster2 and cluster3
+      putAndWaitWithFamily(row1, f1Name, htab1B, htab2B, htab3B);
+      deleteAndWaitWithFamily(row1, f1Name, htab1B, htab2B, htab3B);
+
+      // cf 'f2' of tableB can only be replicated to cluster3
+      putAndWaitWithFamily(row1, f2Name, htab1B, htab3B);
+      ensureRowNotReplicated(row1, f2Name, htab2B);
+      deleteAndWaitWithFamily(row1, f2Name, htab1B, htab3B);
+
+      // cf 'f3' of tableB can only be replicated to cluster2
+      putAndWaitWithFamily(row1, f3Name, htab1B, htab2B);
+      ensureRowNotReplicated(row1, f3Name, htab3B);
+      deleteAndWaitWithFamily(row1, f3Name, htab1B, htab2B);
+
+      // A3. tableC can only be replicated to cluster2
+      putAndWaitWithFamily(row1, f1Name, htab1C, htab2C);
+      ensureRowNotReplicated(row1, f1Name, htab3C);
+      deleteAndWaitWithFamily(row1, f1Name, htab1C, htab2C);
+
+      putAndWaitWithFamily(row1, f2Name, htab1C, htab2C);
+      ensureRowNotReplicated(row1, f2Name, htab3C);
+      deleteAndWaitWithFamily(row1, f2Name, htab1C, htab2C);
+
+      putAndWaitWithFamily(row1, f3Name, htab1C, htab2C);
+      ensureRowNotReplicated(row1, f3Name, htab3C);
+      deleteAndWaitWithFamily(row1, f3Name, htab1C, htab2C);
+
+      // B. change peers' replicable table-cf config
+      replicationAdmin.setPeerTableCFs("2", "TA:f1,f2; TC:f2,f3");
+      replicationAdmin.setPeerTableCFs("3", "TB; TC:f3");
+
+      // B1. cf 'f1' of tableA can only be replicated to cluster2
+      putAndWaitWithFamily(row2, f1Name, htab1A, htab2A);
+      ensureRowNotReplicated(row2, f1Name, htab3A);
+      deleteAndWaitWithFamily(row2, f1Name, htab1A, htab2A);
+      // cf 'f2' of tableA can only be replicated to cluster2
+      putAndWaitWithFamily(row2, f2Name, htab1A, htab2A);
+      ensureRowNotReplicated(row2, f2Name, htab3A);
+      deleteAndWaitWithFamily(row2, f2Name, htab1A, htab2A);
+      // cf 'f3' of tableA isn't replicable to either cluster2 or cluster3
+      putAndWaitWithFamily(row2, f3Name, htab1A);
+      ensureRowNotReplicated(row2, f3Name, htab2A, htab3A);
+      deleteAndWaitWithFamily(row2, f3Name, htab1A);
+
+      // B2. tableB can only be replicated to cluster3
+      putAndWaitWithFamily(row2, f1Name, htab1B, htab3B);
+      ensureRowNotReplicated(row2, f1Name, htab2B);
+      deleteAndWaitWithFamily(row2, f1Name, htab1B, htab3B);
+
+      putAndWaitWithFamily(row2, f2Name, htab1B, htab3B);
+      ensureRowNotReplicated(row2, f2Name, htab2B);
+      deleteAndWaitWithFamily(row2, f2Name, htab1B, htab3B);
+
+      putAndWaitWithFamily(row2, f3Name, htab1B, htab3B);
+      ensureRowNotReplicated(row2, f3Name, htab2B);
+      deleteAndWaitWithFamily(row2, f3Name, htab1B, htab3B);
+
+      // B3. cf 'f1' of tableC is non-replicable to either cluster
+      putAndWaitWithFamily(row2, f1Name, htab1C);
+      ensureRowNotReplicated(row2, f1Name, htab2C, htab3C);
+      deleteAndWaitWithFamily(row2, f1Name, htab1C);
+      // cf 'f2' of tableC can only be replicated to cluster2
+      putAndWaitWithFamily(row2, f2Name, htab1C, htab2C);
+      ensureRowNotReplicated(row2, f2Name, htab3C);
+      deleteAndWaitWithFamily(row2, f2Name, htab1C, htab2C);
+      // cf 'f3' of tableC can be replicated to cluster2 and cluster3
+      putAndWaitWithFamily(row2, f3Name, htab1C, htab2C, htab3C);
+      deleteAndWaitWithFamily(row2, f3Name, htab1C, htab2C, htab3C);
+    } finally {
+      connection1.close();
+      connection2.close();
+      connection3.close();
+    }
   }

   private void ensureRowNotReplicated(byte[] row, byte[] fam, Table... tables) throws IOException {
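Since Connection implements Closeable, the three connections opened above could equally be released with try-with-resources instead of the explicit finally block; a minimal sketch under that assumption (not part of the patch):

    try (Connection connection1 = ConnectionFactory.createConnection(conf1);
         Connection connection2 = ConnectionFactory.createConnection(conf2);
         Connection connection3 = ConnectionFactory.createConnection(conf3)) {
      Table htab1A = connection1.getTable(tabAName);
      // ... create tables, add peers and run the replication assertions as above ...
    }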
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStateZKImpl.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStateZKImpl.java
index 0971d8c..f8060ba 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStateZKImpl.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStateZKImpl.java
@@ -17,6 +17,9 @@
  */
 package org.apache.hadoop.hbase.replication;

+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
 import java.io.IOException;

 import org.apache.commons.logging.Log;
@@ -28,7 +31,7 @@ import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.client.HConnection;
+import org.apache.hadoop.hbase.client.ClusterConnection;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.testclassification.ReplicationTests;
 import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
@@ -38,12 +41,9 @@ import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
 import org.apache.zookeeper.KeeperException;
 import org.junit.After;
 import org.junit.AfterClass;
-import org.junit.Test;
-
-import static org.junit.Assert.*;
-
 import org.junit.Before;
 import org.junit.BeforeClass;
+import org.junit.Test;
 import org.junit.experimental.categories.Category;

 @Category({ReplicationTests.class, MediumTests.class})
@@ -149,7 +149,7 @@ public class TestReplicationStateZKImpl extends TestReplicationStateBasic {
     }

     @Override
-    public HConnection getShortCircuitConnection() {
+    public ClusterConnection getConnection() {
       return null;
     }
@@ -184,5 +184,4 @@ public class TestReplicationStateZKImpl extends TestReplicationStateBasic {
       return this.isStopped;
     }
   }
-}
-
+}
\ No newline at end of file
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationTrackerZKImpl.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationTrackerZKImpl.java
index 73a631e..72cbf8f 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationTrackerZKImpl.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationTrackerZKImpl.java
@@ -17,6 +17,10 @@
  */
 package org.apache.hadoop.hbase.replication;

+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
 import java.util.ArrayList;
 import java.util.List;
 import java.util.concurrent.atomic.AtomicInteger;
@@ -25,12 +29,12 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.ClusterId;
+import org.apache.hadoop.hbase.CoordinatedStateManager;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.CoordinatedStateManager;
-import org.apache.hadoop.hbase.client.HConnection;
+import org.apache.hadoop.hbase.client.ClusterConnection;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.testclassification.ReplicationTests;
 import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
@@ -38,13 +42,9 @@ import org.apache.hadoop.hbase.zookeeper.ZKClusterId;
 import org.apache.hadoop.hbase.zookeeper.ZKUtil;
 import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
 import org.junit.AfterClass;
-import org.junit.Test;
-import org.junit.Ignore;
-
-import static org.junit.Assert.*;
-
 import org.junit.Before;
 import org.junit.BeforeClass;
+import org.junit.Test;
 import org.junit.experimental.categories.Category;

 /**
@@ -250,7 +250,7 @@ public class TestReplicationTrackerZKImpl {
     }

     @Override
-    public HConnection getShortCircuitConnection() {
+    public ClusterConnection getConnection() {
       return null;
     }
@@ -285,5 +285,4 @@ public class TestReplicationTrackerZKImpl {
       return this.isStopped;
     }
   }
-}
-
+}
\ No newline at end of file
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java
index ff6a079..f745f8c 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java
@@ -50,6 +50,8 @@ import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.CoordinatedStateManager;
 import org.apache.hadoop.hbase.client.HConnection;
+import org.apache.hadoop.hbase.client.ClusterConnection;
+import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.regionserver.wal.WALActionsListener;
 import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
 import org.apache.hadoop.hbase.wal.WAL;
@@ -115,7 +117,7 @@ public class TestReplicationSourceManager {
   private static Path oldLogDir;

   private static Path logDir;
-  
+
   private static CountDownLatch latch;

   private static List<String> files = new ArrayList<String>();
@@ -154,7 +156,7 @@ public class TestReplicationSourceManager {
         HConstants.HREGION_LOGDIR_NAME);
     replication = new Replication(new DummyServer(), fs, logDir, oldLogDir);
     manager = replication.getReplicationManager();
-  
+
     manager.addSource(slaveId);

     htd = new HTableDescriptor(test);
@@ -243,7 +245,7 @@ public class TestReplicationSourceManager {
     // TODO Need a case with only 2 WALs and we only want to delete the first one
   }
-  
+
   @Test
   public void testClaimQueues() throws Exception {
     LOG.debug("testNodeFailoverWorkerCopyQueuesFromRSUsingMulti");
@@ -286,7 +288,7 @@ public class TestReplicationSourceManager {
     assertEquals(1, populatedMap);
     server.abort("", null);
   }
-  
+
   @Test
   public void testCleanupFailoverQueues() throws Exception {
     final Server server = new DummyServer("hostname1.example.org");
@@ -366,8 +368,8 @@ public class TestReplicationSourceManager {
     server.abort("", null);
   }
-  
-  
+
+
   static class DummyNodeFailoverWorker extends Thread {
     private SortedMap<String, SortedSet<String>> logZnodesMap;
     Server server;
@@ -416,7 +418,7 @@ public class TestReplicationSourceManager {
       return 0;
     }
   }
-  
+
   static class DummyServer implements Server {
     String hostname;
@@ -443,7 +445,7 @@ public class TestReplicationSourceManager {
       return null;
     }

     @Override
-    public HConnection getShortCircuitConnection() {
+    public ClusterConnection getConnection() {
       return null;
     }
@@ -477,6 +479,4 @@ public class TestReplicationSourceManager {
       return false; // To change body of implemented methods use File | Settings | File Templates.
     }
   }
- }
-
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
index 76aa656..b1a6ccd 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
@@ -27,8 +27,6 @@ import static org.junit.Assert.fail;
 import java.io.IOException;
 import java.security.PrivilegedAction;
 import java.util.List;
-import java.util.Map;
-import java.util.NavigableMap;

 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -44,6 +42,7 @@ import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HRegionLocation;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.MiniHBaseCluster;
@@ -61,6 +60,7 @@ import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.Increment;
 import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.RegionLocator;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.ResultScanner;
 import org.apache.hadoop.hbase.client.Scan;
@@ -436,20 +436,19 @@ public class TestAccessController extends SecureTestUtil {

   @Test
   public void testMove() throws Exception {
-    Map<HRegionInfo, ServerName> regions;
-    HTable table = new HTable(TEST_UTIL.getConfiguration(), TEST_TABLE.getTableName());
-    try {
-      regions = table.getRegionLocations();
-    } finally {
-      table.close();
+    List<HRegionLocation> regions;
+    try (RegionLocator locator =
+        TEST_UTIL.getConnection().getRegionLocator(TEST_TABLE.getTableName())) {
+      regions = locator.getAllRegionLocations();
     }
-    final Map.Entry<HRegionInfo, ServerName> firstRegion = regions.entrySet().iterator().next();
-    final ServerName server = TEST_UTIL.getHBaseCluster().getRegionServer(0).getServerName();
+    HRegionLocation location = regions.get(0);
+    final HRegionInfo hri = location.getRegionInfo();
+    final ServerName server = location.getServerName();
     AccessTestAction action = new AccessTestAction() {
       @Override
       public Object run() throws Exception {
         ACCESS_CONTROLLER.preMove(ObserverContext.createAndPrepare(CP_ENV, null),
-          firstRegion.getKey(), server, server);
+          hri, server, server);
         return null;
       }
     };
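The access-controller tests below all make the same substitution: the deprecated HTable.getRegionLocations() map gives way to RegionLocator. Distilled from the hunk above, the replacement shape is:

    // Locate a table's first region without constructing an HTable.
    List<HRegionLocation> regions;
    try (RegionLocator locator =
        TEST_UTIL.getConnection().getRegionLocator(TEST_TABLE.getTableName())) {
      regions = locator.getAllRegionLocations();
    }
    HRegionLocation location = regions.get(0);
    HRegionInfo hri = location.getRegionInfo();
    ServerName server = location.getServerName();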
@@ -460,20 +459,17 @@ public class TestAccessController extends SecureTestUtil {

   @Test
   public void testAssign() throws Exception {
-    Map<HRegionInfo, ServerName> regions;
-    HTable table = new HTable(TEST_UTIL.getConfiguration(), TEST_TABLE.getTableName());
-    try {
-      regions = table.getRegionLocations();
-    } finally {
-      table.close();
+    List<HRegionLocation> regions;
+    try (RegionLocator locator =
+        TEST_UTIL.getConnection().getRegionLocator(TEST_TABLE.getTableName())) {
+      regions = locator.getAllRegionLocations();
     }
-    final Map.Entry<HRegionInfo, ServerName> firstRegion = regions.entrySet().iterator().next();
-
+    HRegionLocation location = regions.get(0);
+    final HRegionInfo hri = location.getRegionInfo();
     AccessTestAction action = new AccessTestAction() {
       @Override
       public Object run() throws Exception {
-        ACCESS_CONTROLLER.preAssign(ObserverContext.createAndPrepare(CP_ENV, null),
-          firstRegion.getKey());
+        ACCESS_CONTROLLER.preAssign(ObserverContext.createAndPrepare(CP_ENV, null), hri);
         return null;
       }
     };
@@ -484,20 +480,17 @@ public class TestAccessController extends SecureTestUtil {

   @Test
   public void testUnassign() throws Exception {
-    Map<HRegionInfo, ServerName> regions;
-    HTable table = new HTable(TEST_UTIL.getConfiguration(), TEST_TABLE.getTableName());
-    try {
-      regions = table.getRegionLocations();
-    } finally {
-      table.close();
+    List<HRegionLocation> regions;
+    try (RegionLocator locator =
+        TEST_UTIL.getConnection().getRegionLocator(TEST_TABLE.getTableName())) {
+      regions = locator.getAllRegionLocations();
     }
-    final Map.Entry<HRegionInfo, ServerName> firstRegion = regions.entrySet().iterator().next();
-
+    HRegionLocation location = regions.get(0);
+    final HRegionInfo hri = location.getRegionInfo();
     AccessTestAction action = new AccessTestAction() {
       @Override
       public Object run() throws Exception {
-        ACCESS_CONTROLLER.preUnassign(ObserverContext.createAndPrepare(CP_ENV, null),
-          firstRegion.getKey(), false);
+        ACCESS_CONTROLLER.preUnassign(ObserverContext.createAndPrepare(CP_ENV, null), hri, false);
         return null;
       }
     };
@@ -508,20 +501,17 @@ public class TestAccessController extends SecureTestUtil {

   @Test
   public void testRegionOffline() throws Exception {
-    Map<HRegionInfo, ServerName> regions;
-    HTable table = new HTable(TEST_UTIL.getConfiguration(), TEST_TABLE.getTableName());
-    try {
-      regions = table.getRegionLocations();
-    } finally {
-      table.close();
+    List<HRegionLocation> regions;
+    try (RegionLocator locator =
+        TEST_UTIL.getConnection().getRegionLocator(TEST_TABLE.getTableName())) {
+      regions = locator.getAllRegionLocations();
     }
-    final Map.Entry<HRegionInfo, ServerName> firstRegion = regions.entrySet().iterator().next();
-
+    HRegionLocation location = regions.get(0);
+    final HRegionInfo hri = location.getRegionInfo();
     AccessTestAction action = new AccessTestAction() {
       @Override
       public Object run() throws Exception {
-        ACCESS_CONTROLLER.preRegionOffline(ObserverContext.createAndPrepare(CP_ENV, null),
-          firstRegion.getKey());
+        ACCESS_CONTROLLER.preRegionOffline(ObserverContext.createAndPrepare(CP_ENV, null), hri);
         return null;
       }
     };
@@ -922,14 +912,12 @@ public class TestAccessController extends SecureTestUtil {
     //set global read so RegionServer can move it
     setPermission(loadPath, FsPermission.valueOf("-rwxrwxrwx"));

-    HTable table = new HTable(conf, tableName);
-    try {
-      Admin admin = TEST_UTIL.getHBaseAdmin();
-      TEST_UTIL.waitTableEnabled(admin, tableName.getName());
-      LoadIncrementalHFiles loader = new LoadIncrementalHFiles(conf);
-      loader.doBulkLoad(loadPath, table);
-    } finally {
-      table.close();
+    try (HTable table = (HTable)TEST_UTIL.getConnection().getTable(tableName)) {
+      try (Admin admin = TEST_UTIL.getHBaseAdmin()) {
+        TEST_UTIL.waitTableEnabled(admin, tableName.getName());
+        LoadIncrementalHFiles loader = new LoadIncrementalHFiles(conf);
+        loader.doBulkLoad(loadPath, table);
+      }
     }
   }
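The bulk-load hunk shows the general Table-acquisition change in this patch: tables come from the shared Connection and are closed by try-with-resources rather than being built from a Configuration. A minimal sketch (not part of the patch; doBulkLoad still wants the HTable subtype, hence the cast):

    try (HTable table = (HTable) TEST_UTIL.getConnection().getTable(tableName)) {
      LoadIncrementalHFiles loader = new LoadIncrementalHFiles(conf);
      loader.doBulkLoad(loadPath, table);
    }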
@@ -1988,18 +1976,19 @@ public class TestAccessController extends SecureTestUtil {
     final HRegionServer newRs = newRsThread.getRegionServer();

     // Move region to the new RegionServer.
-    final HTable table = new HTable(TEST_UTIL.getConfiguration(), TEST_TABLE2);
-    try {
-      NavigableMap<HRegionInfo, ServerName> regions = table
-          .getRegionLocations();
-      final Map.Entry<HRegionInfo, ServerName> firstRegion = regions.entrySet()
-          .iterator().next();
-
+    List<HRegionLocation> regions;
+    try (RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(TEST_TABLE2)) {
+      regions = locator.getAllRegionLocations();
+    }
+    HRegionLocation location = regions.get(0);
+    final HRegionInfo hri = location.getRegionInfo();
+    final ServerName server = location.getServerName();
+    try (HTable table = (HTable)TEST_UTIL.getConnection().getTable(TEST_TABLE2)) {
       AccessTestAction moveAction = new AccessTestAction() {
         @Override
         public Object run() throws Exception {
-          admin.move(firstRegion.getKey().getEncodedNameAsBytes(),
-            Bytes.toBytes(newRs.getServerName().getServerName()));
+          admin.move(hri.getEncodedNameAsBytes(),
+            Bytes.toBytes(newRs.getServerName().getServerName()));
           return null;
         }
       };
@@ -2031,8 +2020,6 @@ public class TestAccessController extends SecureTestUtil {
         }
       };
       USER_ADMIN.runAs(putAction);
-    } finally {
-      table.close();
     }
   }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController2.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController2.java
index c41e977..2bde357 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController2.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController2.java
@@ -17,7 +17,9 @@
  */
 package org.apache.hadoop.hbase.security.access;

-import static org.junit.Assert.*;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;

 import java.util.List;

@@ -26,7 +28,8 @@ import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.client.Admin;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Result;
@@ -77,7 +80,7 @@ public class TestAccessController2 extends SecureTestUtil {
   @Test
   public void testCreateWithCorrectOwner() throws Exception {
     // Create a test user
-    User testUser = User.createUserForTesting(TEST_UTIL.getConfiguration(), "TestUser",
+    final User testUser = User.createUserForTesting(TEST_UTIL.getConfiguration(), "TestUser",
       new String[0]);
     // Grant the test user the ability to create tables
     SecureTestUtil.grantGlobal(TEST_UTIL, testUser.getShortName(), Action.CREATE);
@@ -86,11 +89,11 @@ public class TestAccessController2 extends SecureTestUtil {
       public Object run() throws Exception {
         HTableDescriptor desc = new HTableDescriptor(TEST_TABLE.getTableName());
         desc.addFamily(new HColumnDescriptor(TEST_FAMILY));
-        Admin admin = new HBaseAdmin(conf);
-        try {
-          admin.createTable(desc);
-        } finally {
-          admin.close();
+        try (Connection connection =
+            ConnectionFactory.createConnection(TEST_UTIL.getConfiguration(), testUser)) {
+          try (Admin admin = connection.getAdmin()) {
+            admin.createTable(desc);
+          }
         }
         return null;
       }
@@ -98,7 +101,8 @@ public class TestAccessController2 extends SecureTestUtil {
     TEST_UTIL.waitTableEnabled(TEST_TABLE.getTableName());
     // Verify that owner permissions have been granted to the test user on the
     // table just created
-    List<TablePermission> perms = AccessControlLists.getTablePermissions(conf, TEST_TABLE.getTableName())
+    List<TablePermission> perms =
+        AccessControlLists.getTablePermissions(conf, TEST_TABLE.getTableName())
         .get(testUser.getShortName());
     assertNotNull(perms);
     assertFalse(perms.isEmpty());
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestTokenAuthentication.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestTokenAuthentication.java
index ce143ab..6a62071 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestTokenAuthentication.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestTokenAuthentication.java
@@ -17,7 +17,7 @@
  */
 package org.apache.hadoop.hbase.security.token;

-import static org.apache.hadoop.fs.CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION;
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHORIZATION;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;

@@ -40,7 +40,7 @@ import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.client.HConnection;
+import org.apache.hadoop.hbase.client.ClusterConnection;
 import org.apache.hadoop.hbase.client.HTableInterface;
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
 import org.apache.hadoop.hbase.ipc.BlockingRpcCallback;
@@ -145,7 +145,7 @@ public class TestTokenAuthentication {
     }

     @Override
-    public HConnection getShortCircuitConnection() {
+    public ClusterConnection getConnection() {
       return null;
     }
@@ -316,7 +316,6 @@ public class TestTokenAuthentication {
     }
   }
-
   private static HBaseTestingUtility TEST_UTIL;

   private static TokenServer server;
   private static Thread serverThread;
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MockServer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MockServer.java
index ca83eb2..018a417 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MockServer.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MockServer.java
@@ -27,7 +27,7 @@ import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.ZooKeeperConnectionException;
-import org.apache.hadoop.hbase.client.HConnection;
+import org.apache.hadoop.hbase.client.ClusterConnection;
 import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
 import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
@@ -102,7 +102,7 @@ public class MockServer implements Server {
   }

   @Override
-  public HConnection getShortCircuitConnection() {
+  public ClusterConnection getConnection() {
     return null;
   }
@@ -121,4 +121,4 @@ public class MockServer implements Server {
     // TODO Auto-generated method stub
     return this.aborted;
   }
-}
+}
\ No newline at end of file
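Taken together, the test changes converge on one client idiom: a heavyweight Connection created once via ConnectionFactory, with lightweight Table, Admin and RegionLocator instances handed out per use and closed promptly. A minimal end-to-end sketch of that idiom, assuming 'conf' and 'tableName' are in scope (not part of the patch):

    try (Connection connection = ConnectionFactory.createConnection(conf)) {
      try (Admin admin = connection.getAdmin()) {
        // administrative calls, e.g. admin.tableExists(tableName)
      }
      try (Table table = connection.getTable(tableName)) {
        // data path calls, e.g. table.get(new Get(row))
      }
      try (RegionLocator locator = connection.getRegionLocator(tableName)) {
        List<HRegionLocation> locations = locator.getAllRegionLocations();
        int regionCount = locations == null ? 0 : locations.size();
      }
    }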