diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java
index 3282838..7aa330c 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java
@@ -34,11 +34,13 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.Mutation;
 import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.RegionLocator;
 import org.apache.hadoop.hbase.client.RegionReplicaUtil;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.ResultScanner;
@@ -173,14 +175,13 @@ public class MetaTableAccessor {
    * @throws IOException
    * @SuppressWarnings("deprecation")
    */
-  private static Table getHTable(final Connection connection,
-      final TableName tableName)
+  private static Table getHTable(final Connection connection, final TableName tableName)
   throws IOException {
     // We used to pass whole CatalogTracker in here, now we just pass in Connection
     if (connection == null || connection.isClosed()) {
       throw new NullPointerException("No connection");
     }
-    return new HTable(tableName, connection);
+    return connection.getTable(tableName);
   }
 
   /**
@@ -867,31 +868,30 @@ public class MetaTableAccessor {
 
   /**
    * Count regions in hbase:meta for passed table.
-   * @param c Configuration object
+   * Do not use if you can. Use {@link #getRegionCount(Connection, TableName)} instead.
+   * @param configuration
    * @param tableName table name to count regions for
    * @return Count or regions in table tableName
    * @throws IOException
    */
-  @Deprecated
-  public static int getRegionCount(final Configuration c, final String tableName)
-  throws IOException {
-    return getRegionCount(c, TableName.valueOf(tableName));
+  public static int getRegionCount(final Configuration configuration, final TableName tableName)
+  throws IOException {
+    try (Connection connection = ConnectionFactory.createConnection(configuration)) {
+      return getRegionCount(connection, tableName);
+    }
   }
 
   /**
    * Count regions in hbase:meta for passed table.
-   * @param c Configuration object
+   * @param connection
    * @param tableName table name to count regions for
    * @return Count or regions in table tableName
    * @throws IOException
    */
-  public static int getRegionCount(final Configuration c, final TableName tableName)
-  throws IOException {
-    HTable t = new HTable(c, tableName);
-    try {
-      return t.getRegionLocations().size();
-    } finally {
-      t.close();
+  public static int getRegionCount(final Connection connection, final TableName tableName)
+  throws IOException {
+    try (RegionLocator locator = connection.getRegionLocator(tableName)) {
+      return locator.getAllRegionLocations().size();
     }
   }
 
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
index ae2b827..69b33d0 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
@@ -595,7 +595,7 @@ public interface Admin extends Abortable, Closeable {
 
   /**
    * Compact all regions on the region server
-   * @param regionserver the region server name
+   * @param sn the region server name
    * @param major if it's major compaction
    * @throws IOException
    * @throws InterruptedException
@@ -1289,7 +1289,7 @@ public interface Admin extends Abortable, Closeable {
    * @return A RegionServerCoprocessorRpcChannel instance
    */
   CoprocessorRpcChannel coprocessorService(ServerName sn);
-  
+
   /**
    * Update the configuration and trigger an online config change
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionFactory.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionFactory.java
index 374ce28..b489af2 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionFactory.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionFactory.java
@@ -35,7 +35,7 @@ import org.apache.hadoop.hbase.security.UserProvider;
  * A non-instantiable class that manages creation of {@link Connection}s.
  * Managing the lifecycle of the {@link Connection}s to the cluster is the responsibility of
  * the caller.
- * From this {@link Connection} {@link Table} implementations are retrieved
+ * From a {@link Connection}, {@link Table} implementations are retrieved
  * with {@link Connection#getTable(TableName)}. Example:
  *
  * Connection connection = ConnectionFactory.createConnection(config);
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java
index b3a6295..b5cef3d 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java
@@ -180,7 +180,7 @@ import com.google.protobuf.RpcController;
 import com.google.protobuf.ServiceException;
 
 /**
- * An internal, A non-instantiable class that manages creation of {@link HConnection}s.
+ * An internal, non-instantiable class that manages creation of {@link HConnection}s.
  */
 @SuppressWarnings("serial")
 @InterfaceAudience.Private
@@ -774,16 +774,7 @@ class ConnectionManager {
      * @throws IOException
      */
     private Registry setupRegistry() throws IOException {
-      String registryClass = this.conf.get("hbase.client.registry.impl",
-        ZooKeeperRegistry.class.getName());
-      Registry registry = null;
-      try {
-        registry = (Registry)Class.forName(registryClass).newInstance();
-      } catch (Throwable t) {
-        throw new IOException(t);
-      }
-      registry.init(this);
-      return registry;
+      return RegistryFactory.getRegistry(this);
     }
 
     /**
@@ -1010,8 +1001,8 @@ class ConnectionManager {
     @Override
     public List<HRegionLocation> locateRegions(final TableName tableName,
         final boolean useCache, final boolean offlined) throws IOException {
-      NavigableMap<HRegionInfo, ServerName> regions = MetaScanner.allTableRegions(conf, this,
-          tableName);
+      NavigableMap<HRegionInfo, ServerName> regions =
+        MetaScanner.allTableRegions(conf, this, tableName);
       final List<HRegionLocation> locations = new ArrayList<HRegionLocation>();
       for (HRegionInfo regionInfo : regions.keySet()) {
         RegionLocations list = locateRegion(tableName, regionInfo.getStartKey(), useCache, true);
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java
index e26ae48..88a3c59 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java
@@ -111,7 +111,7 @@ public class ConnectionUtils {
    * @param client the client interface of the local server
    * @return an adapted/decorated HConnection
    */
-  public static HConnection createShortCircuitHConnection(final HConnection conn,
+  public static HConnection createShortCircuitHConnection(final Connection conn,
       final ServerName serverName, final AdminService.BlockingInterface admin,
       final ClientService.BlockingInterface client) {
     return new ConnectionAdapter(conn) {
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
index aec5ae8..cb72a88 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
@@ -83,7 +83,6 @@ import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RollWALWriterReque
 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RollWALWriterResponse;
 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.StopServerRequest;
 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateConfigurationRequest;
-import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ClientService;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription;
@@ -3734,7 +3733,7 @@ public class HBaseAdmin implements Admin {
   public CoprocessorRpcChannel coprocessorService(ServerName sn) {
     return new RegionServerCoprocessorRpcChannel(connection, sn);
   }
-  
+
   @Override
   public void updateConfiguration(ServerName server) throws IOException {
     try {
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
index c3a94e3..dbaaffa 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
@@ -80,24 +80,23 @@ import com.google.protobuf.Service;
 import com.google.protobuf.ServiceException;
 
 /**
- *
- * HTable is no longer a client API. It is marked InterfaceAudience.Private indicating that
- * this is an HBase-internal class as defined in
- * https://hadoop.apache.org/docs/current/hadoop-project-dist/hadoop-common/InterfaceClassification.html
- * There are no guarantees for backwards source / binary compatibility and methods or class can
- * change or go away without deprecation. Use {@link Connection#getTable(TableName)}
- * to obtain an instance of {@link Table} instead of constructing an HTable directly.
- * <p>An implementation of {@link Table}. Used to communicate with a single HBase table.
+ * An implementation of {@link Table}. Used to communicate with a single HBase table.
  * Lightweight. Get as needed and just close when done.
  * Instances of this class SHOULD NOT be constructed directly.
  * Obtain an instance via {@link Connection}. See {@link ConnectionFactory}
  * class comment for an example of how.
  *
- * <p>This class is NOT thread safe for reads nor write.
+ * <p>This class is NOT thread safe for reads nor writes.
  * In the case of writes (Put, Delete), the underlying write buffer can
  * be corrupted if multiple threads contend over a single HTable instance.
  * In the case of reads, some fields used by a Scan are shared among all threads.
  *
+ * <p>

HTable is no longer a client API. Use {@link Table} instead. It is marked + * InterfaceAudience.Private indicating that this is an HBase-internal class as defined in + * Hadoop Interface Classification + * There are no guarantees for backwards source / binary compatibility and methods or class can + * change or go away without deprecation. + * * @see Table * @see Admin * @see Connection @@ -291,6 +290,8 @@ public class HTable implements HTableInterface, RegionLocator { /** * Creates an object to access a HBase table. + * Used by HBase internally. DO NOT USE. See {@link ConnectionFactory} class comment for how to + * get a {@link Table} instance (use {@link Table} instead of {@link HTable}). * @param tableName Name of the table. * @param connection HConnection to be used. * @param pool ExecutorService to be used. @@ -1799,11 +1800,11 @@ public class HTable implements HTableInterface, RegionLocator { * @throws IOException */ public static void main(String[] args) throws IOException { - Table t = new HTable(HBaseConfiguration.create(), args[0]); - try { - System.out.println(t.get(new Get(Bytes.toBytes(args[1])))); - } finally { - t.close(); + Configuration conf = HBaseConfiguration.create(); + try (Connection connection = ConnectionFactory.createConnection(conf)) { + try (Table table = connection.getTable(TableName.valueOf(args[0]))) { + System.out.println(table.get(new Get(Bytes.toBytes(args[1])))); + } } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MetaScanner.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MetaScanner.java index 5312dfb..6dd53ec 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MetaScanner.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MetaScanner.java @@ -41,6 +41,8 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.ExceptionUtil; +import com.google.common.annotations.VisibleForTesting; + /** * Scanner class that contains the hbase:meta table scanning logic. * Provided visitors will be called for each row. @@ -59,13 +61,16 @@ public class MetaScanner { /** * Scans the meta table and calls a visitor on each RowResult and uses a empty * start row value as table name. + * + *

Visible for testing. Use {@link + * #metaScan(Configuration, Connection, MetaScannerVisitor, TableName)} instead. * * @param configuration conf * @param visitor A custom visitor * @throws IOException e */ - public static void metaScan(Configuration configuration, - MetaScannerVisitor visitor) + @VisibleForTesting // Do not use. Used by tests only and hbck. + public static void metaScan(Configuration configuration, MetaScannerVisitor visitor) throws IOException { metaScan(configuration, visitor, null, null, Integer.MAX_VALUE); } @@ -91,6 +96,9 @@ public class MetaScanner { * Scans the meta table and calls a visitor on each RowResult. Uses a table * name and a row name to locate meta regions. And it only scans at most * rowLimit of rows. + * + *

Visible for testing. Use {@link + * #metaScan(Configuration, Connection, MetaScannerVisitor, TableName)} instead. * * @param configuration HBase configuration. * @param visitor Visitor object. @@ -102,12 +110,12 @@ public class MetaScanner { * will be set to default value Integer.MAX_VALUE. * @throws IOException e */ + @VisibleForTesting // Do not use. Used by Master but by a method that is used testing. public static void metaScan(Configuration configuration, MetaScannerVisitor visitor, TableName userTableName, byte[] row, int rowLimit) throws IOException { - metaScan(configuration, null, visitor, userTableName, row, rowLimit, - TableName.META_TABLE_NAME); + metaScan(configuration, null, visitor, userTableName, row, rowLimit, TableName.META_TABLE_NAME); } /** @@ -133,7 +141,7 @@ public class MetaScanner { throws IOException { boolean closeConnection = false; - if (connection == null){ + if (connection == null) { connection = ConnectionFactory.createConnection(configuration); closeConnection = true; } @@ -141,16 +149,16 @@ public class MetaScanner { int rowUpperLimit = rowLimit > 0 ? rowLimit: Integer.MAX_VALUE; // Calculate startrow for scan. byte[] startRow; - ResultScanner scanner = null; - HTable metaTable = null; - try { - metaTable = new HTable(TableName.META_TABLE_NAME, connection, null); + try (Table metaTable = connection.getTable(TableName.META_TABLE_NAME)) { if (row != null) { // Scan starting at a particular row in a particular table byte[] searchRow = HRegionInfo.createRegionName(tableName, row, HConstants.NINES, false); - - Result startRowResult = metaTable.getRowOrBefore(searchRow, HConstants.CATALOG_FAMILY); - + Scan scan = + Scan.createGetClosestRowOrBeforeReverseScan(searchRow, HConstants.CATALOG_FAMILY); + Result startRowResult = null; + try (ResultScanner resultScanner = metaTable.getScanner(scan)) { + startRowResult = resultScanner.next(); + } if (startRowResult == null) { throw new TableNotFoundException("Cannot find row in "+ TableName .META_TABLE_NAME.getNameAsString()+" for table: " @@ -184,25 +192,18 @@ public class MetaScanner { Bytes.toStringBinary(startRow) + " for max=" + rowUpperLimit + " with caching=" + rows); } // Run the scan - scanner = metaTable.getScanner(scan); - Result result; - int processedRows = 0; - while ((result = scanner.next()) != null) { - if (visitor != null) { - if (!visitor.processRow(result)) break; + try (ResultScanner resultScanner = metaTable.getScanner(scan)) { + Result result; + int processedRows = 0; + while ((result = resultScanner.next()) != null) { + if (visitor != null) { + if (!visitor.processRow(result)) break; + } + processedRows++; + if (processedRows >= rowUpperLimit) break; } - processedRows++; - if (processedRows >= rowUpperLimit) break; } } finally { - if (scanner != null) { - try { - scanner.close(); - } catch (Throwable t) { - ExceptionUtil.rethrowIfInterrupt(t); - LOG.debug("Got exception in closing the result scanner", t); - } - } if (visitor != null) { try { visitor.close(); @@ -211,16 +212,8 @@ public class MetaScanner { LOG.debug("Got exception in closing the meta scanner visitor", t); } } - if (metaTable != null) { - try { - metaTable.close(); - } catch (Throwable t) { - ExceptionUtil.rethrowIfInterrupt(t); - LOG.debug("Got exception in closing meta table", t); - } - } if (closeConnection) { - connection.close(); + if (connection != null) connection.close(); } } } @@ -246,6 +239,7 @@ public class MetaScanner { * @return List of all user-space regions. 
* @throws IOException */ + @VisibleForTesting // And for hbck. public static List listAllRegions(Configuration conf, final boolean offlined) throws IOException { final List regions = new ArrayList(); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionLocator.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionLocator.java index 8168fe1..754beb0 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionLocator.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionLocator.java @@ -30,7 +30,7 @@ import org.apache.hadoop.hbase.util.Pair; /** * Used to view region location information for a single HBase table. - * Obtain an instance from an {@link HConnection}. + * Obtain an instance from a {@link Connection}. * * @see ConnectionFactory * @see Connection @@ -98,4 +98,4 @@ public interface RegionLocator extends Closeable { * Gets the fully qualified table name instance of this table. */ TableName getName(); -} +} \ No newline at end of file diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Registry.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Registry.java index c6ed801..412e4fa 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Registry.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Registry.java @@ -20,11 +20,14 @@ package org.apache.hadoop.hbase.client; import java.io.IOException; import org.apache.hadoop.hbase.RegionLocations; +import org.apache.hadoop.hbase.classification.InterfaceAudience; /** * Cluster registry. - * Implemenations hold cluster information such as this cluster's id, location of hbase:meta, etc. + * Implementations hold cluster information such as this cluster's id, location of hbase:meta, etc. + * Internal use only. */ +@InterfaceAudience.Private interface Registry { /** * @param connection @@ -47,4 +50,4 @@ interface Registry { * @throws IOException */ int getCurrentNrHRS() throws IOException; -} +} \ No newline at end of file diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegistryFactory.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegistryFactory.java new file mode 100644 index 0000000..dc2cb7c --- /dev/null +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegistryFactory.java @@ -0,0 +1,46 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.client; + +import java.io.IOException; + +import org.apache.hadoop.hbase.classification.InterfaceAudience; + +/** + * Get instance of configured Registry. + */ +@InterfaceAudience.Private +class RegistryFactory { + /** + * @return The cluster registry implementation to use. 
+ * @throws IOException + */ + static Registry getRegistry(final Connection connection) + throws IOException { + String registryClass = connection.getConfiguration().get("hbase.client.registry.impl", + ZooKeeperRegistry.class.getName()); + Registry registry = null; + try { + registry = (Registry)Class.forName(registryClass).newInstance(); + } catch (Throwable t) { + throw new IOException(t); + } + registry.init(connection); + return registry; + } +} \ No newline at end of file diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java index 2aea19f..3a0ce69 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java @@ -52,8 +52,8 @@ import org.apache.hadoop.hbase.util.Bytes; * To scan everything for each row, instantiate a Scan object. *

* To modify scanner caching for just this scan, use {@link #setCaching(int) setCaching}. - * If caching is NOT set, we will use the caching value of the hosting {@link HTable}. See - * {@link HTable#setScannerCaching(int)}. In addition to row caching, it is possible to specify a + * If caching is NOT set, we will use the caching value of the hosting {@link Table}. + * In addition to row caching, it is possible to specify a * maximum result size, using {@link #setMaxResultSize(long)}. When both are used, * single server requests are limited by either number of rows or maximum result size, whichever * limit comes first. @@ -478,7 +478,8 @@ public class Scan extends Query { /** * Set the number of rows for caching that will be passed to scanners. - * If not set, the default setting from {@link HTable#getScannerCaching()} will apply. + * If not set, the Configuration setting {@link HConstants#HBASE_CLIENT_SCANNER_CACHING} will + * apply. * Higher caching values will enable faster scanners but will use more memory. * @param caching the number of rows for caching */ @@ -894,4 +895,19 @@ public class Scan extends Query { return (Scan) super.setIsolationLevel(level); } -} + /** + * Utility that creates a Scan that will do a small scan in reverse from passed row+family + * looking for next closest row. + * @param row + * @param family + * @return An instance of Scan primed with passed row and family to + * scan in reverse for one row only. + */ + static Scan createGetClosestRowOrBeforeReverseScan(byte[] row, byte[] family) { + Scan scan = new Scan(row, family); + scan.setSmall(true); + scan.setReversed(true); + scan.setCaching(1); + return scan; + } +} \ No newline at end of file diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/coprocessor/AggregationClient.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/coprocessor/AggregationClient.java index 85ce4e2..7b7cd16 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/coprocessor/AggregationClient.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/coprocessor/AggregationClient.java @@ -19,6 +19,7 @@ package org.apache.hadoop.hbase.client.coprocessor; +import java.io.Closeable; import java.io.IOException; import java.nio.ByteBuffer; import java.util.ArrayList; @@ -36,7 +37,8 @@ import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.classification.InterfaceAudience; -import org.apache.hadoop.hbase.client.HTable; +import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.ResultScanner; import org.apache.hadoop.hbase.client.Scan; @@ -72,19 +74,32 @@ import com.google.protobuf.Message; *

  • For methods to find maximum, minimum, sum, rowcount, it returns the * parameter type. For average and std, it returns a double value. For row * count, it returns a long value. + *

    Call {@link #close()} when done. */ @InterfaceAudience.Private -public class AggregationClient { - +public class AggregationClient implements Closeable { + // TODO: This class is not used. Move to examples? private static final Log log = LogFactory.getLog(AggregationClient.class); - Configuration conf; + private final Connection connection; /** * Constructor with Conf object * @param cfg */ public AggregationClient(Configuration cfg) { - this.conf = cfg; + try { + // Create a connection on construction. Will use it making each of the calls below. + this.connection = ConnectionFactory.createConnection(cfg); + } catch (IOException e) { + throw new RuntimeException(e); + } + } + + @Override + public void close() throws IOException { + if (this.connection != null && !this.connection.isClosed()) { + this.connection.close(); + } } /** @@ -101,15 +116,9 @@ public class AggregationClient { */ public R max( final TableName tableName, final ColumnInterpreter ci, final Scan scan) - throws Throwable { - Table table = null; - try { - table = new HTable(conf, tableName); + throws Throwable { + try (Table table = connection.getTable(tableName)) { return max(table, ci, scan); - } finally { - if (table != null) { - table.close(); - } } } @@ -196,15 +205,9 @@ public class AggregationClient { */ public R min( final TableName tableName, final ColumnInterpreter ci, final Scan scan) - throws Throwable { - Table table = null; - try { - table = new HTable(conf, tableName); + throws Throwable { + try (Table table = connection.getTable(tableName)) { return min(table, ci, scan); - } finally { - if (table != null) { - table.close(); - } } } @@ -276,15 +279,9 @@ public class AggregationClient { */ public long rowCount( final TableName tableName, final ColumnInterpreter ci, final Scan scan) - throws Throwable { - Table table = null; - try { - table = new HTable(conf, tableName); - return rowCount(table, ci, scan); - } finally { - if (table != null) { - table.close(); - } + throws Throwable { + try (Table table = connection.getTable(tableName)) { + return rowCount(table, ci, scan); } } @@ -350,15 +347,9 @@ public class AggregationClient { */ public S sum( final TableName tableName, final ColumnInterpreter ci, final Scan scan) - throws Throwable { - Table table = null; - try { - table = new HTable(conf, tableName); - return sum(table, ci, scan); - } finally { - if (table != null) { - table.close(); - } + throws Throwable { + try (Table table = connection.getTable(tableName)) { + return sum(table, ci, scan); } } @@ -424,14 +415,8 @@ public class AggregationClient { private Pair getAvgArgs( final TableName tableName, final ColumnInterpreter ci, final Scan scan) throws Throwable { - Table table = null; - try { - table = new HTable(conf, tableName); - return getAvgArgs(table, ci, scan); - } finally { - if (table != null) { - table.close(); - } + try (Table table = connection.getTable(tableName)) { + return getAvgArgs(table, ci, scan); } } @@ -615,14 +600,8 @@ public class AggregationClient { public double std(final TableName tableName, ColumnInterpreter ci, Scan scan) throws Throwable { - Table table = null; - try { - table = new HTable(conf, tableName); - return std(table, ci, scan); - } finally { - if (table != null) { - table.close(); - } + try (Table table = connection.getTable(tableName)) { + return std(table, ci, scan); } } @@ -728,14 +707,8 @@ public class AggregationClient { public R median(final TableName tableName, ColumnInterpreter ci, Scan scan) throws Throwable { - Table table = null; - try { - table = new 
HTable(conf, tableName); - return median(table, ci, scan); - } finally { - if (table != null) { - table.close(); - } + try (Table table = connection.getTable(tableName)) { + return median(table, ci, scan); } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/package-info.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/package-info.java index e808904..cd172b0 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/package-info.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/package-info.java @@ -28,23 +28,26 @@ Provides HBase Client

    Overview

    To administer HBase, create and drop tables, list and alter tables, - use {@link org.apache.hadoop.hbase.client.HBaseAdmin}. Once created, table access is via an instance - of {@link org.apache.hadoop.hbase.client.HTable}. You add content to a table a row at a time. To insert, - create an instance of a {@link org.apache.hadoop.hbase.client.Put} object. Specify value, target column - and optionally a timestamp. Commit your update using {@link org.apache.hadoop.hbase.client.HTable#put(Put)}. - To fetch your inserted value, use {@link org.apache.hadoop.hbase.client.Get}. The Get can be specified to be broad -- get all - on a particular row -- or narrow; i.e. return only a single cell value. After creating an instance of - Get, invoke {@link org.apache.hadoop.hbase.client.HTable#get(Get)}. Use - {@link org.apache.hadoop.hbase.client.Scan} to set up a scanner -- a Cursor- like access. After - creating and configuring your Scan instance, call {@link org.apache.hadoop.hbase.client.HTable#getScanner(Scan)} and then - invoke next on the returned object. Both {@link org.apache.hadoop.hbase.client.HTable#get(Get)} and - {@link org.apache.hadoop.hbase.client.HTable#getScanner(Scan)} return a + use {@link org.apache.hadoop.hbase.client.Admin}. Once created, table access is via an instance + of {@link org.apache.hadoop.hbase.client.Table}. You add content to a table a row at a time. To + insert, create an instance of a {@link org.apache.hadoop.hbase.client.Put} object. Specify value, + target column and optionally a timestamp. Commit your update using + {@link org.apache.hadoop.hbase.client.Table#put(Put)}. + To fetch your inserted value, use {@link org.apache.hadoop.hbase.client.Get}. The Get can be + specified to be broad -- get all on a particular row -- or narrow; i.e. return only a single cell + value. After creating an instance of + Get, invoke {@link org.apache.hadoop.hbase.client.Table#get(Get)}. + +

    Use {@link org.apache.hadoop.hbase.client.Scan} to set up a scanner -- a Cursor- like access. + After creating and configuring your Scan instance, call + {@link org.apache.hadoop.hbase.client.Table#getScanner(Scan)} and then + invoke next on the returned object. Both {@link org.apache.hadoop.hbase.client.Table#get(Get)} + and {@link org.apache.hadoop.hbase.client.Table#getScanner(Scan)} return a {@link org.apache.hadoop.hbase.client.Result}. -A Result is a List of {@link org.apache.hadoop.hbase.KeyValue}s. It has facility for packaging the return -in different formats. - Use {@link org.apache.hadoop.hbase.client.Delete} to remove content. + +

    Use {@link org.apache.hadoop.hbase.client.Delete} to remove content. You can remove individual cells or entire families, etc. Pass it to - {@link org.apache.hadoop.hbase.client.HTable#delete(Delete)} to execute. + {@link org.apache.hadoop.hbase.client.Table#delete(Delete)} to execute.

    Puts, Gets and Deletes take out a lock on the target row for the duration of their operation. Concurrent modifications to a single row are serialized. Gets and scans run concurrently without @@ -68,8 +71,11 @@ in different formats. import java.io.IOException; import org.apache.hadoop.hbase.HBaseConfiguration; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.Get; -import org.apache.hadoop.hbase.client.HTable; +import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.ResultScanner; @@ -87,9 +93,16 @@ public class MyLittleHBaseClient { // be found on the CLASSPATH Configuration config = HBaseConfiguration.create(); - // This instantiates an HTable object that connects you to - // the "myLittleHBaseTable" table. - HTable table = new HTable(config, "myLittleHBaseTable"); + // Next you need a Connection to the cluster. Create one. When done with it, + // close it (Should start a try/finally after this creation so it gets closed + // for sure but leaving this out for readibility's sake). + Connection connection = ConnectionFactory.createConnection(config); + + // This instantiates a Table object that connects you to + // the "myLittleHBaseTable" table (TableName.valueOf turns String into TableName instance). + // When done with it, close it (Should start a try/finally after this creation so it gets + // closed for sure but leaving this out for readibility's sake). + Table table = connection.getTable(TableName.valueOf("myLittleHBaseTable")); // To add to a row, use Put. A Put constructor takes the name of the row // you want to insert into as a byte array. In HBase, the Bytes class has @@ -152,15 +165,19 @@ public class MyLittleHBaseClient { // Thats why we have it inside a try/finally clause scanner.close(); } + + // Close your table and cluster connection. + table.close(); + connection.close(); } }

    There are many other methods for putting data into and getting data out of
-   HBase, but these examples should get you started. See the HTable javadoc for
+   HBase, but these examples should get you started. See the Table javadoc for
    more methods. Additionally, there are methods for managing tables in the
-   HBaseAdmin class.
+   Admin class.

    If your client is NOT Java, then you should consider the Thrift or REST libraries.

    @@ -168,20 +185,14 @@ public class MyLittleHBaseClient {

    Related Documentation

    -

    There are many other methods for putting data into and getting data out of - HBase, but these examples should get you started. See the HTable javadoc for - more methods. Additionally, there are methods for managing tables in the - HBaseAdmin class.

    -

    See also the section in the HBase Reference Guide where it discusses HBase Client. It
-   has section on how to access HBase from inside your multithreaded environtment
+   has section on how to access HBase from inside your multithreaded environment
    how to control resources consumed client-side, etc.

    diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaRetriever.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaRetriever.java index f13ce28..68c8e0a 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaRetriever.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaRetriever.java @@ -30,10 +30,12 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.classification.InterfaceStability; -import org.apache.hadoop.hbase.client.HTable; +import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.ResultScanner; import org.apache.hadoop.hbase.client.Scan; +import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas; import org.apache.hadoop.util.StringUtils; @@ -47,23 +49,40 @@ public class QuotaRetriever implements Closeable, Iterable { private final Queue cache = new LinkedList(); private ResultScanner scanner; - private HTable table; + /** + * Connection to use. + * Could pass one in and have this class use it but this class wants to be standalone. + */ + private Connection connection; + private Table table; private QuotaRetriever() { } void init(final Configuration conf, final Scan scan) throws IOException { - table = new HTable(conf, QuotaTableUtil.QUOTA_TABLE_NAME); + this.connection = ConnectionFactory.createConnection(conf); + this.table = this.connection.getTable(QuotaTableUtil.QUOTA_TABLE_NAME); try { scanner = table.getScanner(scan); } catch (IOException e) { - table.close(); + try { + close(); + } catch (IOException ioe) { + LOG.warn("Failed getting scanner and then failed close on cleanup", e); + } throw e; } } public void close() throws IOException { - table.close(); + if (this.table != null) { + this.table.close(); + this.table = null; + } + if (this.connection != null) { + this.connection.close(); + this.connection = null; + } } public QuotaSettings next() throws IOException { @@ -163,4 +182,4 @@ public class QuotaRetriever implements Closeable, Iterable { scanner.init(conf, scan); return scanner; } -} +} \ No newline at end of file diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaTableUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaTableUtil.java index 6153876..8ea9572 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaTableUtil.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaTableUtil.java @@ -27,15 +27,15 @@ import java.util.regex.Pattern; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.NamespaceDescriptor; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.classification.InterfaceStability; +import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.Get; -import org.apache.hadoop.hbase.client.HTable; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.Scan; +import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.filter.CompareFilter; import org.apache.hadoop.hbase.filter.Filter; import 
org.apache.hadoop.hbase.filter.FilterList; @@ -78,41 +78,42 @@ public class QuotaTableUtil { /* ========================================================================= * Quota "settings" helpers */ - public static Quotas getTableQuota(final Configuration conf, final TableName table) + public static Quotas getTableQuota(final Connection connection, final TableName table) throws IOException { - return getQuotas(conf, getTableRowKey(table)); + return getQuotas(connection, getTableRowKey(table)); } - public static Quotas getNamespaceQuota(final Configuration conf, final String namespace) + public static Quotas getNamespaceQuota(final Connection connection, final String namespace) throws IOException { - return getQuotas(conf, getNamespaceRowKey(namespace)); + return getQuotas(connection, getNamespaceRowKey(namespace)); } - public static Quotas getUserQuota(final Configuration conf, final String user) + public static Quotas getUserQuota(final Connection connection, final String user) throws IOException { - return getQuotas(conf, getUserRowKey(user)); + return getQuotas(connection, getUserRowKey(user)); } - public static Quotas getUserQuota(final Configuration conf, final String user, + public static Quotas getUserQuota(final Connection connection, final String user, final TableName table) throws IOException { - return getQuotas(conf, getUserRowKey(user), getSettingsQualifierForUserTable(table)); + return getQuotas(connection, getUserRowKey(user), getSettingsQualifierForUserTable(table)); } - public static Quotas getUserQuota(final Configuration conf, final String user, + public static Quotas getUserQuota(final Connection connection, final String user, final String namespace) throws IOException { - return getQuotas(conf, getUserRowKey(user), getSettingsQualifierForUserNamespace(namespace)); + return getQuotas(connection, getUserRowKey(user), + getSettingsQualifierForUserNamespace(namespace)); } - private static Quotas getQuotas(final Configuration conf, final byte[] rowKey) + private static Quotas getQuotas(final Connection connection, final byte[] rowKey) throws IOException { - return getQuotas(conf, rowKey, QUOTA_QUALIFIER_SETTINGS); + return getQuotas(connection, rowKey, QUOTA_QUALIFIER_SETTINGS); } - private static Quotas getQuotas(final Configuration conf, final byte[] rowKey, + private static Quotas getQuotas(final Connection connection, final byte[] rowKey, final byte[] qualifier) throws IOException { Get get = new Get(rowKey); get.addColumn(QUOTA_FAMILY_INFO, qualifier); - Result result = doGet(conf, get); + Result result = doGet(connection, get); if (result.isEmpty()) { return null; } @@ -321,23 +322,18 @@ public class QuotaTableUtil { /* ========================================================================= * HTable helpers */ - protected static Result doGet(final Configuration conf, final Get get) + protected static Result doGet(final Connection connection, final Get get) throws IOException { - HTable table = new HTable(conf, QUOTA_TABLE_NAME); - try { + try (Table table = connection.getTable(QUOTA_TABLE_NAME)) { return table.get(get); - } finally { - table.close(); } } - protected static Result[] doGet(final Configuration conf, final List gets) + protected static Result[] doGet(final Connection connection, final List gets) throws IOException { - HTable table = new HTable(conf, QUOTA_TABLE_NAME); - try { + // TODO: Pass in a Connection to use rather than create one. 
+ try (Table table = connection.getTable(QUOTA_TABLE_NAME)) { return table.get(gets); - } finally { - table.close(); } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlClient.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlClient.java index 922bf67..ae43c17 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlClient.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlClient.java @@ -32,8 +32,8 @@ import org.apache.hadoop.hbase.ZooKeeperConnectionException; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.classification.InterfaceStability; import org.apache.hadoop.hbase.client.Admin; -import org.apache.hadoop.hbase.client.HBaseAdmin; -import org.apache.hadoop.hbase.client.HTable; +import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel; import org.apache.hadoop.hbase.protobuf.ProtobufUtil; @@ -50,11 +50,7 @@ public class AccessControlClient { public static final TableName ACL_TABLE_NAME = TableName.valueOf(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR, "acl"); - private static HTable getAclTable(Configuration conf) throws IOException { - return new HTable(conf, ACL_TABLE_NAME); - } - - private static BlockingInterface getAccessControlServiceStub(HTable ht) + private static BlockingInterface getAccessControlServiceStub(Table ht) throws IOException { CoprocessorRpcChannel service = ht.coprocessorService(HConstants.EMPTY_START_ROW); BlockingInterface protocol = @@ -75,14 +71,12 @@ public class AccessControlClient { public static void grant(Configuration conf, final TableName tableName, final String userName, final byte[] family, final byte[] qual, final Permission.Action... actions) throws Throwable { - HTable ht = null; - try { - ht = getAclTable(conf); - ProtobufUtil.grant(getAccessControlServiceStub(ht), userName, tableName, family, qual, + // TODO: Make it so caller passes in a Connection rather than have us do this expensive + // setup each time. This class only used in test and shell at moment though. + try (Connection connection = ConnectionFactory.createConnection(conf)) { + try (Table table = connection.getTable(ACL_TABLE_NAME)) { + ProtobufUtil.grant(getAccessControlServiceStub(table), userName, tableName, family, qual, actions); - } finally { - if (ht != null) { - ht.close(); } } } @@ -97,26 +91,22 @@ public class AccessControlClient { */ public static void grant(Configuration conf, final String namespace, final String userName, final Permission.Action... actions) throws Throwable { - HTable ht = null; - try { - ht = getAclTable(conf); - ProtobufUtil.grant(getAccessControlServiceStub(ht), userName, namespace, actions); - } finally { - if (ht != null) { - ht.close(); + // TODO: Make it so caller passes in a Connection rather than have us do this expensive + // setup each time. This class only used in test and shell at moment though. 
+ try (Connection connection = ConnectionFactory.createConnection(conf)) { + try (Table table = connection.getTable(ACL_TABLE_NAME)) { + ProtobufUtil.grant(getAccessControlServiceStub(table), userName, namespace, actions); } } } public static boolean isAccessControllerRunning(Configuration conf) throws MasterNotRunningException, ZooKeeperConnectionException, IOException { - HBaseAdmin ha = null; - try { - ha = new HBaseAdmin(conf); - return ha.isTableAvailable(ACL_TABLE_NAME); - } finally { - if (ha != null) { - ha.close(); + // TODO: Make it so caller passes in a Connection rather than have us do this expensive + // setup each time. This class only used in test and shell at moment though. + try (Connection connection = ConnectionFactory.createConnection(conf)) { + try (Admin admin = connection.getAdmin()) { + return admin.isTableAvailable(ACL_TABLE_NAME); } } } @@ -134,14 +124,12 @@ public class AccessControlClient { public static void revoke(Configuration conf, final TableName tableName, final String username, final byte[] family, final byte[] qualifier, final Permission.Action... actions) throws Throwable { - HTable ht = null; - try { - ht = getAclTable(conf); - ProtobufUtil.revoke(getAccessControlServiceStub(ht), username, tableName, family, qualifier, - actions); - } finally { - if (ht != null) { - ht.close(); + // TODO: Make it so caller passes in a Connection rather than have us do this expensive + // setup each time. This class only used in test and shell at moment though. + try (Connection connection = ConnectionFactory.createConnection(conf)) { + try (Table table = connection.getTable(ACL_TABLE_NAME)) { + ProtobufUtil.revoke(getAccessControlServiceStub(table), username, tableName, family, + qualifier, actions); } } } @@ -156,13 +144,11 @@ public class AccessControlClient { */ public static void revoke(Configuration conf, final String namespace, final String userName, final Permission.Action... actions) throws Throwable { - HTable ht = null; - try { - ht = getAclTable(conf); - ProtobufUtil.revoke(getAccessControlServiceStub(ht), userName, namespace, actions); - } finally { - if (ht != null) { - ht.close(); + // TODO: Make it so caller passes in a Connection rather than have us do this expensive + // setup each time. This class only used in test and shell at moment though. 
+ try (Connection connection = ConnectionFactory.createConnection(conf)) { + try (Table table = connection.getTable(ACL_TABLE_NAME)) { + ProtobufUtil.revoke(getAccessControlServiceStub(table), userName, namespace, actions); } } } @@ -177,36 +163,29 @@ public class AccessControlClient { public static List getUserPermissions(Configuration conf, String tableRegex) throws Throwable { List permList = new ArrayList(); - Table ht = null; - Admin ha = null; - try { - ha = new HBaseAdmin(conf); - ht = new HTable(conf, ACL_TABLE_NAME); - CoprocessorRpcChannel service = ht.coprocessorService(HConstants.EMPTY_START_ROW); - BlockingInterface protocol = AccessControlProtos.AccessControlService - .newBlockingStub(service); - HTableDescriptor[] htds = null; - - if (tableRegex == null || tableRegex.isEmpty()) { - permList = ProtobufUtil.getUserPermissions(protocol); - } else if (tableRegex.charAt(0) == '@') { - String namespace = tableRegex.substring(1); - permList = ProtobufUtil.getUserPermissions(protocol, Bytes.toBytes(namespace)); - } else { - htds = ha.listTables(Pattern.compile(tableRegex)); - for (HTableDescriptor hd : htds) { - permList.addAll(ProtobufUtil.getUserPermissions(protocol, hd.getTableName())); + // TODO: Make it so caller passes in a Connection rather than have us do this expensive + // setup each time. This class only used in test and shell at moment though. + try (Connection connection = ConnectionFactory.createConnection(conf)) { + try (Table table = connection.getTable(ACL_TABLE_NAME)) { + try (Admin admin = connection.getAdmin()) { + CoprocessorRpcChannel service = table.coprocessorService(HConstants.EMPTY_START_ROW); + BlockingInterface protocol = + AccessControlProtos.AccessControlService.newBlockingStub(service); + HTableDescriptor[] htds = null; + if (tableRegex == null || tableRegex.isEmpty()) { + permList = ProtobufUtil.getUserPermissions(protocol); + } else if (tableRegex.charAt(0) == '@') { + String namespace = tableRegex.substring(1); + permList = ProtobufUtil.getUserPermissions(protocol, Bytes.toBytes(namespace)); + } else { + htds = admin.listTables(Pattern.compile(tableRegex)); + for (HTableDescriptor hd : htds) { + permList.addAll(ProtobufUtil.getUserPermissions(protocol, hd.getTableName())); + } + } } } - } finally { - if (ht != null) { - ht.close(); - } - if (ha != null) { - ha.close(); - } } return permList; } - -} +} \ No newline at end of file diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityClient.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityClient.java index 5ca5231..2818c24 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityClient.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityClient.java @@ -26,7 +26,8 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.classification.InterfaceStability; -import org.apache.hadoop.hbase.client.HTable; +import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.client.coprocessor.Batch; import org.apache.hadoop.hbase.ipc.BlockingRpcCallback; @@ -73,40 +74,39 @@ public class VisibilityClient { */ public static VisibilityLabelsResponse addLabels(Configuration conf, final String[] labels) throws 
Throwable { - Table ht = null; - try { - ht = new HTable(conf, LABELS_TABLE_NAME); - Batch.Call callable = - new Batch.Call() { - ServerRpcController controller = new ServerRpcController(); - BlockingRpcCallback rpcCallback = - new BlockingRpcCallback(); + // TODO: Make it so caller passes in a Connection rather than have us do this expensive + // setup each time. This class only used in test and shell at moment though. + try (Connection connection = ConnectionFactory.createConnection(conf)) { + try (Table table = connection.getTable(LABELS_TABLE_NAME)) { + Batch.Call callable = + new Batch.Call() { + ServerRpcController controller = new ServerRpcController(); + BlockingRpcCallback rpcCallback = + new BlockingRpcCallback(); - public VisibilityLabelsResponse call(VisibilityLabelsService service) throws IOException { - VisibilityLabelsRequest.Builder builder = VisibilityLabelsRequest.newBuilder(); - for (String label : labels) { - if (label.length() > 0) { - VisibilityLabel.Builder newBuilder = VisibilityLabel.newBuilder(); - newBuilder.setLabel(ByteStringer.wrap(Bytes.toBytes(label))); - builder.addVisLabel(newBuilder.build()); + public VisibilityLabelsResponse call(VisibilityLabelsService service) + throws IOException { + VisibilityLabelsRequest.Builder builder = VisibilityLabelsRequest.newBuilder(); + for (String label : labels) { + if (label.length() > 0) { + VisibilityLabel.Builder newBuilder = VisibilityLabel.newBuilder(); + newBuilder.setLabel(ByteStringer.wrap(Bytes.toBytes(label))); + builder.addVisLabel(newBuilder.build()); + } } + service.addLabels(controller, builder.build(), rpcCallback); + VisibilityLabelsResponse response = rpcCallback.get(); + if (controller.failedOnException()) { + throw controller.getFailedOn(); + } + return response; } - service.addLabels(controller, builder.build(), rpcCallback); - VisibilityLabelsResponse response = rpcCallback.get(); - if (controller.failedOnException()) { - throw controller.getFailedOn(); - } - return response; - } - }; - Map result = ht.coprocessorService( - VisibilityLabelsService.class, HConstants.EMPTY_BYTE_ARRAY, HConstants.EMPTY_BYTE_ARRAY, - callable); - return result.values().iterator().next(); // There will be exactly one region for labels - // table and so one entry in result Map. - } finally { - if (ht != null) { - ht.close(); + }; + Map result = + table.coprocessorService(VisibilityLabelsService.class, HConstants.EMPTY_BYTE_ARRAY, + HConstants.EMPTY_BYTE_ARRAY, callable); + return result.values().iterator().next(); // There will be exactly one region for labels + // table and so one entry in result Map. } } } @@ -131,33 +131,32 @@ public class VisibilityClient { * @throws Throwable */ public static GetAuthsResponse getAuths(Configuration conf, final String user) throws Throwable { - Table ht = null; - try { - ht = new HTable(conf, LABELS_TABLE_NAME); - Batch.Call callable = - new Batch.Call() { - ServerRpcController controller = new ServerRpcController(); - BlockingRpcCallback rpcCallback = - new BlockingRpcCallback(); + // TODO: Make it so caller passes in a Connection rather than have us do this expensive + // setup each time. This class only used in test and shell at moment though. 
+ try (Connection connection = ConnectionFactory.createConnection(conf)) { + try (Table table = connection.getTable(LABELS_TABLE_NAME)) { + Batch.Call callable = + new Batch.Call() { + ServerRpcController controller = new ServerRpcController(); + BlockingRpcCallback rpcCallback = + new BlockingRpcCallback(); - public GetAuthsResponse call(VisibilityLabelsService service) throws IOException { - GetAuthsRequest.Builder getAuthReqBuilder = GetAuthsRequest.newBuilder(); - getAuthReqBuilder.setUser(ByteStringer.wrap(Bytes.toBytes(user))); - service.getAuths(controller, getAuthReqBuilder.build(), rpcCallback); - GetAuthsResponse response = rpcCallback.get(); - if (controller.failedOnException()) { - throw controller.getFailedOn(); + public GetAuthsResponse call(VisibilityLabelsService service) throws IOException { + GetAuthsRequest.Builder getAuthReqBuilder = GetAuthsRequest.newBuilder(); + getAuthReqBuilder.setUser(ByteStringer.wrap(Bytes.toBytes(user))); + service.getAuths(controller, getAuthReqBuilder.build(), rpcCallback); + GetAuthsResponse response = rpcCallback.get(); + if (controller.failedOnException()) { + throw controller.getFailedOn(); + } + return response; } - return response; - } - }; - Map result = ht.coprocessorService(VisibilityLabelsService.class, - HConstants.EMPTY_BYTE_ARRAY, HConstants.EMPTY_BYTE_ARRAY, callable); - return result.values().iterator().next(); // There will be exactly one region for labels - // table and so one entry in result Map. - } finally { - if (ht != null) { - ht.close(); + }; + Map result = + table.coprocessorService(VisibilityLabelsService.class, + HConstants.EMPTY_BYTE_ARRAY, HConstants.EMPTY_BYTE_ARRAY, callable); + return result.values().iterator().next(); // There will be exactly one region for labels + // table and so one entry in result Map. } } } @@ -177,44 +176,42 @@ public class VisibilityClient { private static VisibilityLabelsResponse setOrClearAuths(Configuration conf, final String[] auths, final String user, final boolean setOrClear) throws IOException, ServiceException, Throwable { - Table ht = null; - try { - ht = new HTable(conf, LABELS_TABLE_NAME); - Batch.Call callable = - new Batch.Call() { - ServerRpcController controller = new ServerRpcController(); - BlockingRpcCallback rpcCallback = - new BlockingRpcCallback(); + // TODO: Make it so caller passes in a Connection rather than have us do this expensive + // setup each time. This class only used in test and shell at moment though. 
+ try (Connection connection = ConnectionFactory.createConnection(conf)) { + try (Table table = connection.getTable(LABELS_TABLE_NAME)) { + Batch.Call callable = + new Batch.Call() { + ServerRpcController controller = new ServerRpcController(); + BlockingRpcCallback rpcCallback = + new BlockingRpcCallback(); - public VisibilityLabelsResponse call(VisibilityLabelsService service) throws IOException { - SetAuthsRequest.Builder setAuthReqBuilder = SetAuthsRequest.newBuilder(); - setAuthReqBuilder.setUser(ByteStringer.wrap(Bytes.toBytes(user))); - for (String auth : auths) { - if (auth.length() > 0) { - setAuthReqBuilder.addAuth(ByteStringer.wrap(Bytes.toBytes(auth))); + public VisibilityLabelsResponse call(VisibilityLabelsService service) throws IOException { + SetAuthsRequest.Builder setAuthReqBuilder = SetAuthsRequest.newBuilder(); + setAuthReqBuilder.setUser(ByteStringer.wrap(Bytes.toBytes(user))); + for (String auth : auths) { + if (auth.length() > 0) { + setAuthReqBuilder.addAuth(ByteStringer.wrap(Bytes.toBytes(auth))); + } } + if (setOrClear) { + service.setAuths(controller, setAuthReqBuilder.build(), rpcCallback); + } else { + service.clearAuths(controller, setAuthReqBuilder.build(), rpcCallback); + } + VisibilityLabelsResponse response = rpcCallback.get(); + if (controller.failedOnException()) { + throw controller.getFailedOn(); + } + return response; } - if (setOrClear) { - service.setAuths(controller, setAuthReqBuilder.build(), rpcCallback); - } else { - service.clearAuths(controller, setAuthReqBuilder.build(), rpcCallback); - } - VisibilityLabelsResponse response = rpcCallback.get(); - if (controller.failedOnException()) { - throw controller.getFailedOn(); - } - return response; - } - }; - Map result = ht.coprocessorService( - VisibilityLabelsService.class, HConstants.EMPTY_BYTE_ARRAY, HConstants.EMPTY_BYTE_ARRAY, - callable); - return result.values().iterator().next(); // There will be exactly one region for labels - // table and so one entry in result Map. - } finally { - if (ht != null) { - ht.close(); + }; + Map result = table.coprocessorService( + VisibilityLabelsService.class, HConstants.EMPTY_BYTE_ARRAY, HConstants.EMPTY_BYTE_ARRAY, + callable); + return result.values().iterator().next(); // There will be exactly one region for labels + // table and so one entry in result Map. } } } -} +} \ No newline at end of file diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/Server.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/Server.java index aca6b27..e556bcb 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/Server.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/Server.java @@ -18,8 +18,9 @@ */ package org.apache.hadoop.hbase; -import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.HConnection; import org.apache.hadoop.hbase.zookeeper.MetaTableLocator; import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; @@ -53,6 +54,18 @@ public interface Server extends Abortable, Stoppable { HConnection getShortCircuitConnection(); /** + * Returns reference to a cluster connection. + * For Server services to share. + * + * Important note: this method returns reference to connection which is managed + * by Server itself, so callers must NOT attempt to close connection obtained. 
+ * + * See {@link org.apache.hadoop.hbase.client.ConnectionUtils#createShortCircuitHConnection} + * for details on short-circuit connections. + */ + Connection getConnection(); + + /** * Returns instance of {@link org.apache.hadoop.hbase.zookeeper.MetaTableLocator} * running inside this server. This MetaServerLocator is started and stopped by server, clients * shouldn't manage it's lifecycle. diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/HTableWrapper.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/HTableWrapper.java index 660733d..aaf3ddf 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/HTableWrapper.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/HTableWrapper.java @@ -18,10 +18,12 @@ */ package org.apache.hadoop.hbase.client; -import com.google.protobuf.Descriptors.MethodDescriptor; -import com.google.protobuf.Message; -import com.google.protobuf.Service; -import com.google.protobuf.ServiceException; +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.concurrent.ExecutorService; + import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.TableName; @@ -32,11 +34,10 @@ import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp; import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel; import org.apache.hadoop.io.MultipleIOException; -import java.io.IOException; -import java.util.ArrayList; -import java.util.List; -import java.util.Map; -import java.util.concurrent.ExecutorService; +import com.google.protobuf.Descriptors.MethodDescriptor; +import com.google.protobuf.Message; +import com.google.protobuf.Service; +import com.google.protobuf.ServiceException; /** * A wrapper for HTable. Can be used to restrict privilege. @@ -55,7 +56,7 @@ import java.util.concurrent.ExecutorService; public class HTableWrapper implements HTableInterface { private TableName tableName; - private HTable table; + private final Table table; private ClusterConnection connection; private final List openTables; @@ -73,7 +74,7 @@ public class HTableWrapper implements HTableInterface { ClusterConnection connection, ExecutorService pool) throws IOException { this.tableName = tableName; - this.table = new HTable(tableName, connection, pool); + this.table = connection.getTable(tableName, pool); this.connection = connection; this.openTables = openTables; this.openTables.add(this); @@ -82,7 +83,7 @@ public class HTableWrapper implements HTableInterface { public void internalClose() throws IOException { List exceptions = new ArrayList(2); try { - table.close(); + table.close(); } catch (IOException e) { exceptions.add(e); } @@ -114,7 +115,12 @@ public class HTableWrapper implements HTableInterface { @Deprecated public Result getRowOrBefore(byte[] row, byte[] family) throws IOException { - return table.getRowOrBefore(row, family); + Scan scan = Scan.createGetClosestRowOrBeforeReverseScan(row, family); + Result startRowResult = null; + try (ResultScanner resultScanner = this.table.getScanner(scan)) { + startRowResult = resultScanner.next(); + } + return startRowResult; } public Result get(Get get) throws IOException { @@ -130,8 +136,15 @@ public class HTableWrapper implements HTableInterface { } @Deprecated - public Boolean[] exists(List gets) throws IOException{ - return table.exists(gets); + public Boolean[] exists(List gets) throws IOException { + // Do convertion. 
+ boolean [] exists = table.existsAll(gets); + if (exists == null) return null; + Boolean [] results = new Boolean [exists.length]; + for (int i = 0; i < exists.length; i++) { + results[i] = exists[i]? Boolean.TRUE: Boolean.FALSE; + } + return results; } public void put(Put put) throws IOException { @@ -296,12 +309,12 @@ public class HTableWrapper implements HTableInterface { @Override public void setAutoFlush(boolean autoFlush) { - table.setAutoFlush(autoFlush, autoFlush); + table.setAutoFlushTo(autoFlush); } @Override public void setAutoFlush(boolean autoFlush, boolean clearBufferOnFail) { - table.setAutoFlush(autoFlush, clearBufferOnFail); + throw new UnsupportedOperationException(); } @Override @@ -322,7 +335,8 @@ public class HTableWrapper implements HTableInterface { @Override public long incrementColumnValue(byte[] row, byte[] family, byte[] qualifier, long amount, boolean writeToWAL) throws IOException { - return table.incrementColumnValue(row, family, qualifier, amount, writeToWAL); + return table.incrementColumnValue(row, family, qualifier, amount, + writeToWAL? Durability.USE_DEFAULT: Durability.SKIP_WAL); } @Override @@ -346,4 +360,4 @@ public class HTableWrapper implements HTableInterface { CompareOp compareOp, byte[] value, RowMutations rm) throws IOException { return table.checkAndMutate(row, family, qualifier, compareOp, value, rm); } -} +} \ No newline at end of file diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/HRegionPartitioner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/HRegionPartitioner.java index 11acea0..be131e8 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/HRegionPartitioner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/HRegionPartitioner.java @@ -22,11 +22,12 @@ import java.io.IOException; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hbase.classification.InterfaceAudience; -import org.apache.hadoop.hbase.classification.InterfaceStability; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.client.HTable; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; +import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.RegionLocator; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.hadoop.hbase.util.Bytes; @@ -47,26 +48,28 @@ import org.apache.hadoop.mapred.Partitioner; public class HRegionPartitioner implements Partitioner { private static final Log LOG = LogFactory.getLog(HRegionPartitioner.class); - private RegionLocator table; + // Connection and locator are not cleaned up; they just die when partitioner is done. 
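For illustration only: the deprecated exists(List) wrapper above is now bridged onto Table.existsAll, which returns a primitive boolean[]. A self-contained sketch of that boxing shim; the helper class is made up and the HBase 1.x client API is assumed:

    import java.io.IOException;
    import java.util.List;

    import org.apache.hadoop.hbase.client.Get;
    import org.apache.hadoop.hbase.client.Table;

    public final class ExistsShim {
      private ExistsShim() {}

      // Bridges the old Boolean[]-returning exists(List<Get>) onto the new
      // boolean[]-returning Table.existsAll(List<Get>).
      public static Boolean[] exists(Table table, List<Get> gets) throws IOException {
        boolean[] raw = table.existsAll(gets);
        if (raw == null) {
          return null;
        }
        Boolean[] boxed = new Boolean[raw.length];
        for (int i = 0; i < raw.length; i++) {
          boxed[i] = raw[i]; // auto-boxing covers the TRUE/FALSE conversion
        }
        return boxed;
      }
    }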
+ private Connection connection; + private RegionLocator locator; private byte[][] startKeys; public void configure(JobConf job) { try { - this.table = new HTable(HBaseConfiguration.create(job), - TableName.valueOf(job.get(TableOutputFormat.OUTPUT_TABLE))); + this.connection = ConnectionFactory.createConnection(HBaseConfiguration.create(job)); + TableName tableName = TableName.valueOf(job.get(TableOutputFormat.OUTPUT_TABLE)); + this.locator = this.connection.getRegionLocator(tableName); } catch (IOException e) { LOG.error(e); } try { - this.startKeys = this.table.getStartKeys(); + this.startKeys = this.locator.getStartKeys(); } catch (IOException e) { LOG.error(e); } } - public int getPartition(ImmutableBytesWritable key, - V2 value, int numPartitions) { + public int getPartition(ImmutableBytesWritable key, V2 value, int numPartitions) { byte[] region = null; // Only one region return 0 if (this.startKeys.length == 1){ @@ -75,7 +78,7 @@ implements Partitioner { try { // Not sure if this is cached after a split so we could have problems // here if a region splits while mapping - region = table.getRegionLocation(key.get()).getRegionInfo().getStartKey(); + region = locator.getRegionLocation(key.get()).getRegionInfo().getStartKey(); } catch (IOException e) { LOG.error(e); } @@ -92,4 +95,4 @@ implements Partitioner { // if above fails to find start key that match we need to return something return 0; } -} +} \ No newline at end of file diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/TableMapReduceUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/TableMapReduceUtil.java index 0f03159..1afb9d6 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/TableMapReduceUtil.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/TableMapReduceUtil.java @@ -26,6 +26,7 @@ import org.apache.hadoop.hbase.classification.InterfaceStability; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.MetaTableAccessor; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.hadoop.hbase.mapreduce.MutationSerialization; @@ -211,7 +212,8 @@ public class TableMapReduceUtil { MutationSerialization.class.getName(), ResultSerialization.class.getName()); if (partitioner == HRegionPartitioner.class) { job.setPartitionerClass(HRegionPartitioner.class); - int regions = MetaTableAccessor.getRegionCount(HBaseConfiguration.create(job), table); + int regions = + MetaTableAccessor.getRegionCount(HBaseConfiguration.create(job), TableName.valueOf(table)); if (job.getNumReduceTasks() > regions) { job.setNumReduceTasks(regions); } @@ -275,9 +277,11 @@ public class TableMapReduceUtil { * @param job The current job configuration to adjust. * @throws IOException When retrieving the table details fails. */ + // Used by tests. public static void limitNumReduceTasks(String table, JobConf job) throws IOException { - int regions = MetaTableAccessor.getRegionCount(HBaseConfiguration.create(job), table); + int regions = + MetaTableAccessor.getRegionCount(HBaseConfiguration.create(job), TableName.valueOf(table)); if (job.getNumReduceTasks() > regions) job.setNumReduceTasks(regions); } @@ -290,9 +294,11 @@ public class TableMapReduceUtil { * @param job The current job configuration to adjust. * @throws IOException When retrieving the table details fails. */ + // Used by tests. 
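For illustration only: HRegionPartitioner now asks its Connection for a RegionLocator instead of holding an HTable. A standalone sketch of the two locator calls it depends on, getStartKeys() in configure() and getRegionLocation() in getPartition(); the table name "t1" and the row key are made up:

    import java.io.IOException;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionLocator;
    import org.apache.hadoop.hbase.util.Bytes;

    public class RegionLocatorSketch {
      public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             RegionLocator locator = connection.getRegionLocator(TableName.valueOf("t1"))) {
          // Start keys of all regions, cached by the partitioner in configure().
          byte[][] startKeys = locator.getStartKeys();
          System.out.println("Region count: " + startKeys.length);
          // Region hosting a given row, as consulted in getPartition().
          HRegionLocation location = locator.getRegionLocation(Bytes.toBytes("some-row"));
          System.out.println("Start key: "
              + Bytes.toStringBinary(location.getRegionInfo().getStartKey()));
        }
      }
    }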
public static void limitNumMapTasks(String table, JobConf job) throws IOException { - int regions = MetaTableAccessor.getRegionCount(HBaseConfiguration.create(job), table); + int regions = + MetaTableAccessor.getRegionCount(HBaseConfiguration.create(job), TableName.valueOf(table)); if (job.getNumMapTasks() > regions) job.setNumMapTasks(regions); } @@ -307,7 +313,8 @@ public class TableMapReduceUtil { */ public static void setNumReduceTasks(String table, JobConf job) throws IOException { - job.setNumReduceTasks(MetaTableAccessor.getRegionCount(HBaseConfiguration.create(job), table)); + job.setNumReduceTasks(MetaTableAccessor.getRegionCount(HBaseConfiguration.create(job), + TableName.valueOf(table))); } /** @@ -320,7 +327,8 @@ public class TableMapReduceUtil { */ public static void setNumMapTasks(String table, JobConf job) throws IOException { - job.setNumMapTasks(MetaTableAccessor.getRegionCount(HBaseConfiguration.create(job), table)); + job.setNumMapTasks(MetaTableAccessor.getRegionCount(HBaseConfiguration.create(job), + TableName.valueOf(table))); } /** diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/TableOutputFormat.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/TableOutputFormat.java index 5a5f544..8645d07 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/TableOutputFormat.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/TableOutputFormat.java @@ -20,21 +20,19 @@ package org.apache.hadoop.hbase.mapred; import java.io.IOException; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hbase.classification.InterfaceAudience; -import org.apache.hadoop.hbase.classification.InterfaceStability; +import org.apache.hadoop.fs.FileAlreadyExistsException; import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.client.HTable; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; +import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; -import org.apache.hadoop.fs.FileAlreadyExistsException; +import org.apache.hadoop.mapred.FileOutputFormat; import org.apache.hadoop.mapred.InvalidJobConfException; import org.apache.hadoop.mapred.JobConf; -import org.apache.hadoop.mapred.FileOutputFormat; import org.apache.hadoop.mapred.RecordWriter; import org.apache.hadoop.mapred.Reporter; import org.apache.hadoop.util.Progressable; @@ -44,19 +42,16 @@ import org.apache.hadoop.util.Progressable; */ @InterfaceAudience.Public @InterfaceStability.Stable -public class TableOutputFormat extends -FileOutputFormat { +public class TableOutputFormat extends FileOutputFormat { /** JobConf parameter that specifies the output table */ public static final String OUTPUT_TABLE = "hbase.mapred.outputtable"; - private static final Log LOG = LogFactory.getLog(TableOutputFormat.class); /** * Convert Reduce output (key, value) to (HStoreKey, KeyedDataArrayWritable) * and write to an HBase table */ - protected static class TableRecordWriter - implements RecordWriter { + protected static class TableRecordWriter implements RecordWriter { private Table m_table; /** @@ -70,41 +65,36 @@ FileOutputFormat { public void 
close(Reporter reporter) throws IOException { - m_table.close(); + if (m_table != null) { + m_table.close(); + m_table = null; + } } - public void write(ImmutableBytesWritable key, - Put value) throws IOException { + public void write(ImmutableBytesWritable key, Put value) throws IOException { m_table.put(new Put(value)); } } @Override - @SuppressWarnings("unchecked") - public RecordWriter getRecordWriter(FileSystem ignored, - JobConf job, String name, Progressable progress) throws IOException { - - // expecting exactly one path - + public RecordWriter getRecordWriter(FileSystem ignored, JobConf job, String name, + Progressable progress) + throws IOException { TableName tableName = TableName.valueOf(job.get(OUTPUT_TABLE)); - HTable table = null; - try { - table = new HTable(HBaseConfiguration.create(job), tableName); - } catch(IOException e) { - LOG.error(e); - throw e; - } - table.setAutoFlush(false, true); + // Connection is not closed. Dies with JVM. No possibility for cleanup. + Connection connection = ConnectionFactory.createConnection(job); + Table table = connection.getTable(tableName); + // Clear write buffer on fail is true by default so no need to reset it. + table.setAutoFlushTo(false); return new TableRecordWriter(table); } @Override public void checkOutputSpecs(FileSystem ignored, JobConf job) throws FileAlreadyExistsException, InvalidJobConfException, IOException { - String tableName = job.get(OUTPUT_TABLE); - if(tableName == null) { + if (tableName == null) { throw new IOException("Must specify table name"); } } -} +} \ No newline at end of file diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/DefaultVisibilityExpressionResolver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/DefaultVisibilityExpressionResolver.java index 150bb25..deb59c4 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/DefaultVisibilityExpressionResolver.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/DefaultVisibilityExpressionResolver.java @@ -28,11 +28,12 @@ import java.util.Map; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.TableNotFoundException; import org.apache.hadoop.hbase.Tag; -import org.apache.hadoop.hbase.client.HTable; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.ResultScanner; import org.apache.hadoop.hbase.client.Scan; @@ -66,44 +67,55 @@ public class DefaultVisibilityExpressionResolver implements VisibilityExpression @Override public void init() { // Reading all the labels and ordinal. - // This scan should be done by user with global_admin previliges.. Ensure that it works + // This scan should be done by user with global_admin privileges.. Ensure that it works Table labelsTable = null; + Connection connection = null; try { - labelsTable = new HTable(conf, LABELS_TABLE_NAME); - } catch (TableNotFoundException e) { - // Just return with out doing any thing. When the VC is not used we wont be having 'labels' - // table in the cluster. 
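For illustration only, and not what the patch does: the mapred TableOutputFormat.getRecordWriter() change above deliberately leaves its Connection unclosed (see the comment in that hunk). A sketch of an alternative writer that owns both handles and closes them together; the class name is made up and it does not implement the mapred RecordWriter interface:

    import java.io.IOException;

    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;

    // Illustrative shape only: a writer that owns the Table and the Connection,
    // closing them together instead of letting the Connection die with the JVM.
    public class OwningTableWriter {
      private final Connection connection;
      private final Table table;

      public OwningTableWriter(Connection connection, Table table) {
        this.connection = connection;
        this.table = table;
      }

      public void write(Put put) throws IOException {
        table.put(put);
      }

      public void close() throws IOException {
        try {
          table.close();
        } finally {
          connection.close();
        }
      }
    }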
- return; - } catch (IOException e) { - LOG.error("Error opening 'labels' table", e); - return; - } - Scan scan = new Scan(); - scan.setAuthorizations(new Authorizations(VisibilityUtils.SYSTEM_LABEL)); - scan.addColumn(LABELS_TABLE_FAMILY, LABEL_QUALIFIER); - ResultScanner scanner = null; - try { - scanner = labelsTable.getScanner(scan); - Result next = null; - while ((next = scanner.next()) != null) { - byte[] row = next.getRow(); - byte[] value = next.getValue(LABELS_TABLE_FAMILY, LABEL_QUALIFIER); - labels.put(Bytes.toString(value), Bytes.toInt(row)); + connection = ConnectionFactory.createConnection(conf); + try { + labelsTable = connection.getTable(LABELS_TABLE_NAME); + } catch (TableNotFoundException e) { + // Just return with out doing any thing. When the VC is not used we wont be having 'labels' + // table in the cluster. + return; + } catch (IOException e) { + LOG.error("Error opening 'labels' table", e); + return; } - } catch (IOException e) { - LOG.error("Error reading 'labels' table", e); - } finally { + Scan scan = new Scan(); + scan.setAuthorizations(new Authorizations(VisibilityUtils.SYSTEM_LABEL)); + scan.addColumn(LABELS_TABLE_FAMILY, LABEL_QUALIFIER); + ResultScanner scanner = null; try { - if (scanner != null) { - scanner.close(); + scanner = labelsTable.getScanner(scan); + Result next = null; + while ((next = scanner.next()) != null) { + byte[] row = next.getRow(); + byte[] value = next.getValue(LABELS_TABLE_FAMILY, LABEL_QUALIFIER); + labels.put(Bytes.toString(value), Bytes.toInt(row)); } + } catch (IOException e) { + LOG.error("Error scanning 'labels' table", e); } finally { + if (scanner != null) scanner.close(); + } + } catch (IOException ioe) { + LOG.error("Failed reading 'labels' tags", ioe); + return; + } finally { + if (labelsTable != null) { try { labelsTable.close(); - } catch (IOException e) { - LOG.warn("Error on closing 'labels' table", e); + } catch (IOException ioe) { + LOG.warn("Error closing 'labels' table", ioe); } } + if (connection != null) + try { + connection.close(); + } catch (IOException ioe) { + LOG.warn("Failed close of temporary connection", ioe); + } } } @@ -117,4 +129,4 @@ public class DefaultVisibilityExpressionResolver implements VisibilityExpression }; return VisibilityUtils.createVisibilityExpTags(visExpression, true, false, null, provider); } -} +} \ No newline at end of file diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java index f69be50..ebe7d36 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java @@ -34,8 +34,6 @@ import java.util.zip.ZipFile; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hbase.classification.InterfaceAudience; -import org.apache.hadoop.hbase.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; @@ -43,10 +41,11 @@ import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.MetaTableAccessor; import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; import org.apache.hadoop.hbase.client.Put; import 
org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; -import org.apache.hadoop.hbase.io.hfile.CacheConfig; import org.apache.hadoop.hbase.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos; import org.apache.hadoop.hbase.security.User; @@ -662,7 +661,7 @@ public class TableMapReduceUtil { job.setOutputValueClass(Writable.class); if (partitioner == HRegionPartitioner.class) { job.setPartitionerClass(HRegionPartitioner.class); - int regions = MetaTableAccessor.getRegionCount(conf, table); + int regions = MetaTableAccessor.getRegionCount(conf, TableName.valueOf(table)); if (job.getNumReduceTasks() > regions) { job.setNumReduceTasks(regions); } @@ -687,7 +686,8 @@ public class TableMapReduceUtil { */ public static void limitNumReduceTasks(String table, Job job) throws IOException { - int regions = MetaTableAccessor.getRegionCount(job.getConfiguration(), table); + int regions = + MetaTableAccessor.getRegionCount(job.getConfiguration(), TableName.valueOf(table)); if (job.getNumReduceTasks() > regions) job.setNumReduceTasks(regions); } @@ -702,7 +702,8 @@ public class TableMapReduceUtil { */ public static void setNumReduceTasks(String table, Job job) throws IOException { - job.setNumReduceTasks(MetaTableAccessor.getRegionCount(job.getConfiguration(), table)); + job.setNumReduceTasks(MetaTableAccessor.getRegionCount(job.getConfiguration(), + TableName.valueOf(table))); } /** diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableOutputFormat.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableOutputFormat.java index da40b2e..0bdb0ba 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableOutputFormat.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableOutputFormat.java @@ -86,11 +86,8 @@ implements Configurable { /** * Writes the reducer output to an HBase table. - * - * @param The type of the key. */ - protected class TableRecordWriter - extends RecordWriter { + protected class TableRecordWriter extends RecordWriter { /** * Closes the writer, in this case flush table commits. 
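For illustration only: the DefaultVisibilityExpressionResolver.init() rewrite above releases the scanner, the labels table and the temporary connection through nested try/finally blocks. The same close ordering can be expressed with a single try-with-resources; a minimal sketch, assuming the labels table is named "hbase:labels" and with the label decoding reduced to printing row keys:

    import java.io.IOException;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.ResultScanner;
    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class ScanLabelsSketch {
      public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        // Resources close in reverse declaration order: scanner, then table,
        // then connection, whether the scan finishes or throws.
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Table table = connection.getTable(TableName.valueOf("hbase:labels"));
             ResultScanner scanner = table.getScanner(new Scan())) {
          for (Result result : scanner) {
            System.out.println(Bytes.toStringBinary(result.getRow()));
          }
        }
      }
    }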
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java index 886991c..13d7a1a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java @@ -29,18 +29,19 @@ import java.util.concurrent.atomic.AtomicInteger; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Chore; -import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.MetaTableAccessor; import org.apache.hadoop.hbase.Server; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.backup.HFileArchiver; -import org.apache.hadoop.hbase.MetaTableAccessor; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.MetaScanner; import org.apache.hadoop.hbase.client.MetaScanner.MetaScannerVisitor; import org.apache.hadoop.hbase.client.Result; @@ -62,6 +63,7 @@ public class CatalogJanitor extends Chore { private final MasterServices services; private AtomicBoolean enabled = new AtomicBoolean(true); private AtomicBoolean alreadyRunning = new AtomicBoolean(false); + private final Connection connection; CatalogJanitor(final Server server, final MasterServices services) { super("CatalogJanitor-" + server.getServerName().toShortString(), @@ -69,6 +71,7 @@ public class CatalogJanitor extends Chore { server); this.server = server; this.services = services; + this.connection = server.getConnection(); } @Override @@ -163,7 +166,7 @@ public class CatalogJanitor extends Chore { // Run full scan of hbase:meta catalog table passing in our custom visitor with // the start row - MetaScanner.metaScan(server.getConfiguration(), null, visitor, tableName); + MetaScanner.metaScan(server.getConfiguration(), this.connection, visitor, tableName); return new Triple, Map>( count.get(), mergedRegions, splitParents); @@ -331,7 +334,7 @@ public class CatalogJanitor extends Chore { FileSystem fs = this.services.getMasterFileSystem().getFileSystem(); if (LOG.isTraceEnabled()) LOG.trace("Archiving parent region: " + parent); HFileArchiver.archiveRegion(this.services.getConfiguration(), fs, parent); - MetaTableAccessor.deleteRegion(this.server.getShortCircuitConnection(), parent); + MetaTableAccessor.deleteRegion(this.connection, parent); result = true; } return result; @@ -420,4 +423,4 @@ public class CatalogJanitor extends Chore { return cleanMergeRegion(region, mergeRegions.getFirst(), mergeRegions.getSecond()); } -} +} \ No newline at end of file diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java index 3437f34..986ff66 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java @@ -67,6 +67,7 @@ import org.apache.hadoop.hbase.TableNotDisabledException; import org.apache.hadoop.hbase.TableNotFoundException; import 
org.apache.hadoop.hbase.UnknownRegionException; import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.MetaScanner; import org.apache.hadoop.hbase.client.MetaScanner.MetaScannerVisitor; import org.apache.hadoop.hbase.client.MetaScanner.MetaScannerVisitorBase; @@ -128,6 +129,7 @@ import org.apache.hadoop.hbase.zookeeper.ZKUtil; import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; import org.apache.zookeeper.KeeperException; import org.apache.zookeeper.Watcher; +import org.codehaus.jackson.map.Module.SetupContext; import org.mortbay.jetty.Connector; import org.mortbay.jetty.nio.SelectChannelConnector; import org.mortbay.jetty.servlet.Context; @@ -538,12 +540,7 @@ public class HMaster extends HRegionServer implements MasterServices, Server { ZKClusterId.setClusterId(this.zooKeeper, fileSystemManager.getClusterId()); this.serverManager = createServerManager(this, this); - synchronized (this) { - if (shortCircuitConnection == null) { - shortCircuitConnection = createShortCircuitConnection(); - metaTableLocator = new MetaTableLocator(); - } - } + setupClusterConnections(); // Invalidate all write locks held previously this.tableLockManager.reapWriteLocks(); @@ -1492,6 +1489,7 @@ public class HMaster extends HRegionServer implements MasterServices, Server { * is found, but not currently deployed, the second element of the pair * may be null. */ + @VisibleForTesting // Used by TestMaster. Pair getTableRegionForRow( final TableName tableName, final byte [] rowKey) throws IOException { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/FavoredNodeAssignmentHelper.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/FavoredNodeAssignmentHelper.java index 01c1f89..c884806 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/FavoredNodeAssignmentHelper.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/FavoredNodeAssignmentHelper.java @@ -25,21 +25,21 @@ import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Map; +import java.util.Map.Entry; import java.util.Random; import java.util.Set; -import java.util.Map.Entry; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionInfo; -import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.MetaTableAccessor; +import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.client.Connection; -import org.apache.hadoop.hbase.client.HTable; +import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.master.RackManager; @@ -121,12 +121,14 @@ public class FavoredNodeAssignmentHelper { } } // Write the region assignments to the meta table. 
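For illustration only: CatalogJanitor above now caches the Server's shared Connection once in its constructor and reuses it for meta access, rather than creating connections per scan. A sketch of that shape; the helper class is made up, and since the Server owns the connection the helper never closes it:

    import java.io.IOException;

    import org.apache.hadoop.hbase.HRegionInfo;
    import org.apache.hadoop.hbase.MetaTableAccessor;
    import org.apache.hadoop.hbase.Server;
    import org.apache.hadoop.hbase.client.Connection;

    // Illustrative shape only: a chore-like helper that grabs the Server's shared
    // Connection at construction time, mirroring what CatalogJanitor does.
    public class MetaCleanupHelper {
      private final Connection connection;

      public MetaCleanupHelper(Server server) {
        // The Server manages this connection's lifecycle; do not close it here.
        this.connection = server.getConnection();
      }

      public void deleteParent(HRegionInfo parent) throws IOException {
        MetaTableAccessor.deleteRegion(this.connection, parent);
      }
    }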
- Table metaTable = null; - try { - metaTable = new HTable(conf, TableName.META_TABLE_NAME); - metaTable.put(puts); - } finally { - if (metaTable != null) metaTable.close(); + // TODO: See above overrides take a Connection rather than a Configuration only the + // Connection is a short circuit connection. That is not going to good in all cases, when + // master and meta are not colocated. Fix when this favored nodes feature is actually used + // someday. + try (Connection connection = ConnectionFactory.createConnection(conf)) { + try (Table metaTable = connection.getTable(TableName.META_TABLE_NAME)) { + metaTable.put(puts); + } } LOG.info("Added " + puts.size() + " regions in META"); } @@ -304,7 +306,6 @@ public class FavoredNodeAssignmentHelper { * primary/secondary/tertiary RegionServers * @param primaryRSMap * @return the map of regions to the servers the region-files should be hosted on - * @throws IOException */ public Map placeSecondaryAndTertiaryWithRestrictions( Map primaryRSMap) { @@ -603,4 +604,4 @@ public class FavoredNodeAssignmentHelper { } return strBuf.toString(); } -} +} \ No newline at end of file diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/ModifyTableHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/ModifyTableHandler.java index a778c26..d70484d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/ModifyTableHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/ModifyTableHandler.java @@ -25,15 +25,14 @@ import java.util.Set; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.TableDescriptor; -import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HTableDescriptor; -import org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.MetaTableAccessor; -import org.apache.hadoop.hbase.client.HTable; +import org.apache.hadoop.hbase.Server; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.ResultScanner; import org.apache.hadoop.hbase.client.Scan; @@ -44,7 +43,6 @@ import org.apache.hadoop.hbase.master.HMaster; import org.apache.hadoop.hbase.master.MasterCoprocessorHost; import org.apache.hadoop.hbase.master.MasterFileSystem; import org.apache.hadoop.hbase.master.MasterServices; -import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos; import org.apache.hadoop.hbase.util.Bytes; @InterfaceAudience.Private @@ -101,19 +99,14 @@ public class ModifyTableHandler extends TableEventHandler { Set tableRows = new HashSet(); Scan scan = MetaTableAccessor.getScanForTableName(table); scan.addColumn(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER); - Table htable = null; - try { - htable = new HTable(masterServices.getConfiguration(), TableName.META_TABLE_NAME); - ResultScanner resScanner = htable.getScanner(scan); + Connection connection = this.masterServices.getConnection(); + try (Table metaTable = connection.getTable(TableName.META_TABLE_NAME)) { + ResultScanner resScanner = metaTable.getScanner(scan); for (Result result : resScanner) { tableRows.add(result.getRow()); } 
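For illustration only: the ModifyTableHandler hunk above reuses the master's Connection and closes the meta Table with try-with-resources, though the ResultScanner it opens is not closed explicitly. A sketch that scopes both; the helper name is made up, and getScanForTableName is the same MetaTableAccessor call used above:

    import java.io.IOException;
    import java.util.HashSet;
    import java.util.Set;

    import org.apache.hadoop.hbase.MetaTableAccessor;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.ResultScanner;
    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.client.Table;

    public final class MetaRowCollector {
      private MetaRowCollector() {}

      // Collects the hbase:meta row keys belonging to the given table.
      public static Set<byte[]> metaRowsForTable(Connection connection, TableName table)
          throws IOException {
        Set<byte[]> rows = new HashSet<byte[]>();
        Scan scan = MetaTableAccessor.getScanForTableName(table);
        try (Table meta = connection.getTable(TableName.META_TABLE_NAME);
             ResultScanner scanner = meta.getScanner(scan)) {
          for (Result result : scanner) {
            rows.add(result.getRow());
          }
        }
        return rows;
      }
    }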
MetaTableAccessor.removeRegionReplicasFromMeta(tableRows, newReplicaCount, - oldReplicaCount - newReplicaCount, masterServices.getShortCircuitConnection()); - } finally { - if (htable != null) { - htable.close(); - } + oldReplicaCount - newReplicaCount, masterServices.getShortCircuitConnection()); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/TableEventHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/TableEventHandler.java index 66c45a4..d157dc9 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/TableEventHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/TableEventHandler.java @@ -28,19 +28,21 @@ import java.util.TreeMap; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.CoordinatedStateException; -import org.apache.hadoop.hbase.TableDescriptor; -import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.InvalidFamilyOperationException; +import org.apache.hadoop.hbase.MetaTableAccessor; import org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.TableDescriptor; import org.apache.hadoop.hbase.TableExistsException; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableNotDisabledException; -import org.apache.hadoop.hbase.MetaTableAccessor; -import org.apache.hadoop.hbase.client.HTable; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.client.RegionLocator; import org.apache.hadoop.hbase.client.TableState; import org.apache.hadoop.hbase.executor.EventHandler; import org.apache.hadoop.hbase.executor.EventType; @@ -48,10 +50,10 @@ import org.apache.hadoop.hbase.master.BulkReOpen; import org.apache.hadoop.hbase.master.MasterServices; import org.apache.hadoop.hbase.master.TableLockManager.TableLock; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.zookeeper.MetaTableLocator; import com.google.common.collect.Lists; import com.google.common.collect.Maps; -import org.apache.hadoop.hbase.zookeeper.MetaTableLocator; /** * Base class for performing operations against tables. @@ -175,32 +177,32 @@ public abstract class TableEventHandler extends EventHandler { public boolean reOpenAllRegions(List regions) throws IOException { boolean done = false; LOG.info("Bucketing regions by region server..."); - HTable table = new HTable(masterServices.getConfiguration(), tableName); - TreeMap> serverToRegions = Maps - .newTreeMap(); - NavigableMap hriHserverMapping; - try { - hriHserverMapping = table.getRegionLocations(); - } finally { - table.close(); + List regionLocations = null; + Connection connection = this.masterServices.getConnection(); + try (RegionLocator locator = connection.getRegionLocator(tableName)) { + regionLocations = locator.getAllRegionLocations(); } - + // Convert List to Map. 
+ NavigableMap hri2Sn = new TreeMap(); + for (HRegionLocation location: regionLocations) { + hri2Sn.put(location.getRegionInfo(), location.getServerName()); + } + TreeMap> serverToRegions = Maps.newTreeMap(); List reRegions = new ArrayList(); for (HRegionInfo hri : regions) { - ServerName rsLocation = hriHserverMapping.get(hri); - + ServerName sn = hri2Sn.get(hri); // Skip the offlined split parent region // See HBASE-4578 for more information. - if (null == rsLocation) { + if (null == sn) { LOG.info("Skip " + hri); continue; } - if (!serverToRegions.containsKey(rsLocation)) { + if (!serverToRegions.containsKey(sn)) { LinkedList hriList = Lists.newLinkedList(); - serverToRegions.put(rsLocation, hriList); + serverToRegions.put(sn, hriList); } reRegions.add(hri); - serverToRegions.get(rsLocation).add(hri); + serverToRegions.get(sn).add(hri); } LOG.info("Reopening " + reRegions.size() + " regions on " diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/MasterQuotaManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/MasterQuotaManager.java index 885862c..ecd54df 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/MasterQuotaManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/MasterQuotaManager.java @@ -23,31 +23,21 @@ import java.util.HashSet; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hbase.classification.InterfaceAudience; -import org.apache.hadoop.hbase.classification.InterfaceStability; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.Coprocessor; import org.apache.hadoop.hbase.DoNotRetryIOException; import org.apache.hadoop.hbase.HRegionInfo; -import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.MetaTableAccessor; -import org.apache.hadoop.hbase.NamespaceDescriptor; import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.coprocessor.BaseMasterObserver; -import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment; -import org.apache.hadoop.hbase.coprocessor.ObserverContext; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; import org.apache.hadoop.hbase.master.MasterServices; import org.apache.hadoop.hbase.master.handler.CreateTableHandler; +import org.apache.hadoop.hbase.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse; import org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas; import org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Throttle; -import org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota; import org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequest; -import org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleType; -import org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.QuotaScope; -import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeUnit; -import org.apache.hadoop.hbase.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota; /** * Master Quota Manager. 
@@ -101,10 +91,6 @@ public class MasterQuotaManager { return enabled; } - private Configuration getConfiguration() { - return masterServices.getConfiguration(); - } - /* ========================================================================== * Admin operations to manage the quota table */ @@ -152,15 +138,15 @@ public class MasterQuotaManager { setQuota(req, new SetQuotaOperations() { @Override public Quotas fetch() throws IOException { - return QuotaUtil.getUserQuota(getConfiguration(), userName); + return QuotaUtil.getUserQuota(masterServices.getConnection(), userName); } @Override public void update(final Quotas quotas) throws IOException { - QuotaUtil.addUserQuota(getConfiguration(), userName, quotas); + QuotaUtil.addUserQuota(masterServices.getConnection(), userName, quotas); } @Override public void delete() throws IOException { - QuotaUtil.deleteUserQuota(masterServices.getConfiguration(), userName); + QuotaUtil.deleteUserQuota(masterServices.getConnection(), userName); } @Override public void preApply(final Quotas quotas) throws IOException { @@ -178,15 +164,15 @@ public class MasterQuotaManager { setQuota(req, new SetQuotaOperations() { @Override public Quotas fetch() throws IOException { - return QuotaUtil.getUserQuota(getConfiguration(), userName, table); + return QuotaUtil.getUserQuota(masterServices.getConnection(), userName, table); } @Override public void update(final Quotas quotas) throws IOException { - QuotaUtil.addUserQuota(getConfiguration(), userName, table, quotas); + QuotaUtil.addUserQuota(masterServices.getConnection(), userName, table, quotas); } @Override public void delete() throws IOException { - QuotaUtil.deleteUserQuota(masterServices.getConfiguration(), userName, table); + QuotaUtil.deleteUserQuota(masterServices.getConnection(), userName, table); } @Override public void preApply(final Quotas quotas) throws IOException { @@ -204,15 +190,15 @@ public class MasterQuotaManager { setQuota(req, new SetQuotaOperations() { @Override public Quotas fetch() throws IOException { - return QuotaUtil.getUserQuota(getConfiguration(), userName, namespace); + return QuotaUtil.getUserQuota(masterServices.getConnection(), userName, namespace); } @Override public void update(final Quotas quotas) throws IOException { - QuotaUtil.addUserQuota(getConfiguration(), userName, namespace, quotas); + QuotaUtil.addUserQuota(masterServices.getConnection(), userName, namespace, quotas); } @Override public void delete() throws IOException { - QuotaUtil.deleteUserQuota(masterServices.getConfiguration(), userName, namespace); + QuotaUtil.deleteUserQuota(masterServices.getConnection(), userName, namespace); } @Override public void preApply(final Quotas quotas) throws IOException { @@ -230,15 +216,15 @@ public class MasterQuotaManager { setQuota(req, new SetQuotaOperations() { @Override public Quotas fetch() throws IOException { - return QuotaUtil.getTableQuota(getConfiguration(), table); + return QuotaUtil.getTableQuota(masterServices.getConnection(), table); } @Override public void update(final Quotas quotas) throws IOException { - QuotaUtil.addTableQuota(getConfiguration(), table, quotas); + QuotaUtil.addTableQuota(masterServices.getConnection(), table, quotas); } @Override public void delete() throws IOException { - QuotaUtil.deleteTableQuota(getConfiguration(), table); + QuotaUtil.deleteTableQuota(masterServices.getConnection(), table); } @Override public void preApply(final Quotas quotas) throws IOException { @@ -256,15 +242,15 @@ public class MasterQuotaManager { setQuota(req, new 
SetQuotaOperations() { @Override public Quotas fetch() throws IOException { - return QuotaUtil.getNamespaceQuota(getConfiguration(), namespace); + return QuotaUtil.getNamespaceQuota(masterServices.getConnection(), namespace); } @Override public void update(final Quotas quotas) throws IOException { - QuotaUtil.addNamespaceQuota(getConfiguration(), namespace, quotas); + QuotaUtil.addNamespaceQuota(masterServices.getConnection(), namespace, quotas); } @Override public void delete() throws IOException { - QuotaUtil.deleteNamespaceQuota(getConfiguration(), namespace); + QuotaUtil.deleteNamespaceQuota(masterServices.getConnection(), namespace); } @Override public void preApply(final Quotas quotas) throws IOException { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaCache.java index c44a737..8cd402d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaCache.java @@ -23,19 +23,16 @@ import java.util.ArrayList; import java.util.List; import java.util.Map; import java.util.Set; -import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.ConcurrentSkipListSet; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hbase.classification.InterfaceAudience; -import org.apache.hadoop.hbase.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Chore; import org.apache.hadoop.hbase.Stoppable; import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.client.HTable; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; import org.apache.hadoop.hbase.client.Get; import org.apache.hadoop.hbase.regionserver.RegionServerServices; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; @@ -240,7 +237,7 @@ public class QuotaCache implements Stoppable { @Override public Map fetchEntries(final List gets) throws IOException { - return QuotaUtil.fetchNamespaceQuotas(QuotaCache.this.getConfiguration(), gets); + return QuotaUtil.fetchNamespaceQuotas(rsServices.getConnection(), gets); } }); } @@ -255,7 +252,7 @@ public class QuotaCache implements Stoppable { @Override public Map fetchEntries(final List gets) throws IOException { - return QuotaUtil.fetchTableQuotas(QuotaCache.this.getConfiguration(), gets); + return QuotaUtil.fetchTableQuotas(rsServices.getConnection(), gets); } }); } @@ -272,7 +269,7 @@ public class QuotaCache implements Stoppable { @Override public Map fetchEntries(final List gets) throws IOException { - return QuotaUtil.fetchUserQuotas(QuotaCache.this.getConfiguration(), gets); + return QuotaUtil.fetchUserQuotas(rsServices.getConnection(), gets); } }); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaUtil.java index 5db30eb..c4d76bc 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaUtil.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaUtil.java @@ -19,15 +19,12 @@ package org.apache.hadoop.hbase.quotas; import java.io.IOException; - import java.util.HashMap; import java.util.List; import java.util.Map; import org.apache.commons.logging.Log; import 
org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hbase.classification.InterfaceAudience; -import org.apache.hadoop.hbase.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.HColumnDescriptor; @@ -35,18 +32,19 @@ import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.KeyValueUtil; import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.client.HTable; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; +import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.Delete; import org.apache.hadoop.hbase.client.Get; import org.apache.hadoop.hbase.client.Mutation; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Result; -import org.apache.hadoop.hbase.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas; import org.apache.hadoop.hbase.regionserver.BloomType; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; -import org.apache.hadoop.security.UserGroupInformation; /** * Helper class to interact with the quota table @@ -85,90 +83,89 @@ public class QuotaUtil extends QuotaTableUtil { /* ========================================================================= * Quota "settings" helpers */ - public static void addTableQuota(final Configuration conf, final TableName table, + public static void addTableQuota(final Connection connection, final TableName table, final Quotas data) throws IOException { - addQuotas(conf, getTableRowKey(table), data); + addQuotas(connection, getTableRowKey(table), data); } - public static void deleteTableQuota(final Configuration conf, final TableName table) + public static void deleteTableQuota(final Connection connection, final TableName table) throws IOException { - deleteQuotas(conf, getTableRowKey(table)); + deleteQuotas(connection, getTableRowKey(table)); } - public static void addNamespaceQuota(final Configuration conf, final String namespace, + public static void addNamespaceQuota(final Connection connection, final String namespace, final Quotas data) throws IOException { - addQuotas(conf, getNamespaceRowKey(namespace), data); + addQuotas(connection, getNamespaceRowKey(namespace), data); } - public static void deleteNamespaceQuota(final Configuration conf, final String namespace) + public static void deleteNamespaceQuota(final Connection connection, final String namespace) throws IOException { - deleteQuotas(conf, getNamespaceRowKey(namespace)); + deleteQuotas(connection, getNamespaceRowKey(namespace)); } - public static void addUserQuota(final Configuration conf, final String user, + public static void addUserQuota(final Connection connection, final String user, final Quotas data) throws IOException { - addQuotas(conf, getUserRowKey(user), data); + addQuotas(connection, getUserRowKey(user), data); } - public static void addUserQuota(final Configuration conf, final String user, + public static void addUserQuota(final Connection connection, final String user, final TableName table, final Quotas data) throws IOException { - addQuotas(conf, getUserRowKey(user), - getSettingsQualifierForUserTable(table), data); + addQuotas(connection, getUserRowKey(user), getSettingsQualifierForUserTable(table), data); 
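For illustration only: with QuotaUtil taking a Connection, every settings helper reduces to opening the quota table briefly and letting try-with-resources close it. A sketch of that put/delete shape; the class is made up and the quota table name "hbase:quota" is assumed:

    import java.io.IOException;

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.Delete;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;

    public final class QuotaTableOps {
      private static final TableName QUOTA_TABLE = TableName.valueOf("hbase:quota");

      private QuotaTableOps() {}

      // Open the quota table just long enough to apply the mutation.
      static void doPut(Connection connection, Put put) throws IOException {
        try (Table table = connection.getTable(QUOTA_TABLE)) {
          table.put(put);
        }
      }

      static void doDelete(Connection connection, Delete delete) throws IOException {
        try (Table table = connection.getTable(QUOTA_TABLE)) {
          table.delete(delete);
        }
      }
    }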
} - public static void addUserQuota(final Configuration conf, final String user, + public static void addUserQuota(final Connection connection, final String user, final String namespace, final Quotas data) throws IOException { - addQuotas(conf, getUserRowKey(user), + addQuotas(connection, getUserRowKey(user), getSettingsQualifierForUserNamespace(namespace), data); } - public static void deleteUserQuota(final Configuration conf, final String user) + public static void deleteUserQuota(final Connection connection, final String user) throws IOException { - deleteQuotas(conf, getUserRowKey(user)); + deleteQuotas(connection, getUserRowKey(user)); } - public static void deleteUserQuota(final Configuration conf, final String user, + public static void deleteUserQuota(final Connection connection, final String user, final TableName table) throws IOException { - deleteQuotas(conf, getUserRowKey(user), + deleteQuotas(connection, getUserRowKey(user), getSettingsQualifierForUserTable(table)); } - public static void deleteUserQuota(final Configuration conf, final String user, + public static void deleteUserQuota(final Connection connection, final String user, final String namespace) throws IOException { - deleteQuotas(conf, getUserRowKey(user), + deleteQuotas(connection, getUserRowKey(user), getSettingsQualifierForUserNamespace(namespace)); } - private static void addQuotas(final Configuration conf, final byte[] rowKey, + private static void addQuotas(final Connection connection, final byte[] rowKey, final Quotas data) throws IOException { - addQuotas(conf, rowKey, QUOTA_QUALIFIER_SETTINGS, data); + addQuotas(connection, rowKey, QUOTA_QUALIFIER_SETTINGS, data); } - private static void addQuotas(final Configuration conf, final byte[] rowKey, + private static void addQuotas(final Connection connection, final byte[] rowKey, final byte[] qualifier, final Quotas data) throws IOException { Put put = new Put(rowKey); put.add(QUOTA_FAMILY_INFO, qualifier, quotasToData(data)); - doPut(conf, put); + doPut(connection, put); } - private static void deleteQuotas(final Configuration conf, final byte[] rowKey) + private static void deleteQuotas(final Connection connection, final byte[] rowKey) throws IOException { - deleteQuotas(conf, rowKey, null); + deleteQuotas(connection, rowKey, null); } - private static void deleteQuotas(final Configuration conf, final byte[] rowKey, + private static void deleteQuotas(final Connection connection, final byte[] rowKey, final byte[] qualifier) throws IOException { Delete delete = new Delete(rowKey); if (qualifier != null) { delete.deleteColumns(QUOTA_FAMILY_INFO, qualifier); } - doDelete(conf, delete); + doDelete(connection, delete); } - public static Map fetchUserQuotas(final Configuration conf, + public static Map fetchUserQuotas(final Connection connection, final List gets) throws IOException { long nowTs = EnvironmentEdgeManager.currentTime(); - Result[] results = doGet(conf, gets); + Result[] results = doGet(connection, gets); Map userQuotas = new HashMap(results.length); for (int i = 0; i < results.length; ++i) { @@ -207,9 +204,9 @@ public class QuotaUtil extends QuotaTableUtil { return userQuotas; } - public static Map fetchTableQuotas(final Configuration conf, + public static Map fetchTableQuotas(final Connection connection, final List gets) throws IOException { - return fetchGlobalQuotas("table", conf, gets, new KeyFromRow() { + return fetchGlobalQuotas("table", connection, gets, new KeyFromRow() { @Override public TableName getKeyFromRow(final byte[] row) { assert 
isTableRowKey(row); @@ -218,9 +215,9 @@ public class QuotaUtil extends QuotaTableUtil { }); } - public static Map fetchNamespaceQuotas(final Configuration conf, + public static Map fetchNamespaceQuotas(final Connection connection, final List gets) throws IOException { - return fetchGlobalQuotas("namespace", conf, gets, new KeyFromRow() { + return fetchGlobalQuotas("namespace", connection, gets, new KeyFromRow() { @Override public String getKeyFromRow(final byte[] row) { assert isNamespaceRowKey(row); @@ -230,9 +227,9 @@ public class QuotaUtil extends QuotaTableUtil { } public static Map fetchGlobalQuotas(final String type, - final Configuration conf, final List gets, final KeyFromRow kfr) throws IOException { + final Connection connection, final List gets, final KeyFromRow kfr) throws IOException { long nowTs = EnvironmentEdgeManager.currentTime(); - Result[] results = doGet(conf, gets); + Result[] results = doGet(connection, gets); Map globalQuotas = new HashMap(results.length); for (int i = 0; i < results.length; ++i) { @@ -266,23 +263,17 @@ public class QuotaUtil extends QuotaTableUtil { /* ========================================================================= * HTable helpers */ - private static void doPut(final Configuration conf, final Put put) - throws IOException { - HTable table = new HTable(conf, QuotaUtil.QUOTA_TABLE_NAME); - try { + private static void doPut(final Connection connection, final Put put) + throws IOException { + try (Table table = connection.getTable(QuotaUtil.QUOTA_TABLE_NAME)) { table.put(put); - } finally { - table.close(); } } - private static void doDelete(final Configuration conf, final Delete delete) - throws IOException { - HTable table = new HTable(conf, QuotaUtil.QUOTA_TABLE_NAME); - try { + private static void doDelete(final Connection connection, final Delete delete) + throws IOException { + try (Table table = connection.getTable(QuotaUtil.QUOTA_TABLE_NAME)) { table.delete(delete); - } finally { - table.close(); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java index 45e5558..4eb6afa 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java @@ -73,6 +73,8 @@ import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.YouAreDeadException; import org.apache.hadoop.hbase.ZNodeClearer; import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.ConnectionUtils; import org.apache.hadoop.hbase.client.HConnection; import org.apache.hadoop.hbase.client.HConnectionManager; @@ -211,6 +213,11 @@ public class HRegionServer extends HasThread implements */ protected HConnection shortCircuitConnection; + /** + * A cluster connection shared by utilities that need to talk out on the cluster. + */ + protected Connection connection; + /* * Long-living meta table locator, which is created when the server is started and stopped * when server shuts down. 
References to this locator shall be used to perform according @@ -609,7 +616,7 @@ public class HRegionServer extends HasThread implements */ protected HConnection createShortCircuitConnection() throws IOException { return ConnectionUtils.createShortCircuitHConnection( - HConnectionManager.getConnection(conf), serverName, rpcServices, rpcServices); + ConnectionFactory.createConnection(conf), serverName, rpcServices, rpcServices); } /** @@ -633,6 +640,16 @@ public class HRegionServer extends HasThread implements return this.clusterId; } + protected synchronized void setupClusterConnections() throws IOException { + if (shortCircuitConnection == null) { + shortCircuitConnection = createShortCircuitConnection(); + metaTableLocator = new MetaTableLocator(); + } + if (this.connection == null) { + this.connection = ConnectionFactory.createConnection(this.conf); + } + } + /** * All initialization needed before we go register with Master. * @@ -641,12 +658,7 @@ public class HRegionServer extends HasThread implements */ private void preRegistrationInitialization(){ try { - synchronized (this) { - if (shortCircuitConnection == null) { - shortCircuitConnection = createShortCircuitConnection(); - metaTableLocator = new MetaTableLocator(); - } - } + setupClusterConnections(); // Health checker thread. if (isHealthCheckerConfigured()) { @@ -950,7 +962,14 @@ public class HRegionServer extends HasThread implements } catch (IOException e) { // Although the {@link Closeable} interface throws an {@link // IOException}, in reality, the implementation would never do that. - LOG.error("Attempt to close server's short circuit HConnection failed.", e); + LOG.warn("Attempt to close server's short circuit HConnection failed.", e); + } + } + if (this.connection != null && !this.connection.isClosed()) { + try { + this.connection.close(); + } catch (IOException e) { + LOG.warn("Failed lose connection", e); } } @@ -1769,6 +1788,11 @@ public class HRegionServer extends HasThread implements } @Override + public Connection getConnection() { + return this.connection; + } + + @Override public MetaTableLocator getMetaTableLocator() { return this.metaTableLocator; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSyncUp.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSyncUp.java index 0408231..70d7023 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSyncUp.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSyncUp.java @@ -29,6 +29,7 @@ import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.CoordinatedStateManager; +import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.HConnection; import org.apache.hadoop.hbase.util.FSUtils; import org.apache.hadoop.hbase.zookeeper.MetaTableLocator; @@ -181,5 +182,11 @@ public class ReplicationSyncUp extends Configured implements Tool { public boolean isStopped() { return false; } + + @Override + public Connection getConnection() { + // TODO Auto-generated method stub + return null; + } } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlLists.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlLists.java index 5ca1164..4af28b2 100644 --- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlLists.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlLists.java @@ -33,7 +33,6 @@ import java.util.TreeSet; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; @@ -44,9 +43,11 @@ import org.apache.hadoop.hbase.NamespaceDescriptor; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.Tag; import org.apache.hadoop.hbase.TagType; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.Delete; import org.apache.hadoop.hbase.client.Get; -import org.apache.hadoop.hbase.client.HTable; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.ResultScanner; @@ -170,12 +171,11 @@ public class AccessControlLists { Bytes.toString(key)+": "+Bytes.toStringBinary(value) ); } - Table acls = null; - try { - acls = new HTable(conf, ACL_TABLE_NAME); - acls.put(p); - } finally { - if (acls != null) acls.close(); + // TODO: Pass in a Connection rather than create one each time. + try (Connection connection = ConnectionFactory.createConnection(conf)) { + try (Table table = connection.getTable(ACL_TABLE_NAME)) { + table.put(p); + } } } @@ -200,13 +200,12 @@ public class AccessControlLists { if (LOG.isDebugEnabled()) { LOG.debug("Removing permission "+ userPerm.toString()); } - d.deleteColumns(ACL_LIST_FAMILY, key); - Table acls = null; - try { - acls = new HTable(conf, ACL_TABLE_NAME); - acls.delete(d); - } finally { - if (acls != null) acls.close(); + d.addColumns(ACL_LIST_FAMILY, key); + // TODO: Pass in a Connection rather than create one each time. + try (Connection connection = ConnectionFactory.createConnection(conf)) { + try (Table table = connection.getTable(ACL_TABLE_NAME)) { + table.delete(d); + } } } @@ -220,13 +219,11 @@ public class AccessControlLists { if (LOG.isDebugEnabled()) { LOG.debug("Removing permissions of removed table "+ tableName); } - - Table acls = null; - try { - acls = new HTable(conf, ACL_TABLE_NAME); - acls.delete(d); - } finally { - if (acls != null) acls.close(); + // TODO: Pass in a Connection rather than create one each time. 
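Throughout this file the ACL helpers now open a Connection per call, which is why each hunk carries the same TODO about eventually passing one in: Connections are relatively heavyweight, while Table handles are cheap. A minimal sketch of the per-call pattern, assuming the ACL_TABLE_NAME constant used above (the helper name is hypothetical):

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;

public class AclWriteSketch {
  // Hypothetical stand-in for AccessControlLists.ACL_TABLE_NAME ("hbase:acl").
  static final TableName ACL_TABLE_NAME = TableName.valueOf("hbase:acl");

  static void writeAclRow(Configuration conf, Put p) throws IOException {
    // A Connection is created and closed per invocation; the nested Table is
    // scoped to the single write, mirroring the try-with-resources blocks above.
    try (Connection connection = ConnectionFactory.createConnection(conf)) {
      try (Table table = connection.getTable(ACL_TABLE_NAME)) {
        table.put(p);
      }
    }
  }
}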
+ try (Connection connection = ConnectionFactory.createConnection(conf)) { + try (Table table = connection.getTable(ACL_TABLE_NAME)) { + table.delete(d); + } } } @@ -241,12 +238,10 @@ public class AccessControlLists { LOG.debug("Removing permissions of removed namespace "+ namespace); } - Table acls = null; - try { - acls = new HTable(conf, ACL_TABLE_NAME); - acls.delete(d); - } finally { - if (acls != null) acls.close(); + try (Connection connection = ConnectionFactory.createConnection(conf)) { + try (Table table = connection.getTable(ACL_TABLE_NAME)) { + table.delete(d); + } } } @@ -260,41 +255,38 @@ public class AccessControlLists { LOG.debug("Removing permissions of removed column " + Bytes.toString(column) + " from table "+ tableName); } - - Table acls = null; - try { - acls = new HTable(conf, ACL_TABLE_NAME); - - Scan scan = new Scan(); - scan.addFamily(ACL_LIST_FAMILY); - - String columnName = Bytes.toString(column); - scan.setFilter(new QualifierFilter(CompareOp.EQUAL, new RegexStringComparator( - String.format("(%s%s%s)|(%s%s)$", - ACL_KEY_DELIMITER, columnName, ACL_KEY_DELIMITER, - ACL_KEY_DELIMITER, columnName)))); - - Set qualifierSet = new TreeSet(Bytes.BYTES_COMPARATOR); - ResultScanner scanner = acls.getScanner(scan); - try { - for (Result res : scanner) { - for (byte[] q : res.getFamilyMap(ACL_LIST_FAMILY).navigableKeySet()) { - qualifierSet.add(q); + // TODO: Pass in a Connection rather than create one each time. + try (Connection connection = ConnectionFactory.createConnection(conf)) { + try (Table table = connection.getTable(ACL_TABLE_NAME)) { + Scan scan = new Scan(); + scan.addFamily(ACL_LIST_FAMILY); + + String columnName = Bytes.toString(column); + scan.setFilter(new QualifierFilter(CompareOp.EQUAL, new RegexStringComparator( + String.format("(%s%s%s)|(%s%s)$", + ACL_KEY_DELIMITER, columnName, ACL_KEY_DELIMITER, + ACL_KEY_DELIMITER, columnName)))); + + Set qualifierSet = new TreeSet(Bytes.BYTES_COMPARATOR); + ResultScanner scanner = table.getScanner(scan); + try { + for (Result res : scanner) { + for (byte[] q : res.getFamilyMap(ACL_LIST_FAMILY).navigableKeySet()) { + qualifierSet.add(q); + } } + } finally { + scanner.close(); } - } finally { - scanner.close(); - } - if (qualifierSet.size() > 0) { - Delete d = new Delete(tableName.getName()); - for (byte[] qualifier : qualifierSet) { - d.deleteColumns(ACL_LIST_FAMILY, qualifier); + if (qualifierSet.size() > 0) { + Delete d = new Delete(tableName.getName()); + for (byte[] qualifier : qualifierSet) { + d.addColumns(ACL_LIST_FAMILY, qualifier); + } + table.delete(d); } - acls.delete(d); } - } finally { - if (acls != null) acls.close(); } } @@ -422,19 +414,20 @@ public class AccessControlLists { Scan scan = new Scan(); scan.addFamily(ACL_LIST_FAMILY); - Table acls = null; ResultScanner scanner = null; - try { - acls = new HTable(conf, ACL_TABLE_NAME); - scanner = acls.getScanner(scan); - for (Result row : scanner) { - ListMultimap resultPerms = - parsePermissions(row.getRow(), row); - allPerms.put(row.getRow(), resultPerms); + // TODO: Pass in a Connection rather than create one each time. 
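The column-removal path scans the ACL table for qualifiers that embed or end with the dropped column name and then deletes them, using Delete.addColumns in place of the older deleteColumns. Stripped of the surrounding class, the logic looks roughly like the following; ACL_LIST_FAMILY and ACL_KEY_DELIMITER are stand-ins for the AccessControlLists constants referenced above, and the method name is hypothetical.

import java.io.IOException;
import java.util.Set;
import java.util.TreeSet;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
import org.apache.hadoop.hbase.filter.QualifierFilter;
import org.apache.hadoop.hbase.filter.RegexStringComparator;
import org.apache.hadoop.hbase.util.Bytes;

public class AclColumnCleanupSketch {
  // Hypothetical stand-ins for the AccessControlLists constants used above.
  static final byte[] ACL_LIST_FAMILY = Bytes.toBytes("l");
  static final char ACL_KEY_DELIMITER = ',';

  static void removeColumnPerms(Connection connection, TableName aclTable,
      TableName table, byte[] column) throws IOException {
    String columnName = Bytes.toString(column);
    Scan scan = new Scan();
    scan.addFamily(ACL_LIST_FAMILY);
    // Match qualifiers that embed or end with the removed column name.
    scan.setFilter(new QualifierFilter(CompareOp.EQUAL, new RegexStringComparator(
        String.format("(%s%s%s)|(%s%s)$",
            ACL_KEY_DELIMITER, columnName, ACL_KEY_DELIMITER,
            ACL_KEY_DELIMITER, columnName))));
    Set<byte[]> qualifiers = new TreeSet<byte[]>(Bytes.BYTES_COMPARATOR);
    try (Table acls = connection.getTable(aclTable);
        ResultScanner scanner = acls.getScanner(scan)) {
      for (Result res : scanner) {
        qualifiers.addAll(res.getFamilyMap(ACL_LIST_FAMILY).navigableKeySet());
      }
      if (!qualifiers.isEmpty()) {
        Delete d = new Delete(table.getName());
        for (byte[] qualifier : qualifiers) {
          d.addColumns(ACL_LIST_FAMILY, qualifier); // addColumns replaces deleteColumns
        }
        acls.delete(d);
      }
    }
  }
}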
+ try (Connection connection = ConnectionFactory.createConnection(conf)) { + try (Table table = connection.getTable(ACL_TABLE_NAME)) { + scanner = table.getScanner(scan); + try { + for (Result row : scanner) { + ListMultimap resultPerms = parsePermissions(row.getRow(), row); + allPerms.put(row.getRow(), resultPerms); + } + } finally { + if (scanner != null) scanner.close(); + } } - } finally { - if (scanner != null) scanner.close(); - if (acls != null) acls.close(); } return allPerms; @@ -465,20 +458,19 @@ public class AccessControlLists { // for normal user tables, we just read the table row from _acl_ ListMultimap perms = ArrayListMultimap.create(); - Table acls = null; - try { - acls = new HTable(conf, ACL_TABLE_NAME); - Get get = new Get(entryName); - get.addFamily(ACL_LIST_FAMILY); - Result row = acls.get(get); - if (!row.isEmpty()) { - perms = parsePermissions(entryName, row); - } else { - LOG.info("No permissions found in " + ACL_TABLE_NAME + " for acl entry " - + Bytes.toString(entryName)); + // TODO: Pass in a Connection rather than create one each time. + try (Connection connection = ConnectionFactory.createConnection(conf)) { + try (Table table = connection.getTable(ACL_TABLE_NAME)) { + Get get = new Get(entryName); + get.addFamily(ACL_LIST_FAMILY); + Result row = table.get(get); + if (!row.isEmpty()) { + perms = parsePermissions(entryName, row); + } else { + LOG.info("No permissions found in " + ACL_TABLE_NAME + " for acl entry " + + Bytes.toString(entryName)); + } } - } finally { - if (acls != null) acls.close(); } return perms; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/TokenUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/TokenUtil.java index 3a37049..e3c4f53 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/TokenUtil.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/TokenUtil.java @@ -23,13 +23,15 @@ import java.lang.reflect.UndeclaredThrowableException; import java.security.PrivilegedExceptionAction; import com.google.protobuf.ServiceException; + import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.client.HTable; +import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel; import org.apache.hadoop.hbase.protobuf.ProtobufUtil; @@ -45,6 +47,7 @@ import org.apache.hadoop.security.token.Token; */ @InterfaceAudience.Private public class TokenUtil { + // This class is referenced indirectly by User out in common; instances are created by reflection private static Log LOG = LogFactory.getLog(TokenUtil.class); /** @@ -54,21 +57,19 @@ public class TokenUtil { */ public static Token obtainToken( Configuration conf) throws IOException { - Table meta = null; - try { - meta = new HTable(conf, TableName.META_TABLE_NAME); - CoprocessorRpcChannel rpcChannel = meta.coprocessorService(HConstants.EMPTY_START_ROW); - AuthenticationProtos.AuthenticationService.BlockingInterface service = - AuthenticationProtos.AuthenticationService.newBlockingStub(rpcChannel); - AuthenticationProtos.GetAuthenticationTokenResponse response = service.getAuthenticationToken(null, - 
AuthenticationProtos.GetAuthenticationTokenRequest.getDefaultInstance()); + // TODO: Pass in a Connection to used. Will this even work? + try (Connection connection = ConnectionFactory.createConnection(conf)) { + try (Table meta = connection.getTable(TableName.META_TABLE_NAME)) { + CoprocessorRpcChannel rpcChannel = meta.coprocessorService(HConstants.EMPTY_START_ROW); + AuthenticationProtos.AuthenticationService.BlockingInterface service = + AuthenticationProtos.AuthenticationService.newBlockingStub(rpcChannel); + AuthenticationProtos.GetAuthenticationTokenResponse response = + service.getAuthenticationToken(null, + AuthenticationProtos.GetAuthenticationTokenRequest.getDefaultInstance()); - return ProtobufUtil.toToken(response.getToken()); - } catch (ServiceException se) { - ProtobufUtil.toIOException(se); - } finally { - if (meta != null) { - meta.close(); + return ProtobufUtil.toToken(response.getToken()); + } catch (ServiceException se) { + ProtobufUtil.toIOException(se); } } // dummy return for ServiceException catch block diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java index 539ba70..23673b6 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java @@ -19,6 +19,7 @@ package org.apache.hadoop.hbase.tool; +import java.io.Closeable; import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; @@ -39,15 +40,17 @@ import org.apache.hadoop.hbase.DoNotRetryIOException; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableNotEnabledException; import org.apache.hadoop.hbase.TableNotFoundException; import org.apache.hadoop.hbase.client.Admin; +import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.Get; -import org.apache.hadoop.hbase.client.HBaseAdmin; -import org.apache.hadoop.hbase.client.HTable; +import org.apache.hadoop.hbase.client.RegionLocator; import org.apache.hadoop.hbase.client.ResultScanner; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.client.Table; @@ -228,51 +231,59 @@ public final class Canary implements Tool { } } - // start to prepare the stuffs + // Start to prepare the stuffs Monitor monitor = null; Thread monitorThread = null; long startTime = 0; long currentTimeLength = 0; + // Get a connection to use in below. + // try-with-resources jdk7 construct. See + // http://docs.oracle.com/javase/tutorial/essential/exceptions/tryResourceClose.html + try (Connection connection = ConnectionFactory.createConnection(this.conf)) { + do { + // Do monitor !! 
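With the Canary now holding a single Connection for the whole run, each Monitor borrows an Admin from it in initAdmin and releases only the Admin in close(); the shared Connection is closed by the try-with-resources block in run(). A minimal sketch of that shape (class name hypothetical, error bookkeeping elided):

import java.io.Closeable;
import java.io.IOException;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;

abstract class MonitorSketch implements Runnable, Closeable {
  protected final Connection connection;
  protected Admin admin;

  protected MonitorSketch(Connection connection) {
    if (connection == null) throw new IllegalArgumentException("connection shall not be null");
    this.connection = connection;
  }

  // Lazily borrow an Admin from the shared Connection, as Monitor#initAdmin does above.
  protected boolean initAdmin() {
    if (this.admin == null) {
      try {
        this.admin = this.connection.getAdmin();
      } catch (IOException e) {
        return false; // the real Monitor records an init error code here
      }
    }
    return true;
  }

  @Override
  public void close() throws IOException {
    // Only the Admin is owned by the monitor; the Connection belongs to the caller.
    if (this.admin != null) this.admin.close();
  }
}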
+ try { + monitor = this.newMonitor(connection, index, args); + monitorThread = new Thread(monitor); + startTime = System.currentTimeMillis(); + monitorThread.start(); + while (!monitor.isDone()) { + // wait for 1 sec + Thread.sleep(1000); + // exit if any error occurs + if (this.failOnError && monitor.hasError()) { + monitorThread.interrupt(); + if (monitor.initialized) { + System.exit(monitor.errorCode); + } else { + System.exit(INIT_ERROR_EXIT_CODE); + } + } + currentTimeLength = System.currentTimeMillis() - startTime; + if (currentTimeLength > this.timeout) { + LOG.error("The monitor is running too long (" + currentTimeLength + + ") after timeout limit:" + this.timeout + + " will be killed itself !!"); + if (monitor.initialized) { + System.exit(TIMEOUT_ERROR_EXIT_CODE); + } else { + System.exit(INIT_ERROR_EXIT_CODE); + } + break; + } + } - do { - // do monitor !! - monitor = this.newMonitor(index, args); - monitorThread = new Thread(monitor); - startTime = System.currentTimeMillis(); - monitorThread.start(); - while (!monitor.isDone()) { - // wait for 1 sec - Thread.sleep(1000); - // exit if any error occurs - if (this.failOnError && monitor.hasError()) { - monitorThread.interrupt(); - if (monitor.initialized) { + if (this.failOnError && monitor.hasError()) { + monitorThread.interrupt(); System.exit(monitor.errorCode); - } else { - System.exit(INIT_ERROR_EXIT_CODE); - } - } - currentTimeLength = System.currentTimeMillis() - startTime; - if (currentTimeLength > this.timeout) { - LOG.error("The monitor is running too long (" + currentTimeLength - + ") after timeout limit:" + this.timeout - + " will be killed itself !!"); - if (monitor.initialized) { - System.exit(TIMEOUT_ERROR_EXIT_CODE); - } else { - System.exit(INIT_ERROR_EXIT_CODE); } - break; + } finally { + if (monitor != null) monitor.close(); } - } - - if (this.failOnError && monitor.hasError()) { - monitorThread.interrupt(); - System.exit(monitor.errorCode); - } - Thread.sleep(interval); - } while (interval > 0); + Thread.sleep(interval); + } while (interval > 0); + } // try-with-resources close return(monitor.errorCode); } @@ -296,13 +307,13 @@ public final class Canary implements Tool { } /** - * a Factory method for {@link Monitor}. - * Can be overrided by user. + * A Factory method for {@link Monitor}. + * Can be overridden by user. 
* @param index a start index for monitor target * @param args args passed from user * @return a Monitor instance */ - public Monitor newMonitor(int index, String[] args) { + public Monitor newMonitor(final Connection connection, int index, String[] args) { Monitor monitor = null; String[] monitorTargets = null; @@ -314,20 +325,20 @@ public final class Canary implements Tool { if(this.regionServerMode) { monitor = new RegionServerMonitor( - this.conf, + connection, monitorTargets, this.useRegExp, (ExtendedSink)this.sink); } else { - monitor = new RegionMonitor(this.conf, monitorTargets, this.useRegExp, this.sink); + monitor = new RegionMonitor(connection, monitorTargets, this.useRegExp, this.sink); } return monitor; } // a Monitor super-class can be extended by users - public static abstract class Monitor implements Runnable { + public static abstract class Monitor implements Runnable, Closeable { - protected Configuration config; + protected Connection connection; protected Admin admin; protected String[] targets; protected boolean useRegExp; @@ -345,12 +356,16 @@ public final class Canary implements Tool { return errorCode != 0; } - protected Monitor(Configuration config, String[] monitorTargets, + @Override + public void close() throws IOException { + if (this.admin != null) this.admin.close(); + } + + protected Monitor(Connection connection, String[] monitorTargets, boolean useRegExp, Sink sink) { - if (null == config) - throw new IllegalArgumentException("config shall not be null"); + if (null == connection) throw new IllegalArgumentException("connection shall not be null"); - this.config = config; + this.connection = connection; this.targets = monitorTargets; this.useRegExp = useRegExp; this.sink = sink; @@ -361,7 +376,7 @@ public final class Canary implements Tool { protected boolean initAdmin() { if (null == this.admin) { try { - this.admin = new HBaseAdmin(config); + this.admin = this.connection.getAdmin(); } catch (Exception e) { LOG.error("Initial HBaseAdmin failed...", e); this.errorCode = INIT_ERROR_EXIT_CODE; @@ -377,9 +392,9 @@ public final class Canary implements Tool { // a monitor for region mode private static class RegionMonitor extends Monitor { - public RegionMonitor(Configuration config, String[] monitorTargets, + public RegionMonitor(Connection connection, String[] monitorTargets, boolean useRegExp, Sink sink) { - super(config, monitorTargets, useRegExp, sink); + super(connection, monitorTargets, useRegExp, sink); } @Override @@ -481,7 +496,7 @@ public final class Canary implements Tool { Table table = null; try { - table = new HTable(admin.getConfiguration(), tableDesc.getTableName()); + table = admin.getConnection().getTable(tableDesc.getTableName()); } catch (TableNotFoundException e) { return; } @@ -556,9 +571,9 @@ public final class Canary implements Tool { //a monitor for regionserver mode private static class RegionServerMonitor extends Monitor { - public RegionServerMonitor(Configuration config, String[] monitorTargets, + public RegionServerMonitor(Connection connection, String[] monitorTargets, boolean useRegExp, ExtendedSink sink) { - super(config, monitorTargets, useRegExp, sink); + super(connection, monitorTargets, useRegExp, sink); } private ExtendedSink getSink() { @@ -622,7 +637,7 @@ public final class Canary implements Tool { region = entry.getValue().get(0); try { tableName = region.getTable(); - table = new HTable(this.admin.getConfiguration(), tableName); + table = admin.getConnection().getTable(tableName); startKey = region.getStartKey(); // Can't 
do a get on empty start row so do a Scan of first element if any instead. if(startKey.length > 0) { @@ -675,18 +690,19 @@ public final class Canary implements Tool { private Map> getAllRegionServerByName() { Map> rsAndRMap = new HashMap>(); - HTable table = null; + Table table = null; + RegionLocator regionLocator = null; try { HTableDescriptor[] tableDescs = this.admin.listTables(); List regions = null; for (HTableDescriptor tableDesc : tableDescs) { - table = new HTable(this.admin.getConfiguration(), tableDesc.getTableName()); + table = this.admin.getConnection().getTable(tableDesc.getTableName()); + regionLocator = this.admin.getConnection().getRegionLocator(tableDesc.getTableName()); - for (Map.Entry entry : table - .getRegionLocations().entrySet()) { - ServerName rs = entry.getValue(); + for (HRegionLocation location: regionLocator.getAllRegionLocations()) { + ServerName rs = location.getServerName(); String rsName = rs.getHostname(); - HRegionInfo r = entry.getKey(); + HRegionInfo r = location.getRegionInfo(); if (rsAndRMap.containsKey(rsName)) { regions = rsAndRMap.get(rsName); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java index 0a75a16..db7bc5b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java @@ -21,11 +21,11 @@ package org.apache.hadoop.hbase.util; import java.io.IOException; import java.math.BigInteger; import java.util.Arrays; +import java.util.Collection; import java.util.Collections; import java.util.Comparator; import java.util.LinkedList; import java.util.List; -import java.util.Map; import java.util.Set; import java.util.TreeMap; @@ -39,23 +39,28 @@ import org.apache.commons.lang.ArrayUtils; import org.apache.commons.lang.StringUtils; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.ClusterStatus; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.MetaTableAccessor; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.MetaTableAccessor; -import org.apache.hadoop.hbase.client.HBaseAdmin; -import org.apache.hadoop.hbase.client.HTable; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.client.Admin; +import org.apache.hadoop.hbase.client.ClusterConnection; +import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.NoServerForRegionException; +import org.apache.hadoop.hbase.client.RegionLocator; +import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.regionserver.HRegionFileSystem; import com.google.common.base.Preconditions; @@ -332,11 +337,11 @@ public class RegionSplitter { if (2 != cmd.getArgList().size() || !oneOperOnly || cmd.hasOption("h")) { new 
HelpFormatter().printHelp("RegionSplitter \n"+ - "SPLITALGORITHM is a java class name of a class implementing " + - "SplitAlgorithm, or one of the special strings HexStringSplit " + - "or UniformSplit, which are built-in split algorithms. " + - "HexStringSplit treats keys as hexadecimal ASCII, and " + - "UniformSplit treats keys as arbitrary bytes.", opt); + "SPLITALGORITHM is a java class name of a class implementing " + + "SplitAlgorithm, or one of the special strings HexStringSplit " + + "or UniformSplit, which are built-in split algorithms. " + + "HexStringSplit treats keys as hexadecimal ASCII, and " + + "UniformSplit treats keys as arbitrary bytes.", opt); return; } TableName tableName = TableName.valueOf(cmd.getArgs()[0]); @@ -364,8 +369,8 @@ public class RegionSplitter { } static void createPresplitTable(TableName tableName, SplitAlgorithm splitAlgo, - String[] columnFamilies, Configuration conf) throws IOException, - InterruptedException { + String[] columnFamilies, Configuration conf) + throws IOException, InterruptedException { final int splitCount = conf.getInt("split.count", 0); Preconditions.checkArgument(splitCount > 1, "Split count must be > 1"); @@ -378,237 +383,260 @@ public class RegionSplitter { for (String cf : columnFamilies) { desc.addFamily(new HColumnDescriptor(Bytes.toBytes(cf))); } - HBaseAdmin admin = new HBaseAdmin(conf); - try { - Preconditions.checkArgument(!admin.tableExists(tableName), - "Table already exists: " + tableName); - admin.createTable(desc, splitAlgo.split(splitCount)); - } finally { - admin.close(); - } - LOG.debug("Table created! Waiting for regions to show online in META..."); - if (!conf.getBoolean("split.verify", true)) { - // NOTE: createTable is synchronous on the table, but not on the regions - int onlineRegions = 0; - while (onlineRegions < splitCount) { - onlineRegions = MetaTableAccessor.getRegionCount(conf, tableName); - LOG.debug(onlineRegions + " of " + splitCount + " regions online..."); - if (onlineRegions < splitCount) { - Thread.sleep(10 * 1000); // sleep + try (Connection connection = ConnectionFactory.createConnection(conf)) { + Admin admin = connection.getAdmin(); + try { + Preconditions.checkArgument(!admin.tableExists(tableName), + "Table already exists: " + tableName); + admin.createTable(desc, splitAlgo.split(splitCount)); + } finally { + admin.close(); + } + LOG.debug("Table created! Waiting for regions to show online in META..."); + if (!conf.getBoolean("split.verify", true)) { + // NOTE: createTable is synchronous on the table, but not on the regions + int onlineRegions = 0; + while (onlineRegions < splitCount) { + onlineRegions = MetaTableAccessor.getRegionCount(connection, tableName); + LOG.debug(onlineRegions + " of " + splitCount + " regions online..."); + if (onlineRegions < splitCount) { + Thread.sleep(10 * 1000); // sleep + } } } + LOG.debug("Finished creating table with " + splitCount + " regions"); } - - LOG.debug("Finished creating table with " + splitCount + " regions"); } - static void rollingSplit(TableName tableName, SplitAlgorithm splitAlgo, - Configuration conf) throws IOException, InterruptedException { - final int minOS = conf.getInt("split.outstanding", 2); - - HTable table = new HTable(conf, tableName); - - // max outstanding splits. 
default == 50% of servers - final int MAX_OUTSTANDING = - Math.max(table.getConnection().getCurrentNrHRS() / 2, minOS); - - Path hbDir = FSUtils.getRootDir(conf); - Path tableDir = FSUtils.getTableDir(hbDir, table.getName()); - Path splitFile = new Path(tableDir, "_balancedSplit"); - FileSystem fs = FileSystem.get(conf); - - // get a list of daughter regions to create - LinkedList> tmpRegionSet = getSplits(table, splitAlgo); - LinkedList> outstanding = Lists.newLinkedList(); - int splitCount = 0; - final int origCount = tmpRegionSet.size(); - - // all splits must compact & we have 1 compact thread, so 2 split - // requests to the same RS can stall the outstanding split queue. - // To fix, group the regions into an RS pool and round-robin through it - LOG.debug("Bucketing regions by regionserver..."); - TreeMap>> daughterRegions = - Maps.newTreeMap(); - for (Pair dr : tmpRegionSet) { - String rsLocation = table.getRegionLocation(dr.getSecond()). - getHostnamePort(); - if (!daughterRegions.containsKey(rsLocation)) { - LinkedList> entry = Lists.newLinkedList(); - daughterRegions.put(rsLocation, entry); - } - daughterRegions.get(rsLocation).add(dr); + /** + * Alternative getCurrentNrHRS which is no longer available. + * @param connection + * @return Rough count of regionservers out on cluster. + * @throws IOException + */ + private static int getRegionServerCount(final Connection connection) throws IOException { + try (Admin admin = connection.getAdmin()) { + ClusterStatus status = admin.getClusterStatus(); + Collection servers = status.getServers(); + return servers == null || servers.isEmpty()? 0: servers.size(); } - LOG.debug("Done with bucketing. Split time!"); - long startTime = System.currentTimeMillis(); - - // open the split file and modify it as splits finish - FSDataInputStream tmpIn = fs.open(splitFile); - byte[] rawData = new byte[tmpIn.available()]; - tmpIn.readFully(rawData); - tmpIn.close(); - FSDataOutputStream splitOut = fs.create(splitFile); - splitOut.write(rawData); + } + private static byte [] readFile(final FileSystem fs, final Path path) throws IOException { + FSDataInputStream tmpIn = fs.open(path); try { - // *** split code *** - while (!daughterRegions.isEmpty()) { - LOG.debug(daughterRegions.size() + " RS have regions to splt."); - - // Get RegionServer : region count mapping - final TreeMap rsSizes = Maps.newTreeMap(); - Map regionsInfo = table.getRegionLocations(); - for (ServerName rs : regionsInfo.values()) { - if (rsSizes.containsKey(rs)) { - rsSizes.put(rs, rsSizes.get(rs) + 1); - } else { - rsSizes.put(rs, 1); - } - } + byte [] rawData = new byte[tmpIn.available()]; + tmpIn.readFully(rawData); + return rawData; + } finally { + tmpIn.close(); + } + } - // sort the RS by the number of regions they have - List serversLeft = Lists.newArrayList(daughterRegions .keySet()); - Collections.sort(serversLeft, new Comparator() { - public int compare(String o1, String o2) { - return rsSizes.get(o1).compareTo(rsSizes.get(o2)); + static void rollingSplit(TableName tableName, SplitAlgorithm splitAlgo, Configuration conf) + throws IOException, InterruptedException { + final int minOS = conf.getInt("split.outstanding", 2); + try (Connection connection = ConnectionFactory.createConnection(conf)) { + // Max outstanding splits. 
default == 50% of servers + final int MAX_OUTSTANDING = Math.max(getRegionServerCount(connection) / 2, minOS); + + Path hbDir = FSUtils.getRootDir(conf); + Path tableDir = FSUtils.getTableDir(hbDir, tableName); + Path splitFile = new Path(tableDir, "_balancedSplit"); + FileSystem fs = FileSystem.get(conf); + + // Get a list of daughter regions to create + LinkedList> tmpRegionSet = null; + try (Table table = connection.getTable(tableName)) { + tmpRegionSet = getSplits(connection, tableName, splitAlgo); + } + LinkedList> outstanding = Lists.newLinkedList(); + int splitCount = 0; + final int origCount = tmpRegionSet.size(); + + // all splits must compact & we have 1 compact thread, so 2 split + // requests to the same RS can stall the outstanding split queue. + // To fix, group the regions into an RS pool and round-robin through it + LOG.debug("Bucketing regions by regionserver..."); + TreeMap>> daughterRegions = + Maps.newTreeMap(); + // Get a regionLocator. Need it in below. + try (RegionLocator regionLocator = connection.getRegionLocator(tableName)) { + for (Pair dr : tmpRegionSet) { + String rsLocation = regionLocator.getRegionLocation(dr.getSecond()).getHostnamePort(); + if (!daughterRegions.containsKey(rsLocation)) { + LinkedList> entry = Lists.newLinkedList(); + daughterRegions.put(rsLocation, entry); } - }); - - // round-robin through the RS list. Choose the lightest-loaded servers - // first to keep the master from load-balancing regions as we split. - for (String rsLoc : serversLeft) { - Pair dr = null; - - // find a region in the RS list that hasn't been moved - LOG.debug("Finding a region on " + rsLoc); - LinkedList> regionList = daughterRegions - .get(rsLoc); - while (!regionList.isEmpty()) { - dr = regionList.pop(); - - // get current region info - byte[] split = dr.getSecond(); - HRegionLocation regionLoc = table.getRegionLocation(split); - - // if this region moved locations - String newRs = regionLoc.getHostnamePort(); - if (newRs.compareTo(rsLoc) != 0) { - LOG.debug("Region with " + splitAlgo.rowToStr(split) - + " moved to " + newRs + ". Relocating..."); - // relocate it, don't use it right now - if (!daughterRegions.containsKey(newRs)) { - LinkedList> entry = Lists.newLinkedList(); - daughterRegions.put(newRs, entry); - } - daughterRegions.get(newRs).add(dr); - dr = null; - continue; - } + daughterRegions.get(rsLocation).add(dr); + } + LOG.debug("Done with bucketing. Split time!"); + long startTime = System.currentTimeMillis(); - // make sure this region wasn't already split - byte[] sk = regionLoc.getRegionInfo().getStartKey(); - if (sk.length != 0) { - if (Bytes.equals(split, sk)) { - LOG.debug("Region already split on " - + splitAlgo.rowToStr(split) + ". Skipping this region..."); - ++splitCount; - dr = null; - continue; - } - byte[] start = dr.getFirst(); - Preconditions.checkArgument(Bytes.equals(start, sk), splitAlgo - .rowToStr(start) + " != " + splitAlgo.rowToStr(sk)); - } + // Open the split file and modify it as splits finish + byte[] rawData = readFile(fs, splitFile); - // passed all checks! found a good region - break; - } - if (regionList.isEmpty()) { - daughterRegions.remove(rsLoc); - } - if (dr == null) - continue; + FSDataOutputStream splitOut = fs.create(splitFile); + try { + splitOut.write(rawData); - // we have a good region, time to split! 
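Because HConnection#getCurrentNrHRS is not available through the new Connection API, the patch approximates the server count from ClusterStatus; boiled down, the replacement is roughly the sketch below (class and method names are illustrative).

import java.io.IOException;
import java.util.Collection;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;

final class RegionServerCountSketch {
  // Rough count of live region servers, used above to cap outstanding split requests.
  static int regionServerCount(Connection connection) throws IOException {
    try (Admin admin = connection.getAdmin()) {
      Collection<ServerName> servers = admin.getClusterStatus().getServers();
      return (servers == null || servers.isEmpty()) ? 0 : servers.size();
    }
  }

  private RegionServerCountSketch() {}
}

MAX_OUTSTANDING is then derived as Math.max(count / 2, split.outstanding), preserving the old 50%-of-servers heuristic.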
- byte[] split = dr.getSecond(); - LOG.debug("Splitting at " + splitAlgo.rowToStr(split)); - HBaseAdmin admin = new HBaseAdmin(table.getConfiguration()); try { - admin.split(table.getTableName(), split); - } finally { - admin.close(); - } + // *** split code *** + while (!daughterRegions.isEmpty()) { + LOG.debug(daughterRegions.size() + " RS have regions to splt."); + + // Get ServerName to region count mapping + final TreeMap rsSizes = Maps.newTreeMap(); + List hrls = regionLocator.getAllRegionLocations(); + for (HRegionLocation hrl: hrls) { + ServerName sn = hrl.getServerName(); + if (rsSizes.containsKey(sn)) { + rsSizes.put(sn, rsSizes.get(sn) + 1); + } else { + rsSizes.put(sn, 1); + } + } - LinkedList> finished = Lists.newLinkedList(); - LinkedList> local_finished = Lists.newLinkedList(); - if (conf.getBoolean("split.verify", true)) { - // we need to verify and rate-limit our splits - outstanding.addLast(dr); - // with too many outstanding splits, wait for some to finish - while (outstanding.size() >= MAX_OUTSTANDING) { - LOG.debug("Wait for outstanding splits " + outstanding.size()); - local_finished = splitScan(outstanding, table, splitAlgo); - if (local_finished.isEmpty()) { - Thread.sleep(30 * 1000); - } else { - finished.addAll(local_finished); - outstanding.removeAll(local_finished); - LOG.debug(local_finished.size() + " outstanding splits finished"); + // Sort the ServerNames by the number of regions they have + List serversLeft = Lists.newArrayList(daughterRegions .keySet()); + Collections.sort(serversLeft, new Comparator() { + public int compare(String o1, String o2) { + return rsSizes.get(o1).compareTo(rsSizes.get(o2)); + } + }); + + // Round-robin through the ServerName list. Choose the lightest-loaded servers + // first to keep the master from load-balancing regions as we split. + for (String rsLoc : serversLeft) { + Pair dr = null; + + // Find a region in the ServerName list that hasn't been moved + LOG.debug("Finding a region on " + rsLoc); + LinkedList> regionList = daughterRegions.get(rsLoc); + while (!regionList.isEmpty()) { + dr = regionList.pop(); + + // get current region info + byte[] split = dr.getSecond(); + HRegionLocation regionLoc = regionLocator.getRegionLocation(split); + + // if this region moved locations + String newRs = regionLoc.getHostnamePort(); + if (newRs.compareTo(rsLoc) != 0) { + LOG.debug("Region with " + splitAlgo.rowToStr(split) + + " moved to " + newRs + ". Relocating..."); + // relocate it, don't use it right now + if (!daughterRegions.containsKey(newRs)) { + LinkedList> entry = Lists.newLinkedList(); + daughterRegions.put(newRs, entry); + } + daughterRegions.get(newRs).add(dr); + dr = null; + continue; + } + + // make sure this region wasn't already split + byte[] sk = regionLoc.getRegionInfo().getStartKey(); + if (sk.length != 0) { + if (Bytes.equals(split, sk)) { + LOG.debug("Region already split on " + + splitAlgo.rowToStr(split) + ". Skipping this region..."); + ++splitCount; + dr = null; + continue; + } + byte[] start = dr.getFirst(); + Preconditions.checkArgument(Bytes.equals(start, sk), splitAlgo + .rowToStr(start) + " != " + splitAlgo.rowToStr(sk)); + } + + // passed all checks! found a good region + break; + } + if (regionList.isEmpty()) { + daughterRegions.remove(rsLoc); + } + if (dr == null) + continue; + + // we have a good region, time to split! 
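Where the old loop asked the HTable for a region's location, the rewrite goes through a RegionLocator obtained from the shared Connection. A reduced sketch of the lookup (names hypothetical):

import java.io.IOException;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.RegionLocator;

final class SplitLocationSketch {
  // Find which region server currently hosts the region containing a split point.
  static String hostingServer(Connection connection, TableName tableName, byte[] splitPoint)
      throws IOException {
    try (RegionLocator locator = connection.getRegionLocator(tableName)) {
      HRegionLocation location = locator.getRegionLocation(splitPoint);
      return location.getHostnamePort(); // used above to bucket daughter regions per server
    }
  }

  private SplitLocationSketch() {}
}

In rollingSplit itself the RegionLocator is held open for the duration of the loop rather than re-opened per lookup, but the lookup is the same.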
+ byte[] split = dr.getSecond(); + LOG.debug("Splitting at " + splitAlgo.rowToStr(split)); + try (Admin admin = connection.getAdmin()) { + admin.split(tableName, split); + } + + LinkedList> finished = Lists.newLinkedList(); + LinkedList> local_finished = Lists.newLinkedList(); + if (conf.getBoolean("split.verify", true)) { + // we need to verify and rate-limit our splits + outstanding.addLast(dr); + // with too many outstanding splits, wait for some to finish + while (outstanding.size() >= MAX_OUTSTANDING) { + LOG.debug("Wait for outstanding splits " + outstanding.size()); + local_finished = splitScan(outstanding, connection, tableName, splitAlgo); + if (local_finished.isEmpty()) { + Thread.sleep(30 * 1000); + } else { + finished.addAll(local_finished); + outstanding.removeAll(local_finished); + LOG.debug(local_finished.size() + " outstanding splits finished"); + } + } + } else { + finished.add(dr); + } + + // mark each finished region as successfully split. + for (Pair region : finished) { + splitOut.writeChars("- " + splitAlgo.rowToStr(region.getFirst()) + + " " + splitAlgo.rowToStr(region.getSecond()) + "\n"); + splitCount++; + if (splitCount % 10 == 0) { + long tDiff = (System.currentTimeMillis() - startTime) + / splitCount; + LOG.debug("STATUS UPDATE: " + splitCount + " / " + origCount + + ". Avg Time / Split = " + + org.apache.hadoop.util.StringUtils.formatTime(tDiff)); + } + } } } - } else { - finished.add(dr); - } - - // mark each finished region as successfully split. - for (Pair region : finished) { - splitOut.writeChars("- " + splitAlgo.rowToStr(region.getFirst()) - + " " + splitAlgo.rowToStr(region.getSecond()) + "\n"); - splitCount++; - if (splitCount % 10 == 0) { - long tDiff = (System.currentTimeMillis() - startTime) - / splitCount; - LOG.debug("STATUS UPDATE: " + splitCount + " / " + origCount - + ". 
Avg Time / Split = " - + org.apache.hadoop.util.StringUtils.formatTime(tDiff)); + if (conf.getBoolean("split.verify", true)) { + while (!outstanding.isEmpty()) { + LOG.debug("Finally Wait for outstanding splits " + outstanding.size()); + LinkedList> finished = splitScan(outstanding, + connection, tableName, splitAlgo); + if (finished.isEmpty()) { + Thread.sleep(30 * 1000); + } else { + outstanding.removeAll(finished); + for (Pair region : finished) { + splitOut.writeChars("- " + splitAlgo.rowToStr(region.getFirst()) + + " " + splitAlgo.rowToStr(region.getSecond()) + "\n"); + splitCount++; + } + LOG.debug("Finally " + finished.size() + " outstanding splits finished"); + } + } } - } - } - } - if (conf.getBoolean("split.verify", true)) { - while (!outstanding.isEmpty()) { - LOG.debug("Finally Wait for outstanding splits " + outstanding.size()); - LinkedList> finished = splitScan(outstanding, - table, splitAlgo); - if (finished.isEmpty()) { - Thread.sleep(30 * 1000); - } else { - outstanding.removeAll(finished); - for (Pair region : finished) { - splitOut.writeChars("- " + splitAlgo.rowToStr(region.getFirst()) - + " " + splitAlgo.rowToStr(region.getSecond()) + "\n"); - splitCount++; + LOG.debug("All regions have been successfully split!"); + } finally { + long tDiff = System.currentTimeMillis() - startTime; + LOG.debug("TOTAL TIME = " + + org.apache.hadoop.util.StringUtils.formatTime(tDiff)); + LOG.debug("Splits = " + splitCount); + if (0 < splitCount) { + LOG.debug("Avg Time / Split = " + + org.apache.hadoop.util.StringUtils.formatTime(tDiff / splitCount)); } - LOG.debug("Finally " + finished.size() + " outstanding splits finished"); } + fs.delete(splitFile, false); + } finally { + splitOut.close(); } } - LOG.debug("All regions have been successfully split!"); - } finally { - long tDiff = System.currentTimeMillis() - startTime; - LOG.debug("TOTAL TIME = " - + org.apache.hadoop.util.StringUtils.formatTime(tDiff)); - LOG.debug("Splits = " + splitCount); - if (0 < splitCount) { - LOG.debug("Avg Time / Split = " - + org.apache.hadoop.util.StringUtils.formatTime(tDiff / splitCount)); - } - - splitOut.close(); - if (table != null){ - table.close(); - } } - fs.delete(splitFile, false); } /** @@ -647,108 +675,134 @@ public class RegionSplitter { } static LinkedList> splitScan( - LinkedList> regionList, HTable table, + LinkedList> regionList, + final Connection connection, + final TableName tableName, SplitAlgorithm splitAlgo) throws IOException, InterruptedException { LinkedList> finished = Lists.newLinkedList(); LinkedList> logicalSplitting = Lists.newLinkedList(); LinkedList> physicalSplitting = Lists.newLinkedList(); - // get table info - Path rootDir = FSUtils.getRootDir(table.getConfiguration()); - Path tableDir = FSUtils.getTableDir(rootDir, table.getName()); - FileSystem fs = tableDir.getFileSystem(table.getConfiguration()); - HTableDescriptor htd = table.getTableDescriptor(); - - // clear the cache to forcibly refresh region information - table.clearRegionCache(); - - // for every region that hasn't been verified as a finished split - for (Pair region : regionList) { - byte[] start = region.getFirst(); - byte[] split = region.getSecond(); - - // see if the new split daughter region has come online - try { - HRegionInfo dri = table.getRegionLocation(split).getRegionInfo(); - if (dri.isOffline() || !Bytes.equals(dri.getStartKey(), split)) { + // Get table info + Pair tableDirAndSplitFile = + getTableDirAndSplitFile(connection.getConfiguration(), tableName); + Path tableDir = 
tableDirAndSplitFile.getFirst(); + FileSystem fs = tableDir.getFileSystem(connection.getConfiguration()); + // Clear the cache to forcibly refresh region information + ((ClusterConnection)connection).clearRegionCache(); + HTableDescriptor htd = null; + try (Table table = connection.getTable(tableName)) { + htd = table.getTableDescriptor(); + } + try (RegionLocator regionLocator = connection.getRegionLocator(tableName)) { + + // for every region that hasn't been verified as a finished split + for (Pair region : regionList) { + byte[] start = region.getFirst(); + byte[] split = region.getSecond(); + + // see if the new split daughter region has come online + try { + HRegionInfo dri = regionLocator.getRegionLocation(split).getRegionInfo(); + if (dri.isOffline() || !Bytes.equals(dri.getStartKey(), split)) { + logicalSplitting.add(region); + continue; + } + } catch (NoServerForRegionException nsfre) { + // NSFRE will occur if the old hbase:meta entry has no server assigned + LOG.info(nsfre); logicalSplitting.add(region); continue; } - } catch (NoServerForRegionException nsfre) { - // NSFRE will occur if the old hbase:meta entry has no server assigned - LOG.info(nsfre); - logicalSplitting.add(region); - continue; - } - try { - // when a daughter region is opened, a compaction is triggered - // wait until compaction completes for both daughter regions - LinkedList check = Lists.newLinkedList(); - check.add(table.getRegionLocation(start).getRegionInfo()); - check.add(table.getRegionLocation(split).getRegionInfo()); - for (HRegionInfo hri : check.toArray(new HRegionInfo[check.size()])) { - byte[] sk = hri.getStartKey(); - if (sk.length == 0) - sk = splitAlgo.firstRow(); - String startKey = splitAlgo.rowToStr(sk); - - HRegionFileSystem regionFs = HRegionFileSystem.openRegionFromFileSystem( - table.getConfiguration(), fs, tableDir, hri, true); - - // check every Column Family for that region - boolean refFound = false; - for (HColumnDescriptor c : htd.getFamilies()) { - if ((refFound = regionFs.hasReferences(htd.getTableName().getNameAsString()))) { - break; + try { + // when a daughter region is opened, a compaction is triggered + // wait until compaction completes for both daughter regions + LinkedList check = Lists.newLinkedList(); + check.add(regionLocator.getRegionLocation(start).getRegionInfo()); + check.add(regionLocator.getRegionLocation(split).getRegionInfo()); + for (HRegionInfo hri : check.toArray(new HRegionInfo[check.size()])) { + byte[] sk = hri.getStartKey(); + if (sk.length == 0) + sk = splitAlgo.firstRow(); + + HRegionFileSystem regionFs = HRegionFileSystem.openRegionFromFileSystem( + connection.getConfiguration(), fs, tableDir, hri, true); + + // Check every Column Family for that region -- check does not have references. 
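The post-split verification now asks each column family of a daughter region whether reference files remain (note the fix in this hunk: hasReferences is queried with the family name, where the old code passed the table name). A reduced sketch of that check, assuming the surrounding descriptor and filesystem handles:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;

final class DaughterReferenceCheckSketch {
  // A daughter region is fully split once none of its families still holds reference files.
  static boolean hasReferences(Configuration conf, FileSystem fs, Path tableDir,
      HTableDescriptor htd, HRegionInfo daughter) throws IOException {
    HRegionFileSystem regionFs =
        HRegionFileSystem.openRegionFromFileSystem(conf, fs, tableDir, daughter, true);
    for (HColumnDescriptor family : htd.getFamilies()) {
      if (regionFs.hasReferences(family.getNameAsString())) {
        return true; // compaction has not yet cleaned this family up
      }
    }
    return false;
  }

  private DaughterReferenceCheckSketch() {}
}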
+ boolean refFound = false; + for (HColumnDescriptor c : htd.getFamilies()) { + if ((refFound = regionFs.hasReferences(c.getNameAsString()))) { + break; + } } - } - // compaction is completed when all reference files are gone - if (!refFound) { - check.remove(hri); + // compaction is completed when all reference files are gone + if (!refFound) { + check.remove(hri); + } } - } - if (check.isEmpty()) { - finished.add(region); - } else { + if (check.isEmpty()) { + finished.add(region); + } else { + physicalSplitting.add(region); + } + } catch (NoServerForRegionException nsfre) { + LOG.debug("No Server Exception thrown for: " + splitAlgo.rowToStr(start)); physicalSplitting.add(region); + ((ClusterConnection)connection).clearRegionCache(); } - } catch (NoServerForRegionException nsfre) { - LOG.debug("No Server Exception thrown for: " + splitAlgo.rowToStr(start)); - physicalSplitting.add(region); - table.clearRegionCache(); } - } - LOG.debug("Split Scan: " + finished.size() + " finished / " - + logicalSplitting.size() + " split wait / " - + physicalSplitting.size() + " reference wait"); + LOG.debug("Split Scan: " + finished.size() + " finished / " + + logicalSplitting.size() + " split wait / " + + physicalSplitting.size() + " reference wait"); - return finished; + return finished; + } } - static LinkedList> getSplits(HTable table, - SplitAlgorithm splitAlgo) throws IOException { - Path hbDir = FSUtils.getRootDir(table.getConfiguration()); - Path tableDir = FSUtils.getTableDir(hbDir, table.getName()); + /** + * @param conf + * @param tableName + * @return A Pair where first item is table dir and second is the split file. + * @throws IOException + */ + private static Pair getTableDirAndSplitFile(final Configuration conf, + final TableName tableName) + throws IOException { + Path hbDir = FSUtils.getRootDir(conf); + Path tableDir = FSUtils.getTableDir(hbDir, tableName); Path splitFile = new Path(tableDir, "_balancedSplit"); - FileSystem fs = tableDir.getFileSystem(table.getConfiguration()); + return new Pair(tableDir, splitFile); + } - // using strings because (new byte[]{0}).equals(new byte[]{0}) == false + static LinkedList> getSplits(final Connection connection, + TableName tableName, SplitAlgorithm splitAlgo) + throws IOException { + Pair tableDirAndSplitFile = + getTableDirAndSplitFile(connection.getConfiguration(), tableName); + Path tableDir = tableDirAndSplitFile.getFirst(); + Path splitFile = tableDirAndSplitFile.getSecond(); + + FileSystem fs = tableDir.getFileSystem(connection.getConfiguration()); + + // Using strings because (new byte[]{0}).equals(new byte[]{0}) == false Set> daughterRegions = Sets.newHashSet(); - // does a split file exist? + // Does a split file exist? if (!fs.exists(splitFile)) { // NO = fresh start. calculate splits to make - LOG.debug("No _balancedSplit file. Calculating splits..."); + LOG.debug("No " + splitFile.getName() + " file. 
Calculating splits "); - // query meta for all regions in the table + // Query meta for all regions in the table Set> rows = Sets.newHashSet(); - Pair tmp = table.getStartEndKeys(); - Preconditions.checkArgument( - tmp.getFirst().length == tmp.getSecond().length, + Pair tmp = null; + try (RegionLocator regionLocator = connection.getRegionLocator(tableName)) { + tmp = regionLocator.getStartEndKeys(); + } + Preconditions.checkArgument(tmp.getFirst().length == tmp.getSecond().length, "Start and End rows should be equivalent"); for (int i = 0; i < tmp.getFirst().length; ++i) { byte[] start = tmp.getFirst()[i], end = tmp.getSecond()[i]; @@ -758,8 +812,7 @@ public class RegionSplitter { end = splitAlgo.lastRow(); rows.add(Pair.newPair(start, end)); } - LOG.debug("Table " + Bytes.toString(table.getTableName()) + " has " - + rows.size() + " regions that will be split."); + LOG.debug("Table " + tableName + " has " + rows.size() + " regions that will be split."); // prepare the split file Path tmpFile = new Path(tableDir, "_balancedSplit_prepare"); @@ -780,8 +833,8 @@ public class RegionSplitter { fs.rename(tmpFile, splitFile); } else { LOG.debug("_balancedSplit file found. Replay log to restore state..."); - FSUtils.getInstance(fs, table.getConfiguration()) - .recoverFileLease(fs, splitFile, table.getConfiguration(), null); + FSUtils.getInstance(fs, connection.getConfiguration()) + .recoverFileLease(fs, splitFile, connection.getConfiguration(), null); // parse split file and process remaining splits FSDataInputStream tmpIn = fs.open(splitFile); @@ -1062,4 +1115,4 @@ public class RegionSplitter { + "," + rowToStr(lastRow()) + "]"; } } -} +} \ No newline at end of file diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/MockRegionServerServices.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/MockRegionServerServices.java index 0078ebf..4545842 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/MockRegionServerServices.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/MockRegionServerServices.java @@ -27,6 +27,7 @@ import java.util.concurrent.ConcurrentSkipListMap; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.HConnection; import org.apache.hadoop.hbase.executor.ExecutorService; import org.apache.hadoop.hbase.fs.HFileSystem; @@ -266,4 +267,10 @@ class MockRegionServerServices implements RegionServerServices { // TODO Auto-generated method stub return false; } + + @Override + public Connection getConnection() { + // TODO Auto-generated method stub + return null; + } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/HConnectionTestingUtility.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/HConnectionTestingUtility.java index f3bf9c4..56ba695 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/HConnectionTestingUtility.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/HConnectionTestingUtility.java @@ -134,6 +134,8 @@ public class HConnectionTestingUtility { Mockito.when(c.getNewRpcRetryingCallerFactory(conf)).thenReturn( RpcRetryingCallerFactory.instantiate(conf, RetryingCallerInterceptorFactory.NO_OP_INTERCEPTOR)); + HTableInterface t = Mockito.mock(HTableInterface.class); + Mockito.when(c.getTable((TableName)Mockito.any())).thenReturn(t); return c; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapred/TestTableMapReduceUtil.java 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapred/TestTableMapReduceUtil.java index 6e9f8d8..628bb96 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapred/TestTableMapReduceUtil.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapred/TestTableMapReduceUtil.java @@ -136,7 +136,6 @@ public class TestTableMapReduceUtil { * does not exceed the number of regions for the given table. */ @Test - @SuppressWarnings("deprecation") public void shouldNumberOfReduceTaskNotExceedNumberOfRegionsForGivenTable() throws IOException { Assert.assertNotNull(presidentsTable); @@ -155,7 +154,6 @@ public class TestTableMapReduceUtil { } @Test - @SuppressWarnings("deprecation") public void shouldNumberOfMapTaskNotExceedNumberOfRegionsForGivenTable() throws IOException { Configuration cfg = UTIL.getConfiguration(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java index 98b59bb..ac73886 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java @@ -37,6 +37,7 @@ import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.ZooKeeperConnectionException; +import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.Get; import org.apache.hadoop.hbase.client.HConnection; import org.apache.hadoop.hbase.client.Result; @@ -113,7 +114,7 @@ import com.google.protobuf.ServiceException; * Use this when you can't bend Mockito to your liking (e.g. return null result * when 'scanning' until master timesout and then return a coherent meta row * result thereafter. Have some facility for faking gets and scans. See - * {@link #setGetResult(byte[], byte[], Result)} for how to fill the backing data + * setGetResult(byte[], byte[], Result) for how to fill the backing data * store that the get pulls from. 
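The testing utility change above stubs getTable on the mocked connection so code that fetches a Table from it no longer hits a null. Isolated from its surroundings, the stubbing is simply (helper name hypothetical; the Mockito calls are the ones added in HConnectionTestingUtility):

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.HConnection;
import org.apache.hadoop.hbase.client.HTableInterface;
import org.mockito.Mockito;

final class MockGetTableSketch {
  // Any getTable(...) call on the mocked connection returns a Mockito mock table.
  static void stubGetTable(HConnection c) throws IOException {
    HTableInterface t = Mockito.mock(HTableInterface.class);
    Mockito.when(c.getTable((TableName) Mockito.any())).thenReturn(t);
  }

  private MockGetTableSketch() {}
}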
*/ class MockRegionServer @@ -614,4 +615,10 @@ ClientProtos.ClientService.BlockingInterface, RegionServerServices { throws ServiceException { return null; } + + @Override + public Connection getConnection() { + // TODO Auto-generated method stub + return null; + } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestActiveMasterManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestActiveMasterManager.java index 649d1a5..0dadcf1 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestActiveMasterManager.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestActiveMasterManager.java @@ -32,6 +32,7 @@ import org.apache.hadoop.hbase.CoordinatedStateManager; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.HConnection; import org.apache.hadoop.hbase.monitoring.MonitoredTask; import org.apache.hadoop.hbase.testclassification.MasterTests; @@ -320,6 +321,12 @@ public class TestActiveMasterManager { public ActiveMasterManager getActiveMasterManager() { return activeMasterManager; } + + @Override + public Connection getConnection() { + // TODO Auto-generated method stub + return null; + } } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java index 9bf3d10..9bc2cbd 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java @@ -51,12 +51,16 @@ import org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableDescriptors; import org.apache.hadoop.hbase.MetaMockingUtil; +import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.HConnection; import org.apache.hadoop.hbase.client.HConnectionManager; import org.apache.hadoop.hbase.client.HConnectionTestingUtility; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.CoordinatedStateManager; import org.apache.hadoop.hbase.client.TableState; +import org.apache.hadoop.hbase.coordination.BaseCoordinatedStateManager; +import org.apache.hadoop.hbase.coordination.SplitLogManagerCoordination; +import org.apache.hadoop.hbase.coordination.SplitLogManagerCoordination.SplitLogManagerDetails; import org.apache.hadoop.hbase.executor.ExecutorService; import org.apache.hadoop.hbase.io.Reference; import org.apache.hadoop.hbase.master.CatalogJanitor.SplitParentFirstComparator; @@ -146,7 +150,7 @@ public class TestCatalogJanitor { @Override public HConnection getShortCircuitConnection() { - return this.connection; + return null; } @Override @@ -171,7 +175,12 @@ public class TestCatalogJanitor { @Override public CoordinatedStateManager getCoordinatedStateManager() { - return null; + BaseCoordinatedStateManager m = Mockito.mock(BaseCoordinatedStateManager.class); + SplitLogManagerCoordination c = Mockito.mock(SplitLogManagerCoordination.class); + Mockito.when(m.getSplitLogManagerCoordination()).thenReturn(c); + SplitLogManagerDetails d = Mockito.mock(SplitLogManagerDetails.class); + Mockito.when(c.getDetails()).thenReturn(d); + return m; } @Override @@ -195,6 +204,11 @@ public class TestCatalogJanitor { 
HConnectionManager.deleteConnection(this.connection.getConfiguration()); } } + + @Override + public Connection getConnection() { + return this.connection; + } } /** @@ -453,6 +467,11 @@ public class TestCatalogJanitor { // Auto-generated method stub return false; } + + @Override + public Connection getConnection() { + return null; + } } @Test diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestClockSkewDetection.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestClockSkewDetection.java index 72403bf..3f61fff 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestClockSkewDetection.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestClockSkewDetection.java @@ -26,7 +26,7 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.*; -import org.apache.hadoop.hbase.CoordinatedStateManager; +import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.HConnection; import org.apache.hadoop.hbase.testclassification.MasterTests; import org.apache.hadoop.hbase.testclassification.SmallTests; @@ -89,6 +89,12 @@ public class TestClockSkewDetection { @Override public void stop(String why) { + } + + @Override + public Connection getConnection() { + // TODO Auto-generated method stub + return null; }}, null, false); LOG.debug("regionServerStartup 1"); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestSplitLogManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestSplitLogManager.java index 76a6db9..5952918 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestSplitLogManager.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestSplitLogManager.java @@ -58,6 +58,7 @@ import org.apache.hadoop.hbase.SplitLogCounters; import org.apache.hadoop.hbase.SplitLogTask; import org.apache.hadoop.hbase.Stoppable; import org.apache.hadoop.hbase.Waiter; +import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.HConnection; import org.apache.hadoop.hbase.coordination.ZKSplitLogManagerCoordination; import org.apache.hadoop.hbase.master.SplitLogManager.Task; @@ -163,6 +164,12 @@ public class TestSplitLogManager { return null; } + @Override + public Connection getConnection() { + // TODO Auto-generated method stub + return null; + } + } static Stoppable stopper = new Stoppable() { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileCleaner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileCleaner.java index a0b479f..3c1250d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileCleaner.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileCleaner.java @@ -34,6 +34,7 @@ import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.HConnection; import org.apache.hadoop.hbase.testclassification.MasterTests; import org.apache.hadoop.hbase.testclassification.MediumTests; @@ -248,5 +249,11 @@ public class TestHFileCleaner { public boolean isStopped() { return false; } + + @Override + public Connection getConnection() { + // TODO Auto-generated method stub + return null; + } 
} } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileLinkCleaner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileLinkCleaner.java index f4fff62..a29a598 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileLinkCleaner.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileLinkCleaner.java @@ -33,6 +33,7 @@ import org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.CoordinatedStateManager; +import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.HConnection; import org.apache.hadoop.hbase.io.HFileLink; import org.apache.hadoop.hbase.testclassification.MasterTests; @@ -182,5 +183,11 @@ public class TestHFileLinkCleaner { public boolean isStopped() { return false; } + + @Override + public Connection getConnection() { + // TODO Auto-generated method stub + return null; + } } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestLogsCleaner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestLogsCleaner.java index 2400584..8cc595d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestLogsCleaner.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestLogsCleaner.java @@ -27,7 +27,7 @@ import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.*; -import org.apache.hadoop.hbase.CoordinatedStateManager; +import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.HConnection; import org.apache.hadoop.hbase.replication.ReplicationFactory; import org.apache.hadoop.hbase.replication.ReplicationQueues; @@ -189,6 +189,12 @@ public class TestLogsCleaner { public boolean isStopped() { return false; } + + @Override + public Connection getConnection() { + // TODO Auto-generated method stub + return null; + } } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaTableUtil.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaTableUtil.java index 84d9155..34239c0 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaTableUtil.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaTableUtil.java @@ -18,6 +18,9 @@ package org.apache.hadoop.hbase.quotas; +import static org.junit.Assert.assertEquals; + +import java.io.IOException; import java.util.concurrent.TimeUnit; import org.apache.commons.logging.Log; @@ -25,20 +28,20 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.client.HBaseAdmin; +import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas; import org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Throttle; import org.apache.hadoop.hbase.testclassification.MasterTests; import org.apache.hadoop.hbase.testclassification.MediumTests; - +import org.junit.After; import org.junit.AfterClass; +import org.junit.Before; import org.junit.BeforeClass; import org.junit.Test; import 
org.junit.experimental.categories.Category; -import static org.junit.Assert.assertEquals; - /** * Test the quota table helpers (e.g. CRUD operations) */ @@ -47,6 +50,7 @@ public class TestQuotaTableUtil { final Log LOG = LogFactory.getLog(getClass()); private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); + private Connection connection; @BeforeClass public static void setUpBeforeClass() throws Exception { @@ -66,6 +70,16 @@ public class TestQuotaTableUtil { TEST_UTIL.shutdownMiniCluster(); } + @Before + public void before() throws IOException { + this.connection = ConnectionFactory.createConnection(TEST_UTIL.getConfiguration()); + } + + @After + public void after() throws IOException { + this.connection.close(); + } + @Test public void testTableQuotaUtil() throws Exception { final TableName table = TableName.valueOf("testTableQuotaUtilTable"); @@ -79,13 +93,13 @@ public class TestQuotaTableUtil { .build(); // Add user quota and verify it - QuotaUtil.addTableQuota(TEST_UTIL.getConfiguration(), table, quota); - Quotas resQuota = QuotaUtil.getTableQuota(TEST_UTIL.getConfiguration(), table); + QuotaUtil.addTableQuota(this.connection, table, quota); + Quotas resQuota = QuotaUtil.getTableQuota(this.connection, table); assertEquals(quota, resQuota); // Remove user quota and verify it - QuotaUtil.deleteTableQuota(TEST_UTIL.getConfiguration(), table); - resQuota = QuotaUtil.getTableQuota(TEST_UTIL.getConfiguration(), table); + QuotaUtil.deleteTableQuota(this.connection, table); + resQuota = QuotaUtil.getTableQuota(this.connection, table); assertEquals(null, resQuota); } @@ -102,13 +116,13 @@ public class TestQuotaTableUtil { .build(); // Add user quota and verify it - QuotaUtil.addNamespaceQuota(TEST_UTIL.getConfiguration(), namespace, quota); - Quotas resQuota = QuotaUtil.getNamespaceQuota(TEST_UTIL.getConfiguration(), namespace); + QuotaUtil.addNamespaceQuota(this.connection, namespace, quota); + Quotas resQuota = QuotaUtil.getNamespaceQuota(this.connection, namespace); assertEquals(quota, resQuota); // Remove user quota and verify it - QuotaUtil.deleteNamespaceQuota(TEST_UTIL.getConfiguration(), namespace); - resQuota = QuotaUtil.getNamespaceQuota(TEST_UTIL.getConfiguration(), namespace); + QuotaUtil.deleteNamespaceQuota(this.connection, namespace); + resQuota = QuotaUtil.getNamespaceQuota(this.connection, namespace); assertEquals(null, resQuota); } @@ -139,33 +153,33 @@ public class TestQuotaTableUtil { .build(); // Add user global quota - QuotaUtil.addUserQuota(TEST_UTIL.getConfiguration(), user, quota); - Quotas resQuota = QuotaUtil.getUserQuota(TEST_UTIL.getConfiguration(), user); + QuotaUtil.addUserQuota(this.connection, user, quota); + Quotas resQuota = QuotaUtil.getUserQuota(this.connection, user); assertEquals(quota, resQuota); // Add user quota for table - QuotaUtil.addUserQuota(TEST_UTIL.getConfiguration(), user, table, quotaTable); - Quotas resQuotaTable = QuotaUtil.getUserQuota(TEST_UTIL.getConfiguration(), user, table); + QuotaUtil.addUserQuota(this.connection, user, table, quotaTable); + Quotas resQuotaTable = QuotaUtil.getUserQuota(this.connection, user, table); assertEquals(quotaTable, resQuotaTable); // Add user quota for namespace - QuotaUtil.addUserQuota(TEST_UTIL.getConfiguration(), user, namespace, quotaNamespace); - Quotas resQuotaNS = QuotaUtil.getUserQuota(TEST_UTIL.getConfiguration(), user, namespace); + QuotaUtil.addUserQuota(this.connection, user, namespace, quotaNamespace); + Quotas resQuotaNS = QuotaUtil.getUserQuota(this.connection, user, 
namespace); assertEquals(quotaNamespace, resQuotaNS); // Delete user global quota - QuotaUtil.deleteUserQuota(TEST_UTIL.getConfiguration(), user); - resQuota = QuotaUtil.getUserQuota(TEST_UTIL.getConfiguration(), user); + QuotaUtil.deleteUserQuota(this.connection, user); + resQuota = QuotaUtil.getUserQuota(this.connection, user); assertEquals(null, resQuota); // Delete user quota for table - QuotaUtil.deleteUserQuota(TEST_UTIL.getConfiguration(), user, table); - resQuotaTable = QuotaUtil.getUserQuota(TEST_UTIL.getConfiguration(), user, table); + QuotaUtil.deleteUserQuota(this.connection, user, table); + resQuotaTable = QuotaUtil.getUserQuota(this.connection, user, table); assertEquals(null, resQuotaTable); // Delete user quota for namespace - QuotaUtil.deleteUserQuota(TEST_UTIL.getConfiguration(), user, namespace); - resQuotaNS = QuotaUtil.getUserQuota(TEST_UTIL.getConfiguration(), user, namespace); + QuotaUtil.deleteUserQuota(this.connection, user, namespace); + resQuotaNS = QuotaUtil.getUserQuota(this.connection, user, namespace); assertEquals(null, resQuotaNS); } } \ No newline at end of file diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHeapMemoryManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHeapMemoryManager.java index fcfe063..eeea558 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHeapMemoryManager.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHeapMemoryManager.java @@ -33,6 +33,7 @@ import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.io.hfile.BlockCache; +import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.HConnection; import org.apache.hadoop.hbase.io.hfile.BlockCacheKey; import org.apache.hadoop.hbase.io.hfile.CacheStats; @@ -490,6 +491,12 @@ public class TestHeapMemoryManager { public ServerName getServerName() { return ServerName.valueOf("server1",4000,12345); } + + @Override + public Connection getConnection() { + // TODO Auto-generated method stub + return null; + } } static class CustomHeapMemoryTuner implements HeapMemoryTuner { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitLogWorker.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitLogWorker.java index 1ae58f1..eee4a0e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitLogWorker.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitLogWorker.java @@ -43,6 +43,7 @@ import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.SplitLogCounters; import org.apache.hadoop.hbase.SplitLogTask; import org.apache.hadoop.hbase.Waiter; +import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.HConnection; import org.apache.hadoop.hbase.executor.ExecutorService; import org.apache.hadoop.hbase.executor.ExecutorType; @@ -137,6 +138,12 @@ public class TestSplitLogWorker { return null; } + @Override + public Connection getConnection() { + // TODO Auto-generated method stub + return null; + } + } private void waitForCounter(AtomicLong ctr, long oldval, long newval, long timems) diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStateZKImpl.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStateZKImpl.java index 
0971d8c..c052824 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStateZKImpl.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStateZKImpl.java @@ -28,6 +28,7 @@ import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.HConnection; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.testclassification.ReplicationTests; @@ -183,6 +184,12 @@ public class TestReplicationStateZKImpl extends TestReplicationStateBasic { public boolean isStopped() { return this.isStopped; } + + @Override + public Connection getConnection() { + // TODO Auto-generated method stub + return null; + } } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationTrackerZKImpl.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationTrackerZKImpl.java index 73a631e..53f6bf3 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationTrackerZKImpl.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationTrackerZKImpl.java @@ -17,6 +17,10 @@ */ package org.apache.hadoop.hbase.replication; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + import java.util.ArrayList; import java.util.List; import java.util.concurrent.atomic.AtomicInteger; @@ -25,11 +29,12 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.ClusterId; +import org.apache.hadoop.hbase.CoordinatedStateManager; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.ServerName; -import org.apache.hadoop.hbase.CoordinatedStateManager; +import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.HConnection; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.testclassification.ReplicationTests; @@ -38,13 +43,9 @@ import org.apache.hadoop.hbase.zookeeper.ZKClusterId; import org.apache.hadoop.hbase.zookeeper.ZKUtil; import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; import org.junit.AfterClass; -import org.junit.Test; -import org.junit.Ignore; - -import static org.junit.Assert.*; - import org.junit.Before; import org.junit.BeforeClass; +import org.junit.Test; import org.junit.experimental.categories.Category; /** @@ -284,6 +285,12 @@ public class TestReplicationTrackerZKImpl { public boolean isStopped() { return this.isStopped; } + + @Override + public Connection getConnection() { + // TODO Auto-generated method stub + return null; + } } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java index 43b37d0..df16017 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java @@ -49,6 +49,7 @@ 
import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.CoordinatedStateManager; +import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.HConnection; import org.apache.hadoop.hbase.regionserver.wal.HLog; import org.apache.hadoop.hbase.regionserver.wal.HLogFactory; @@ -473,7 +474,10 @@ public class TestReplicationSourceManager { public boolean isStopped() { return false; // To change body of implemented methods use File | Settings | File Templates. } - } - -} + @Override + public Connection getConnection() { + return null; + } + } +} \ No newline at end of file diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestTokenAuthentication.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestTokenAuthentication.java index ce143ab..11fa337 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestTokenAuthentication.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestTokenAuthentication.java @@ -40,6 +40,7 @@ import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.HConnection; import org.apache.hadoop.hbase.client.HTableInterface; import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment; @@ -314,6 +315,12 @@ public class TestTokenAuthentication { throw new ServiceException(ioe); } } + + @Override + public Connection getConnection() { + // TODO Auto-generated method stub + return null; + } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MockServer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MockServer.java index ca83eb2..5e1b4aa 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MockServer.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MockServer.java @@ -27,6 +27,7 @@ import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.ZooKeeperConnectionException; +import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.HConnection; import org.apache.hadoop.hbase.zookeeper.MetaTableLocator; import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; @@ -121,4 +122,10 @@ public class MockServer implements Server { // TODO Auto-generated method stub return this.aborted; } + + @Override + public Connection getConnection() { + // TODO Auto-generated method stub + return null; + } }