diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java index 3282838..896ef48 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java @@ -34,11 +34,13 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.Delete; import org.apache.hadoop.hbase.client.Get; import org.apache.hadoop.hbase.client.HTable; import org.apache.hadoop.hbase.client.Mutation; import org.apache.hadoop.hbase.client.Put; +import org.apache.hadoop.hbase.client.RegionLocator; import org.apache.hadoop.hbase.client.RegionReplicaUtil; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.ResultScanner; @@ -180,7 +182,7 @@ public class MetaTableAccessor { if (connection == null || connection.isClosed()) { throw new NullPointerException("No connection"); } - return new HTable(tableName, connection); + return connection.getTable(tableName); } /** @@ -887,11 +889,10 @@ public class MetaTableAccessor { */ public static int getRegionCount(final Configuration c, final TableName tableName) throws IOException { - HTable t = new HTable(c, tableName); - try { - return t.getRegionLocations().size(); - } finally { - t.close(); + try (Connection connection = ConnectionFactory.createConnection(c)) { + try (RegionLocator locator = connection.getRegionLocator(tableName)) { + return locator.getAllRegionLocations().size(); + } } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionLoad.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionLoad.java index 234c5ae..62e11374 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionLoad.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionLoad.java @@ -161,15 +161,6 @@ public class RegionLoad { } /** - * @return the data locality of region in the regionserver. - */ - public float getDataLocality() { - if (regionLoadPB.hasDataLocality()) { - return regionLoadPB.getDataLocality(); - } - return 0.0f; - } - /** * @see java.lang.Object#toString() */ @Override @@ -214,8 +205,6 @@ public class RegionLoad { compactionProgressPct); sb = Strings.appendKeyValue(sb, "completeSequenceId", this.getCompleteSequenceId()); - sb = Strings.appendKeyValue(sb, "dataLocality", - this.getDataLocality()); return sb.toString(); } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionFactory.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionFactory.java index 374ce28..b489af2 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionFactory.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionFactory.java @@ -35,7 +35,7 @@ import org.apache.hadoop.hbase.security.UserProvider; * A non-instantiable class that manages creation of {@link Connection}s. * Managing the lifecycle of the {@link Connection}s to the cluster is the responsibility of * the caller. - * From this {@link Connection} {@link Table} implementations are retrieved + * From a {@link Connection}, {@link Table} implementations are retrieved * with {@link Connection#getTable(TableName)}. Example: *
  * Connection connection = ConnectionFactory.createConnection(config);
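 *
 * The javadoc example continues past the line shown here; as a rough sketch of the full pattern it
 * describes (the table name and row key below are placeholders, and the imports are the same client
 * classes this patch touches), usage looks like the following. The caller owns the Connection and
 * closes it when done; Table instances are lightweight and fetched per use.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class ConnectionUsageSketch {
  public static void main(String[] args) throws Exception {
    Configuration config = HBaseConfiguration.create();
    // Caller owns the Connection lifecycle; Table is lightweight, get as needed and close when done.
    try (Connection connection = ConnectionFactory.createConnection(config);
         Table table = connection.getTable(TableName.valueOf("myTable"))) {
      System.out.println(table.get(new Get(Bytes.toBytes("myRow"))));
    }
  }
}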
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
index 75dd72c..7136f72 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
@@ -68,9 +68,11 @@ import org.apache.hadoop.hbase.exceptions.DeserializationException;
 import org.apache.hadoop.hbase.exceptions.MergeRegionException;
 import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;
 import org.apache.hadoop.hbase.ipc.MasterCoprocessorRpcChannel;
+import org.apache.hadoop.hbase.ipc.PayloadCarryingRpcController;
 import org.apache.hadoop.hbase.ipc.RegionServerCoprocessorRpcChannel;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.protobuf.RequestConverter;
+import org.apache.hadoop.hbase.protobuf.ResponseConverter;
 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.AdminService;
 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CloseRegionRequest;
 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CloseRegionResponse;
@@ -83,6 +85,9 @@ import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RollWALWriterReque
 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RollWALWriterResponse;
 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.StopServerRequest;
 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateConfigurationRequest;
+import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ClientService;
+import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanRequest;
+import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanResponse;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription;
@@ -106,6 +111,8 @@ import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCompletedSnaps
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNamespaceDescriptorRequest;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetSchemaAlterStatusRequest;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetSchemaAlterStatusResponse;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableDescriptorsRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableDescriptorsResponse;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsRestoreSnapshotDoneRequest;
@@ -661,24 +668,47 @@ public class HBaseAdmin implements Admin {
     // Wait until all regions deleted
     for (int tries = 0; tries < (this.numRetries * this.retryLongerMultiplier); tries++) {
       try {
-        // Find whether all regions are deleted.
-        List regionLations =
-            MetaScanner.listTableRegionLocations(conf, connection, tableName);
+        HRegionLocation firstMetaServer = getFirstMetaServerForTable(tableName);
+        Scan scan = MetaTableAccessor.getScanForTableName(tableName);
+        scan.addColumn(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);
+        ScanRequest request = RequestConverter.buildScanRequest(
+          firstMetaServer.getRegionInfo().getRegionName(), scan, 1, true);
+        Result[] values = null;
+        // Get a batch at a time.
+        ClientService.BlockingInterface server = connection.getClient(firstMetaServer
+            .getServerName());
+        PayloadCarryingRpcController controller = new PayloadCarryingRpcController();
+        try {
+          controller.setPriority(tableName);
+          ScanResponse response = server.scan(controller, request);
+          values = ResponseConverter.getResults(controller.cellScanner(), response);
+        } catch (ServiceException se) {
+          throw ProtobufUtil.getRemoteException(se);
+        }
 
         // let us wait until hbase:meta table is updated and
         // HMaster removes the table from its HTableDescriptors
-        if (regionLations == null || regionLations.size() == 0) {
-          HTableDescriptor htd = getTableDescriptorByTableName(tableName);
-
-          if (htd == null) {
-            // table could not be found in master - we are done.
-            tableExists = false;
+        if (values == null || values.length == 0) {
+          tableExists = false;
+          GetTableDescriptorsResponse htds;
+          MasterKeepAliveConnection master = connection.getKeepAliveMasterService();
+          try {
+            GetTableDescriptorsRequest req =
+              RequestConverter.buildGetTableDescriptorsRequest(tableName);
+            htds = master.getTableDescriptors(null, req);
+          } catch (ServiceException se) {
+            throw ProtobufUtil.getRemoteException(se);
+          } finally {
+            master.close();
+          }
+          tableExists = !htds.getTableSchemaList().isEmpty();
+          if (!tableExists) {
             break;
           }
         }
       } catch (IOException ex) {
         failures++;
-        if(failures >= numRetries - 1) {           // no more tries left
+        if(failures == numRetries - 1) {           // no more tries left
           if (ex instanceof RemoteException) {
             throw ((RemoteException) ex).unwrapRemoteException();
           } else {
@@ -2590,27 +2620,6 @@ public class HBaseAdmin implements Admin {
   }
 
   /**
-   * Get tableDescriptor
-   * @param tableName one table name
-   * @return HTD the HTableDescriptor or null if the table not exists
-   * @throws IOException if a remote or network exception occurs
-   */
-  private HTableDescriptor getTableDescriptorByTableName(TableName tableName)
-      throws IOException {
-    List<TableName> tableNames = new ArrayList<TableName>(1);
-    tableNames.add(tableName);
-
-    HTableDescriptor[] htdl = getTableDescriptorsByTableName(tableNames);
-
-    if (htdl == null || htdl.length == 0) {
-      return null;
-    }
-    else {
-      return htdl[0];
-    }
-  }
-
-  /**
    * Get tableDescriptors
    * @param names List of table names
    * @return HTD[] the tableDescriptor
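
The removed getTableDescriptorByTableName helper returned the HTableDescriptor for a single table,
or null once the table was gone. Outside of HBaseAdmin's internals, a rough public-API analogue of
that existence check is sketched below; this is not the GetTableDescriptorsRequest/master RPC path
the patch itself takes, and the method name here is only illustrative.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.TableNotFoundException;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class TableGoneCheckSketch {
  static boolean tableStillExists(Configuration conf, TableName tableName) throws IOException {
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      try {
        // Throws TableNotFoundException once the master has dropped the table.
        HTableDescriptor htd = admin.getTableDescriptor(tableName);
        return htd != null;
      } catch (TableNotFoundException e) {
        // Table no longer known to the master -- the delete has completed.
        return false;
      }
    }
  }
}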
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
index c3a94e3..dbaaffa 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
@@ -80,24 +80,23 @@ import com.google.protobuf.Service;
 import com.google.protobuf.ServiceException;
 
 /**
- *
- * HTable is no longer a client API. It is marked InterfaceAudience.Private indicating that
- * this is an HBase-internal class as defined in
- * https://hadoop.apache.org/docs/current/hadoop-project-dist/hadoop-common/InterfaceClassification.html
- * There are no guarantees for backwards source / binary compatibility and methods or class can
- * change or go away without deprecation. Use {@link Connection#getTable(TableName)}
- * to obtain an instance of {@link Table} instead of constructing an HTable directly.
- * 

An implementation of {@link Table}. Used to communicate with a single HBase table. + * An implementation of {@link Table}. Used to communicate with a single HBase table. * Lightweight. Get as needed and just close when done. * Instances of this class SHOULD NOT be constructed directly. * Obtain an instance via {@link Connection}. See {@link ConnectionFactory} * class comment for an example of how. * - *

This class is NOT thread safe for reads nor write. + *

This class is NOT thread safe for reads nor writes. * In the case of writes (Put, Delete), the underlying write buffer can * be corrupted if multiple threads contend over a single HTable instance. * In the case of reads, some fields used by a Scan are shared among all threads. * + *

HTable is no longer a client API. Use {@link Table} instead. It is marked + * InterfaceAudience.Private indicating that this is an HBase-internal class as defined in + * Hadoop Interface Classification + * There are no guarantees for backwards source / binary compatibility and methods or class can + * change or go away without deprecation. + * * @see Table * @see Admin * @see Connection @@ -291,6 +290,8 @@ public class HTable implements HTableInterface, RegionLocator { /** * Creates an object to access a HBase table. + * Used by HBase internally. DO NOT USE. See {@link ConnectionFactory} class comment for how to + * get a {@link Table} instance (use {@link Table} instead of {@link HTable}). * @param tableName Name of the table. * @param connection HConnection to be used. * @param pool ExecutorService to be used. @@ -1799,11 +1800,11 @@ public class HTable implements HTableInterface, RegionLocator { * @throws IOException */ public static void main(String[] args) throws IOException { - Table t = new HTable(HBaseConfiguration.create(), args[0]); - try { - System.out.println(t.get(new Get(Bytes.toBytes(args[1])))); - } finally { - t.close(); + Configuration conf = HBaseConfiguration.create(); + try (Connection connection = ConnectionFactory.createConnection(conf)) { + try (Table table = connection.getTable(TableName.valueOf(args[0]))) { + System.out.println(table.get(new Get(Bytes.toBytes(args[1])))); + } } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MetaScanner.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MetaScanner.java index 5312dfb..c697979 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MetaScanner.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MetaScanner.java @@ -64,8 +64,7 @@ public class MetaScanner { * @param visitor A custom visitor * @throws IOException e */ - public static void metaScan(Configuration configuration, - MetaScannerVisitor visitor) + public static void metaScan(Configuration configuration, MetaScannerVisitor visitor) throws IOException { metaScan(configuration, visitor, null, null, Integer.MAX_VALUE); } @@ -106,8 +105,7 @@ public class MetaScanner { MetaScannerVisitor visitor, TableName userTableName, byte[] row, int rowLimit) throws IOException { - metaScan(configuration, null, visitor, userTableName, row, rowLimit, - TableName.META_TABLE_NAME); + metaScan(configuration, null, visitor, userTableName, row, rowLimit, TableName.META_TABLE_NAME); } /** @@ -133,7 +131,7 @@ public class MetaScanner { throws IOException { boolean closeConnection = false; - if (connection == null){ + if (connection == null) { connection = ConnectionFactory.createConnection(configuration); closeConnection = true; } @@ -141,16 +139,16 @@ public class MetaScanner { int rowUpperLimit = rowLimit > 0 ? rowLimit: Integer.MAX_VALUE; // Calculate startrow for scan. 
byte[] startRow; - ResultScanner scanner = null; - HTable metaTable = null; - try { - metaTable = new HTable(TableName.META_TABLE_NAME, connection, null); + try (Table metaTable = connection.getTable(TableName.META_TABLE_NAME)) { if (row != null) { // Scan starting at a particular row in a particular table byte[] searchRow = HRegionInfo.createRegionName(tableName, row, HConstants.NINES, false); - - Result startRowResult = metaTable.getRowOrBefore(searchRow, HConstants.CATALOG_FAMILY); - + Scan scan = + Scan.createGetClosestRowOrBeforeReverseScan(searchRow, HConstants.CATALOG_FAMILY); + Result startRowResult = null; + try (ResultScanner resultScanner = metaTable.getScanner(scan)) { + startRowResult = resultScanner.next(); + } if (startRowResult == null) { throw new TableNotFoundException("Cannot find row in "+ TableName .META_TABLE_NAME.getNameAsString()+" for table: " @@ -184,25 +182,18 @@ public class MetaScanner { Bytes.toStringBinary(startRow) + " for max=" + rowUpperLimit + " with caching=" + rows); } // Run the scan - scanner = metaTable.getScanner(scan); - Result result; - int processedRows = 0; - while ((result = scanner.next()) != null) { - if (visitor != null) { - if (!visitor.processRow(result)) break; + try (ResultScanner resultScanner = metaTable.getScanner(scan)) { + Result result; + int processedRows = 0; + while ((result = resultScanner.next()) != null) { + if (visitor != null) { + if (!visitor.processRow(result)) break; + } + processedRows++; + if (processedRows >= rowUpperLimit) break; } - processedRows++; - if (processedRows >= rowUpperLimit) break; } } finally { - if (scanner != null) { - try { - scanner.close(); - } catch (Throwable t) { - ExceptionUtil.rethrowIfInterrupt(t); - LOG.debug("Got exception in closing the result scanner", t); - } - } if (visitor != null) { try { visitor.close(); @@ -211,16 +202,8 @@ public class MetaScanner { LOG.debug("Got exception in closing the meta scanner visitor", t); } } - if (metaTable != null) { - try { - metaTable.close(); - } catch (Throwable t) { - ExceptionUtil.rethrowIfInterrupt(t); - LOG.debug("Got exception in closing meta table", t); - } - } if (closeConnection) { - connection.close(); + if (connection != null) connection.close(); } } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionLocator.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionLocator.java index 8168fe1..754beb0 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionLocator.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionLocator.java @@ -30,7 +30,7 @@ import org.apache.hadoop.hbase.util.Pair; /** * Used to view region location information for a single HBase table. - * Obtain an instance from an {@link HConnection}. + * Obtain an instance from a {@link Connection}. * * @see ConnectionFactory * @see Connection @@ -98,4 +98,4 @@ public interface RegionLocator extends Closeable { * Gets the fully qualified table name instance of this table. */ TableName getName(); -} +} \ No newline at end of file diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java index b5bd365..2098939 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java @@ -52,8 +52,8 @@ import org.apache.hadoop.hbase.util.Bytes; * To scan everything for each row, instantiate a Scan object. *

* To modify scanner caching for just this scan, use {@link #setCaching(int) setCaching}. - * If caching is NOT set, we will use the caching value of the hosting {@link HTable}. See - * {@link HTable#setScannerCaching(int)}. In addition to row caching, it is possible to specify a + * If caching is NOT set, we will use the caching value of the hosting {@link Table}. + * In addition to row caching, it is possible to specify a * maximum result size, using {@link #setMaxResultSize(long)}. When both are used, * single server requests are limited by either number of rows or maximum result size, whichever * limit comes first. @@ -893,4 +893,19 @@ public class Scan extends Query { return (Scan) super.setIsolationLevel(level); } -} + /** + * Utility that creates a Scan that will do a small scan in reverse from passed row+family + * looking for next closest row. + * @param row + * @param family + * @return An instance of Scan primed with passed row and family to + * scan in reverse for one row only. + */ + public static Scan createGetClosestRowOrBeforeReverseScan(byte[] row, byte[] family) { + Scan scan = new Scan(row, family); + scan.setSmall(true); + scan.setReversed(true); + scan.setCaching(1); + return scan; + } +} \ No newline at end of file diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/coprocessor/AggregationClient.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/coprocessor/AggregationClient.java index 85ce4e2..d9ce737 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/coprocessor/AggregationClient.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/coprocessor/AggregationClient.java @@ -36,7 +36,8 @@ import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.classification.InterfaceAudience; -import org.apache.hadoop.hbase.client.HTable; +import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.ResultScanner; import org.apache.hadoop.hbase.client.Scan; @@ -102,13 +103,9 @@ public class AggregationClient { public R max( final TableName tableName, final ColumnInterpreter ci, final Scan scan) throws Throwable { - Table table = null; - try { - table = new HTable(conf, tableName); - return max(table, ci, scan); - } finally { - if (table != null) { - table.close(); + try (Connection connection = ConnectionFactory.createConnection(this.conf)) { + try (Table table = connection.getTable(tableName)) { + return max(table, ci, scan); } } } @@ -197,13 +194,9 @@ public class AggregationClient { public R min( final TableName tableName, final ColumnInterpreter ci, final Scan scan) throws Throwable { - Table table = null; - try { - table = new HTable(conf, tableName); - return min(table, ci, scan); - } finally { - if (table != null) { - table.close(); + try (Connection connection = ConnectionFactory.createConnection(conf)) { + try (Table table = connection.getTable(tableName)) { + return min(table, ci, scan); } } } @@ -277,13 +270,9 @@ public class AggregationClient { public long rowCount( final TableName tableName, final ColumnInterpreter ci, final Scan scan) throws Throwable { - Table table = null; - try { - table = new HTable(conf, tableName); - return rowCount(table, ci, scan); - } finally { - if (table != null) { - table.close(); + try (Connection connection = 
ConnectionFactory.createConnection(this.conf)) { + try (Table table = connection.getTable(tableName)) { + return rowCount(table, ci, scan); } } } @@ -351,13 +340,9 @@ public class AggregationClient { public S sum( final TableName tableName, final ColumnInterpreter ci, final Scan scan) throws Throwable { - Table table = null; - try { - table = new HTable(conf, tableName); - return sum(table, ci, scan); - } finally { - if (table != null) { - table.close(); + try (Connection connection = ConnectionFactory.createConnection(this.conf)) { + try (Table table = connection.getTable(tableName)) { + return sum(table, ci, scan); } } } @@ -424,13 +409,9 @@ public class AggregationClient { private Pair getAvgArgs( final TableName tableName, final ColumnInterpreter ci, final Scan scan) throws Throwable { - Table table = null; - try { - table = new HTable(conf, tableName); - return getAvgArgs(table, ci, scan); - } finally { - if (table != null) { - table.close(); + try (Connection connection = ConnectionFactory.createConnection(this.conf)) { + try (Table table = connection.getTable(tableName)) { + return getAvgArgs(table, ci, scan); } } } @@ -615,13 +596,9 @@ public class AggregationClient { public double std(final TableName tableName, ColumnInterpreter ci, Scan scan) throws Throwable { - Table table = null; - try { - table = new HTable(conf, tableName); - return std(table, ci, scan); - } finally { - if (table != null) { - table.close(); + try (Connection connection = ConnectionFactory.createConnection(this.conf)) { + try (Table table = connection.getTable(tableName)) { + return std(table, ci, scan); } } } @@ -728,13 +705,9 @@ public class AggregationClient { public R median(final TableName tableName, ColumnInterpreter ci, Scan scan) throws Throwable { - Table table = null; - try { - table = new HTable(conf, tableName); - return median(table, ci, scan); - } finally { - if (table != null) { - table.close(); + try (Connection connection = ConnectionFactory.createConnection(this.conf)) { + try (Table table = connection.getTable(tableName)) { + return median(table, ci, scan); } } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/package-info.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/package-info.java index e808904..cd172b0 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/package-info.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/package-info.java @@ -28,23 +28,26 @@ Provides HBase Client

Overview

To administer HBase, create and drop tables, list and alter tables, - use {@link org.apache.hadoop.hbase.client.HBaseAdmin}. Once created, table access is via an instance - of {@link org.apache.hadoop.hbase.client.HTable}. You add content to a table a row at a time. To insert, - create an instance of a {@link org.apache.hadoop.hbase.client.Put} object. Specify value, target column - and optionally a timestamp. Commit your update using {@link org.apache.hadoop.hbase.client.HTable#put(Put)}. - To fetch your inserted value, use {@link org.apache.hadoop.hbase.client.Get}. The Get can be specified to be broad -- get all - on a particular row -- or narrow; i.e. return only a single cell value. After creating an instance of - Get, invoke {@link org.apache.hadoop.hbase.client.HTable#get(Get)}. Use - {@link org.apache.hadoop.hbase.client.Scan} to set up a scanner -- a Cursor- like access. After - creating and configuring your Scan instance, call {@link org.apache.hadoop.hbase.client.HTable#getScanner(Scan)} and then - invoke next on the returned object. Both {@link org.apache.hadoop.hbase.client.HTable#get(Get)} and - {@link org.apache.hadoop.hbase.client.HTable#getScanner(Scan)} return a + use {@link org.apache.hadoop.hbase.client.Admin}. Once created, table access is via an instance + of {@link org.apache.hadoop.hbase.client.Table}. You add content to a table a row at a time. To + insert, create an instance of a {@link org.apache.hadoop.hbase.client.Put} object. Specify value, + target column and optionally a timestamp. Commit your update using + {@link org.apache.hadoop.hbase.client.Table#put(Put)}. + To fetch your inserted value, use {@link org.apache.hadoop.hbase.client.Get}. The Get can be + specified to be broad -- get all on a particular row -- or narrow; i.e. return only a single cell + value. After creating an instance of + Get, invoke {@link org.apache.hadoop.hbase.client.Table#get(Get)}. + +

Use {@link org.apache.hadoop.hbase.client.Scan} to set up a scanner -- a Cursor- like access. + After creating and configuring your Scan instance, call + {@link org.apache.hadoop.hbase.client.Table#getScanner(Scan)} and then + invoke next on the returned object. Both {@link org.apache.hadoop.hbase.client.Table#get(Get)} + and {@link org.apache.hadoop.hbase.client.Table#getScanner(Scan)} return a {@link org.apache.hadoop.hbase.client.Result}. -A Result is a List of {@link org.apache.hadoop.hbase.KeyValue}s. It has facility for packaging the return -in different formats. - Use {@link org.apache.hadoop.hbase.client.Delete} to remove content. + +

Use {@link org.apache.hadoop.hbase.client.Delete} to remove content. You can remove individual cells or entire families, etc. Pass it to - {@link org.apache.hadoop.hbase.client.HTable#delete(Delete)} to execute. + {@link org.apache.hadoop.hbase.client.Table#delete(Delete)} to execute.

Puts, Gets and Deletes take out a lock on the target row for the duration of their operation. Concurrent modifications to a single row are serialized. Gets and scans run concurrently without @@ -68,8 +71,11 @@ in different formats. import java.io.IOException; import org.apache.hadoop.hbase.HBaseConfiguration; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.Get; -import org.apache.hadoop.hbase.client.HTable; +import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.ResultScanner; @@ -87,9 +93,16 @@ public class MyLittleHBaseClient { // be found on the CLASSPATH Configuration config = HBaseConfiguration.create(); - // This instantiates an HTable object that connects you to - // the "myLittleHBaseTable" table. - HTable table = new HTable(config, "myLittleHBaseTable"); + // Next you need a Connection to the cluster. Create one. When done with it, + // close it (Should start a try/finally after this creation so it gets closed + // for sure but leaving this out for readibility's sake). + Connection connection = ConnectionFactory.createConnection(config); + + // This instantiates a Table object that connects you to + // the "myLittleHBaseTable" table (TableName.valueOf turns String into TableName instance). + // When done with it, close it (Should start a try/finally after this creation so it gets + // closed for sure but leaving this out for readibility's sake). + Table table = connection.getTable(TableName.valueOf("myLittleHBaseTable")); // To add to a row, use Put. A Put constructor takes the name of the row // you want to insert into as a byte array. In HBase, the Bytes class has @@ -152,15 +165,19 @@ public class MyLittleHBaseClient { // Thats why we have it inside a try/finally clause scanner.close(); } + + // Close your table and cluster connection. + table.close(); + connection.close(); } }

   There are many other methods for putting data into and getting data out of
-  HBase, but these examples should get you started. See the HTable javadoc for
+  HBase, but these examples should get you started. See the Table javadoc for
   more methods. Additionally, there are methods for managing tables in the
-  HBaseAdmin class.
+  Admin class.

If your client is NOT Java, then you should consider the Thrift or REST libraries.

@@ -168,20 +185,14 @@ public class MyLittleHBaseClient {

Related Documentation

-
-  There are many other methods for putting data into and getting data out of
-  HBase, but these examples should get you started. See the HTable javadoc for
-  more methods. Additionally, there are methods for managing tables in the
-  HBaseAdmin class.
-
   See also the section in the HBase Reference Guide where it discusses HBase Client. It
-  has section on how to access HBase from inside your multithreaded environtment
+  has section on how to access HBase from inside your multithreaded environment
   how to control resources consumed client-side, etc.

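Condensed into the try-with-resources style adopted elsewhere in this patch, the put/get/scan/delete
flow the package overview describes looks roughly like the sketch below. The table, family,
qualifier and row names are placeholders borrowed from the example text; this is a minimal sketch,
not the exact MyLittleHBaseClient listing above.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class MyLittleHBaseClientSketch {
  public static void main(String[] args) throws Exception {
    Configuration config = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(config);
         Table table = connection.getTable(TableName.valueOf("myLittleHBaseTable"))) {
      // Insert one cell: row, family, qualifier, value.
      Put put = new Put(Bytes.toBytes("myLittleRow"));
      put.add(Bytes.toBytes("myLittleFamily"), Bytes.toBytes("someQualifier"),
          Bytes.toBytes("Some Value"));
      table.put(put);

      // Read the row back with a Get.
      Result row = table.get(new Get(Bytes.toBytes("myLittleRow")));
      System.out.println("Get: " + row);

      // Scan the whole table, closing the scanner when done.
      try (ResultScanner scanner = table.getScanner(new Scan())) {
        for (Result rr : scanner) {
          System.out.println("Found row: " + rr);
        }
      }

      // Remove the row again.
      table.delete(new Delete(Bytes.toBytes("myLittleRow")));
    }
  }
}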
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaRetriever.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaRetriever.java index f13ce28..5abbcb7 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaRetriever.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaRetriever.java @@ -30,10 +30,12 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.classification.InterfaceStability; -import org.apache.hadoop.hbase.client.HTable; +import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.ResultScanner; import org.apache.hadoop.hbase.client.Scan; +import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas; import org.apache.hadoop.util.StringUtils; @@ -47,23 +49,33 @@ public class QuotaRetriever implements Closeable, Iterable { private final Queue cache = new LinkedList(); private ResultScanner scanner; - private HTable table; + private Connection connection; + private Table table; private QuotaRetriever() { } void init(final Configuration conf, final Scan scan) throws IOException { - table = new HTable(conf, QuotaTableUtil.QUOTA_TABLE_NAME); + this.connection = ConnectionFactory.createConnection(conf); + this.table = this.connection.getTable(QuotaTableUtil.QUOTA_TABLE_NAME); try { scanner = table.getScanner(scan); } catch (IOException e) { table.close(); + connection.close(); throw e; } } public void close() throws IOException { - table.close(); + if (this.table != null) { + this.table.close(); + this.table = null; + if (this.connection != null) { + this.connection.close(); + this.connection = null; + } + } } public QuotaSettings next() throws IOException { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaTableUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaTableUtil.java index 6153876..548b1af 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaTableUtil.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaTableUtil.java @@ -32,10 +32,12 @@ import org.apache.hadoop.hbase.NamespaceDescriptor; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.classification.InterfaceStability; +import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.Get; -import org.apache.hadoop.hbase.client.HTable; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.Scan; +import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.filter.CompareFilter; import org.apache.hadoop.hbase.filter.Filter; import org.apache.hadoop.hbase.filter.FilterList; @@ -323,21 +325,19 @@ public class QuotaTableUtil { */ protected static Result doGet(final Configuration conf, final Get get) throws IOException { - HTable table = new HTable(conf, QUOTA_TABLE_NAME); - try { - return table.get(get); - } finally { - table.close(); + try (Connection connection = ConnectionFactory.createConnection(conf)) { + try (Table table = connection.getTable(QUOTA_TABLE_NAME)) { + return table.get(get); + } } } protected static Result[] 
doGet(final Configuration conf, final List gets) throws IOException { - HTable table = new HTable(conf, QUOTA_TABLE_NAME); - try { - return table.get(gets); - } finally { - table.close(); + try (Connection connection = ConnectionFactory.createConnection(conf)) { + try (Table table = connection.getTable(QUOTA_TABLE_NAME)) { + return table.get(gets); + } } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlClient.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlClient.java index 922bf67..a6f550a 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlClient.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlClient.java @@ -32,8 +32,8 @@ import org.apache.hadoop.hbase.ZooKeeperConnectionException; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.classification.InterfaceStability; import org.apache.hadoop.hbase.client.Admin; -import org.apache.hadoop.hbase.client.HBaseAdmin; -import org.apache.hadoop.hbase.client.HTable; +import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel; import org.apache.hadoop.hbase.protobuf.ProtobufUtil; @@ -50,11 +50,7 @@ public class AccessControlClient { public static final TableName ACL_TABLE_NAME = TableName.valueOf(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR, "acl"); - private static HTable getAclTable(Configuration conf) throws IOException { - return new HTable(conf, ACL_TABLE_NAME); - } - - private static BlockingInterface getAccessControlServiceStub(HTable ht) + private static BlockingInterface getAccessControlServiceStub(Table ht) throws IOException { CoprocessorRpcChannel service = ht.coprocessorService(HConstants.EMPTY_START_ROW); BlockingInterface protocol = @@ -75,14 +71,10 @@ public class AccessControlClient { public static void grant(Configuration conf, final TableName tableName, final String userName, final byte[] family, final byte[] qual, final Permission.Action... actions) throws Throwable { - HTable ht = null; - try { - ht = getAclTable(conf); - ProtobufUtil.grant(getAccessControlServiceStub(ht), userName, tableName, family, qual, + try (Connection connection = ConnectionFactory.createConnection(conf)) { + try (Table table = connection.getTable(ACL_TABLE_NAME)) { + ProtobufUtil.grant(getAccessControlServiceStub(table), userName, tableName, family, qual, actions); - } finally { - if (ht != null) { - ht.close(); } } } @@ -97,26 +89,18 @@ public class AccessControlClient { */ public static void grant(Configuration conf, final String namespace, final String userName, final Permission.Action... 
actions) throws Throwable { - HTable ht = null; - try { - ht = getAclTable(conf); - ProtobufUtil.grant(getAccessControlServiceStub(ht), userName, namespace, actions); - } finally { - if (ht != null) { - ht.close(); + try (Connection connection = ConnectionFactory.createConnection(conf)) { + try (Table table = connection.getTable(ACL_TABLE_NAME)) { + ProtobufUtil.grant(getAccessControlServiceStub(table), userName, namespace, actions); } } } public static boolean isAccessControllerRunning(Configuration conf) throws MasterNotRunningException, ZooKeeperConnectionException, IOException { - HBaseAdmin ha = null; - try { - ha = new HBaseAdmin(conf); - return ha.isTableAvailable(ACL_TABLE_NAME); - } finally { - if (ha != null) { - ha.close(); + try (Connection connection = ConnectionFactory.createConnection(conf)) { + try (Admin admin = connection.getAdmin()) { + return admin.isTableAvailable(ACL_TABLE_NAME); } } } @@ -134,14 +118,10 @@ public class AccessControlClient { public static void revoke(Configuration conf, final TableName tableName, final String username, final byte[] family, final byte[] qualifier, final Permission.Action... actions) throws Throwable { - HTable ht = null; - try { - ht = getAclTable(conf); - ProtobufUtil.revoke(getAccessControlServiceStub(ht), username, tableName, family, qualifier, - actions); - } finally { - if (ht != null) { - ht.close(); + try (Connection connection = ConnectionFactory.createConnection(conf)) { + try (Table table = connection.getTable(ACL_TABLE_NAME)) { + ProtobufUtil.revoke(getAccessControlServiceStub(table), username, tableName, family, + qualifier, actions); } } } @@ -156,13 +136,9 @@ public class AccessControlClient { */ public static void revoke(Configuration conf, final String namespace, final String userName, final Permission.Action... 
actions) throws Throwable { - HTable ht = null; - try { - ht = getAclTable(conf); - ProtobufUtil.revoke(getAccessControlServiceStub(ht), userName, namespace, actions); - } finally { - if (ht != null) { - ht.close(); + try (Connection connection = ConnectionFactory.createConnection(conf)) { + try (Table table = connection.getTable(ACL_TABLE_NAME)) { + ProtobufUtil.revoke(getAccessControlServiceStub(table), userName, namespace, actions); } } } @@ -177,36 +153,27 @@ public class AccessControlClient { public static List getUserPermissions(Configuration conf, String tableRegex) throws Throwable { List permList = new ArrayList(); - Table ht = null; - Admin ha = null; - try { - ha = new HBaseAdmin(conf); - ht = new HTable(conf, ACL_TABLE_NAME); - CoprocessorRpcChannel service = ht.coprocessorService(HConstants.EMPTY_START_ROW); - BlockingInterface protocol = AccessControlProtos.AccessControlService - .newBlockingStub(service); - HTableDescriptor[] htds = null; - - if (tableRegex == null || tableRegex.isEmpty()) { - permList = ProtobufUtil.getUserPermissions(protocol); - } else if (tableRegex.charAt(0) == '@') { - String namespace = tableRegex.substring(1); - permList = ProtobufUtil.getUserPermissions(protocol, Bytes.toBytes(namespace)); - } else { - htds = ha.listTables(Pattern.compile(tableRegex)); - for (HTableDescriptor hd : htds) { - permList.addAll(ProtobufUtil.getUserPermissions(protocol, hd.getTableName())); + try (Connection connection = ConnectionFactory.createConnection(conf)) { + try (Table table = connection.getTable(ACL_TABLE_NAME)) { + try (Admin admin = connection.getAdmin()) { + CoprocessorRpcChannel service = table.coprocessorService(HConstants.EMPTY_START_ROW); + BlockingInterface protocol = + AccessControlProtos.AccessControlService.newBlockingStub(service); + HTableDescriptor[] htds = null; + if (tableRegex == null || tableRegex.isEmpty()) { + permList = ProtobufUtil.getUserPermissions(protocol); + } else if (tableRegex.charAt(0) == '@') { + String namespace = tableRegex.substring(1); + permList = ProtobufUtil.getUserPermissions(protocol, Bytes.toBytes(namespace)); + } else { + htds = admin.listTables(Pattern.compile(tableRegex)); + for (HTableDescriptor hd : htds) { + permList.addAll(ProtobufUtil.getUserPermissions(protocol, hd.getTableName())); + } + } } } - } finally { - if (ht != null) { - ht.close(); - } - if (ha != null) { - ha.close(); - } } return permList; } - -} +} \ No newline at end of file diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityClient.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityClient.java index 5ca5231..20eff02 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityClient.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityClient.java @@ -26,7 +26,8 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.classification.InterfaceStability; -import org.apache.hadoop.hbase.client.HTable; +import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.client.coprocessor.Batch; import org.apache.hadoop.hbase.ipc.BlockingRpcCallback; @@ -73,40 +74,37 @@ public class VisibilityClient { */ public static VisibilityLabelsResponse 
addLabels(Configuration conf, final String[] labels) throws Throwable { - Table ht = null; - try { - ht = new HTable(conf, LABELS_TABLE_NAME); - Batch.Call callable = - new Batch.Call() { - ServerRpcController controller = new ServerRpcController(); - BlockingRpcCallback rpcCallback = - new BlockingRpcCallback(); + try (Connection connection = ConnectionFactory.createConnection(conf)) { + try (Table table = connection.getTable(LABELS_TABLE_NAME)) { + Batch.Call callable = + new Batch.Call() { + ServerRpcController controller = new ServerRpcController(); + BlockingRpcCallback rpcCallback = + new BlockingRpcCallback(); - public VisibilityLabelsResponse call(VisibilityLabelsService service) throws IOException { - VisibilityLabelsRequest.Builder builder = VisibilityLabelsRequest.newBuilder(); - for (String label : labels) { - if (label.length() > 0) { - VisibilityLabel.Builder newBuilder = VisibilityLabel.newBuilder(); - newBuilder.setLabel(ByteStringer.wrap(Bytes.toBytes(label))); - builder.addVisLabel(newBuilder.build()); + public VisibilityLabelsResponse call(VisibilityLabelsService service) + throws IOException { + VisibilityLabelsRequest.Builder builder = VisibilityLabelsRequest.newBuilder(); + for (String label : labels) { + if (label.length() > 0) { + VisibilityLabel.Builder newBuilder = VisibilityLabel.newBuilder(); + newBuilder.setLabel(ByteStringer.wrap(Bytes.toBytes(label))); + builder.addVisLabel(newBuilder.build()); + } } + service.addLabels(controller, builder.build(), rpcCallback); + VisibilityLabelsResponse response = rpcCallback.get(); + if (controller.failedOnException()) { + throw controller.getFailedOn(); + } + return response; } - service.addLabels(controller, builder.build(), rpcCallback); - VisibilityLabelsResponse response = rpcCallback.get(); - if (controller.failedOnException()) { - throw controller.getFailedOn(); - } - return response; - } - }; - Map result = ht.coprocessorService( - VisibilityLabelsService.class, HConstants.EMPTY_BYTE_ARRAY, HConstants.EMPTY_BYTE_ARRAY, - callable); - return result.values().iterator().next(); // There will be exactly one region for labels - // table and so one entry in result Map. - } finally { - if (ht != null) { - ht.close(); + }; + Map result = + table.coprocessorService(VisibilityLabelsService.class, HConstants.EMPTY_BYTE_ARRAY, + HConstants.EMPTY_BYTE_ARRAY, callable); + return result.values().iterator().next(); // There will be exactly one region for labels + // table and so one entry in result Map. 
} } } @@ -131,33 +129,30 @@ public class VisibilityClient { * @throws Throwable */ public static GetAuthsResponse getAuths(Configuration conf, final String user) throws Throwable { - Table ht = null; - try { - ht = new HTable(conf, LABELS_TABLE_NAME); - Batch.Call callable = - new Batch.Call() { - ServerRpcController controller = new ServerRpcController(); - BlockingRpcCallback rpcCallback = - new BlockingRpcCallback(); + try (Connection connection = ConnectionFactory.createConnection(conf)) { + try (Table table = connection.getTable(LABELS_TABLE_NAME)) { + Batch.Call callable = + new Batch.Call() { + ServerRpcController controller = new ServerRpcController(); + BlockingRpcCallback rpcCallback = + new BlockingRpcCallback(); - public GetAuthsResponse call(VisibilityLabelsService service) throws IOException { - GetAuthsRequest.Builder getAuthReqBuilder = GetAuthsRequest.newBuilder(); - getAuthReqBuilder.setUser(ByteStringer.wrap(Bytes.toBytes(user))); - service.getAuths(controller, getAuthReqBuilder.build(), rpcCallback); - GetAuthsResponse response = rpcCallback.get(); - if (controller.failedOnException()) { - throw controller.getFailedOn(); + public GetAuthsResponse call(VisibilityLabelsService service) throws IOException { + GetAuthsRequest.Builder getAuthReqBuilder = GetAuthsRequest.newBuilder(); + getAuthReqBuilder.setUser(ByteStringer.wrap(Bytes.toBytes(user))); + service.getAuths(controller, getAuthReqBuilder.build(), rpcCallback); + GetAuthsResponse response = rpcCallback.get(); + if (controller.failedOnException()) { + throw controller.getFailedOn(); + } + return response; } - return response; - } - }; - Map result = ht.coprocessorService(VisibilityLabelsService.class, - HConstants.EMPTY_BYTE_ARRAY, HConstants.EMPTY_BYTE_ARRAY, callable); - return result.values().iterator().next(); // There will be exactly one region for labels - // table and so one entry in result Map. - } finally { - if (ht != null) { - ht.close(); + }; + Map result = + table.coprocessorService(VisibilityLabelsService.class, + HConstants.EMPTY_BYTE_ARRAY, HConstants.EMPTY_BYTE_ARRAY, callable); + return result.values().iterator().next(); // There will be exactly one region for labels + // table and so one entry in result Map. 
} } } @@ -177,44 +172,40 @@ public class VisibilityClient { private static VisibilityLabelsResponse setOrClearAuths(Configuration conf, final String[] auths, final String user, final boolean setOrClear) throws IOException, ServiceException, Throwable { - Table ht = null; - try { - ht = new HTable(conf, LABELS_TABLE_NAME); - Batch.Call callable = - new Batch.Call() { - ServerRpcController controller = new ServerRpcController(); - BlockingRpcCallback rpcCallback = - new BlockingRpcCallback(); + try (Connection connection = ConnectionFactory.createConnection(conf)) { + try (Table table = connection.getTable(LABELS_TABLE_NAME)) { + Batch.Call callable = + new Batch.Call() { + ServerRpcController controller = new ServerRpcController(); + BlockingRpcCallback rpcCallback = + new BlockingRpcCallback(); - public VisibilityLabelsResponse call(VisibilityLabelsService service) throws IOException { - SetAuthsRequest.Builder setAuthReqBuilder = SetAuthsRequest.newBuilder(); - setAuthReqBuilder.setUser(ByteStringer.wrap(Bytes.toBytes(user))); - for (String auth : auths) { - if (auth.length() > 0) { - setAuthReqBuilder.addAuth(ByteStringer.wrap(Bytes.toBytes(auth))); + public VisibilityLabelsResponse call(VisibilityLabelsService service) throws IOException { + SetAuthsRequest.Builder setAuthReqBuilder = SetAuthsRequest.newBuilder(); + setAuthReqBuilder.setUser(ByteStringer.wrap(Bytes.toBytes(user))); + for (String auth : auths) { + if (auth.length() > 0) { + setAuthReqBuilder.addAuth(ByteStringer.wrap(Bytes.toBytes(auth))); + } } + if (setOrClear) { + service.setAuths(controller, setAuthReqBuilder.build(), rpcCallback); + } else { + service.clearAuths(controller, setAuthReqBuilder.build(), rpcCallback); + } + VisibilityLabelsResponse response = rpcCallback.get(); + if (controller.failedOnException()) { + throw controller.getFailedOn(); + } + return response; } - if (setOrClear) { - service.setAuths(controller, setAuthReqBuilder.build(), rpcCallback); - } else { - service.clearAuths(controller, setAuthReqBuilder.build(), rpcCallback); - } - VisibilityLabelsResponse response = rpcCallback.get(); - if (controller.failedOnException()) { - throw controller.getFailedOn(); - } - return response; - } - }; - Map result = ht.coprocessorService( - VisibilityLabelsService.class, HConstants.EMPTY_BYTE_ARRAY, HConstants.EMPTY_BYTE_ARRAY, - callable); - return result.values().iterator().next(); // There will be exactly one region for labels - // table and so one entry in result Map. - } finally { - if (ht != null) { - ht.close(); + }; + Map result = table.coprocessorService( + VisibilityLabelsService.class, HConstants.EMPTY_BYTE_ARRAY, HConstants.EMPTY_BYTE_ARRAY, + callable); + return result.values().iterator().next(); // There will be exactly one region for labels + // table and so one entry in result Map. } } } -} +} \ No newline at end of file diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClusterStatusProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClusterStatusProtos.java index 5bc44ff..c558485 100644 --- a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClusterStatusProtos.java +++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClusterStatusProtos.java @@ -2153,24 +2153,6 @@ public final class ClusterStatusProtos { * */ long getCompleteSequenceId(); - - // optional float data_locality = 16; - /** - * optional float data_locality = 16; - * - *
-     ** The current data locality for region in the regionserver 
-     * 
- */ - boolean hasDataLocality(); - /** - * optional float data_locality = 16; - * - *
-     ** The current data locality for region in the regionserver 
-     * 
- */ - float getDataLocality(); } /** * Protobuf type {@code RegionLoad} @@ -2306,11 +2288,6 @@ public final class ClusterStatusProtos { completeSequenceId_ = input.readUInt64(); break; } - case 133: { - bitField0_ |= 0x00008000; - dataLocality_ = input.readFloat(); - break; - } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { @@ -2729,30 +2706,6 @@ public final class ClusterStatusProtos { return completeSequenceId_; } - // optional float data_locality = 16; - public static final int DATA_LOCALITY_FIELD_NUMBER = 16; - private float dataLocality_; - /** - * optional float data_locality = 16; - * - *
-     ** The current data locality for region in the regionserver 
-     * 
- */ - public boolean hasDataLocality() { - return ((bitField0_ & 0x00008000) == 0x00008000); - } - /** - * optional float data_locality = 16; - * - *
-     ** The current data locality for region in the regionserver 
-     * 
- */ - public float getDataLocality() { - return dataLocality_; - } - private void initFields() { regionSpecifier_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance(); stores_ = 0; @@ -2769,7 +2722,6 @@ public final class ClusterStatusProtos { totalStaticIndexSizeKB_ = 0; totalStaticBloomSizeKB_ = 0; completeSequenceId_ = 0L; - dataLocality_ = 0F; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { @@ -2836,9 +2788,6 @@ public final class ClusterStatusProtos { if (((bitField0_ & 0x00004000) == 0x00004000)) { output.writeUInt64(15, completeSequenceId_); } - if (((bitField0_ & 0x00008000) == 0x00008000)) { - output.writeFloat(16, dataLocality_); - } getUnknownFields().writeTo(output); } @@ -2908,10 +2857,6 @@ public final class ClusterStatusProtos { size += com.google.protobuf.CodedOutputStream .computeUInt64Size(15, completeSequenceId_); } - if (((bitField0_ & 0x00008000) == 0x00008000)) { - size += com.google.protobuf.CodedOutputStream - .computeFloatSize(16, dataLocality_); - } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; @@ -3010,10 +2955,6 @@ public final class ClusterStatusProtos { result = result && (getCompleteSequenceId() == other.getCompleteSequenceId()); } - result = result && (hasDataLocality() == other.hasDataLocality()); - if (hasDataLocality()) { - result = result && (Float.floatToIntBits(getDataLocality()) == Float.floatToIntBits(other.getDataLocality())); - } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; @@ -3087,11 +3028,6 @@ public final class ClusterStatusProtos { hash = (37 * hash) + COMPLETE_SEQUENCE_ID_FIELD_NUMBER; hash = (53 * hash) + hashLong(getCompleteSequenceId()); } - if (hasDataLocality()) { - hash = (37 * hash) + DATA_LOCALITY_FIELD_NUMBER; - hash = (53 * hash) + Float.floatToIntBits( - getDataLocality()); - } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; @@ -3236,8 +3172,6 @@ public final class ClusterStatusProtos { bitField0_ = (bitField0_ & ~0x00002000); completeSequenceId_ = 0L; bitField0_ = (bitField0_ & ~0x00004000); - dataLocality_ = 0F; - bitField0_ = (bitField0_ & ~0x00008000); return this; } @@ -3330,10 +3264,6 @@ public final class ClusterStatusProtos { to_bitField0_ |= 0x00004000; } result.completeSequenceId_ = completeSequenceId_; - if (((from_bitField0_ & 0x00008000) == 0x00008000)) { - to_bitField0_ |= 0x00008000; - } - result.dataLocality_ = dataLocality_; result.bitField0_ = to_bitField0_; onBuilt(); return result; @@ -3395,9 +3325,6 @@ public final class ClusterStatusProtos { if (other.hasCompleteSequenceId()) { setCompleteSequenceId(other.getCompleteSequenceId()); } - if (other.hasDataLocality()) { - setDataLocality(other.getDataLocality()); - } this.mergeUnknownFields(other.getUnknownFields()); return this; } @@ -4288,55 +4215,6 @@ public final class ClusterStatusProtos { return this; } - // optional float data_locality = 16; - private float dataLocality_ ; - /** - * optional float data_locality = 16; - * - *
-       ** The current data locality for region in the regionserver 
-       * 
- */ - public boolean hasDataLocality() { - return ((bitField0_ & 0x00008000) == 0x00008000); - } - /** - * optional float data_locality = 16; - * - *
-       ** The current data locality for region in the regionserver 
-       * 
- */ - public float getDataLocality() { - return dataLocality_; - } - /** - * optional float data_locality = 16; - * - *
-       ** The current data locality for region in the regionserver 
-       * 
- */ - public Builder setDataLocality(float value) { - bitField0_ |= 0x00008000; - dataLocality_ = value; - onChanged(); - return this; - } - /** - * optional float data_locality = 16; - * - *
-       ** The current data locality for region in the regionserver 
-       * 
- */ - public Builder clearDataLocality() { - bitField0_ = (bitField0_ & ~0x00008000); - dataLocality_ = 0F; - onChanged(); - return this; - } - // @@protoc_insertion_point(builder_scope:RegionLoad) } @@ -10472,7 +10350,7 @@ public final class ClusterStatusProtos { "PLITTING_NEW\020\r\022\017\n\013MERGING_NEW\020\016\"X\n\022Regio", "nInTransition\022\036\n\004spec\030\001 \002(\0132\020.RegionSpec" + "ifier\022\"\n\014region_state\030\002 \002(\0132\014.RegionStat" + - "e\"\347\003\n\nRegionLoad\022*\n\020region_specifier\030\001 \002" + + "e\"\320\003\n\nRegionLoad\022*\n\020region_specifier\030\001 \002" + "(\0132\020.RegionSpecifier\022\016\n\006stores\030\002 \001(\r\022\022\n\n" + "storefiles\030\003 \001(\r\022\"\n\032store_uncompressed_s" + "ize_MB\030\004 \001(\r\022\031\n\021storefile_size_MB\030\005 \001(\r\022" + @@ -10483,27 +10361,27 @@ public final class ClusterStatusProtos { "ompacted_KVs\030\013 \001(\004\022\032\n\022root_index_size_KB" + "\030\014 \001(\r\022\"\n\032total_static_index_size_KB\030\r \001" + "(\r\022\"\n\032total_static_bloom_size_KB\030\016 \001(\r\022\034" + - "\n\024complete_sequence_id\030\017 \001(\004\022\025\n\rdata_loc" + - "ality\030\020 \001(\002\"\212\002\n\nServerLoad\022\032\n\022number_of_" + - "requests\030\001 \001(\r\022 \n\030total_number_of_reques" + - "ts\030\002 \001(\r\022\024\n\014used_heap_MB\030\003 \001(\r\022\023\n\013max_he" + - "ap_MB\030\004 \001(\r\022!\n\014region_loads\030\005 \003(\0132\013.Regi" + - "onLoad\022\"\n\014coprocessors\030\006 \003(\0132\014.Coprocess" + - "or\022\031\n\021report_start_time\030\007 \001(\004\022\027\n\017report_", - "end_time\030\010 \001(\004\022\030\n\020info_server_port\030\t \001(\r" + - "\"O\n\016LiveServerInfo\022\033\n\006server\030\001 \002(\0132\013.Ser" + - "verName\022 \n\013server_load\030\002 \002(\0132\013.ServerLoa" + - "d\"\340\002\n\rClusterStatus\022/\n\rhbase_version\030\001 \001" + - "(\0132\030.HBaseVersionFileContent\022%\n\014live_ser" + - "vers\030\002 \003(\0132\017.LiveServerInfo\022!\n\014dead_serv" + - "ers\030\003 \003(\0132\013.ServerName\0222\n\025regions_in_tra" + - "nsition\030\004 \003(\0132\023.RegionInTransition\022\036\n\ncl" + - "uster_id\030\005 \001(\0132\n.ClusterId\022)\n\023master_cop" + - "rocessors\030\006 \003(\0132\014.Coprocessor\022\033\n\006master\030", - "\007 \001(\0132\013.ServerName\022#\n\016backup_masters\030\010 \003" + - "(\0132\013.ServerName\022\023\n\013balancer_on\030\t \001(\010BF\n*" + - "org.apache.hadoop.hbase.protobuf.generat" + - "edB\023ClusterStatusProtosH\001\240\001\001" + "\n\024complete_sequence_id\030\017 \001(\004\"\212\002\n\nServerL" + + "oad\022\032\n\022number_of_requests\030\001 \001(\r\022 \n\030total" + + "_number_of_requests\030\002 \001(\r\022\024\n\014used_heap_M" + + "B\030\003 \001(\r\022\023\n\013max_heap_MB\030\004 \001(\r\022!\n\014region_l" + + "oads\030\005 \003(\0132\013.RegionLoad\022\"\n\014coprocessors\030" + + "\006 \003(\0132\014.Coprocessor\022\031\n\021report_start_time" + + "\030\007 \001(\004\022\027\n\017report_end_time\030\010 \001(\004\022\030\n\020info_", + "server_port\030\t \001(\r\"O\n\016LiveServerInfo\022\033\n\006s" + + "erver\030\001 \002(\0132\013.ServerName\022 \n\013server_load\030" + + "\002 \002(\0132\013.ServerLoad\"\340\002\n\rClusterStatus\022/\n\r" + + "hbase_version\030\001 \001(\0132\030.HBaseVersionFileCo" + + "ntent\022%\n\014live_servers\030\002 \003(\0132\017.LiveServer" + + "Info\022!\n\014dead_servers\030\003 \003(\0132\013.ServerName\022" + + 
"2\n\025regions_in_transition\030\004 \003(\0132\023.RegionI" + + "nTransition\022\036\n\ncluster_id\030\005 \001(\0132\n.Cluste" + + "rId\022)\n\023master_coprocessors\030\006 \003(\0132\014.Copro" + + "cessor\022\033\n\006master\030\007 \001(\0132\013.ServerName\022#\n\016b", + "ackup_masters\030\010 \003(\0132\013.ServerName\022\023\n\013bala" + + "ncer_on\030\t \001(\010BF\n*org.apache.hadoop.hbase" + + ".protobuf.generatedB\023ClusterStatusProtos" + + "H\001\240\001\001" }; com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { @@ -10527,7 +10405,7 @@ public final class ClusterStatusProtos { internal_static_RegionLoad_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_RegionLoad_descriptor, - new java.lang.String[] { "RegionSpecifier", "Stores", "Storefiles", "StoreUncompressedSizeMB", "StorefileSizeMB", "MemstoreSizeMB", "StorefileIndexSizeMB", "ReadRequestsCount", "WriteRequestsCount", "TotalCompactingKVs", "CurrentCompactedKVs", "RootIndexSizeKB", "TotalStaticIndexSizeKB", "TotalStaticBloomSizeKB", "CompleteSequenceId", "DataLocality", }); + new java.lang.String[] { "RegionSpecifier", "Stores", "Storefiles", "StoreUncompressedSizeMB", "StorefileSizeMB", "MemstoreSizeMB", "StorefileIndexSizeMB", "ReadRequestsCount", "WriteRequestsCount", "TotalCompactingKVs", "CurrentCompactedKVs", "RootIndexSizeKB", "TotalStaticIndexSizeKB", "TotalStaticBloomSizeKB", "CompleteSequenceId", }); internal_static_ServerLoad_descriptor = getDescriptor().getMessageTypes().get(3); internal_static_ServerLoad_fieldAccessorTable = new diff --git a/hbase-protocol/src/main/protobuf/ClusterStatus.proto b/hbase-protocol/src/main/protobuf/ClusterStatus.proto index 7e78395..dbf00dc 100644 --- a/hbase-protocol/src/main/protobuf/ClusterStatus.proto +++ b/hbase-protocol/src/main/protobuf/ClusterStatus.proto @@ -110,9 +110,6 @@ message RegionLoad { /** the most recent sequence Id from cache flush */ optional uint64 complete_sequence_id = 15; - - /** The current data locality for region in the regionserver */ - optional float data_locality = 16; } /* Server-level protobufs */ diff --git a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/RegionListTmpl.jamon b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/RegionListTmpl.jamon index 6ca8ec6..3ff4cb6 100644 --- a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/RegionListTmpl.jamon +++ b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/RegionListTmpl.jamon @@ -154,7 +154,6 @@ Storefile Size Index Size Bloom Size - Data Locality <%for HRegionInfo r: onlineRegions %> @@ -172,7 +171,6 @@ <% load.getStorefileSizeMB() %>m <% load.getTotalStaticIndexSizeKB() %>k <% load.getTotalStaticBloomSizeKB() %>k - <% load.getDataLocality() %> diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/HTableWrapper.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/HTableWrapper.java index 660733d..aaf3ddf 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/HTableWrapper.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/HTableWrapper.java @@ -18,10 +18,12 @@ */ package org.apache.hadoop.hbase.client; -import com.google.protobuf.Descriptors.MethodDescriptor; -import com.google.protobuf.Message; -import com.google.protobuf.Service; -import com.google.protobuf.ServiceException; +import 
java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.concurrent.ExecutorService; + import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.TableName; @@ -32,11 +34,10 @@ import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp; import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel; import org.apache.hadoop.io.MultipleIOException; -import java.io.IOException; -import java.util.ArrayList; -import java.util.List; -import java.util.Map; -import java.util.concurrent.ExecutorService; +import com.google.protobuf.Descriptors.MethodDescriptor; +import com.google.protobuf.Message; +import com.google.protobuf.Service; +import com.google.protobuf.ServiceException; /** * A wrapper for HTable. Can be used to restrict privilege. @@ -55,7 +56,7 @@ import java.util.concurrent.ExecutorService; public class HTableWrapper implements HTableInterface { private TableName tableName; - private HTable table; + private final Table table; private ClusterConnection connection; private final List openTables; @@ -73,7 +74,7 @@ public class HTableWrapper implements HTableInterface { ClusterConnection connection, ExecutorService pool) throws IOException { this.tableName = tableName; - this.table = new HTable(tableName, connection, pool); + this.table = connection.getTable(tableName, pool); this.connection = connection; this.openTables = openTables; this.openTables.add(this); @@ -82,7 +83,7 @@ public class HTableWrapper implements HTableInterface { public void internalClose() throws IOException { List exceptions = new ArrayList(2); try { - table.close(); + table.close(); } catch (IOException e) { exceptions.add(e); } @@ -114,7 +115,12 @@ public class HTableWrapper implements HTableInterface { @Deprecated public Result getRowOrBefore(byte[] row, byte[] family) throws IOException { - return table.getRowOrBefore(row, family); + Scan scan = Scan.createGetClosestRowOrBeforeReverseScan(row, family); + Result startRowResult = null; + try (ResultScanner resultScanner = this.table.getScanner(scan)) { + startRowResult = resultScanner.next(); + } + return startRowResult; } public Result get(Get get) throws IOException { @@ -130,8 +136,15 @@ public class HTableWrapper implements HTableInterface { } @Deprecated - public Boolean[] exists(List gets) throws IOException{ - return table.exists(gets); + public Boolean[] exists(List gets) throws IOException { + // Do convertion. + boolean [] exists = table.existsAll(gets); + if (exists == null) return null; + Boolean [] results = new Boolean [exists.length]; + for (int i = 0; i < exists.length; i++) { + results[i] = exists[i]? 
Boolean.TRUE: Boolean.FALSE; + } + return results; } public void put(Put put) throws IOException { @@ -296,12 +309,12 @@ public class HTableWrapper implements HTableInterface { @Override public void setAutoFlush(boolean autoFlush) { - table.setAutoFlush(autoFlush, autoFlush); + table.setAutoFlushTo(autoFlush); } @Override public void setAutoFlush(boolean autoFlush, boolean clearBufferOnFail) { - table.setAutoFlush(autoFlush, clearBufferOnFail); + throw new UnsupportedOperationException(); } @Override @@ -322,7 +335,8 @@ public class HTableWrapper implements HTableInterface { @Override public long incrementColumnValue(byte[] row, byte[] family, byte[] qualifier, long amount, boolean writeToWAL) throws IOException { - return table.incrementColumnValue(row, family, qualifier, amount, writeToWAL); + return table.incrementColumnValue(row, family, qualifier, amount, + writeToWAL? Durability.USE_DEFAULT: Durability.SKIP_WAL); } @Override @@ -346,4 +360,4 @@ public class HTableWrapper implements HTableInterface { CompareOp compareOp, byte[] value, RowMutations rm) throws IOException { return table.checkAndMutate(row, family, qualifier, compareOp, value, rm); } -} +} \ No newline at end of file diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/HRegionPartitioner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/HRegionPartitioner.java index 11acea0..5444c6a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/HRegionPartitioner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/HRegionPartitioner.java @@ -22,11 +22,12 @@ import java.io.IOException; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hbase.classification.InterfaceAudience; -import org.apache.hadoop.hbase.classification.InterfaceStability; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.client.HTable; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; +import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.RegionLocator; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.hadoop.hbase.util.Bytes; @@ -47,19 +48,22 @@ import org.apache.hadoop.mapred.Partitioner; public class HRegionPartitioner implements Partitioner { private static final Log LOG = LogFactory.getLog(HRegionPartitioner.class); - private RegionLocator table; + // Are connection and locator cleaned up or just die when partitioner is done? 
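The question in the comment above is left open by the patch. A minimal, standalone sketch of the same locator-based lookup with explicit cleanup; the StartKeyPartitionHelper name and the simplified bucketing are assumptions for illustration, not part of this change:

import java.io.Closeable;
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.util.Bytes;

/** Hypothetical helper mirroring the locator-based lookup the partitioner now performs. */
public class StartKeyPartitionHelper implements Closeable {
  private final Connection connection;
  private final RegionLocator locator;
  private final byte[][] startKeys;

  public StartKeyPartitionHelper(Configuration conf, TableName tableName) throws IOException {
    this.connection = ConnectionFactory.createConnection(HBaseConfiguration.create(conf));
    this.locator = connection.getRegionLocator(tableName);
    this.startKeys = locator.getStartKeys();
  }

  /** Buckets a row key by the start key of the region currently holding it. */
  public int getPartition(byte[] rowKey, int numPartitions) throws IOException {
    byte[] regionStartKey = locator.getRegionLocation(rowKey).getRegionInfo().getStartKey();
    for (int i = 0; i < startKeys.length; i++) {
      if (Bytes.compareTo(regionStartKey, startKeys[i]) == 0) {
        // Wrap around when there are more regions than partitions (simplified vs. the real class).
        return i < numPartitions ? i : i % numPartitions;
      }
    }
    return 0; // no start key matched; fall back to the first partition
  }

  @Override
  public void close() throws IOException {
    locator.close();
    connection.close();
  }
}

Closing the locator and then the connection in close() is one way the partitioner could release the resources it now owns once the job is done.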
+ private Connection connection; + private RegionLocator locator; private byte[][] startKeys; public void configure(JobConf job) { try { - this.table = new HTable(HBaseConfiguration.create(job), - TableName.valueOf(job.get(TableOutputFormat.OUTPUT_TABLE))); + this.connection = ConnectionFactory.createConnection(HBaseConfiguration.create(job)); + TableName tableName = TableName.valueOf(job.get(TableOutputFormat.OUTPUT_TABLE)); + this.locator = this.connection.getRegionLocator(tableName); } catch (IOException e) { LOG.error(e); } try { - this.startKeys = this.table.getStartKeys(); + this.startKeys = this.locator.getStartKeys(); } catch (IOException e) { LOG.error(e); } @@ -75,7 +79,7 @@ implements Partitioner { try { // Not sure if this is cached after a split so we could have problems // here if a region splits while mapping - region = table.getRegionLocation(key.get()).getRegionInfo().getStartKey(); + region = locator.getRegionLocation(key.get()).getRegionInfo().getStartKey(); } catch (IOException e) { LOG.error(e); } @@ -92,4 +96,4 @@ implements Partitioner { // if above fails to find start key that match we need to return something return 0; } -} +} \ No newline at end of file diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/TableOutputFormat.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/TableOutputFormat.java index 5a5f544..906d074 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/TableOutputFormat.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/TableOutputFormat.java @@ -107,4 +107,4 @@ FileOutputFormat { throw new IOException("Must specify table name"); } } -} +} \ No newline at end of file diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/DefaultVisibilityExpressionResolver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/DefaultVisibilityExpressionResolver.java index 150bb25..3fa71f9 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/DefaultVisibilityExpressionResolver.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/DefaultVisibilityExpressionResolver.java @@ -28,11 +28,12 @@ import java.util.Map; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.TableNotFoundException; import org.apache.hadoop.hbase.Tag; -import org.apache.hadoop.hbase.client.HTable; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.ResultScanner; import org.apache.hadoop.hbase.client.Scan; @@ -66,44 +67,55 @@ public class DefaultVisibilityExpressionResolver implements VisibilityExpression @Override public void init() { // Reading all the labels and ordinal. - // This scan should be done by user with global_admin previliges.. Ensure that it works + // This scan should be done by user with global_admin privileges.. Ensure that it works Table labelsTable = null; + Connection connection = null; try { - labelsTable = new HTable(conf, LABELS_TABLE_NAME); - } catch (TableNotFoundException e) { - // Just return with out doing any thing. When the VC is not used we wont be having 'labels' - // table in the cluster. 
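For reference, the two conversions HTableWrapper performs above can be pulled out into a small helper. A sketch assuming a hypothetical LegacyAdapters class (not part of the patch), built only on the public Table API:

import java.io.IOException;
import java.util.List;

import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Table;

/** Hypothetical adapter collecting the legacy conversions shown in HTableWrapper above. */
public final class LegacyAdapters {

  private LegacyAdapters() {
  }

  /** Adapts Table.existsAll(), which returns primitive booleans, to the old Boolean[] contract. */
  public static Boolean[] existsAsObjects(Table table, List<Get> gets) throws IOException {
    boolean[] exists = table.existsAll(gets);
    if (exists == null) {
      return null;
    }
    Boolean[] results = new Boolean[exists.length];
    for (int i = 0; i < exists.length; i++) {
      results[i] = exists[i] ? Boolean.TRUE : Boolean.FALSE;
    }
    return results;
  }

  /** Maps the deprecated writeToWAL flag onto the Durability enum the Table API expects. */
  public static Durability toDurability(boolean writeToWAL) {
    return writeToWAL ? Durability.USE_DEFAULT : Durability.SKIP_WAL;
  }
}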
- return; - } catch (IOException e) { - LOG.error("Error opening 'labels' table", e); - return; - } - Scan scan = new Scan(); - scan.setAuthorizations(new Authorizations(VisibilityUtils.SYSTEM_LABEL)); - scan.addColumn(LABELS_TABLE_FAMILY, LABEL_QUALIFIER); - ResultScanner scanner = null; - try { - scanner = labelsTable.getScanner(scan); - Result next = null; - while ((next = scanner.next()) != null) { - byte[] row = next.getRow(); - byte[] value = next.getValue(LABELS_TABLE_FAMILY, LABEL_QUALIFIER); - labels.put(Bytes.toString(value), Bytes.toInt(row)); + connection = ConnectionFactory.createConnection(conf); + try { + labelsTable = connection.getTable(LABELS_TABLE_NAME); + } catch (TableNotFoundException e) { + // Just return with out doing any thing. When the VC is not used we wont be having 'labels' + // table in the cluster. + return; + } catch (IOException e) { + LOG.error("Error opening 'labels' table", e); + return; } - } catch (IOException e) { - LOG.error("Error reading 'labels' table", e); - } finally { + Scan scan = new Scan(); + scan.setAuthorizations(new Authorizations(VisibilityUtils.SYSTEM_LABEL)); + scan.addColumn(LABELS_TABLE_FAMILY, LABEL_QUALIFIER); + ResultScanner scanner = null; try { - if (scanner != null) { - scanner.close(); + scanner = labelsTable.getScanner(scan); + Result next = null; + while ((next = scanner.next()) != null) { + byte[] row = next.getRow(); + byte[] value = next.getValue(LABELS_TABLE_FAMILY, LABEL_QUALIFIER); + labels.put(Bytes.toString(value), Bytes.toInt(row)); } + } catch (IOException e) { + LOG.error("Error scanning 'labels' table", e); } finally { + if (scanner != null) scanner.close(); + } + } catch (IOException ioe) { + LOG.error("Failed reading 'labels' tags", ioe); + return; + } finally { + if (labelsTable != null) { try { labelsTable.close(); - } catch (IOException e) { - LOG.warn("Error on closing 'labels' table", e); + } catch (IOException ioe) { + LOG.warn("Error closing 'labels' table", ioe); } } + if (connection != null) + try { + connection.close(); + } catch (IOException ioe) { + LOG.warn("Failed close of temporary connection", ioe); + } } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/FavoredNodeAssignmentHelper.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/FavoredNodeAssignmentHelper.java index 01c1f89..d0810cc 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/FavoredNodeAssignmentHelper.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/FavoredNodeAssignmentHelper.java @@ -25,21 +25,21 @@ import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Map; +import java.util.Map.Entry; import java.util.Random; import java.util.Set; -import java.util.Map.Entry; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionInfo; -import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.MetaTableAccessor; +import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.client.Connection; -import org.apache.hadoop.hbase.client.HTable; +import org.apache.hadoop.hbase.client.ConnectionFactory; import 
org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.master.RackManager; @@ -121,12 +121,10 @@ public class FavoredNodeAssignmentHelper { } } // Write the region assignments to the meta table. - Table metaTable = null; - try { - metaTable = new HTable(conf, TableName.META_TABLE_NAME); - metaTable.put(puts); - } finally { - if (metaTable != null) metaTable.close(); + try (Connection connection = ConnectionFactory.createConnection(conf)) { + try (Table metaTable = connection.getTable(TableName.META_TABLE_NAME)) { + metaTable.put(puts); + } } LOG.info("Added " + puts.size() + " regions in META"); } @@ -304,7 +302,6 @@ public class FavoredNodeAssignmentHelper { * primary/secondary/tertiary RegionServers * @param primaryRSMap * @return the map of regions to the servers the region-files should be hosted on - * @throws IOException */ public Map placeSecondaryAndTertiaryWithRestrictions( Map primaryRSMap) { @@ -603,4 +600,4 @@ public class FavoredNodeAssignmentHelper { } return strBuf.toString(); } -} +} \ No newline at end of file diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/ModifyTableHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/ModifyTableHandler.java index a778c26..c0eea7c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/ModifyTableHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/ModifyTableHandler.java @@ -25,15 +25,15 @@ import java.util.Set; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.TableDescriptor; -import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HTableDescriptor; -import org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.MetaTableAccessor; -import org.apache.hadoop.hbase.client.HTable; +import org.apache.hadoop.hbase.Server; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.ResultScanner; import org.apache.hadoop.hbase.client.Scan; @@ -44,7 +44,6 @@ import org.apache.hadoop.hbase.master.HMaster; import org.apache.hadoop.hbase.master.MasterCoprocessorHost; import org.apache.hadoop.hbase.master.MasterFileSystem; import org.apache.hadoop.hbase.master.MasterServices; -import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos; import org.apache.hadoop.hbase.util.Bytes; @InterfaceAudience.Private @@ -101,18 +100,15 @@ public class ModifyTableHandler extends TableEventHandler { Set tableRows = new HashSet(); Scan scan = MetaTableAccessor.getScanForTableName(table); scan.addColumn(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER); - Table htable = null; - try { - htable = new HTable(masterServices.getConfiguration(), TableName.META_TABLE_NAME); - ResultScanner resScanner = htable.getScanner(scan); - for (Result result : resScanner) { - tableRows.add(result.getRow()); - } - MetaTableAccessor.removeRegionReplicasFromMeta(tableRows, newReplicaCount, + try (Connection connection = + ConnectionFactory.createConnection(masterServices.getConfiguration())) { + try (Table metaTable = 
connection.getTable(TableName.META_TABLE_NAME)) { + ResultScanner resScanner = metaTable.getScanner(scan); + for (Result result : resScanner) { + tableRows.add(result.getRow()); + } + MetaTableAccessor.removeRegionReplicasFromMeta(tableRows, newReplicaCount, oldReplicaCount - newReplicaCount, masterServices.getShortCircuitConnection()); - } finally { - if (htable != null) { - htable.close(); } } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/TableEventHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/TableEventHandler.java index 66c45a4..aad3883 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/TableEventHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/TableEventHandler.java @@ -28,19 +28,22 @@ import java.util.TreeMap; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.CoordinatedStateException; -import org.apache.hadoop.hbase.TableDescriptor; -import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.InvalidFamilyOperationException; +import org.apache.hadoop.hbase.MetaTableAccessor; import org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.TableDescriptor; import org.apache.hadoop.hbase.TableExistsException; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableNotDisabledException; -import org.apache.hadoop.hbase.MetaTableAccessor; -import org.apache.hadoop.hbase.client.HTable; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.client.ConnectionFactory; +import org.apache.hadoop.hbase.client.RegionLocator; import org.apache.hadoop.hbase.client.TableState; import org.apache.hadoop.hbase.executor.EventHandler; import org.apache.hadoop.hbase.executor.EventType; @@ -48,10 +51,10 @@ import org.apache.hadoop.hbase.master.BulkReOpen; import org.apache.hadoop.hbase.master.MasterServices; import org.apache.hadoop.hbase.master.TableLockManager.TableLock; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.zookeeper.MetaTableLocator; import com.google.common.collect.Lists; import com.google.common.collect.Maps; -import org.apache.hadoop.hbase.zookeeper.MetaTableLocator; /** * Base class for performing operations against tables. @@ -175,32 +178,33 @@ public abstract class TableEventHandler extends EventHandler { public boolean reOpenAllRegions(List regions) throws IOException { boolean done = false; LOG.info("Bucketing regions by region server..."); - HTable table = new HTable(masterServices.getConfiguration(), tableName); - TreeMap> serverToRegions = Maps - .newTreeMap(); - NavigableMap hriHserverMapping; - try { - hriHserverMapping = table.getRegionLocations(); - } finally { - table.close(); + List regionLocations = null; + try (Connection connection = ConnectionFactory.createConnection(masterServices.getConfiguration())) { + try (RegionLocator locator = connection.getRegionLocator(tableName)) { + regionLocations = locator.getAllRegionLocations(); + } } - + // Convert List to Map. 
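The conversion flagged by the comment above rebuilds the HRegionInfo-to-ServerName view that HTable.getRegionLocations() used to return. A standalone sketch of that step, assuming a hypothetical RegionLocationIndex helper:

import java.io.IOException;
import java.util.NavigableMap;
import java.util.TreeMap;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;

/** Hypothetical helper rebuilding the region-to-server map from a RegionLocator. */
public final class RegionLocationIndex {

  private RegionLocationIndex() {
  }

  public static NavigableMap<HRegionInfo, ServerName> locationsByRegion(Configuration conf,
      TableName tableName) throws IOException {
    NavigableMap<HRegionInfo, ServerName> regionToServer =
        new TreeMap<HRegionInfo, ServerName>();
    try (Connection connection = ConnectionFactory.createConnection(conf);
        RegionLocator locator = connection.getRegionLocator(tableName)) {
      for (HRegionLocation location : locator.getAllRegionLocations()) {
        // Skip entries without a current location so callers can treat absence as "not deployed".
        if (location.getServerName() != null) {
          regionToServer.put(location.getRegionInfo(), location.getServerName());
        }
      }
    }
    return regionToServer;
  }
}

With the map in hand, reOpenAllRegions only needs to bucket regions by the server each one maps to.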
+ NavigableMap hri2Sn = new TreeMap(); + for (HRegionLocation location: regionLocations) { + hri2Sn.put(location.getRegionInfo(), location.getServerName()); + } + TreeMap> serverToRegions = Maps.newTreeMap(); List reRegions = new ArrayList(); for (HRegionInfo hri : regions) { - ServerName rsLocation = hriHserverMapping.get(hri); - + ServerName sn = hri2Sn.get(hri); // Skip the offlined split parent region // See HBASE-4578 for more information. - if (null == rsLocation) { + if (null == sn) { LOG.info("Skip " + hri); continue; } - if (!serverToRegions.containsKey(rsLocation)) { + if (!serverToRegions.containsKey(sn)) { LinkedList hriList = Lists.newLinkedList(); - serverToRegions.put(rsLocation, hriList); + serverToRegions.put(sn, hriList); } reRegions.add(hri); - serverToRegions.get(rsLocation).add(hri); + serverToRegions.get(sn).add(hri); } LOG.info("Reopening " + reRegions.size() + " regions on " diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaUtil.java index 5db30eb..b01be4d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaUtil.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaUtil.java @@ -19,15 +19,12 @@ package org.apache.hadoop.hbase.quotas; import java.io.IOException; - import java.util.HashMap; import java.util.List; import java.util.Map; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hbase.classification.InterfaceAudience; -import org.apache.hadoop.hbase.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.HColumnDescriptor; @@ -35,18 +32,20 @@ import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.KeyValueUtil; import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.client.HTable; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; +import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.Delete; import org.apache.hadoop.hbase.client.Get; import org.apache.hadoop.hbase.client.Mutation; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Result; -import org.apache.hadoop.hbase.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas; import org.apache.hadoop.hbase.regionserver.BloomType; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; -import org.apache.hadoop.security.UserGroupInformation; /** * Helper class to interact with the quota table @@ -268,21 +267,19 @@ public class QuotaUtil extends QuotaTableUtil { */ private static void doPut(final Configuration conf, final Put put) throws IOException { - HTable table = new HTable(conf, QuotaUtil.QUOTA_TABLE_NAME); - try { - table.put(put); - } finally { - table.close(); + try (Connection connection = ConnectionFactory.createConnection(conf)) { + try (Table table = connection.getTable(QuotaUtil.QUOTA_TABLE_NAME)) { + table.put(put); + } } } private static void doDelete(final Configuration conf, final Delete delete) throws IOException { - HTable table = new HTable(conf, QuotaUtil.QUOTA_TABLE_NAME); - try { - 
table.delete(delete); - } finally { - table.close(); + try (Connection connection = ConnectionFactory.createConnection(conf)) { + try (Table table = connection.getTable(QuotaUtil.QUOTA_TABLE_NAME)) { + table.delete(delete); + } } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java index 45e5558..71f06f2 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java @@ -1357,8 +1357,6 @@ public class HRegionServer extends HasThread implements (int) (store.getTotalStaticBloomSize() / 1024); } } - float dataLocality = - r.getHDFSBlocksDistribution().getBlockLocalityIndex(serverName.getHostname()); if (regionLoadBldr == null) { regionLoadBldr = RegionLoad.newBuilder(); } @@ -1381,8 +1379,7 @@ public class HRegionServer extends HasThread implements .setWriteRequestsCount(r.writeRequestsCount.get()) .setTotalCompactingKVs(totalCompactingKVs) .setCurrentCompactedKVs(currentCompactedKVs) - .setCompleteSequenceId(r.lastFlushSeqId) - .setDataLocality(dataLocality); + .setCompleteSequenceId(r.lastFlushSeqId); return regionLoadBldr.build(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlLists.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlLists.java index c49bc33..20f79a3 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlLists.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlLists.java @@ -33,7 +33,6 @@ import java.util.TreeSet; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; @@ -44,9 +43,11 @@ import org.apache.hadoop.hbase.NamespaceDescriptor; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.Tag; import org.apache.hadoop.hbase.TagType; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.Delete; import org.apache.hadoop.hbase.client.Get; -import org.apache.hadoop.hbase.client.HTable; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.ResultScanner; @@ -173,12 +174,10 @@ public class AccessControlLists { Bytes.toString(key)+": "+Bytes.toStringBinary(value) ); } - Table acls = null; - try { - acls = new HTable(conf, ACL_TABLE_NAME); - acls.put(p); - } finally { - if (acls != null) acls.close(); + try (Connection connection = ConnectionFactory.createConnection(conf)) { + try (Table table = connection.getTable(ACL_TABLE_NAME)) { + table.put(p); + } } } @@ -203,13 +202,11 @@ public class AccessControlLists { if (LOG.isDebugEnabled()) { LOG.debug("Removing permission "+ userPerm.toString()); } - d.deleteColumns(ACL_LIST_FAMILY, key); - Table acls = null; - try { - acls = new HTable(conf, ACL_TABLE_NAME); - acls.delete(d); - } finally { - if (acls != null) acls.close(); + d.addColumns(ACL_LIST_FAMILY, key); + try (Connection connection = ConnectionFactory.createConnection(conf)) { + try (Table table = 
connection.getTable(ACL_TABLE_NAME)) { + table.delete(d); + } } } @@ -224,12 +221,10 @@ public class AccessControlLists { LOG.debug("Removing permissions of removed table "+ tableName); } - Table acls = null; - try { - acls = new HTable(conf, ACL_TABLE_NAME); - acls.delete(d); - } finally { - if (acls != null) acls.close(); + try (Connection connection = ConnectionFactory.createConnection(conf)) { + try (Table table = connection.getTable(ACL_TABLE_NAME)) { + table.delete(d); + } } } @@ -244,12 +239,10 @@ public class AccessControlLists { LOG.debug("Removing permissions of removed namespace "+ namespace); } - Table acls = null; - try { - acls = new HTable(conf, ACL_TABLE_NAME); - acls.delete(d); - } finally { - if (acls != null) acls.close(); + try (Connection connection = ConnectionFactory.createConnection(conf)) { + try (Table table = connection.getTable(ACL_TABLE_NAME)) { + table.delete(d); + } } } @@ -264,40 +257,37 @@ public class AccessControlLists { " from table "+ tableName); } - Table acls = null; - try { - acls = new HTable(conf, ACL_TABLE_NAME); - - Scan scan = new Scan(); - scan.addFamily(ACL_LIST_FAMILY); - - String columnName = Bytes.toString(column); - scan.setFilter(new QualifierFilter(CompareOp.EQUAL, new RegexStringComparator( - String.format("(%s%s%s)|(%s%s)$", - ACL_KEY_DELIMITER, columnName, ACL_KEY_DELIMITER, - ACL_KEY_DELIMITER, columnName)))); - - Set qualifierSet = new TreeSet(Bytes.BYTES_COMPARATOR); - ResultScanner scanner = acls.getScanner(scan); - try { - for (Result res : scanner) { - for (byte[] q : res.getFamilyMap(ACL_LIST_FAMILY).navigableKeySet()) { - qualifierSet.add(q); + try (Connection connection = ConnectionFactory.createConnection(conf)) { + try (Table table = connection.getTable(ACL_TABLE_NAME)) { + Scan scan = new Scan(); + scan.addFamily(ACL_LIST_FAMILY); + + String columnName = Bytes.toString(column); + scan.setFilter(new QualifierFilter(CompareOp.EQUAL, new RegexStringComparator( + String.format("(%s%s%s)|(%s%s)$", + ACL_KEY_DELIMITER, columnName, ACL_KEY_DELIMITER, + ACL_KEY_DELIMITER, columnName)))); + + Set qualifierSet = new TreeSet(Bytes.BYTES_COMPARATOR); + ResultScanner scanner = table.getScanner(scan); + try { + for (Result res : scanner) { + for (byte[] q : res.getFamilyMap(ACL_LIST_FAMILY).navigableKeySet()) { + qualifierSet.add(q); + } } + } finally { + scanner.close(); } - } finally { - scanner.close(); - } - if (qualifierSet.size() > 0) { - Delete d = new Delete(tableName.getName()); - for (byte[] qualifier : qualifierSet) { - d.deleteColumns(ACL_LIST_FAMILY, qualifier); + if (qualifierSet.size() > 0) { + Delete d = new Delete(tableName.getName()); + for (byte[] qualifier : qualifierSet) { + d.addColumns(ACL_LIST_FAMILY, qualifier); + } + table.delete(d); } - acls.delete(d); } - } finally { - if (acls != null) acls.close(); } } @@ -425,19 +415,19 @@ public class AccessControlLists { Scan scan = new Scan(); scan.addFamily(ACL_LIST_FAMILY); - Table acls = null; ResultScanner scanner = null; - try { - acls = new HTable(conf, ACL_TABLE_NAME); - scanner = acls.getScanner(scan); - for (Result row : scanner) { - ListMultimap resultPerms = - parsePermissions(row.getRow(), row); - allPerms.put(row.getRow(), resultPerms); + try (Connection connection = ConnectionFactory.createConnection(conf)) { + try (Table table = connection.getTable(ACL_TABLE_NAME)) { + scanner = table.getScanner(scan); + try { + for (Result row : scanner) { + ListMultimap resultPerms = parsePermissions(row.getRow(), row); + allPerms.put(row.getRow(), resultPerms); + } + } 
finally { + if (scanner != null) scanner.close(); + } } - } finally { - if (scanner != null) scanner.close(); - if (acls != null) acls.close(); } return allPerms; @@ -468,20 +458,18 @@ public class AccessControlLists { // for normal user tables, we just read the table row from _acl_ ListMultimap perms = ArrayListMultimap.create(); - Table acls = null; - try { - acls = new HTable(conf, ACL_TABLE_NAME); - Get get = new Get(entryName); - get.addFamily(ACL_LIST_FAMILY); - Result row = acls.get(get); - if (!row.isEmpty()) { - perms = parsePermissions(entryName, row); - } else { - LOG.info("No permissions found in " + ACL_TABLE_NAME + " for acl entry " - + Bytes.toString(entryName)); + try (Connection connection = ConnectionFactory.createConnection(conf)) { + try (Table table = connection.getTable(ACL_TABLE_NAME)) { + Get get = new Get(entryName); + get.addFamily(ACL_LIST_FAMILY); + Result row = table.get(get); + if (!row.isEmpty()) { + perms = parsePermissions(entryName, row); + } else { + LOG.info("No permissions found in " + ACL_TABLE_NAME + " for acl entry " + + Bytes.toString(entryName)); + } } - } finally { - if (acls != null) acls.close(); } return perms; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/TokenUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/TokenUtil.java index 3a37049..055a606 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/TokenUtil.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/TokenUtil.java @@ -23,13 +23,15 @@ import java.lang.reflect.UndeclaredThrowableException; import java.security.PrivilegedExceptionAction; import com.google.protobuf.ServiceException; + import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.client.HTable; +import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel; import org.apache.hadoop.hbase.protobuf.ProtobufUtil; @@ -54,21 +56,18 @@ public class TokenUtil { */ public static Token obtainToken( Configuration conf) throws IOException { - Table meta = null; - try { - meta = new HTable(conf, TableName.META_TABLE_NAME); - CoprocessorRpcChannel rpcChannel = meta.coprocessorService(HConstants.EMPTY_START_ROW); - AuthenticationProtos.AuthenticationService.BlockingInterface service = - AuthenticationProtos.AuthenticationService.newBlockingStub(rpcChannel); - AuthenticationProtos.GetAuthenticationTokenResponse response = service.getAuthenticationToken(null, - AuthenticationProtos.GetAuthenticationTokenRequest.getDefaultInstance()); + try (Connection connection = ConnectionFactory.createConnection(conf)) { + try (Table meta = connection.getTable(TableName.META_TABLE_NAME)) { + CoprocessorRpcChannel rpcChannel = meta.coprocessorService(HConstants.EMPTY_START_ROW); + AuthenticationProtos.AuthenticationService.BlockingInterface service = + AuthenticationProtos.AuthenticationService.newBlockingStub(rpcChannel); + AuthenticationProtos.GetAuthenticationTokenResponse response = + service.getAuthenticationToken(null, + AuthenticationProtos.GetAuthenticationTokenRequest.getDefaultInstance()); - return ProtobufUtil.toToken(response.getToken()); - 
} catch (ServiceException se) { - ProtobufUtil.toIOException(se); - } finally { - if (meta != null) { - meta.close(); + return ProtobufUtil.toToken(response.getToken()); + } catch (ServiceException se) { + ProtobufUtil.toIOException(se); } } // dummy return for ServiceException catch block diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java index e6e975f..9d9322a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java @@ -19,6 +19,7 @@ package org.apache.hadoop.hbase.tool; +import java.io.Closeable; import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; @@ -38,15 +39,17 @@ import org.apache.hadoop.hbase.DoNotRetryIOException; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableNotEnabledException; import org.apache.hadoop.hbase.TableNotFoundException; import org.apache.hadoop.hbase.client.Admin; +import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.Get; -import org.apache.hadoop.hbase.client.HBaseAdmin; -import org.apache.hadoop.hbase.client.HTable; +import org.apache.hadoop.hbase.client.RegionLocator; import org.apache.hadoop.hbase.client.ResultScanner; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.client.Table; @@ -227,51 +230,59 @@ public final class Canary implements Tool { } } - // start to prepare the stuffs + // Start to prepare the stuffs Monitor monitor = null; Thread monitorThread = null; long startTime = 0; long currentTimeLength = 0; + // Get a connection to use in below. + // try-with-resources jdk7 construct. See + // http://docs.oracle.com/javase/tutorial/essential/exceptions/tryResourceClose.html + try (Connection connection = ConnectionFactory.createConnection(this.conf)) { + do { + // Do monitor !! + try { + monitor = this.newMonitor(connection, index, args); + monitorThread = new Thread(monitor); + startTime = System.currentTimeMillis(); + monitorThread.start(); + while (!monitor.isDone()) { + // wait for 1 sec + Thread.sleep(1000); + // exit if any error occurs + if (this.failOnError && monitor.hasError()) { + monitorThread.interrupt(); + if (monitor.initialized) { + System.exit(monitor.errorCode); + } else { + System.exit(INIT_ERROR_EXIT_CODE); + } + } + currentTimeLength = System.currentTimeMillis() - startTime; + if (currentTimeLength > this.timeout) { + LOG.error("The monitor is running too long (" + currentTimeLength + + ") after timeout limit:" + this.timeout + + " will be killed itself !!"); + if (monitor.initialized) { + System.exit(TIMEOUT_ERROR_EXIT_CODE); + } else { + System.exit(INIT_ERROR_EXIT_CODE); + } + break; + } + } - do { - // do monitor !! 
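The reworked canary above opens one Connection for the whole run and hands it to each monitor, which then borrows Admin and Table instances from it. A simplified, standalone sketch of that pattern; the SharedConnectionProbe name and the single Get per table are assumptions (the real canary reads one row per region and reports through a sink):

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Table;

/** Hypothetical probe showing the one-connection-per-run pattern the canary now uses. */
public final class SharedConnectionProbe {

  private SharedConnectionProbe() {
  }

  /** Opens a single Connection, then borrows an Admin and one Table per user table from it. */
  public static void probeAllTables(Configuration conf, byte[] probeRow) throws IOException {
    try (Connection connection = ConnectionFactory.createConnection(conf);
        Admin admin = connection.getAdmin()) {
      for (HTableDescriptor tableDesc : admin.listTables()) {
        try (Table table = connection.getTable(tableDesc.getTableName())) {
          table.get(new Get(probeRow)); // a cheap sniff read; the result is deliberately ignored
        }
      }
    } // the Admin and every Table are closed before the Connection itself
  }

  public static void main(String[] args) throws IOException {
    probeAllTables(HBaseConfiguration.create(), new byte[] { 0 });
  }
}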
- monitor = this.newMonitor(index, args); - monitorThread = new Thread(monitor); - startTime = System.currentTimeMillis(); - monitorThread.start(); - while (!monitor.isDone()) { - // wait for 1 sec - Thread.sleep(1000); - // exit if any error occurs - if (this.failOnError && monitor.hasError()) { - monitorThread.interrupt(); - if (monitor.initialized) { + if (this.failOnError && monitor.hasError()) { + monitorThread.interrupt(); System.exit(monitor.errorCode); - } else { - System.exit(INIT_ERROR_EXIT_CODE); - } - } - currentTimeLength = System.currentTimeMillis() - startTime; - if (currentTimeLength > this.timeout) { - LOG.error("The monitor is running too long (" + currentTimeLength - + ") after timeout limit:" + this.timeout - + " will be killed itself !!"); - if (monitor.initialized) { - System.exit(TIMEOUT_ERROR_EXIT_CODE); - } else { - System.exit(INIT_ERROR_EXIT_CODE); } - break; + } finally { + if (monitor != null) monitor.close(); } - } - - if (this.failOnError && monitor.hasError()) { - monitorThread.interrupt(); - System.exit(monitor.errorCode); - } - Thread.sleep(interval); - } while (interval > 0); + Thread.sleep(interval); + } while (interval > 0); + } // try-with-resources close return(monitor.errorCode); } @@ -295,13 +306,13 @@ public final class Canary implements Tool { } /** - * a Factory method for {@link Monitor}. - * Can be overrided by user. + * A Factory method for {@link Monitor}. + * Can be overridden by user. * @param index a start index for monitor target * @param args args passed from user * @return a Monitor instance */ - public Monitor newMonitor(int index, String[] args) { + public Monitor newMonitor(final Connection connection, int index, String[] args) { Monitor monitor = null; String[] monitorTargets = null; @@ -313,20 +324,20 @@ public final class Canary implements Tool { if(this.regionServerMode) { monitor = new RegionServerMonitor( - this.conf, + connection, monitorTargets, this.useRegExp, (ExtendedSink)this.sink); } else { - monitor = new RegionMonitor(this.conf, monitorTargets, this.useRegExp, this.sink); + monitor = new RegionMonitor(connection, monitorTargets, this.useRegExp, this.sink); } return monitor; } // a Monitor super-class can be extended by users - public static abstract class Monitor implements Runnable { + public static abstract class Monitor implements Runnable, Closeable { - protected Configuration config; + protected Connection connection; protected Admin admin; protected String[] targets; protected boolean useRegExp; @@ -344,12 +355,16 @@ public final class Canary implements Tool { return errorCode != 0; } - protected Monitor(Configuration config, String[] monitorTargets, + @Override + public void close() throws IOException { + if (this.admin != null) this.admin.close(); + } + + protected Monitor(Connection connection, String[] monitorTargets, boolean useRegExp, Sink sink) { - if (null == config) - throw new IllegalArgumentException("config shall not be null"); + if (null == connection) throw new IllegalArgumentException("connection shall not be null"); - this.config = config; + this.connection = connection; this.targets = monitorTargets; this.useRegExp = useRegExp; this.sink = sink; @@ -360,7 +375,7 @@ public final class Canary implements Tool { protected boolean initAdmin() { if (null == this.admin) { try { - this.admin = new HBaseAdmin(config); + this.admin = this.connection.getAdmin(); } catch (Exception e) { LOG.error("Initial HBaseAdmin failed...", e); this.errorCode = INIT_ERROR_EXIT_CODE; @@ -376,9 +391,9 @@ public final class 
Canary implements Tool { // a monitor for region mode private static class RegionMonitor extends Monitor { - public RegionMonitor(Configuration config, String[] monitorTargets, + public RegionMonitor(Connection connection, String[] monitorTargets, boolean useRegExp, Sink sink) { - super(config, monitorTargets, useRegExp, sink); + super(connection, monitorTargets, useRegExp, sink); } @Override @@ -480,7 +495,7 @@ public final class Canary implements Tool { Table table = null; try { - table = new HTable(admin.getConfiguration(), tableDesc.getTableName()); + table = admin.getConnection().getTable(tableDesc.getTableName()); } catch (TableNotFoundException e) { return; } @@ -555,9 +570,9 @@ public final class Canary implements Tool { //a monitor for regionserver mode private static class RegionServerMonitor extends Monitor { - public RegionServerMonitor(Configuration config, String[] monitorTargets, + public RegionServerMonitor(Connection connection, String[] monitorTargets, boolean useRegExp, ExtendedSink sink) { - super(config, monitorTargets, useRegExp, sink); + super(connection, monitorTargets, useRegExp, sink); } private ExtendedSink getSink() { @@ -621,7 +636,7 @@ public final class Canary implements Tool { region = entry.getValue().get(0); try { tableName = region.getTable(); - table = new HTable(this.admin.getConfiguration(), tableName); + table = admin.getConnection().getTable(tableName); startKey = region.getStartKey(); // Can't do a get on empty start row so do a Scan of first element if any instead. if(startKey.length > 0) { @@ -674,18 +689,19 @@ public final class Canary implements Tool { private Map> getAllRegionServerByName() { Map> rsAndRMap = new HashMap>(); - HTable table = null; + Table table = null; + RegionLocator regionLocator = null; try { HTableDescriptor[] tableDescs = this.admin.listTables(); List regions = null; for (HTableDescriptor tableDesc : tableDescs) { - table = new HTable(this.admin.getConfiguration(), tableDesc.getTableName()); + table = this.admin.getConnection().getTable(tableDesc.getTableName()); + regionLocator = this.admin.getConnection().getRegionLocator(tableDesc.getTableName()); - for (Map.Entry entry : table - .getRegionLocations().entrySet()) { - ServerName rs = entry.getValue(); + for (HRegionLocation location: regionLocator.getAllRegionLocations()) { + ServerName rs = location.getServerName(); String rsName = rs.getHostname(); - HRegionInfo r = entry.getKey(); + HRegionInfo r = location.getRegionInfo(); if (rsAndRMap.containsKey(rsName)) { regions = rsAndRMap.get(rsName); diff --git a/hbase-server/src/main/resources/hbase-webapps/master/table.jsp b/hbase-server/src/main/resources/hbase-webapps/master/table.jsp index 2b7829c..519f883 100644 --- a/hbase-server/src/main/resources/hbase-webapps/master/table.jsp +++ b/hbase-server/src/main/resources/hbase-webapps/master/table.jsp @@ -48,10 +48,10 @@ String tableHeader; boolean withReplica = false; if (table.getTableDescriptor().getRegionReplication() > 1) { - tableHeader = "

Table Regions

"; + tableHeader = "

Table Regions

NameRegion ServerStart KeyEnd KeyLocalityRequestsReplicaID
"; withReplica = true; } else { - tableHeader = "

Table Regions

NameRegion ServerStart KeyEnd KeyRequestsReplicaID
"; + tableHeader = "

Table Regions

NameRegion ServerStart KeyEnd KeyLocalityRequests
"; } ServerName rl = metaTableLocator.getMetaRegionLocation(master.getZooKeeper()); boolean showFragmentation = conf.getBoolean("hbase.master.ui.fragmentation.enabled", false); @@ -212,10 +212,9 @@ + - - <% } %>
NameRegion ServerStart KeyEnd KeyRequests
<%= escapeXml(meta.getRegionNameAsString()) %> <%= metaLocation.getHostname().toString() + ":" + master.getRegionServerInfoPort(metaLocation) %>- <%= escapeXml(Bytes.toString(meta.getStartKey())) %> <%= escapeXml(Bytes.toString(meta.getEndKey())) %>--
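With the locality column gone, the table page reports per-region request counts straight from each server's load snapshot. A standalone sketch of the same lookup from client code, assuming a hypothetical RegionLoadDump class (not part of the patch):

import java.io.IOException;
import java.util.Map;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.ClusterStatus;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionLoad;
import org.apache.hadoop.hbase.ServerLoad;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.util.Bytes;

/** Hypothetical dump of the per-region request counts backing the "Requests" column. */
public final class RegionLoadDump {

  private RegionLoadDump() {
  }

  public static void dump(Configuration conf) throws IOException {
    try (Connection connection = ConnectionFactory.createConnection(conf);
        Admin admin = connection.getAdmin()) {
      ClusterStatus status = admin.getClusterStatus();
      for (ServerName server : status.getServers()) {
        ServerLoad serverLoad = status.getLoad(server);
        for (Map.Entry<byte[], RegionLoad> entry : serverLoad.getRegionsLoad().entrySet()) {
          System.out.println(server.getHostname() + " "
              + Bytes.toStringBinary(entry.getKey())
              + " requests=" + entry.getValue().getRequestsCount());
        }
      }
    }
  }

  public static void main(String[] args) throws IOException {
    dump(HBaseConfiguration.create());
  }
}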
@@ -269,7 +268,7 @@ HRegionInfo regionInfo = hriEntry.getKey(); ServerName addr = hriEntry.getValue(); long req = 0; - float locality = 0.0f; + String urlRegionServer = null; if (addr != null) { @@ -278,7 +277,6 @@ Map map = sl.getRegionsLoad(); if (map.containsKey(regionInfo.getRegionName())) { req = map.get(regionInfo.getRegionName()).getRequestsCount(); - locality = map.get(regionInfo.getRegionName()).getDataLocality(); } Integer i = regDistribution.get(addr); if (null == i) i = Integer.valueOf(0); @@ -307,7 +305,6 @@ conf))) %> <%= escapeXml(Bytes.toStringBinary(HRegionInfo.getEndKeyForDisplay(regionInfo, conf))) %> - <%= locality%> <%= req%> <% if (withReplica) {