diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java
index 3282838..7aa330c 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java
@@ -34,11 +34,13 @@ import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.client.RegionReplicaUtil;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
@@ -173,14 +175,13 @@ public class MetaTableAccessor {
* @throws IOException
* @SuppressWarnings("deprecation")
*/
- private static Table getHTable(final Connection connection,
- final TableName tableName)
+ private static Table getHTable(final Connection connection, final TableName tableName)
throws IOException {
// We used to pass whole CatalogTracker in here, now we just pass in Connection
if (connection == null || connection.isClosed()) {
throw new NullPointerException("No connection");
}
- return new HTable(tableName, connection);
+ return connection.getTable(tableName);
}
/**
@@ -867,31 +868,30 @@ public class MetaTableAccessor {
/**
* Count regions in hbase:meta for passed table.
- * @param c Configuration object
+ * Avoid this method if you can; use {@link #getRegionCount(Connection, TableName)} instead.
+ * @param configuration Configuration object
* @param tableName table name to count regions for
* @return Count or regions in table tableName
* @throws IOException
*/
- @Deprecated
- public static int getRegionCount(final Configuration c, final String tableName)
- throws IOException {
- return getRegionCount(c, TableName.valueOf(tableName));
+ public static int getRegionCount(final Configuration configuration, final TableName tableName)
+ throws IOException {
+ try (Connection connection = ConnectionFactory.createConnection(configuration)) {
+ return getRegionCount(connection, tableName);
+ }
}
/**
* Count regions in hbase:meta for passed table.
- * @param c Configuration object
+ * @param connection connection we're using
* @param tableName table name to count regions for
* @return Count or regions in table tableName
* @throws IOException
*/
- public static int getRegionCount(final Configuration c, final TableName tableName)
- throws IOException {
- HTable t = new HTable(c, tableName);
- try {
- return t.getRegionLocations().size();
- } finally {
- t.close();
+ public static int getRegionCount(final Connection connection, final TableName tableName)
+ throws IOException {
+ try (RegionLocator locator = connection.getRegionLocator(tableName)) {
+ return locator.getAllRegionLocations().size();
}
}
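
For illustration, a minimal sketch of the calling pattern the new Connection-taking
overload expects; the table name is a placeholder, and the Configuration-taking
overload above is this same flow plus the connection setup.

  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.MetaTableAccessor;
  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.client.Connection;
  import org.apache.hadoop.hbase.client.ConnectionFactory;

  public class RegionCountExample {
    public static void main(String[] args) throws Exception {
      // The caller owns the Connection and can reuse it across many calls;
      // only the short-lived RegionLocator is created per invocation.
      try (Connection connection =
          ConnectionFactory.createConnection(HBaseConfiguration.create())) {
        int count = MetaTableAccessor.getRegionCount(connection,
            TableName.valueOf("exampleTable"));
        System.out.println("regions=" + count);
      }
    }
  }
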
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionFactory.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionFactory.java
index 374ce28..b489af2 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionFactory.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionFactory.java
@@ -35,7 +35,7 @@ import org.apache.hadoop.hbase.security.UserProvider;
* A non-instantiable class that manages creation of {@link Connection}s.
* Managing the lifecycle of the {@link Connection}s to the cluster is the responsibility of
* the caller.
- * From this {@link Connection} {@link Table} implementations are retrieved
+ * From a {@link Connection}, {@link Table} implementations are retrieved
* with {@link Connection#getTable(TableName)}. Example:
*
* Connection connection = ConnectionFactory.createConnection(config);
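
The class-comment example above elides cleanup. A hedged sketch of the full
lifecycle it implies, with a placeholder table name; Connection and Table are
both Closeable, so try-with-resources handles the close ordering.

  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.client.Connection;
  import org.apache.hadoop.hbase.client.ConnectionFactory;
  import org.apache.hadoop.hbase.client.Table;

  public class ConnectionLifecycleExample {
    public static void main(String[] args) throws Exception {
      // Connection is heavyweight: create once, share, close at program end.
      // Table is lightweight: get as needed, close when done.
      try (Connection connection =
               ConnectionFactory.createConnection(HBaseConfiguration.create());
           Table table = connection.getTable(TableName.valueOf("exampleTable"))) {
        // ... Puts, Gets, Scans against table ...
      } // table closes first, then the connection
    }
  }
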
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java
index b3a6295..b5cef3d 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java
@@ -180,7 +180,7 @@ import com.google.protobuf.RpcController;
import com.google.protobuf.ServiceException;
/**
- * An internal, A non-instantiable class that manages creation of {@link HConnection}s.
+ * An internal, non-instantiable class that manages creation of {@link HConnection}s.
*/
@SuppressWarnings("serial")
@InterfaceAudience.Private
@@ -774,16 +774,7 @@ class ConnectionManager {
* @throws IOException
*/
private Registry setupRegistry() throws IOException {
- String registryClass = this.conf.get("hbase.client.registry.impl",
- ZooKeeperRegistry.class.getName());
- Registry registry = null;
- try {
- registry = (Registry)Class.forName(registryClass).newInstance();
- } catch (Throwable t) {
- throw new IOException(t);
- }
- registry.init(this);
- return registry;
+ return RegistryFactory.getRegistry(this);
}
/**
@@ -1010,8 +1001,8 @@ class ConnectionManager {
@Override
 public List<HRegionLocation> locateRegions(final TableName tableName,
 final boolean useCache, final boolean offlined) throws IOException {
- NavigableMap<HRegionInfo, ServerName> regions = MetaScanner.allTableRegions(conf, this,
- tableName);
+ NavigableMap<HRegionInfo, ServerName> regions =
+ MetaScanner.allTableRegions(conf, this, tableName);
 final List<HRegionLocation> locations = new ArrayList<HRegionLocation>();
for (HRegionInfo regionInfo : regions.keySet()) {
RegionLocations list = locateRegion(tableName, regionInfo.getStartKey(), useCache, true);
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
index aec5ae8..cb72a88 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
@@ -83,7 +83,6 @@ import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RollWALWriterReque
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RollWALWriterResponse;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.StopServerRequest;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateConfigurationRequest;
-import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ClientService;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription;
@@ -3734,7 +3733,7 @@ public class HBaseAdmin implements Admin {
public CoprocessorRpcChannel coprocessorService(ServerName sn) {
return new RegionServerCoprocessorRpcChannel(connection, sn);
}
-
+
@Override
public void updateConfiguration(ServerName server) throws IOException {
try {
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
index c3a94e3..dbaaffa 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
@@ -80,24 +80,23 @@ import com.google.protobuf.Service;
import com.google.protobuf.ServiceException;
/**
- *
- * HTable is no longer a client API. It is marked InterfaceAudience.Private indicating that
- * this is an HBase-internal class as defined in
- * https://hadoop.apache.org/docs/current/hadoop-project-dist/hadoop-common/InterfaceClassification.html
- * There are no guarantees for backwards source / binary compatibility and methods or class can
- * change or go away without deprecation. Use {@link Connection#getTable(TableName)}
- * to obtain an instance of {@link Table} instead of constructing an HTable directly.
- * An implementation of {@link Table}. Used to communicate with a single HBase table.
+ * An implementation of {@link Table}. Used to communicate with a single HBase table.
* Lightweight. Get as needed and just close when done.
* Instances of this class SHOULD NOT be constructed directly.
* Obtain an instance via {@link Connection}. See {@link ConnectionFactory}
* class comment for an example of how.
*
- * <p>This class is NOT thread safe for reads nor write.
+ * <p>This class is NOT thread safe for reads nor writes.
* In the case of writes (Put, Delete), the underlying write buffer can
* be corrupted if multiple threads contend over a single HTable instance.
* In the case of reads, some fields used by a Scan are shared among all threads.
*
+ * <p>HTable is no longer a client API. Use {@link Table} instead. It is marked
+ * InterfaceAudience.Private indicating that this is an HBase-internal class as defined in
+ * <a href="https://hadoop.apache.org/docs/current/hadoop-project-dist/hadoop-common/InterfaceClassification.html">
+ * Hadoop Interface Classification</a>.
+ * There are no guarantees for backwards source / binary compatibility and methods or class can
+ * change or go away without deprecation.
+ *
* @see Table
* @see Admin
* @see Connection
@@ -291,6 +290,8 @@ public class HTable implements HTableInterface, RegionLocator {
/**
* Creates an object to access a HBase table.
+ * Used by HBase internally. DO NOT USE. See {@link ConnectionFactory} class comment for how to
+ * get a {@link Table} instance (use {@link Table} instead of {@link HTable}).
* @param tableName Name of the table.
* @param connection HConnection to be used.
* @param pool ExecutorService to be used.
@@ -1799,11 +1800,11 @@ public class HTable implements HTableInterface, RegionLocator {
* @throws IOException
*/
public static void main(String[] args) throws IOException {
- Table t = new HTable(HBaseConfiguration.create(), args[0]);
- try {
- System.out.println(t.get(new Get(Bytes.toBytes(args[1]))));
- } finally {
- t.close();
+ Configuration conf = HBaseConfiguration.create();
+ try (Connection connection = ConnectionFactory.createConnection(conf)) {
+ try (Table table = connection.getTable(TableName.valueOf(args[0]))) {
+ System.out.println(table.get(new Get(Bytes.toBytes(args[1]))));
+ }
}
}
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MetaScanner.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MetaScanner.java
index 5312dfb..6dd53ec 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MetaScanner.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MetaScanner.java
@@ -41,6 +41,8 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.ExceptionUtil;
+import com.google.common.annotations.VisibleForTesting;
+
/**
* Scanner class that contains the hbase:meta table scanning logic.
* Provided visitors will be called for each row.
@@ -59,13 +61,16 @@ public class MetaScanner {
/**
* Scans the meta table and calls a visitor on each RowResult and uses a empty
* start row value as table name.
+ *
+ * <p>Visible for testing. Use {@link
+ * #metaScan(Configuration, Connection, MetaScannerVisitor, TableName)} instead.
*
* @param configuration conf
* @param visitor A custom visitor
* @throws IOException e
*/
- public static void metaScan(Configuration configuration,
- MetaScannerVisitor visitor)
+ @VisibleForTesting // Do not use. Used only by tests and by hbck.
+ public static void metaScan(Configuration configuration, MetaScannerVisitor visitor)
throws IOException {
metaScan(configuration, visitor, null, null, Integer.MAX_VALUE);
}
@@ -91,6 +96,9 @@ public class MetaScanner {
* Scans the meta table and calls a visitor on each RowResult. Uses a table
* name and a row name to locate meta regions. And it only scans at most
* rowLimit of rows.
+ *
+ * <p>Visible for testing. Use {@link
+ * #metaScan(Configuration, Connection, MetaScannerVisitor, TableName)} instead.
*
* @param configuration HBase configuration.
* @param visitor Visitor object.
@@ -102,12 +110,12 @@ public class MetaScanner {
* will be set to default value Integer.MAX_VALUE.
* @throws IOException e
*/
+ @VisibleForTesting // Do not use. Used by the Master, but only via a method otherwise used by tests.
public static void metaScan(Configuration configuration,
MetaScannerVisitor visitor, TableName userTableName, byte[] row,
int rowLimit)
throws IOException {
- metaScan(configuration, null, visitor, userTableName, row, rowLimit,
- TableName.META_TABLE_NAME);
+ metaScan(configuration, null, visitor, userTableName, row, rowLimit, TableName.META_TABLE_NAME);
}
/**
@@ -133,7 +141,7 @@ public class MetaScanner {
throws IOException {
boolean closeConnection = false;
- if (connection == null){
+ if (connection == null) {
connection = ConnectionFactory.createConnection(configuration);
closeConnection = true;
}
@@ -141,16 +149,16 @@ public class MetaScanner {
int rowUpperLimit = rowLimit > 0 ? rowLimit: Integer.MAX_VALUE;
// Calculate startrow for scan.
byte[] startRow;
- ResultScanner scanner = null;
- HTable metaTable = null;
- try {
- metaTable = new HTable(TableName.META_TABLE_NAME, connection, null);
+ try (Table metaTable = connection.getTable(TableName.META_TABLE_NAME)) {
if (row != null) {
// Scan starting at a particular row in a particular table
byte[] searchRow = HRegionInfo.createRegionName(tableName, row, HConstants.NINES, false);
-
- Result startRowResult = metaTable.getRowOrBefore(searchRow, HConstants.CATALOG_FAMILY);
-
+ Scan scan =
+ Scan.createGetClosestRowOrBeforeReverseScan(searchRow, HConstants.CATALOG_FAMILY);
+ Result startRowResult = null;
+ try (ResultScanner resultScanner = metaTable.getScanner(scan)) {
+ startRowResult = resultScanner.next();
+ }
if (startRowResult == null) {
throw new TableNotFoundException("Cannot find row in "+ TableName
.META_TABLE_NAME.getNameAsString()+" for table: "
@@ -184,25 +192,18 @@ public class MetaScanner {
Bytes.toStringBinary(startRow) + " for max=" + rowUpperLimit + " with caching=" + rows);
}
// Run the scan
- scanner = metaTable.getScanner(scan);
- Result result;
- int processedRows = 0;
- while ((result = scanner.next()) != null) {
- if (visitor != null) {
- if (!visitor.processRow(result)) break;
+ try (ResultScanner resultScanner = metaTable.getScanner(scan)) {
+ Result result;
+ int processedRows = 0;
+ while ((result = resultScanner.next()) != null) {
+ if (visitor != null) {
+ if (!visitor.processRow(result)) break;
+ }
+ processedRows++;
+ if (processedRows >= rowUpperLimit) break;
}
- processedRows++;
- if (processedRows >= rowUpperLimit) break;
}
} finally {
- if (scanner != null) {
- try {
- scanner.close();
- } catch (Throwable t) {
- ExceptionUtil.rethrowIfInterrupt(t);
- LOG.debug("Got exception in closing the result scanner", t);
- }
- }
if (visitor != null) {
try {
visitor.close();
@@ -211,16 +212,8 @@ public class MetaScanner {
LOG.debug("Got exception in closing the meta scanner visitor", t);
}
}
- if (metaTable != null) {
- try {
- metaTable.close();
- } catch (Throwable t) {
- ExceptionUtil.rethrowIfInterrupt(t);
- LOG.debug("Got exception in closing meta table", t);
- }
- }
if (closeConnection) {
- connection.close();
+ if (connection != null) connection.close();
}
}
}
@@ -246,6 +239,7 @@ public class MetaScanner {
* @return List of all user-space regions.
* @throws IOException
*/
+ @VisibleForTesting // And for hbck.
 public static List<HRegionInfo> listAllRegions(Configuration conf, final boolean offlined)
 throws IOException {
 final List<HRegionInfo> regions = new ArrayList<HRegionInfo>();
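
For reviewers, a sketch of the visitor-driven calling pattern these hunks
preserve, using the metaScan overload the new javadoc recommends. MetaScanner
is InterfaceAudience.Private, the table name is a placeholder, and the exact
four-argument overload is assumed from the {@link} reference above.

  import java.io.IOException;

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.client.Connection;
  import org.apache.hadoop.hbase.client.ConnectionFactory;
  import org.apache.hadoop.hbase.client.MetaScanner;
  import org.apache.hadoop.hbase.client.Result;

  public class MetaVisitorExample {
    public static void main(String[] args) throws IOException {
      Configuration conf = HBaseConfiguration.create();
      final int[] rows = new int[1];
      try (Connection connection = ConnectionFactory.createConnection(conf)) {
        // Count the hbase:meta rows for one table; the visitor is invoked once
        // per meta row until it returns false or the rows run out.
        MetaScanner.metaScan(conf, connection, new MetaScanner.MetaScannerVisitorBase() {
          @Override
          public boolean processRow(Result result) throws IOException {
            rows[0]++;
            return true; // false would stop the scan early
          }
        }, TableName.valueOf("exampleTable"));
      }
      System.out.println("meta rows=" + rows[0]);
    }
  }
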
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionLocator.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionLocator.java
index 8168fe1..754beb0 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionLocator.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionLocator.java
@@ -30,7 +30,7 @@ import org.apache.hadoop.hbase.util.Pair;
/**
* Used to view region location information for a single HBase table.
- * Obtain an instance from an {@link HConnection}.
+ * Obtain an instance from a {@link Connection}.
*
* @see ConnectionFactory
* @see Connection
@@ -98,4 +98,4 @@ public interface RegionLocator extends Closeable {
* Gets the fully qualified table name instance of this table.
*/
TableName getName();
-}
+}
\ No newline at end of file
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Registry.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Registry.java
index c6ed801..412e4fa 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Registry.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Registry.java
@@ -20,11 +20,14 @@ package org.apache.hadoop.hbase.client;
import java.io.IOException;
import org.apache.hadoop.hbase.RegionLocations;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
/**
* Cluster registry.
- * Implemenations hold cluster information such as this cluster's id, location of hbase:meta, etc.
+ * Implementations hold cluster information such as this cluster's id, location of hbase:meta, etc.
+ * Internal use only.
*/
+@InterfaceAudience.Private
interface Registry {
/**
* @param connection
@@ -47,4 +50,4 @@ interface Registry {
* @throws IOException
*/
int getCurrentNrHRS() throws IOException;
-}
+}
\ No newline at end of file
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegistryFactory.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegistryFactory.java
new file mode 100644
index 0000000..dc2cb7c
--- /dev/null
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegistryFactory.java
@@ -0,0 +1,46 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.client;
+
+import java.io.IOException;
+
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+
+/**
+ * Get instance of configured Registry.
+ */
+@InterfaceAudience.Private
+class RegistryFactory {
+ /**
+ * @return The cluster registry implementation to use.
+ * @throws IOException
+ */
+ static Registry getRegistry(final Connection connection)
+ throws IOException {
+ String registryClass = connection.getConfiguration().get("hbase.client.registry.impl",
+ ZooKeeperRegistry.class.getName());
+ Registry registry = null;
+ try {
+ registry = (Registry)Class.forName(registryClass).newInstance();
+ } catch (Throwable t) {
+ throw new IOException(t);
+ }
+ registry.init(connection);
+ return registry;
+ }
+}
\ No newline at end of file
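
The factory reads the same "hbase.client.registry.impl" key the inlined
ConnectionManager code used to. A hedged sketch of overriding it: MyTestRegistry
is hypothetical, and since the Registry interface is package-private, any
replacement must live in org.apache.hadoop.hbase.client and expose a no-arg
constructor.

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.client.Connection;
  import org.apache.hadoop.hbase.client.ConnectionFactory;

  public class CustomRegistryExample {
    public static void main(String[] args) throws Exception {
      Configuration conf = HBaseConfiguration.create();
      // Falls back to ZooKeeperRegistry when the key is unset.
      conf.set("hbase.client.registry.impl",
          "org.apache.hadoop.hbase.client.MyTestRegistry"); // hypothetical class
      try (Connection connection = ConnectionFactory.createConnection(conf)) {
        // cluster id and hbase:meta location now resolve via MyTestRegistry
      }
    }
  }
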
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java
index 2aea19f..3a0ce69 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java
@@ -52,8 +52,8 @@ import org.apache.hadoop.hbase.util.Bytes;
* To scan everything for each row, instantiate a Scan object.
*
* To modify scanner caching for just this scan, use {@link #setCaching(int) setCaching}.
- * If caching is NOT set, we will use the caching value of the hosting {@link HTable}. See
- * {@link HTable#setScannerCaching(int)}. In addition to row caching, it is possible to specify a
+ * If caching is NOT set, we will use the caching value of the hosting {@link Table}.
+ * In addition to row caching, it is possible to specify a
* maximum result size, using {@link #setMaxResultSize(long)}. When both are used,
* single server requests are limited by either number of rows or maximum result size, whichever
* limit comes first.
@@ -478,7 +478,8 @@ public class Scan extends Query {
/**
* Set the number of rows for caching that will be passed to scanners.
- * If not set, the default setting from {@link HTable#getScannerCaching()} will apply.
+ * If not set, the Configuration setting {@link HConstants#HBASE_CLIENT_SCANNER_CACHING} will
+ * apply.
* Higher caching values will enable faster scanners but will use more memory.
* @param caching the number of rows for caching
*/
@@ -894,4 +895,19 @@ public class Scan extends Query {
return (Scan) super.setIsolationLevel(level);
}
-}
+ /**
+ * Utility that creates a Scan that will do a small scan in reverse from passed row+family
+ * looking for next closest row.
+ * @param row
+ * @param family
+ * @return An instance of Scan primed with passed row and family to
+ * scan in reverse for one row only.
+ */
+ static Scan createGetClosestRowOrBeforeReverseScan(byte[] row, byte[] family) {
+ // Scan(byte[], byte[]) is (startRow, stopRow), so the family must be added
+ // explicitly rather than passed to the constructor.
+ Scan scan = new Scan(row);
+ scan.addFamily(family);
+ scan.setSmall(true);
+ scan.setReversed(true);
+ scan.setCaching(1);
+ return scan;
+ }
+}
\ No newline at end of file
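
A sketch of the public-API equivalent of this package-private helper, mirroring
how MetaScanner above now stands in for the old HTable#getRowOrBefore; reading
hbase:meta this way is shown purely for illustration.

  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.HConstants;
  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.client.Connection;
  import org.apache.hadoop.hbase.client.ConnectionFactory;
  import org.apache.hadoop.hbase.client.Result;
  import org.apache.hadoop.hbase.client.ResultScanner;
  import org.apache.hadoop.hbase.client.Scan;
  import org.apache.hadoop.hbase.client.Table;
  import org.apache.hadoop.hbase.util.Bytes;

  public class ClosestRowBeforeExample {
    public static void main(String[] args) throws Exception {
      try (Connection connection =
               ConnectionFactory.createConnection(HBaseConfiguration.create());
           Table metaTable = connection.getTable(TableName.META_TABLE_NAME)) {
        byte[] searchRow = Bytes.toBytes(args[0]); // row to look at-or-before
        // Same shape as the helper: small, reversed, caching 1.
        Scan scan = new Scan(searchRow);
        scan.addFamily(HConstants.CATALOG_FAMILY);
        scan.setSmall(true);
        scan.setReversed(true);
        scan.setCaching(1);
        try (ResultScanner scanner = metaTable.getScanner(scan)) {
          Result closest = scanner.next(); // null if nothing sorts at or before
          System.out.println(closest);
        }
      }
    }
  }
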
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/coprocessor/AggregationClient.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/coprocessor/AggregationClient.java
index 85ce4e2..7b7cd16 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/coprocessor/AggregationClient.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/coprocessor/AggregationClient.java
@@ -19,6 +19,7 @@
package org.apache.hadoop.hbase.client.coprocessor;
+import java.io.Closeable;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.ArrayList;
@@ -36,7 +37,8 @@ import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
@@ -72,19 +74,32 @@ import com.google.protobuf.Message;
 * <p>For methods to find maximum, minimum, sum, rowcount, it returns the
* parameter type. For average and std, it returns a double value. For row
* count, it returns a long value.
+ * Call {@link #close()} when done.
*/
@InterfaceAudience.Private
-public class AggregationClient {
-
+public class AggregationClient implements Closeable {
+ // TODO: This class is not used. Move to examples?
private static final Log log = LogFactory.getLog(AggregationClient.class);
- Configuration conf;
+ private final Connection connection;
/**
* Constructor with Conf object
* @param cfg
*/
public AggregationClient(Configuration cfg) {
- this.conf = cfg;
+ try {
+ // Create a connection on construction. Will use it making each of the calls below.
+ this.connection = ConnectionFactory.createConnection(cfg);
+ } catch (IOException e) {
+ throw new RuntimeException(e);
+ }
+ }
+
+ @Override
+ public void close() throws IOException {
+ if (this.connection != null && !this.connection.isClosed()) {
+ this.connection.close();
+ }
}
/**
@@ -101,15 +116,9 @@ public class AggregationClient {
*/
 public <R, S, P extends Message, Q extends Message, T extends Message> R max(
 final TableName tableName, final ColumnInterpreter<R, S, P, Q, T> ci, final Scan scan)
- throws Throwable {
- Table table = null;
- try {
- table = new HTable(conf, tableName);
+ throws Throwable {
+ try (Table table = connection.getTable(tableName)) {
return max(table, ci, scan);
- } finally {
- if (table != null) {
- table.close();
- }
}
}
@@ -196,15 +205,9 @@ public class AggregationClient {
*/
 public <R, S, P extends Message, Q extends Message, T extends Message> R min(
 final TableName tableName, final ColumnInterpreter<R, S, P, Q, T> ci, final Scan scan)
- throws Throwable {
- Table table = null;
- try {
- table = new HTable(conf, tableName);
+ throws Throwable {
+ try (Table table = connection.getTable(tableName)) {
return min(table, ci, scan);
- } finally {
- if (table != null) {
- table.close();
- }
}
}
@@ -276,15 +279,9 @@ public class AggregationClient {
*/
 public <R, S, P extends Message, Q extends Message, T extends Message> long rowCount(
 final TableName tableName, final ColumnInterpreter<R, S, P, Q, T> ci, final Scan scan)
- throws Throwable {
- Table table = null;
- try {
- table = new HTable(conf, tableName);
- return rowCount(table, ci, scan);
- } finally {
- if (table != null) {
- table.close();
- }
+ throws Throwable {
+ try (Table table = connection.getTable(tableName)) {
+ return rowCount(table, ci, scan);
}
}
@@ -350,15 +347,9 @@ public class AggregationClient {
*/
 public <R, S, P extends Message, Q extends Message, T extends Message> S sum(
 final TableName tableName, final ColumnInterpreter<R, S, P, Q, T> ci, final Scan scan)
- throws Throwable {
- Table table = null;
- try {
- table = new HTable(conf, tableName);
- return sum(table, ci, scan);
- } finally {
- if (table != null) {
- table.close();
- }
+ throws Throwable {
+ try (Table table = connection.getTable(tableName)) {
+ return sum(table, ci, scan);
}
}
@@ -424,14 +415,8 @@ public class AggregationClient {
 private <R, S, P extends Message, Q extends Message, T extends Message> Pair<S, Long> getAvgArgs(
 final TableName tableName, final ColumnInterpreter<R, S, P, Q, T> ci, final Scan scan)
throws Throwable {
- Table table = null;
- try {
- table = new HTable(conf, tableName);
- return getAvgArgs(table, ci, scan);
- } finally {
- if (table != null) {
- table.close();
- }
+ try (Table table = connection.getTable(tableName)) {
+ return getAvgArgs(table, ci, scan);
}
}
@@ -615,14 +600,8 @@ public class AggregationClient {
 public <R, S, P extends Message, Q extends Message, T extends Message>
 double std(final TableName tableName, ColumnInterpreter<R, S, P, Q, T> ci,
 Scan scan) throws Throwable {
- Table table = null;
- try {
- table = new HTable(conf, tableName);
- return std(table, ci, scan);
- } finally {
- if (table != null) {
- table.close();
- }
+ try (Table table = connection.getTable(tableName)) {
+ return std(table, ci, scan);
}
}
@@ -728,14 +707,8 @@ public class AggregationClient {
 public <R, S, P extends Message, Q extends Message, T extends Message>
 R median(final TableName tableName, ColumnInterpreter<R, S, P, Q, T> ci,
 Scan scan) throws Throwable {
- Table table = null;
- try {
- table = new HTable(conf, tableName);
- return median(table, ci, scan);
- } finally {
- if (table != null) {
- table.close();
- }
+ try (Table table = connection.getTable(tableName)) {
+ return median(table, ci, scan);
}
}
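
Since AggregationClient now owns a Connection, callers should close it. A hedged
usage sketch: the table and family names are placeholders, and the
AggregateImplementation coprocessor is assumed to be enabled on the table.

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.client.Scan;
  import org.apache.hadoop.hbase.client.coprocessor.AggregationClient;
  import org.apache.hadoop.hbase.client.coprocessor.LongColumnInterpreter;
  import org.apache.hadoop.hbase.util.Bytes;

  public class RowCountExample {
    public static void main(String[] args) throws Throwable {
      Configuration conf = HBaseConfiguration.create();
      // AggregationClient implements Closeable, so try-with-resources
      // releases its internal Connection.
      try (AggregationClient aggregationClient = new AggregationClient(conf)) {
        Scan scan = new Scan();
        scan.addFamily(Bytes.toBytes("f")); // placeholder family
        long count = aggregationClient.rowCount(TableName.valueOf("exampleTable"),
            new LongColumnInterpreter(), scan);
        System.out.println("rows=" + count);
      }
    }
  }
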
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/package-info.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/package-info.java
index e808904..cd172b0 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/package-info.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/package-info.java
@@ -28,23 +28,26 @@ Provides HBase Client
Overview
To administer HBase, create and drop tables, list and alter tables,
- use {@link org.apache.hadoop.hbase.client.HBaseAdmin}. Once created, table access is via an instance
- of {@link org.apache.hadoop.hbase.client.HTable}. You add content to a table a row at a time. To insert,
- create an instance of a {@link org.apache.hadoop.hbase.client.Put} object. Specify value, target column
- and optionally a timestamp. Commit your update using {@link org.apache.hadoop.hbase.client.HTable#put(Put)}.
- To fetch your inserted value, use {@link org.apache.hadoop.hbase.client.Get}. The Get can be specified to be broad -- get all
- on a particular row -- or narrow; i.e. return only a single cell value. After creating an instance of
- Get, invoke {@link org.apache.hadoop.hbase.client.HTable#get(Get)}. Use
- {@link org.apache.hadoop.hbase.client.Scan} to set up a scanner -- a Cursor- like access. After
- creating and configuring your Scan instance, call {@link org.apache.hadoop.hbase.client.HTable#getScanner(Scan)} and then
- invoke next on the returned object. Both {@link org.apache.hadoop.hbase.client.HTable#get(Get)} and
- {@link org.apache.hadoop.hbase.client.HTable#getScanner(Scan)} return a
+ use {@link org.apache.hadoop.hbase.client.Admin}. Once created, table access is via an instance
+ of {@link org.apache.hadoop.hbase.client.Table}. You add content to a table a row at a time. To
+ insert, create an instance of a {@link org.apache.hadoop.hbase.client.Put} object. Specify value,
+ target column and optionally a timestamp. Commit your update using
+ {@link org.apache.hadoop.hbase.client.Table#put(Put)}.
+ To fetch your inserted value, use {@link org.apache.hadoop.hbase.client.Get}. The Get can be
+ specified to be broad -- get all on a particular row -- or narrow; i.e. return only a single cell
+ value. After creating an instance of
+ Get, invoke {@link org.apache.hadoop.hbase.client.Table#get(Get)}.
+
+ <p>Use {@link org.apache.hadoop.hbase.client.Scan} to set up a scanner -- a Cursor-like access.
+ After creating and configuring your Scan instance, call
+ {@link org.apache.hadoop.hbase.client.Table#getScanner(Scan)} and then
+ invoke next on the returned object. Both {@link org.apache.hadoop.hbase.client.Table#get(Get)}
+ and {@link org.apache.hadoop.hbase.client.Table#getScanner(Scan)} return a
{@link org.apache.hadoop.hbase.client.Result}.
-A Result is a List of {@link org.apache.hadoop.hbase.KeyValue}s. It has facility for packaging the return
-in different formats.
- Use {@link org.apache.hadoop.hbase.client.Delete} to remove content.
+
+ <p>Use {@link org.apache.hadoop.hbase.client.Delete} to remove content.
You can remove individual cells or entire families, etc. Pass it to
- {@link org.apache.hadoop.hbase.client.HTable#delete(Delete)} to execute.
+ {@link org.apache.hadoop.hbase.client.Table#delete(Delete)} to execute.
Puts, Gets and Deletes take out a lock on the target row for the duration of their operation.
Concurrent modifications to a single row are serialized. Gets and scans run concurrently without
@@ -68,8 +71,11 @@ in different formats.
import java.io.IOException;
import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Get;
-import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
@@ -87,9 +93,16 @@ public class MyLittleHBaseClient {
// be found on the CLASSPATH
Configuration config = HBaseConfiguration.create();
- // This instantiates an HTable object that connects you to
- // the "myLittleHBaseTable" table.
- HTable table = new HTable(config, "myLittleHBaseTable");
+ // Next you need a Connection to the cluster. Create one. When done with it,
+ // close it (Should start a try/finally after this creation so it gets closed
+ // for sure but leaving this out for readability's sake).
+ Connection connection = ConnectionFactory.createConnection(config);
+
+ // This instantiates a Table object that connects you to
+ // the "myLittleHBaseTable" table (TableName.valueOf turns String into TableName instance).
+ // When done with it, close it (Should start a try/finally after this creation so it gets
+ // closed for sure but leaving this out for readability's sake).
+ Table table = connection.getTable(TableName.valueOf("myLittleHBaseTable"));
// To add to a row, use Put. A Put constructor takes the name of the row
// you want to insert into as a byte array. In HBase, the Bytes class has
@@ -152,15 +165,19 @@ public class MyLittleHBaseClient {
// Thats why we have it inside a try/finally clause
scanner.close();
}
+
+ // Close your table and cluster connection.
+ table.close();
+ connection.close();
}
}
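
The example skips try/finally for readability, as its comments note; under
Java 7+ the same cleanup collapses into try-with-resources, e.g.:

  Configuration config = HBaseConfiguration.create();
  try (Connection connection = ConnectionFactory.createConnection(config);
       Table table = connection.getTable(TableName.valueOf("myLittleHBaseTable"))) {
    // same Puts, Gets and Scans as in the example above; table closes first,
    // then the connection, even if an operation throws
  }
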
 There are many other methods for putting data into and getting data out of
-HBase, but these examples should get you started. See the HTable javadoc for
+HBase, but these examples should get you started. See the Table javadoc for
 more methods. Additionally, there are methods for managing tables in the
-HBaseAdmin class.
+Admin class.

 If your client is NOT Java, then you should consider the Thrift or REST libraries.

@@ -168,20 +185,14 @@ public class MyLittleHBaseClient {
-There are many other methods for putting data into and getting data out of
-HBase, but these examples should get you started. See the HTable javadoc for
-more methods. Additionally, there are methods for managing tables in the
-HBaseAdmin class.
-
 See also the section in the HBase Reference Guide where it discusses HBase Client. It
-has section on how to access HBase from inside your multithreaded environtment
+has section on how to access HBase from inside your multithreaded environment
 how to control resources consumed client-side, etc.