diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java
index 3282838..896ef48 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java
@@ -34,11 +34,13 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.Mutation;
 import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.RegionLocator;
 import org.apache.hadoop.hbase.client.RegionReplicaUtil;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.ResultScanner;
@@ -180,7 +182,7 @@ public class MetaTableAccessor {
     if (connection == null || connection.isClosed()) {
       throw new NullPointerException("No connection");
     }
-    return new HTable(tableName, connection);
+    return connection.getTable(tableName);
   }

   /**
@@ -887,11 +889,10 @@ public class MetaTableAccessor {
    */
   public static int getRegionCount(final Configuration c, final TableName tableName)
       throws IOException {
-    HTable t = new HTable(c, tableName);
-    try {
-      return t.getRegionLocations().size();
-    } finally {
-      t.close();
+    try (Connection connection = ConnectionFactory.createConnection(c)) {
+      try (RegionLocator locator = connection.getRegionLocator(tableName)) {
+        return locator.getAllRegionLocations().size();
+      }
     }
   }
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionLoad.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionLoad.java
index 234c5ae..62e11374 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionLoad.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionLoad.java
@@ -161,15 +161,6 @@ public class RegionLoad {
   }

   /**
-   * @return the data locality of region in the regionserver.
-   */
-  public float getDataLocality() {
-    if (regionLoadPB.hasDataLocality()) {
-      return regionLoadPB.getDataLocality();
-    }
-    return 0.0f;
-  }
-
-  /**
    * @see java.lang.Object#toString()
    */
   @Override
@@ -214,8 +205,6 @@ public class RegionLoad {
         compactionProgressPct);
     sb = Strings.appendKeyValue(sb, "completeSequenceId",
         this.getCompleteSequenceId());
-    sb = Strings.appendKeyValue(sb, "dataLocality",
-        this.getDataLocality());
     return sb.toString();
   }
 }
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionFactory.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionFactory.java
index 374ce28..b489af2 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionFactory.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionFactory.java
@@ -35,7 +35,7 @@ import org.apache.hadoop.hbase.security.UserProvider;
  * A non-instantiable class that manages creation of {@link Connection}s.
  * Managing the lifecycle of the {@link Connection}s to the cluster is the responsibility of
  * the caller.
- * From this {@link Connection} {@link Table} implementations are retrieved
+ * From a {@link Connection}, {@link Table} implementations are retrieved
  * with {@link Connection#getTable(TableName)}. Example:
  * <pre>
* Connection connection = ConnectionFactory.createConnection(config);
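For reference, this is the lifecycle contract the patch standardizes on: the caller owns the heavyweight Connection, while Table and RegionLocator are lightweight per-use handles. A minimal sketch of the pattern the getRegionCount() rewrite follows, as it would look in client code (the class and table names here are hypothetical):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;

public class RegionCountExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // The caller owns the heavyweight Connection and must close it.
    try (Connection connection = ConnectionFactory.createConnection(conf)) {
      // RegionLocator is a lightweight per-table handle, also Closeable.
      try (RegionLocator locator =
          connection.getRegionLocator(TableName.valueOf("myTable"))) {
        System.out.println("regions: " + locator.getAllRegionLocations().size());
      }
    }
  }
}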
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
index 75dd72c..7136f72 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
@@ -68,9 +68,11 @@ import org.apache.hadoop.hbase.exceptions.DeserializationException;
import org.apache.hadoop.hbase.exceptions.MergeRegionException;
import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;
import org.apache.hadoop.hbase.ipc.MasterCoprocessorRpcChannel;
+import org.apache.hadoop.hbase.ipc.PayloadCarryingRpcController;
import org.apache.hadoop.hbase.ipc.RegionServerCoprocessorRpcChannel;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.RequestConverter;
+import org.apache.hadoop.hbase.protobuf.ResponseConverter;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.AdminService;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CloseRegionRequest;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CloseRegionResponse;
@@ -83,6 +85,9 @@ import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RollWALWriterReque
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RollWALWriterResponse;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.StopServerRequest;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateConfigurationRequest;
+import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ClientService;
+import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanRequest;
+import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanResponse;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription;
@@ -106,6 +111,8 @@ import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCompletedSnaps
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNamespaceDescriptorRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetSchemaAlterStatusRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetSchemaAlterStatusResponse;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableDescriptorsRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableDescriptorsResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsRestoreSnapshotDoneRequest;
@@ -661,24 +668,47 @@ public class HBaseAdmin implements Admin {
// Wait until all regions deleted
for (int tries = 0; tries < (this.numRetries * this.retryLongerMultiplier); tries++) {
try {
- // Find whether all regions are deleted.
- List&lt;RegionLocations&gt; regionLations =
- MetaScanner.listTableRegionLocations(conf, connection, tableName);
+ HRegionLocation firstMetaServer = getFirstMetaServerForTable(tableName);
+ Scan scan = MetaTableAccessor.getScanForTableName(tableName);
+ scan.addColumn(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);
+ ScanRequest request = RequestConverter.buildScanRequest(
+ firstMetaServer.getRegionInfo().getRegionName(), scan, 1, true);
+ Result[] values = null;
+ // Get a batch at a time.
+ ClientService.BlockingInterface server = connection.getClient(firstMetaServer
+ .getServerName());
+ PayloadCarryingRpcController controller = new PayloadCarryingRpcController();
+ try {
+ controller.setPriority(tableName);
+ ScanResponse response = server.scan(controller, request);
+ values = ResponseConverter.getResults(controller.cellScanner(), response);
+ } catch (ServiceException se) {
+ throw ProtobufUtil.getRemoteException(se);
+ }
// let us wait until hbase:meta table is updated and
// HMaster removes the table from its HTableDescriptors
- if (regionLations == null || regionLations.size() == 0) {
- HTableDescriptor htd = getTableDescriptorByTableName(tableName);
-
- if (htd == null) {
- // table could not be found in master - we are done.
- tableExists = false;
+ if (values == null || values.length == 0) {
+ tableExists = false;
+ GetTableDescriptorsResponse htds;
+ MasterKeepAliveConnection master = connection.getKeepAliveMasterService();
+ try {
+ GetTableDescriptorsRequest req =
+ RequestConverter.buildGetTableDescriptorsRequest(tableName);
+ htds = master.getTableDescriptors(null, req);
+ } catch (ServiceException se) {
+ throw ProtobufUtil.getRemoteException(se);
+ } finally {
+ master.close();
+ }
+ tableExists = !htds.getTableSchemaList().isEmpty();
+ if (!tableExists) {
break;
}
}
} catch (IOException ex) {
failures++;
- if(failures >= numRetries - 1) { // no more tries left
+ if(failures == numRetries - 1) { // no more tries left
if (ex instanceof RemoteException) {
throw ((RemoteException) ex).unwrapRemoteException();
} else {
@@ -2590,27 +2620,6 @@ public class HBaseAdmin implements Admin {
}
/**
- * Get tableDescriptor
- * @param tableName one table name
- * @return HTD the HTableDescriptor or null if the table not exists
- * @throws IOException if a remote or network exception occurs
- */
- private HTableDescriptor getTableDescriptorByTableName(TableName tableName)
- throws IOException {
- List&lt;TableName&gt; tableNames = new ArrayList&lt;TableName&gt;(1);
- tableNames.add(tableName);
-
- HTableDescriptor[] htdl = getTableDescriptorsByTableName(tableNames);
-
- if (htdl == null || htdl.length == 0) {
- return null;
- }
- else {
- return htdl[0];
- }
- }
-
- /**
* Get tableDescriptors
* @param names List of table names
* @return HTD[] the tableDescriptor
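Note for API consumers: the meta-scan wait loop above is HBaseAdmin-internal. Client code that needs the same "is the table really gone" check should stay on the public Admin API, roughly like this sketch (the helper name and the fixed one-second backoff are made up for illustration):

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;

public final class DeleteWaitSketch {
  // Hypothetical helper: poll the master until it stops reporting the table.
  static void waitUntilDeleted(Connection connection, TableName tableName)
      throws Exception {
    try (Admin admin = connection.getAdmin()) {
      while (admin.tableExists(tableName)) {
        Thread.sleep(1000); // arbitrary backoff; bound the wait in real code
      }
    }
  }
}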
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
index c3a94e3..dbaaffa 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
@@ -80,24 +80,23 @@ import com.google.protobuf.Service;
import com.google.protobuf.ServiceException;
/**
- *
- * HTable is no longer a client API. It is marked InterfaceAudience.Private indicating that
- * this is an HBase-internal class as defined in
- * https://hadoop.apache.org/docs/current/hadoop-project-dist/hadoop-common/InterfaceClassification.html
- * There are no guarantees for backwards source / binary compatibility and methods or class can
- * change or go away without deprecation. Use {@link Connection#getTable(TableName)}
- * to obtain an instance of {@link Table} instead of constructing an HTable directly.
- * An implementation of {@link Table}. Used to communicate with a single HBase table.
+ * An implementation of {@link Table}. Used to communicate with a single HBase table.
* Lightweight. Get as needed and just close when done.
* Instances of this class SHOULD NOT be constructed directly.
* Obtain an instance via {@link Connection}. See {@link ConnectionFactory}
* class comment for an example of how.
*
- * <p>This class is NOT thread safe for reads nor write.
+ * <p>This class is NOT thread safe for reads nor writes.
* In the case of writes (Put, Delete), the underlying write buffer can
* be corrupted if multiple threads contend over a single HTable instance.
* In the case of reads, some fields used by a Scan are shared among all threads.
*
+ * <p>HTable is no longer a client API. Use {@link Table} instead. It is marked
+ * InterfaceAudience.Private indicating that this is an HBase-internal class as defined in
+ * <a href="https://hadoop.apache.org/docs/current/hadoop-project-dist/hadoop-common/InterfaceClassification.html">Hadoop Interface Classification</a>.
+ * There are no guarantees for backwards source / binary compatibility and methods or class can
+ * change or go away without deprecation.
+ *
* @see Table
* @see Admin
* @see Connection
@@ -291,6 +290,8 @@ public class HTable implements HTableInterface, RegionLocator {
/**
* Creates an object to access a HBase table.
+ * Used by HBase internally. DO NOT USE. See {@link ConnectionFactory} class comment for how to
+ * get a {@link Table} instance (use {@link Table} instead of {@link HTable}).
* @param tableName Name of the table.
* @param connection HConnection to be used.
* @param pool ExecutorService to be used.
@@ -1799,11 +1800,11 @@ public class HTable implements HTableInterface, RegionLocator {
* @throws IOException
*/
public static void main(String[] args) throws IOException {
- Table t = new HTable(HBaseConfiguration.create(), args[0]);
- try {
- System.out.println(t.get(new Get(Bytes.toBytes(args[1]))));
- } finally {
- t.close();
+ Configuration conf = HBaseConfiguration.create();
+ try (Connection connection = ConnectionFactory.createConnection(conf)) {
+ try (Table table = connection.getTable(TableName.valueOf(args[0]))) {
+ System.out.println(table.get(new Get(Bytes.toBytes(args[1]))));
+ }
}
}
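The rewritten main() shows the read path; writes follow the same caller-managed pattern. A minimal sketch with hypothetical table, family, and qualifier names:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class PutExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Multiple resources in one try; they close in reverse order.
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Table table = connection.getTable(TableName.valueOf("myTable"))) {
      Put put = new Put(Bytes.toBytes("row1"));
      // add(family, qualifier, value); later API versions name this addColumn.
      put.add(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("v"));
      table.put(put);
    }
  }
}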
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MetaScanner.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MetaScanner.java
index 5312dfb..c697979 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MetaScanner.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MetaScanner.java
@@ -64,8 +64,7 @@ public class MetaScanner {
* @param visitor A custom visitor
* @throws IOException e
*/
- public static void metaScan(Configuration configuration,
- MetaScannerVisitor visitor)
+ public static void metaScan(Configuration configuration, MetaScannerVisitor visitor)
throws IOException {
metaScan(configuration, visitor, null, null, Integer.MAX_VALUE);
}
@@ -106,8 +105,7 @@ public class MetaScanner {
MetaScannerVisitor visitor, TableName userTableName, byte[] row,
int rowLimit)
throws IOException {
- metaScan(configuration, null, visitor, userTableName, row, rowLimit,
- TableName.META_TABLE_NAME);
+ metaScan(configuration, null, visitor, userTableName, row, rowLimit, TableName.META_TABLE_NAME);
}
/**
@@ -133,7 +131,7 @@ public class MetaScanner {
throws IOException {
boolean closeConnection = false;
- if (connection == null){
+ if (connection == null) {
connection = ConnectionFactory.createConnection(configuration);
closeConnection = true;
}
@@ -141,16 +139,16 @@ public class MetaScanner {
int rowUpperLimit = rowLimit > 0 ? rowLimit: Integer.MAX_VALUE;
// Calculate startrow for scan.
byte[] startRow;
- ResultScanner scanner = null;
- HTable metaTable = null;
- try {
- metaTable = new HTable(TableName.META_TABLE_NAME, connection, null);
+ try (Table metaTable = connection.getTable(TableName.META_TABLE_NAME)) {
if (row != null) {
// Scan starting at a particular row in a particular table
byte[] searchRow = HRegionInfo.createRegionName(tableName, row, HConstants.NINES, false);
-
- Result startRowResult = metaTable.getRowOrBefore(searchRow, HConstants.CATALOG_FAMILY);
-
+ Scan scan =
+ Scan.createGetClosestRowOrBeforeReverseScan(searchRow, HConstants.CATALOG_FAMILY);
+ Result startRowResult = null;
+ try (ResultScanner resultScanner = metaTable.getScanner(scan)) {
+ startRowResult = resultScanner.next();
+ }
if (startRowResult == null) {
throw new TableNotFoundException("Cannot find row in "+ TableName
.META_TABLE_NAME.getNameAsString()+" for table: "
@@ -184,25 +182,18 @@ public class MetaScanner {
Bytes.toStringBinary(startRow) + " for max=" + rowUpperLimit + " with caching=" + rows);
}
// Run the scan
- scanner = metaTable.getScanner(scan);
- Result result;
- int processedRows = 0;
- while ((result = scanner.next()) != null) {
- if (visitor != null) {
- if (!visitor.processRow(result)) break;
+ try (ResultScanner resultScanner = metaTable.getScanner(scan)) {
+ Result result;
+ int processedRows = 0;
+ while ((result = resultScanner.next()) != null) {
+ if (visitor != null) {
+ if (!visitor.processRow(result)) break;
+ }
+ processedRows++;
+ if (processedRows >= rowUpperLimit) break;
}
- processedRows++;
- if (processedRows >= rowUpperLimit) break;
}
} finally {
- if (scanner != null) {
- try {
- scanner.close();
- } catch (Throwable t) {
- ExceptionUtil.rethrowIfInterrupt(t);
- LOG.debug("Got exception in closing the result scanner", t);
- }
- }
if (visitor != null) {
try {
visitor.close();
@@ -211,16 +202,8 @@ public class MetaScanner {
LOG.debug("Got exception in closing the meta scanner visitor", t);
}
}
- if (metaTable != null) {
- try {
- metaTable.close();
- } catch (Throwable t) {
- ExceptionUtil.rethrowIfInterrupt(t);
- LOG.debug("Got exception in closing meta table", t);
- }
- }
if (closeConnection) {
- connection.close();
+ if (connection != null) connection.close();
}
}
}
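The scanner handling this hunk adopts is plain try-with-resources: the scanner closes on both the success and the exception path, which is exactly what the deleted finally-block boilerplate existed for. Extracted as a standalone sketch (countRows is a hypothetical helper, not part of the patch):

import java.io.IOException;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;

public final class ScanSketch {
  // Mirrors the resource handling of the patched visitor loop.
  static int countRows(Table table, Scan scan, int rowUpperLimit) throws IOException {
    int processedRows = 0;
    // try-with-resources closes the scanner on success and on exception alike.
    try (ResultScanner scanner = table.getScanner(scan)) {
      for (Result result; (result = scanner.next()) != null; ) {
        processedRows++;
        if (processedRows >= rowUpperLimit) break;
      }
    }
    return processedRows;
  }
}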
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionLocator.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionLocator.java
index 8168fe1..754beb0 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionLocator.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionLocator.java
@@ -30,7 +30,7 @@ import org.apache.hadoop.hbase.util.Pair;
/**
* Used to view region location information for a single HBase table.
- * Obtain an instance from an {@link HConnection}.
+ * Obtain an instance from a {@link Connection}.
*
* @see ConnectionFactory
* @see Connection
@@ -98,4 +98,4 @@ public interface RegionLocator extends Closeable {
* Gets the fully qualified table name instance of this table.
*/
TableName getName();
-}
+}
\ No newline at end of file
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java
index b5bd365..2098939 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java
@@ -52,8 +52,8 @@ import org.apache.hadoop.hbase.util.Bytes;
* To scan everything for each row, instantiate a Scan object.
*
* To modify scanner caching for just this scan, use {@link #setCaching(int) setCaching}.
- * If caching is NOT set, we will use the caching value of the hosting {@link HTable}. See
- * {@link HTable#setScannerCaching(int)}. In addition to row caching, it is possible to specify a
+ * If caching is NOT set, we will use the caching value of the hosting {@link Table}.
+ * In addition to row caching, it is possible to specify a
* maximum result size, using {@link #setMaxResultSize(long)}. When both are used,
* single server requests are limited by either number of rows or maximum result size, whichever
* limit comes first.
@@ -893,4 +893,19 @@ public class Scan extends Query {
return (Scan) super.setIsolationLevel(level);
}
-}
+ /**
+ * Utility that creates a Scan that will do a small scan in reverse from passed row+family
+ * looking for next closest row.
+ * @param row Row to seek to, then scan backward from
+ * @param family Column family to restrict the scan to
+ * @return An instance of Scan primed with passed row and family to
+ * scan in reverse for one row only.
+ */
+ public static Scan createGetClosestRowOrBeforeReverseScan(byte[] row, byte[] family) {
+ Scan scan = new Scan(row);
+ scan.addFamily(family);
+ scan.setSmall(true);
+ scan.setReversed(true);
+ scan.setCaching(1);
+ return scan;
+ }
+}
\ No newline at end of file
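A usage sketch for the new factory method, for a caller that wants the old getRowOrBefore semantics over the public scan API (closestRowOrBefore is a hypothetical helper):

import java.io.IOException;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;

public final class ClosestRowSketch {
  // Returns the closest row at or before the given row, or null if none.
  static Result closestRowOrBefore(Table table, byte[] row, byte[] family)
      throws IOException {
    Scan scan = Scan.createGetClosestRowOrBeforeReverseScan(row, family);
    try (ResultScanner scanner = table.getScanner(scan)) {
      return scanner.next(); // small reversed scan with caching=1: at most one row
    }
  }
}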
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/coprocessor/AggregationClient.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/coprocessor/AggregationClient.java
index 85ce4e2..d9ce737 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/coprocessor/AggregationClient.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/coprocessor/AggregationClient.java
@@ -36,7 +36,8 @@ import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
@@ -102,13 +103,9 @@ public class AggregationClient {
public <R, S, P extends Message, Q extends Message, T extends Message> R max(
final TableName tableName, final ColumnInterpreter<R, S, P, Q, T> ci, final Scan scan)
throws Throwable {
- Table table = null;
- try {
- table = new HTable(conf, tableName);
- return max(table, ci, scan);
- } finally {
- if (table != null) {
- table.close();
+ try (Connection connection = ConnectionFactory.createConnection(this.conf)) {
+ try (Table table = connection.getTable(tableName)) {
+ return max(table, ci, scan);
}
}
}
@@ -197,13 +194,9 @@ public class AggregationClient {
public <R, S, P extends Message, Q extends Message, T extends Message> R min(
final TableName tableName, final ColumnInterpreter<R, S, P, Q, T> ci, final Scan scan)
throws Throwable {
- Table table = null;
- try {
- table = new HTable(conf, tableName);
- return min(table, ci, scan);
- } finally {
- if (table != null) {
- table.close();
+ try (Connection connection = ConnectionFactory.createConnection(conf)) {
+ try (Table table = connection.getTable(tableName)) {
+ return min(table, ci, scan);
}
}
}
@@ -277,13 +270,9 @@ public class AggregationClient {
public <R, S, P extends Message, Q extends Message, T extends Message> long rowCount(
final TableName tableName, final ColumnInterpreter<R, S, P, Q, T> ci, final Scan scan)
throws Throwable {
- Table table = null;
- try {
- table = new HTable(conf, tableName);
- return rowCount(table, ci, scan);
- } finally {
- if (table != null) {
- table.close();
+ try (Connection connection = ConnectionFactory.createConnection(this.conf)) {
+ try (Table table = connection.getTable(tableName)) {
+ return rowCount(table, ci, scan);
}
}
}
@@ -351,13 +340,9 @@ public class AggregationClient {
public <R, S, P extends Message, Q extends Message, T extends Message> S sum(
final TableName tableName, final ColumnInterpreter<R, S, P, Q, T> ci, final Scan scan)
throws Throwable {
- Table table = null;
- try {
- table = new HTable(conf, tableName);
- return sum(table, ci, scan);
- } finally {
- if (table != null) {
- table.close();
+ try (Connection connection = ConnectionFactory.createConnection(this.conf)) {
+ try (Table table = connection.getTable(tableName)) {
+ return sum(table, ci, scan);
}
}
}
@@ -424,13 +409,9 @@ public class AggregationClient {
private <R, S, P extends Message, Q extends Message, T extends Message> Pair<S, Long> getAvgArgs(
final TableName tableName, final ColumnInterpreter<R, S, P, Q, T> ci, final Scan scan)
throws Throwable {
- Table table = null;
- try {
- table = new HTable(conf, tableName);
- return getAvgArgs(table, ci, scan);
- } finally {
- if (table != null) {
- table.close();
+ try (Connection connection = ConnectionFactory.createConnection(this.conf)) {
+ try (Table table = connection.getTable(tableName)) {
+ return getAvgArgs(table, ci, scan);
}
}
}
@@ -615,13 +596,9 @@ public class AggregationClient {
public <R, S, P extends Message, Q extends Message, T extends Message>
double std(final TableName tableName, ColumnInterpreter<R, S, P, Q, T> ci,
Scan scan) throws Throwable {
- Table table = null;
- try {
- table = new HTable(conf, tableName);
- return std(table, ci, scan);
- } finally {
- if (table != null) {
- table.close();
+ try (Connection connection = ConnectionFactory.createConnection(this.conf)) {
+ try (Table table = connection.getTable(tableName)) {
+ return std(table, ci, scan);
}
}
}
@@ -728,13 +705,9 @@ public class AggregationClient {
public <R, S, P extends Message, Q extends Message, T extends Message>
R median(final TableName tableName, ColumnInterpreter<R, S, P, Q, T> ci,
Scan scan) throws Throwable {
- Table table = null;
- try {
- table = new HTable(conf, tableName);
- return median(table, ci, scan);
- } finally {
- if (table != null) {
- table.close();
+ try (Connection connection = ConnectionFactory.createConnection(this.conf)) {
+ try (Table table = connection.getTable(tableName)) {
+ return median(table, ci, scan);
}
}
}
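Caller-visible behavior of the TableName overloads is unchanged; each call simply manages its own short-lived Connection now. A usage sketch, assuming the AggregateImplementation coprocessor is deployed on the target table and using hypothetical table and column names:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.coprocessor.AggregationClient;
import org.apache.hadoop.hbase.client.coprocessor.LongColumnInterpreter;
import org.apache.hadoop.hbase.util.Bytes;

public class RowCountExample {
  public static void main(String[] args) throws Throwable {
    Configuration conf = HBaseConfiguration.create();
    // Assumes the AggregateImplementation coprocessor is loaded on the table.
    AggregationClient aggregationClient = new AggregationClient(conf);
    Scan scan = new Scan();
    scan.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"));
    // The TableName overload opens and closes its own Connection internally.
    long rowCount = aggregationClient.rowCount(
        TableName.valueOf("myTable"), new LongColumnInterpreter(), scan);
    System.out.println("rows: " + rowCount);
  }
}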
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/package-info.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/package-info.java
index e808904..cd172b0 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/package-info.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/package-info.java
@@ -28,23 +28,26 @@ Provides HBase Client
<h2>Overview</h2>
To administer HBase, create and drop tables, list and alter tables,
- use {@link org.apache.hadoop.hbase.client.HBaseAdmin}. Once created, table access is via an instance
- of {@link org.apache.hadoop.hbase.client.HTable}. You add content to a table a row at a time. To insert,
- create an instance of a {@link org.apache.hadoop.hbase.client.Put} object. Specify value, target column
- and optionally a timestamp. Commit your update using {@link org.apache.hadoop.hbase.client.HTable#put(Put)}.
- To fetch your inserted value, use {@link org.apache.hadoop.hbase.client.Get}. The Get can be specified to be broad -- get all
- on a particular row -- or narrow; i.e. return only a single cell value. After creating an instance of
- Get, invoke {@link org.apache.hadoop.hbase.client.HTable#get(Get)}. Use
- {@link org.apache.hadoop.hbase.client.Scan} to set up a scanner -- a Cursor- like access. After
- creating and configuring your Scan instance, call {@link org.apache.hadoop.hbase.client.HTable#getScanner(Scan)} and then
- invoke next on the returned object. Both {@link org.apache.hadoop.hbase.client.HTable#get(Get)} and
- {@link org.apache.hadoop.hbase.client.HTable#getScanner(Scan)} return a
+ use {@link org.apache.hadoop.hbase.client.Admin}. Once created, table access is via an instance
+ of {@link org.apache.hadoop.hbase.client.Table}. You add content to a table a row at a time. To
+ insert, create an instance of a {@link org.apache.hadoop.hbase.client.Put} object. Specify value,
+ target column and optionally a timestamp. Commit your update using
+ {@link org.apache.hadoop.hbase.client.Table#put(Put)}.
+ To fetch your inserted value, use {@link org.apache.hadoop.hbase.client.Get}. The Get can be
+ specified to be broad -- get all on a particular row -- or narrow; i.e. return only a single cell
+ value. After creating an instance of
+ Get, invoke {@link org.apache.hadoop.hbase.client.Table#get(Get)}.
+
+ <p>Use {@link org.apache.hadoop.hbase.client.Scan} to set up a scanner -- a Cursor-like access.
+ After creating and configuring your Scan instance, call
+ {@link org.apache.hadoop.hbase.client.Table#getScanner(Scan)} and then
+ invoke next on the returned object. Both {@link org.apache.hadoop.hbase.client.Table#get(Get)}
+ and {@link org.apache.hadoop.hbase.client.Table#getScanner(Scan)} return a
{@link org.apache.hadoop.hbase.client.Result}.
-A Result is a List of {@link org.apache.hadoop.hbase.KeyValue}s. It has facility for packaging the return
-in different formats.
- Use {@link org.apache.hadoop.hbase.client.Delete} to remove content.
+
+ <p>Use {@link org.apache.hadoop.hbase.client.Delete} to remove content.
You can remove individual cells or entire families, etc. Pass it to
- {@link org.apache.hadoop.hbase.client.HTable#delete(Delete)} to execute.
+ {@link org.apache.hadoop.hbase.client.Table#delete(Delete)} to execute.
Puts, Gets and Deletes take out a lock on the target row for the duration of their operation.
Concurrent modifications to a single row are serialized. Gets and scans run concurrently without
@@ -68,8 +71,11 @@ in different formats.
import java.io.IOException;
import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Get;
-import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
@@ -87,9 +93,16 @@ public class MyLittleHBaseClient {
// be found on the CLASSPATH
Configuration config = HBaseConfiguration.create();
- // This instantiates an HTable object that connects you to
- // the "myLittleHBaseTable" table.
- HTable table = new HTable(config, "myLittleHBaseTable");
+ // Next you need a Connection to the cluster. Create one. When done with it,
+ // close it (Should start a try/finally after this creation so it gets closed
+ // for sure but leaving this out for readability's sake).
+ Connection connection = ConnectionFactory.createConnection(config);
+
+ // This instantiates a Table object that connects you to
+ // the "myLittleHBaseTable" table (TableName.valueOf turns String into TableName instance).
+ // When done with it, close it (Should start a try/finally after this creation so it gets
+ // closed for sure but leaving this out for readability's sake).
+ Table table = connection.getTable(TableName.valueOf("myLittleHBaseTable"));
// To add to a row, use Put. A Put constructor takes the name of the row
// you want to insert into as a byte array. In HBase, the Bytes class has
@@ -152,15 +165,19 @@ public class MyLittleHBaseClient {
// Thats why we have it inside a try/finally clause
scanner.close();
}
+
+ // Close your table and cluster connection.
+ table.close();
+ connection.close();
}
}
There are many other methods for putting data into and getting data out of
- HBase, but these examples should get you started. See the HTable javadoc for
+ HBase, but these examples should get you started. See the Table javadoc for
more methods. Additionally, there are methods for managing tables in the
- HBaseAdmin class.
+ Admin class.

If your client is NOT Java, then you should consider the Thrift or REST libraries.
@@ -168,20 +185,14 @@ public class MyLittleHBaseClient {
-There are many other methods for putting data into and getting data out of
-HBase, but these examples should get you started. See the HTable javadoc for
-more methods. Additionally, there are methods for managing tables in the
-HBaseAdmin class.
-
See also the section in the HBase Reference Guide where it discusses HBase Client. It
- has section on how to access HBase from inside your multithreaded environtment
+ has a section on how to access HBase from inside your multithreaded environment,
how to control resources consumed client-side, etc.
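The javadoc example deliberately leaves out try/finally for readability; with try-with-resources the same cleanup costs one line. A sketch of the example's read path, compressed (the table and row names are taken from the example above):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class MyTidyLittleHBaseClient {
  public static void main(String[] args) throws Exception {
    Configuration config = HBaseConfiguration.create();
    // Both resources close automatically, in reverse order, even on error.
    try (Connection connection = ConnectionFactory.createConnection(config);
         Table table = connection.getTable(TableName.valueOf("myLittleHBaseTable"))) {
      System.out.println(table.get(new Get(Bytes.toBytes("myLittleRow"))));
    }
  }
}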